Example usage for java.sql PreparedStatement executeBatch

List of usage examples for java.sql PreparedStatement executeBatch

Introduction

On this page you can find usage examples for java.sql PreparedStatement executeBatch.

Prototype

int[] executeBatch() throws SQLException;

Document

Submits a batch of commands to the database for execution and, if all commands execute successfully, returns an array of update counts.
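
The pattern shared by all of the examples below is: disable auto-commit so the batch runs in one transaction, queue parameter sets with addBatch(), submit them with executeBatch(), inspect the returned update counts, then commit or roll back. As a minimal sketch of that pattern (the table example_table and the insertAll helper are hypothetical placeholders, not taken from the projects below):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class ExecuteBatchSketch {

    public static void insertAll(Connection con, String[] names) throws SQLException {
        boolean previousAutoCommit = con.getAutoCommit();
        con.setAutoCommit(false); // run the whole batch as one transaction
        try (PreparedStatement ps = con.prepareStatement(
                "INSERT INTO example_table (name) VALUES (?)")) {
            for (String name : names) {
                ps.setString(1, name);
                ps.addBatch(); // queue one parameter set
            }
            int[] counts = ps.executeBatch(); // one update count per queued command
            for (int count : counts) {
                // Statement.SUCCESS_NO_INFO means the command ran but the
                // driver could not determine an update count.
                if (count == Statement.EXECUTE_FAILED) {
                    throw new SQLException("a batch entry failed");
                }
            }
            con.commit();
        } catch (SQLException e) {
            con.rollback(); // undo any partially applied batch
            throw e;
        } finally {
            con.setAutoCommit(previousAutoCommit);
        }
    }
}

Note that many drivers signal a failed batch entry by throwing BatchUpdateException from executeBatch() rather than returning Statement.EXECUTE_FAILED in the array, so the rollback path above covers both behaviors.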

Usage

From source file:com.wso2telco.dep.mediator.dao.SMSMessagingDAO.java

/**
 * Outbound operatorsubs entry.
 *
 * @param domainsubs
 *            the domainsubs
 * @param dnSubscriptionId
 *            the dnSubscriptionId
 * @throws SQLException
 *             if the database operation fails
 * @throws Exception
 *             if no connection could be obtained
 */
public void outboundOperatorsubsEntry(List<OperatorSubscriptionDTO> domainsubs, Integer dnSubscriptionId)
        throws SQLException, Exception {

    Connection con = null;
    PreparedStatement insertStatement = null;
    PreparedStatement updateStatement = null;

    try {

        con = DbUtils.getDbConnection(DataSourceNames.WSO2TELCO_DEP_DB);

        if (con == null) {

            throw new Exception("Connection not found");
        }

        /**
         * Set autocommit off to handle the transaction
         */
        con.setAutoCommit(false);

        StringBuilder queryString = new StringBuilder("INSERT INTO ");
        queryString.append(DatabaseTables.OUTBOUND_OPERATORSUBS.getTableName());
        queryString.append(" (dn_subscription_did, domainurl, operator) ");
        queryString.append("VALUES (?, ?, ?)");

        insertStatement = con.prepareStatement(queryString.toString());

        for (OperatorSubscriptionDTO d : domainsubs) {

            insertStatement.setInt(1, dnSubscriptionId);
            insertStatement.setString(2, d.getDomain());
            insertStatement.setString(3, d.getOperator());

            insertStatement.addBatch();
        }

        log.debug("sql query in outboundOperatorsubsEntry : " + insertStatement);

        insertStatement.executeBatch();

        StringBuilder updateQueryString = new StringBuilder("UPDATE ");
        updateQueryString.append(DatabaseTables.OUTBOUND_SUBSCRIPTIONS.getTableName());
        updateQueryString.append(" SET is_active = ?");
        updateQueryString.append(" WHERE dn_subscription_did = ?");

        updateStatement = con.prepareStatement(updateQueryString.toString());

        updateStatement.setInt(1, 1);
        updateStatement.setInt(2, dnSubscriptionId);

        log.debug("sql query in outboundOperatorsubsEntry : " + updateStatement);

        updateStatement.executeUpdate();

        /**
         * commit the transaction if all success
         */
        con.commit();
    } catch (SQLException e) {

        /**
         * rollback if an SQLException occurs; the connection may still be
         * null if it could not be obtained
         */
        if (con != null) {
            con.rollback();
        }

        log.error("database operation error in outboundOperatorsubsEntry : ", e);
        throw e;
    } catch (Exception e) {

        /**
         * rollback if any other Exception occurs
         */
        if (con != null) {
            con.rollback();
        }

        log.error("error in outboundOperatorsubsEntry : ", e);
        throw e;
    } finally {

        DbUtils.closeAllConnections(insertStatement, con, null);
        DbUtils.closeAllConnections(updateStatement, null, null);
    }
}

From source file:com.wso2telco.dep.mediator.dao.SMSMessagingDAO.java

public boolean operatorsubsEntry(List<OperatorSubscriptionDTO> domainsubs, Integer dnSubscriptionId)
        throws SQLException, Exception {

    Connection con = null;
    PreparedStatement insertStatement = null;
    PreparedStatement updateStatement = null;

    try {

        con = DbUtils.getDbConnection(DataSourceNames.WSO2TELCO_DEP_DB);

        if (con == null) {

            throw new Exception("Connection not found");
        }

        /**
         * Set autocommit off to handle the transaction
         */
        con.setAutoCommit(false);

        StringBuilder queryString = new StringBuilder("INSERT INTO ");
        queryString.append(DatabaseTables.OUTBOUND_OPERATORSUBS.getTableName());
        queryString.append(" (dn_subscription_did, domainurl, operator) ");
        queryString.append("VALUES (?, ?, ?)");

        insertStatement = con.prepareStatement(queryString.toString());

        for (OperatorSubscriptionDTO d : domainsubs) {

            insertStatement.setInt(1, dnSubscriptionId);
            insertStatement.setString(2, d.getDomain());
            insertStatement.setString(3, d.getOperator());

            insertStatement.addBatch();
        }

        log.debug("sql query in outboundOperatorsubsEntry : " + insertStatement);

        insertStatement.executeBatch();

        StringBuilder updateQueryString = new StringBuilder("UPDATE ");
        updateQueryString.append(DatabaseTables.OUTBOUND_SUBSCRIPTIONS.getTableName());
        updateQueryString.append(" SET is_active = ?");
        updateQueryString.append(" WHERE dn_subscription_did = ?");

        updateStatement = con.prepareStatement(updateQueryString.toString());

        updateStatement.setInt(1, 1);
        updateStatement.setInt(2, dnSubscriptionId);

        log.debug("sql query in outboundOperatorsubsEntry : " + updateStatement);

        updateStatement.executeUpdate();

        /**
         * commit the transaction if all success
         */
        con.commit();
    } catch (SQLException e) {

        /**
         * rollback if an SQLException occurs; the connection may still be
         * null if it could not be obtained
         */
        if (con != null) {
            con.rollback();
        }

        log.error("database operation error in operatorsubsEntry : ", e);
        throw e;
    } catch (Exception e) {

        /**
         * rollback if any other Exception occurs
         */
        if (con != null) {
            con.rollback();
        }

        log.error("error in operatorsubsEntry : ", e);
        throw e;
    } finally {

        DbUtils.closeAllConnections(insertStatement, con, null);
        DbUtils.closeAllConnections(updateStatement, null, null);
    }
    return true;
}

From source file:org.cerberus.crud.dao.impl.TestCaseCountryPropertiesDAO.java

@Override
public Answer createTestCaseCountryPropertiesBatch(List<TestCaseCountryProperties> listOfPropertiesToInsert) {
    Answer answer = new Answer();
    MessageEvent rs = null;
    StringBuilder query = new StringBuilder();
    query.append(
            "INSERT INTO testcasecountryproperties (`Test`,`TestCase`,`Country`,`Property` , `Description`, `Type`");
    query.append(",`Database`,`Value1`,`Value2`,`Length`,`RowLimit`,`Nature`,`RetryNb`,`RetryPeriod`) ");
    query.append("VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)");

    Connection connection = this.databaseSpring.connect();
    try {
        PreparedStatement preStat = connection.prepareStatement(query.toString());
        try {
            for (TestCaseCountryProperties prop : listOfPropertiesToInsert) {
                preStat.setString(1, prop.getTest());
                preStat.setString(2, prop.getTestCase());
                preStat.setString(3, prop.getCountry());
                preStat.setString(4, prop.getProperty());
                preStat.setString(5, prop.getDescription());
                preStat.setString(6, prop.getType());
                preStat.setString(7, prop.getDatabase());
                preStat.setString(8, prop.getValue1());
                preStat.setString(9, prop.getValue2());
                preStat.setInt(10, prop.getLength());
                preStat.setInt(11, prop.getRowLimit());
                preStat.setString(12, prop.getNature());
                preStat.setInt(13, prop.getRetryNb());
                preStat.setInt(14, prop.getRetryPeriod());

                preStat.addBatch();
            }

            // execute the batch once and collect one update count per statement;
            // calling executeBatch() twice would submit an empty second batch
            // and overwrite the real update counts with an empty array
            int[] affectedRows = preStat.executeBatch();

            //verify if some of the statements failed
            boolean someFailed = ArrayUtils.contains(affectedRows, 0)
                    || ArrayUtils.contains(affectedRows, Statement.EXECUTE_FAILED);

            if (!someFailed) {
                rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_OK);
                rs.setDescription(
                        rs.getDescription().replace("%ITEM%", "Property").replace("%OPERATION%", "CREATE"));
            } else {
                rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_ERROR_EXPECTED);
                rs.setDescription(rs.getDescription().replace("%ITEM%", "Property")
                        .replace("%OPERATION%", "CREATE")
                        .replace("%REASON%", "Some problem occurred while creating the new property! "));
            }

        } catch (SQLException exception) {
            rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_ERROR_UNEXPECTED);
            rs.setDescription(
                    rs.getDescription().replace("%DESCRIPTION%", "It was not possible to update table."));
            LOG.error("Unable to execute query : " + exception.toString());
        } finally {
            if (preStat != null) {
                preStat.close();
            }
        }
    } catch (SQLException exception) {
        rs = new MessageEvent(MessageEventEnum.DATA_OPERATION_ERROR_UNEXPECTED);
        rs.setDescription(rs.getDescription().replace("%DESCRIPTION%", "It was not possible to update table."));
        LOG.error("Unable to execute query : " + exception.toString());
    } finally {
        try {
            if (!this.databaseSpring.isOnTransaction()) {
                if (connection != null) {
                    connection.close();
                }
            }
        } catch (SQLException e) {
            LOG.warn(e.toString());
        }
    }

    answer.setResultMessage(rs);
    return answer;
}

From source file:HSqlManager.java

private static void commonInitialize(int bps, Connection connection) throws SQLException, IOException {
    String base = new File("").getAbsolutePath();
    CSV.makeDirectory(new File(base + "/PhageData"));
    INSTANCE = ImportPhagelist.getInstance();
    INSTANCE.parseAllPhages(bps);
    written = true;
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    stat.execute("SET FILES LOG FALSE\n");
    PreparedStatement st = db.prepareStatement("Insert INTO Primerdb.Primers"
            + "(Bp,Sequence, CommonP, UniqueP, Picked, Strain, Cluster)" + " Values(?,?,true,false,false,?,?)");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
    }
    // iterate strains sequentially: the shared PreparedStatement st is not thread-safe
    phages.stream().map(x -> x[0]).collect(Collectors.toSet()).forEach(x -> {
        phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet()).forEach(z -> {
            try {
                List<String> clustphages = phages.stream().filter(a -> a[0].equals(x) && a[1].equals(z))
                        .map(a -> a[2]).collect(Collectors.toList());
                Set<String> primers = Collections.synchronizedSet(CSV
                        .readCSV(base + "/PhageData/" + Integer.toString(bps) + clustphages.get(0) + ".csv"));
                clustphages.remove(0);
                clustphages.parallelStream().forEach(phage -> {
                    primers.retainAll(
                            CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv"));
                });
                int i = 0;
                for (CharSequence a : primers) {
                    try {
                        // queue this primer for batch insert
                        st.setInt(1, bps);
                        st.setString(2, a.toString());
                        st.setString(3, x);
                        st.setString(4, z);
                        st.addBatch();
                    } catch (SQLException e) {
                        e.printStackTrace();
                        System.out.println("Error occurred at " + x + " " + z);
                    }
                    i++;
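                    // flush and commit every 1000 queued inserts to bound memory use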
                    if (i == 1000) {
                        i = 0;
                        st.executeBatch();
                        db.commit();
                    }
                }
                if (i > 0) {
                    st.executeBatch();
                    db.commit();
                }
            } catch (SQLException e) {
                e.printStackTrace();
                System.out.println("Error occurred at " + x + " " + z);
            }
        });
    });
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Common Updated");
}

From source file:hoot.services.db.DbUtils.java

public static void batchRecordsDirectWays(final long mapId, final List<?> records,
        final RecordBatchType recordBatchType, Connection conn, int maxRecordBatchSize) throws Exception {
    PreparedStatement ps = null;
    try {
        String sql = null;
        long execResult = -1;
        //conn.setAutoCommit(false);
        int count = 0;

        switch (recordBatchType) {
        case INSERT:

            sql = "insert into current_ways_" + mapId
                    + " (id, changeset_id, \"timestamp\", visible, version, tags) "
                    + "values (?, ?, ?, ?, ?, ?)";

            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentWays way = (CurrentWays) o;

                ps.setLong(1, way.getId());
                ps.setLong(2, way.getChangesetId());
                ps.setTimestamp(3, way.getTimestamp());
                ps.setBoolean(4, way.getVisible());
                ps.setLong(5, way.getVersion());

                Map<String, String> tags = (Map<String, String>) way.getTags();

                String hstoreStr = "";
                Iterator it = tags.entrySet().iterator();
                while (it.hasNext()) {
                    Map.Entry pairs = (Map.Entry) it.next();
                    if (hstoreStr.length() > 0) {
                        hstoreStr += ",";
                    }
                    hstoreStr += "\"" + pairs.getKey() + "\"=>\"" + pairs.getValue() + "\"";
                }
                ps.setObject(6, hstoreStr, Types.OTHER);
                ps.addBatch();

                if (maxRecordBatchSize > -1) {
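                    // flush the queued rows periodically so a large record
                    // list does not accumulate in driver memory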
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                    }
                }

            }

            break;

        case UPDATE:

            sql = "update current_ways_" + mapId
                    + " set changeset_id=?, visible=?, \"timestamp\"=?, version=?, tags=? " + "where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentWays way = (CurrentWays) o;

                ps.setLong(1, way.getChangesetId());
                ps.setBoolean(2, way.getVisible());
                ps.setTimestamp(3, way.getTimestamp());
                ps.setLong(4, way.getVersion());

                Map<String, String> tags = (Map<String, String>) way.getTags();

                String hstoreStr = "";
                Iterator it = tags.entrySet().iterator();
                while (it.hasNext()) {
                    Map.Entry pairs = (Map.Entry) it.next();
                    if (hstoreStr.length() > 0) {
                        hstoreStr += ",";
                    }
                    hstoreStr += "\"" + pairs.getKey() + "\"=>\"" + pairs.getValue() + "\"";
                }
                ps.setObject(5, hstoreStr, Types.OTHER);

                ps.setLong(6, way.getId());

                ps.addBatch();

                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                    }
                }
            }

            break;

        case DELETE:

            sql = "delete from current_ways_" + mapId + " where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentWays way = (CurrentWays) o;

                ps.setLong(1, way.getId());

                ps.addBatch();

                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                    }
                }

            }

            break;

        default:
            throw new Exception("");
        }

        ps.executeBatch();
        //conn.commit();
    } catch (Exception e) {
        conn.rollback();
        String msg = "Error executing batch query.";
        msg += "  " + e.getMessage();
        // e.getCause() may be null (e.g. for the Exception thrown in the default case)
        if (e.getCause() != null) {
            msg += " Cause:" + e.getCause().toString();
        }
        throw new Exception(msg);
    } finally {
        if (ps != null) {
            ps.close();
        }
        //conn.setAutoCommit(true);
    }
}

From source file:cc.tooyoung.common.db.JdbcTemplate.java

@SuppressWarnings("unchecked")
public int[] batchUpdate(String sql, final BatchPreparedStatementSetter pss) throws DataAccessException {
    if (ApiLogger.isTraceEnabled()) {
        ApiLogger.trace("Executing SQL batch update [" + sql + "]");
    }

    return (int[]) execute(sql, new PreparedStatementCallback() {
        public Object doInPreparedStatement(PreparedStatement ps) throws SQLException {
            try {
                int batchSize = pss.getBatchSize();
                InterruptibleBatchPreparedStatementSetter ipss = (pss instanceof InterruptibleBatchPreparedStatementSetter
                        ? (InterruptibleBatchPreparedStatementSetter) pss
                        : null);
                if (JdbcUtils.supportsBatchUpdates(ps.getConnection())) {
                    for (int i = 0; i < batchSize; i++) {
                        pss.setValues(ps, i);
                        if (ipss != null && ipss.isBatchExhausted(i)) {
                            break;
                        }
                        ps.addBatch();
                    }
                    return ps.executeBatch();
                } else {
                    List rowsAffected = new ArrayList();
                    for (int i = 0; i < batchSize; i++) {
                        pss.setValues(ps, i);
                        if (ipss != null && ipss.isBatchExhausted(i)) {
                            break;
                        }
                        rowsAffected.add(new Integer(ps.executeUpdate()));
                    }
                    int[] rowsAffectedArray = new int[rowsAffected.size()];
                    for (int i = 0; i < rowsAffectedArray.length; i++) {
                        rowsAffectedArray[i] = ((Integer) rowsAffected.get(i)).intValue();
                    }
                    return rowsAffectedArray;
                }
            } finally {
                if (pss instanceof ParameterDisposer) {
                    ((ParameterDisposer) pss).cleanupParameters();
                }
            }
        }
    }, true);
}

From source file:org.entrystore.rowstore.store.impl.PgDataset.java

/**
 * @see Dataset#populate(File)
 */
@Override
public boolean populate(File csvFile) throws IOException {
    if (csvFile == null) {
        throw new IllegalArgumentException("Argument must not be null");
    }

    String dataTable = getDataTable();
    if (dataTable == null) {
        log.error("Dataset has no data table assigned");
        return false;
    }

    setStatus(EtlStatus.PROCESSING);

    Connection conn = null;
    PreparedStatement stmt = null;
    CSVReader cr = null;
    try {
        conn = rowstore.getConnection();
        cr = new CSVReader(new FileReader(csvFile), ',', '"');
        int lineCount = 0;
        String[] labels = null;
        String[] line;

        conn.setAutoCommit(false);
        stmt = conn.prepareStatement("INSERT INTO " + dataTable + " (rownr, data) VALUES (?, ?)");
        while ((line = cr.readNext()) != null) {
            if (lineCount == 0) {
                labels = line;
            } else {
                JSONObject jsonLine = null;
                try {
                    jsonLine = csvLineToJsonObject(line, labels);
                } catch (Exception e) {
                    log.error(e.getMessage());
                    log.info("Rolling back transaction");
                    conn.rollback();
                    setStatus(EtlStatus.ERROR);
                    return false;
                }
                stmt.setInt(1, lineCount);
                PGobject jsonb = new PGobject();
                jsonb.setType("jsonb");
                jsonb.setValue(jsonLine.toString());
                stmt.setObject(2, jsonb);
                log.debug("Adding to batch: " + stmt);
                stmt.addBatch();
                // we execute the batch every 100th line
                if ((lineCount % 100) == 0) {
                    log.debug("Executing: " + stmt);
                    stmt.executeBatch();
                }
            }
            lineCount++;
        }
        // in case there are some inserts left to be sent (i.e.
        // batch size above was smaller than 100 when loop ended)
        log.debug("Executing: " + stmt);
        stmt.executeBatch();

        // we create an index over the data
        createIndex(conn, dataTable, labels);

        // we commit the transaction and free the resources of the statement
        conn.commit();

        setStatus(EtlStatus.AVAILABLE);
        return true;
    } catch (SQLException e) {
        SqlExceptionLogUtil.error(log, e);
        try {
            log.info("Rolling back transaction");
            conn.rollback();
        } catch (SQLException e1) {
            SqlExceptionLogUtil.error(log, e1);
        }
        setStatus(EtlStatus.ERROR);
        return false;
    } finally {
        if (cr != null) {
            try {
                cr.close();
            } catch (IOException e) {
                log.error(e.getMessage());
            }
        }
        if (stmt != null) {
            try {
                stmt.close();
            } catch (SQLException e) {
                SqlExceptionLogUtil.error(log, e);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                SqlExceptionLogUtil.error(log, e);
            }
        }
    }
}

From source file:hoot.services.db.DbUtils.java

public static void batchRecordsDirectRelations(final long mapId, final List<?> records,
        final RecordBatchType recordBatchType, Connection conn, int maxRecordBatchSize) throws Exception {
    PreparedStatement ps = null;
    try {
        String sql = null;
        long execResult = -1;
        //conn.setAutoCommit(false);
        int count = 0;

        switch (recordBatchType) {
        case INSERT:

            sql = "insert into current_relations_" + mapId
                    + " (id, changeset_id, \"timestamp\", visible, version, tags) "
                    + "values (?, ?, ?, ?, ?, ?)";

            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentRelations rel = (CurrentRelations) o;

                ps.setLong(1, rel.getId());
                ps.setLong(2, rel.getChangesetId());
                ps.setTimestamp(3, rel.getTimestamp());
                ps.setBoolean(4, rel.getVisible());
                ps.setLong(5, rel.getVersion());

                Map<String, String> tags = (Map<String, String>) rel.getTags();

                String hstoreStr = "";
                Iterator it = tags.entrySet().iterator();
                while (it.hasNext()) {
                    Map.Entry pairs = (Map.Entry) it.next();
                    if (hstoreStr.length() > 0) {
                        hstoreStr += ",";
                    }
                    hstoreStr += "\"" + pairs.getKey() + "\"=>\"" + pairs.getValue() + "\"";
                }
                ps.setObject(6, hstoreStr, Types.OTHER);
                ps.addBatch();

                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                    }
                }

            }

            break;

        case UPDATE:

            sql = "update current_relations_" + mapId
                    + " set changeset_id=?, visible=?, \"timestamp\"=?, version=?, tags=? " + "where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentRelations rel = (CurrentRelations) o;

                ps.setLong(1, rel.getChangesetId());
                ps.setBoolean(2, rel.getVisible());
                ps.setTimestamp(3, rel.getTimestamp());
                ps.setLong(4, rel.getVersion());

                Map<String, String> tags = (Map<String, String>) rel.getTags();

                String hstoreStr = "";
                Iterator it = tags.entrySet().iterator();
                while (it.hasNext()) {
                    Map.Entry pairs = (Map.Entry) it.next();
                    if (hstoreStr.length() > 0) {
                        hstoreStr += ",";
                    }
                    hstoreStr += "\"" + pairs.getKey() + "\"=>\"" + pairs.getValue() + "\"";
                }
                ps.setObject(5, hstoreStr, Types.OTHER);

                ps.setLong(6, rel.getId());

                ps.addBatch();

                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                    }
                }
            }

            break;

        case DELETE:

            sql = "delete from current_relations_" + mapId + " where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentRelations rel = (CurrentRelations) o;

                ps.setLong(1, rel.getId());

                ps.addBatch();

                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                    }
                }

            }

            break;

        default:
            throw new Exception("");
        }

        ps.executeBatch();
        //conn.commit();
    } catch (Exception e) {
        conn.rollback();
        String msg = "Error executing batch query.";
        msg += "  " + e.getMessage();
        // e.getCause() may be null (e.g. for the Exception thrown in the default case)
        if (e.getCause() != null) {
            msg += " Cause:" + e.getCause().toString();
        }
        throw new Exception(msg);
    } finally {
        if (ps != null) {
            ps.close();
        }
        //conn.setAutoCommit(true);
    }
}

From source file:AIR.Common.DB.AbstractDLL.java

protected void executePreparedStatementBatch(SQLConnection connection, String query,
        List<Map<Integer, Object>> paramsList) throws ReturnStatusException {
    PreparedStatement prepStmt = null;
    try {
        boolean preexistingAutoCommitMode = connection.getAutoCommit();
        connection.setAutoCommit(false);
        prepStmt = connection.prepareStatement(query);

        if (paramsList != null) {
            for (Map<Integer, Object> params : paramsList) {
                Iterator<Entry<Integer, Object>> param = params.entrySet().iterator();
                while (param.hasNext()) {
                    Entry<Integer, Object> entry = param.next();
                    if (entry.getValue() instanceof String) {
                        prepStmt.setString(entry.getKey(), entry.getValue().toString());
                    } else if (entry.getValue() instanceof Integer) {
                        prepStmt.setInt(entry.getKey(), (Integer) entry.getValue());
                    } else if (entry.getValue() instanceof Date) {
                        prepStmt.setString(entry.getKey(), String.format("%s", AbstractDateUtilDll
                                .getDateAsFormattedMillisecondsString((Date) entry.getValue())));
                    } else if (entry.getValue() instanceof UUID) {
                        String newStr = entry.getValue().toString().replaceAll("-", "");
                        prepStmt.setBytes(entry.getKey(), DatatypeConverter.parseHexBinary(newStr));
                    } else if (entry.getValue() instanceof Boolean) {
                        prepStmt.setBoolean(entry.getKey(), (Boolean) entry.getValue());
                    }

                }
                prepStmt.addBatch();
            }
        }
        prepStmt.executeBatch();
        prepStmt.close();
        connection.commit();
        // reset autocommit.
        connection.setAutoCommit(preexistingAutoCommitMode);
    } catch (SQLException exp) {
        throw new ReturnStatusException(exp);
    } finally {
        if (prepStmt != null)
            try {
                prepStmt.close();
            } catch (SQLException e) {
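                // ignore failures while closing the statement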

            }
    }
}

From source file:com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema.java

/**
 * @param conn
 *            - the db connection to use
 * @param filesToDiscover
 *            - list of files to insert
 * @param isInitialList
 *            - true if the list is generated by what the user selected/specified in the job
 *            definition
 * @return - the max RECORD_ID inserted
 * @throws DatabaseException
 *             -
 * @throws SQLException
 *             - if a db error occurred
 */
private long insertFilesToDiscover(PooledDbConnection conn, Collection<ArcProcessFile> filesToDiscover,
        boolean isInitialList) throws DatabaseException, SQLException {

    PreparedStatement preparedStatement = conn.prepareStatement(INSERT_FILES_TO_DISCOVER_STMT_NAME,
            insertFilesToDiscoverySql);
    // the last record id inserted; we're going to increment back up to this
    long recordId = getNextBlockOfDbRecordIds(filesToDiscover.size());
    int maxPathDepth = 0;
    for (ArcProcessFile file : filesToDiscover) {
        int pathDepth = file.getPathDepth();
        maxPathDepth = Math.max(maxPathDepth, pathDepth);
        recordId++;
        setInsertFilesToDiscoverySqlParams(file, preparedStatement, isInitialList, recordId, pathDepth);
        preparedStatement.addBatch();
    }

    // execute the batch statement we created in the for loop
    preparedStatement.executeBatch();

    if (!isInitialList) {
        // now update overall job stats to reflect these changes. A "select count(*)" from a
        // large table is a table scan! so we keep track of the count ourselves
        ManagedJobsSchema.getInstance().updateDiscoveredObjCnt(conn, jobId, filesToDiscover.size(), recordId,
                maxPathDepth);
    }

    return recordId;
}