Example usage for java.sql PreparedStatement addBatch

Introduction

On this page you can find usage examples for the java.sql PreparedStatement addBatch method.

Prototype

void addBatch() throws SQLException;

Documentation

Adds a set of parameters to this PreparedStatement object's batch of commands.
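
Before the project-specific examples, here is a minimal, self-contained sketch of the basic pattern: bind parameters, queue each row with addBatch(), then send all queued rows with executeBatch(). The in-memory database URL, table, and column names are placeholders, not taken from any example on this page.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Arrays;
import java.util.List;

public class AddBatchSketch {
    public static void main(String[] args) throws SQLException {
        List<String> names = Arrays.asList("alice", "bob", "carol");
        // Placeholder connection; substitute your own driver and URL.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (Statement ddl = conn.createStatement()) {
                ddl.execute("CREATE TABLE users (name VARCHAR(100))");
            }
            conn.setAutoCommit(false); // commit the whole batch atomically
            try (PreparedStatement ps = conn.prepareStatement(
                    "INSERT INTO users (name) VALUES (?)")) {
                for (String name : names) {
                    ps.setString(1, name); // bind this row's parameters
                    ps.addBatch();         // queue the bound parameter set
                }
                ps.executeBatch(); // send all queued parameter sets together
                conn.commit();
            }
        }
    }
}

Most of the examples below add one refinement: they call executeBatch() every N rows to bound memory use, then flush whatever remains after the loop.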

Usage

From source file:it.cnr.icar.eric.server.persistence.rdb.SlotDAO.java

/**
 * @param parentInsert Set to true when the Slot insert is part of a new
 * RegistryObject insert (i.e. in the case of a SubmitObjectsRequest). Set it
 * to false in the case of an AddSlotsRequest, where the parent of the slot is
 * expected to have been submitted by a previous SubmitObjectsRequest. In the
 * latter case this method checks whether the parents of the slots exist.
 */
public void insert(List<?> slots, boolean parentInsert) throws RegistryException {
    PreparedStatement pstmt = null;

    String parentId = (String) parent;

    if (slots.size() == 0) {
        return;
    }

    try {
        String sql = "INSERT INTO " + getTableName() + " (sequenceId, " + "name_, slotType, value, parent)"
                + " VALUES(?, ?, ?, ?, ?)";
        pstmt = context.getConnection().prepareStatement(sql);

        List<String> duplicateSlotsNames = getDuplicateSlots(slots);

        if (duplicateSlotsNames.size() > 0) {
            // Some slots have duplicate names
            throw new DuplicateSlotsException(parentId, duplicateSlotsNames);
        }

        RegistryObjectDAO roDAO = new RegistryObjectDAO(context);

        // Check whether the parent exists in the database, in case the parent
        // was inserted by a previous SubmitObjectsRequest
        // (i.e. in the case of AddSlotsRequest)
        if (!parentInsert && !roDAO.registryObjectExist(parentId)) {
            // The parent does not exist
            throw new SlotsParentNotExistException(parentId);
        }

        List<String> slotsNamesAlreadyExist = slotsExist(parentId, slots);

        if (slotsNamesAlreadyExist.size() > 0) {
            // Some slots for this RegistryObject already exist
            throw new SlotsExistException(parentId, slotsNamesAlreadyExist);
        }

        Iterator<?> iter = slots.iterator();
        @SuppressWarnings("unused")
        Vector<Object> slotNames = new Vector<Object>();

        while (iter.hasNext()) {
            SlotType1 slot = (SlotType1) iter.next();
            String slotName = slot.getName();
            String slotType = slot.getSlotType();
            List<String> values = slot.getValueList().getValue();
            int size = values.size();

            for (int j = 0; j < size; j++) {
                String value = values.get(j);
                pstmt.setInt(1, j);
                pstmt.setString(2, slotName);
                pstmt.setString(3, slotType);
                pstmt.setString(4, value);
                pstmt.setString(5, parentId);

                log.trace("stmt = " + pstmt.toString());
                pstmt.addBatch();
            }
        }

        if (slots.size() > 0) {
            @SuppressWarnings("unused")
            int[] updateCounts = pstmt.executeBatch();
        }
    } catch (SQLException e) {
        log.error(ServerResourceBundle.getInstance().getString("message.CaughtException1"), e);
        throw new RegistryException(e);
    } finally {
        closeStatement(pstmt);
    }
}

From source file:com.streamsets.pipeline.lib.jdbc.JdbcMultiRowRecordWriter.java

@SuppressWarnings("unchecked")
private void processPartition(Connection connection, Multimap<Long, Record> partitions, Long partitionKey,
        List<OnRecordErrorException> errorRecords) throws SQLException, OnRecordErrorException {
    Collection<Record> partition = partitions.get(partitionKey);
    // Fetch the base insert query for this partition.
    SortedMap<String, String> columnsToParameters = getFilteredColumnsToParameters(getColumnsToParameters(),
            partition.iterator().next());

    // put all the records in a queue for consumption
    LinkedList<Record> queue = new LinkedList<>(partition);

    // compute number of rows per batch
    if (columnsToParameters.isEmpty()) {
        throw new OnRecordErrorException(Errors.JDBCDEST_22);
    }
    int maxRowsPerBatch = maxPrepStmtParameters / columnsToParameters.size();
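    // For example, with maxPrepStmtParameters = 2000 and 10 mapped columns, each batch holds at most 200 rows.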

    PreparedStatement statement = null;

    // parameters are indexed starting with 1
    int paramIdx = 1;
    int rowCount = 0;
    while (!queue.isEmpty()) {
        // we're at the start of a batch.
        if (statement == null) {
            // instantiate the new statement
            statement = generatePreparedStatement(columnsToParameters,
                    // the next batch will have either the max number of records, or however many are left.
                    Math.min(maxRowsPerBatch, queue.size()), getTableName(), connection);
        }

        // process the next record into the current statement
        Record record = queue.removeFirst();
        for (String column : columnsToParameters.keySet()) {
            Field field = record.get(getColumnsToFields().get(column));
            Field.Type fieldType = field.getType();
            Object value = field.getValue();

            try {
                switch (fieldType) {
                case LIST:
                    List<Object> unpackedList = unpackList((List<Field>) value);
                    Array array = connection.createArrayOf(getSQLTypeName(fieldType), unpackedList.toArray());
                    statement.setArray(paramIdx, array);
                    break;
                case DATE:
                case DATETIME:
                    // Java Date types are not accepted by JDBC drivers, so we need to convert to java.sql.Date
                    java.util.Date date = field.getValueAsDatetime();
                    statement.setObject(paramIdx, new java.sql.Date(date.getTime()));
                    break;
                default:
                    statement.setObject(paramIdx, value, getColumnType(column));
                    break;
                }
            } catch (SQLException e) {
                LOG.error(Errors.JDBCDEST_23.getMessage(), column, fieldType.toString(), e);
                throw new OnRecordErrorException(record, Errors.JDBCDEST_23, column, fieldType.toString());
            }
            ++paramIdx;
        }

        rowCount++;

        // check if we've filled up the current batch
        if (rowCount == maxRowsPerBatch) {
            // time to execute the current batch
            statement.addBatch();
            statement.executeBatch();
            statement.close();
            statement = null;

            // reset our counters
            rowCount = 0;
            paramIdx = 1;
        }
    }

    // check if there are any records left over. this happens whenever the partition doesn't contain
    // an exact multiple of maxRowsPerBatch records.
    if (statement != null) {
        statement.addBatch();
        statement.executeBatch();
        statement.close();
    }
}
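
Several of the later examples (HSqlManager and both runDml methods below) use the same flush-on-boundary pattern shown above: execute the batch whenever it reaches a size limit, then flush whatever is left after the loop. A stripped-down sketch of just that pattern, with a hypothetical table and an assumed batch size:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

public class BatchFlushSketch {
    private static final int BATCH_SIZE = 1000; // assumed limit, not taken from any example here

    static void writeRows(Connection conn, List<Object[]> rows) throws SQLException {
        // Hypothetical table and columns, for illustration only.
        try (PreparedStatement ps = conn.prepareStatement(
                "INSERT INTO events (id, payload) VALUES (?, ?)")) {
            int pending = 0;
            for (Object[] row : rows) {
                ps.setObject(1, row[0]);
                ps.setObject(2, row[1]);
                ps.addBatch();
                if (++pending == BATCH_SIZE) { // batch is full: send it now
                    ps.executeBatch();
                    pending = 0;
                }
            }
            if (pending > 0) { // leftover rows when the total isn't a multiple of BATCH_SIZE
                ps.executeBatch();
            }
        }
    }
}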

From source file:HSqlManager.java

public static void uniqueDB(Connection connection, int bps) throws ClassNotFoundException, SQLException,
        InstantiationException, IllegalAccessException, IOException {
    DpalLoad.main(new String[1]);
    HSqlPrimerDesign.Dpal_Inst = DpalLoad.INSTANCE_WIN64;
    String base = new File("").getAbsolutePath();
    if (!written) {
        CSV.makeDirectory(new File(base + "/PhageData"));
        INSTANCE.readFileAll(INSTANCE.path).stream().forEach(x -> {
            try {
                CSV.writeDataCSV(x[1], Fasta.process(x[1], bps), bps);
            } catch (IOException e) {
                e.printStackTrace();
            }
        });
    }
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    PrintWriter log = new PrintWriter(new File("javalog.log"));
    stat.execute("SET FILES LOG FALSE;\n");
    PreparedStatement st = db
            .prepareStatement("UPDATE Primerdb.Primers" + " SET UniqueP = true, Tm = ?, GC =?, Hairpin =?"
                    + "WHERE Cluster = ? and Strain = ? and " + "Sequence = ? and Bp = ?");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
    }
    phages.stream().map(x -> x[0]).collect(Collectors.toSet()).stream().forEach(x -> {
        phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet()).parallelStream()
                .forEach(z -> {
                    try {
                        Set<String> nonclustphages = phages.stream()
                                .filter(a -> a[0].equals(x) && !a[1].equals(z)).map(a -> a[2])
                                .collect(Collectors.toSet());
                        ResultSet resultSet = stat.executeQuery("Select Sequence from primerdb.primers"
                                + " where Strain ='" + x + "' and Cluster ='" + z + "' and CommonP = true"
                                + " and Bp = " + Integer.valueOf(bps) + " ");
                        Set<CharSequence> primers = Collections.synchronizedSet(new HashSet<>());
                        while (resultSet.next()) {
                            primers.add(resultSet.getString("Sequence"));
                        }
                        for (String phage : nonclustphages) {
                            CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv")
                                    .parallelStream().filter(primer -> primers.contains(primer))
                                    .forEach(primers::remove);

                        }
                        int i = 0;
                        for (CharSequence a : primers) {
                            try {
                                st.setDouble(1, HSqlPrimerDesign.primerTm(a, 0, 800, 1.5, 0.2));
                                st.setDouble(2, HSqlPrimerDesign.gcContent(a));
                                st.setBoolean(3, HSqlPrimerDesign.calcHairpin((String) a, 4));
                                st.setString(4, z);
                                st.setString(5, x);
                                st.setString(6, a.toString());
                                st.setInt(7, bps);
                                st.addBatch();
                            } catch (SQLException e) {
                                e.printStackTrace();
                                System.out.println("Error occurred at " + x + " " + z);
                            }
                            i++;
                            if (i == 1000) {
                                i = 0;
                                st.executeBatch();
                                db.commit();
                            }
                        }
                        if (i > 0) {
                            st.executeBatch();
                            db.commit();
                        }
                    } catch (SQLException e) {
                        e.printStackTrace();
                        System.out.println("Error occurred at " + x + " " + z);
                    }
                    log.println(z);
                    log.flush();
                    System.gc();
                });
    });
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Unique Updated");
}

From source file:org.rimudb.Table.java

public void addRecordToBatch(Session session, Record record, boolean ignoreAutoCommitBatchErrors)
        throws RimuDBException {
    PreparedStatement stmt = null;
    int statID = 0;

    try {
        String sql = sqlStatementCache.getInsertSQL();
        if (sql == null) {
            sql = sqlAdapter.getInsertStatement(tableMetaData, getTableName());
            sqlStatementCache.setInsertSQL(sql);
        }

        // Get the statistic ID
        int loggingType = getDatabase().getDatabaseConfiguration().getLoggingType();
        if (loggingType == DatabaseConfiguration.LOG_STATISTICS) {
            statID = StatisticCollector.getInstance().createStatistic(sql);
        } else if (loggingType == DatabaseConfiguration.LOG_SQL_ONLY) {
            log.info("SQL=" + sql);
        }

        stmt = session.getBatchStatement(this, Session.BATCH_INSERT);
        if (stmt == null) {
            stmt = createPreparedStatement(session.getConnection(), sql, CrudType.CREATE);
            session.setBatchStatement(this, stmt, Session.BATCH_INSERT);
        }

        recordBinder.bindStatementForInsert(stmt, record);

        if (statID > 0)
            StatisticCollector.getInstance().logEvent(statID, "preparetime");

        stmt.addBatch();

        if (statID > 0)
            StatisticCollector.getInstance().logEvent(statID, "executetime");

        if (statID > 0) {
            StatisticCollector.getInstance().logEvent(statID, "processtime");
            if (StatisticCollector.getInstance().exceedsThreshold(statID,
                    getDatabase().getDatabaseConfiguration().getLoggingThreshold())) {
                String text = StatisticCollector.getInstance().formatStatistics(statID,
                        getDatabase().getStatisticFormatter());
                log.info(text);
            }
            StatisticCollector.getInstance().removeID(statID);
        }

    } catch (SQLException e) {

        throw new RimuDBException(e);

    }
}

From source file:mil.army.usace.data.nativequery.rdbms.NativeRdbmsQuery.java

private void runDml(DML dmlType, List records, boolean useDeclaredOnly) {
    PreparedStatement st = null;
    boolean inTrans = inTransaction();
    int batchCount = 0;
    String command = null;
    if (!inTrans)
        startTransaction();
    try {
        Object obj = records.get(0);
        Class objClass = obj.getClass();
        String schema = getEntitySchema(objClass);
        Boolean isCamelCased = useCamelCase(objClass);
        HashMap<Method, String> fieldMapping = getFieldMapping(objClass, GET, isCamelCased, useDeclaredOnly);
        HashMap<Integer, Method> indexMapping = new HashMap();
        String tableName = getTableName(objClass);
        if (tableName == null)
            tableName = getDbName(isCamelCased, objClass.getSimpleName(), null);

        if (dmlType == DML.UPDATE)
            command = getUpdateCommand(tableName, schema, fieldMapping, indexMapping);
        else if (dmlType == DML.INSERT)
            command = getInsertCommand(tableName, schema, fieldMapping, indexMapping);
        else
            command = getDeleteCommand(tableName, schema, fieldMapping, indexMapping);

        st = conn.prepareStatement(command);

        for (Object record : records) {
            for (int index : indexMapping.keySet()) {
                Object value = indexMapping.get(index).invoke(record);
                if (value instanceof java.util.Date) {
                    value = new java.sql.Date(((java.util.Date) value).getTime());
                }
                st.setObject((Integer) index, value);
            }

            if (useBatch)
                st.addBatch();
            else
                st.executeUpdate();

            if (useBatch == true && ++batchCount % batchSize == 0) {
                st.executeBatch();
            }
        }
        if (useBatch == true)
            st.executeBatch(); //flush out remaining records
        if (!inTrans)
            commitTransaction();
    } catch (Exception ex) {
        ex.printStackTrace();
        if (!inTrans)
            rollbackTransaction();
        throw new NativeQueryException(command, "runDml", ex);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (Exception ex) {
            }
        }
    }
}

From source file:com.jabyftw.lobstercraft.player.PlayerHandlerService.java

/**
 * This should run on server close, so we don't need to synchronize: every player join is denied by then.
 *
 * @param connection MySQL connection
 * @throws SQLException if a database access error occurs
 */
private void saveChangedPlayerNames(@NotNull Connection connection) throws SQLException {
    long start = System.nanoTime();
    int numberOfEntriesUpdated = 0, numberOfEntriesInserted = 0;

    // Prepare statements
    PreparedStatement updateStatement = connection.prepareStatement(
            "UPDATE `minecraft`.`player_name_changes` SET `oldPlayerName` = ?, `changeDate` = ? WHERE `user_playerId` = ?;");
    PreparedStatement insertStatement = connection.prepareStatement(
            "INSERT INTO `minecraft`.`player_name_changes` (`user_playerId`, `oldPlayerName`, `changeDate`) VALUES  (?, ?, ?);");

    // Iterate through all entries
    for (NameChangeEntry nameChangeEntry : nameChangeEntries.values()) {
        if (nameChangeEntry.databaseState == DatabaseState.UPDATE_DATABASE) {
            // Set variables
            updateStatement.setString(1, nameChangeEntry.getOldPlayerName());
            updateStatement.setLong(2, nameChangeEntry.getChangeDate());
            updateStatement.setInt(3, nameChangeEntry.getPlayerId());

            // Add batch
            updateStatement.addBatch();
            numberOfEntriesUpdated++;
        } else if (nameChangeEntry.databaseState == DatabaseState.INSERT_TO_DATABASE) {
            // Set variables
            insertStatement.setInt(1, nameChangeEntry.getPlayerId());
            insertStatement.setString(2, nameChangeEntry.getOldPlayerName());
            insertStatement.setLong(3, nameChangeEntry.getChangeDate());

            // Add batch
            insertStatement.addBatch();
            numberOfEntriesInserted++;
        } else {
            // Leave this entry's database state unchanged
            continue;
        }

        // Update their database state
        nameChangeEntry.databaseState = DatabaseState.ON_DATABASE;
    }

    // Delete entries that weren't updated
    PreparedStatement deleteStatement = connection
            .prepareStatement("DELETE FROM `minecraft`.`player_name_changes` WHERE `changeDate` > ?;");
    deleteStatement.setLong(1, System.currentTimeMillis() + REQUIRED_TIME_TO_ALLOW_NAME);
    deleteStatement.execute();
    deleteStatement.close();

    // Delete from cache too
    Iterator<NameChangeEntry> iterator = nameChangeEntries.values().iterator();
    while (iterator.hasNext()) {
        NameChangeEntry next = iterator.next();

        if (next.isNameAvailable())
            iterator.remove();
    }

    // Execute and announce if needed
    if (numberOfEntriesUpdated > 0)
        updateStatement.executeBatch();
    if (numberOfEntriesInserted > 0)
        insertStatement.executeBatch();
    if (numberOfEntriesUpdated > 0 || numberOfEntriesInserted > 0)
        LobsterCraft.logger.info(Util.appendStrings("Took us ",
                Util.formatDecimal(
                        (double) (System.nanoTime() - start) / (double) TimeUnit.MILLISECONDS.toNanos(1)),
                "ms to clean old, insert ", numberOfEntriesInserted, " and update ", numberOfEntriesUpdated,
                " name changes."));

    // Close statement
    updateStatement.close();
    insertStatement.close();
}

From source file:org.wso2.carbon.identity.oauth2.dao.AccessTokenDAOImpl.java

@Override
public void revokeAccessTokensInBatch(String[] tokens) throws IdentityOAuth2Exception {

    if (log.isDebugEnabled()) {
        if (IdentityUtil.isTokenLoggable(IdentityConstants.IdentityTokens.ACCESS_TOKEN)) {
            StringBuilder stringBuilder = new StringBuilder();
            for (String token : tokens) {
                stringBuilder.append(DigestUtils.sha256Hex(token)).append(" ");
            }
            log.debug("Revoking access tokens(hashed): " + stringBuilder.toString());
        } else {
            log.debug("Revoking access tokens in batch mode");
        }
    }
    String accessTokenStoreTable = OAuthConstants.ACCESS_TOKEN_STORE_TABLE;
    Connection connection = IdentityDatabaseUtil.getDBConnection();
    PreparedStatement ps = null;
    if (tokens.length > 1) {
        try {
            List<String> oldTokens = new ArrayList<>();
            connection.setAutoCommit(false);
            String sqlQuery = SQLQueries.REVOKE_ACCESS_TOKEN.replace(IDN_OAUTH2_ACCESS_TOKEN,
                    accessTokenStoreTable);
            ps = connection.prepareStatement(sqlQuery);
            for (String token : tokens) {
                ps.setString(1, OAuthConstants.TokenStates.TOKEN_STATE_REVOKED);
                ps.setString(2, UUID.randomUUID().toString());
                ps.setString(3, getHashingPersistenceProcessor().getProcessedAccessTokenIdentifier(token));
                ps.addBatch();
                oldTokens.add(getHashingPersistenceProcessor().getProcessedAccessTokenIdentifier(token));
            }
            ps.executeBatch();
            connection.commit();
            // To revoke request objects which have been persisted against the access token.
            OAuth2TokenUtil.postUpdateAccessTokens(Arrays.asList(tokens),
                    OAuthConstants.TokenStates.TOKEN_STATE_REVOKED);
            if (isTokenCleanupFeatureEnabled) {
                oldTokenCleanupObject.cleanupTokensInBatch(oldTokens, connection);
            }
            connection.commit();
        } catch (SQLException e) {
            IdentityDatabaseUtil.rollBack(connection);
            throw new IdentityOAuth2Exception(
                    "Error occurred while revoking Access Tokens : " + Arrays.toString(tokens), e);
        } finally {
            IdentityDatabaseUtil.closeAllConnections(connection, null, ps);
        }
    }
    if (tokens.length == 1) {
        try {
            connection.setAutoCommit(true);
            String sqlQuery = SQLQueries.REVOKE_ACCESS_TOKEN.replace(IDN_OAUTH2_ACCESS_TOKEN,
                    accessTokenStoreTable);
            ps = connection.prepareStatement(sqlQuery);
            ps.setString(1, OAuthConstants.TokenStates.TOKEN_STATE_REVOKED);
            ps.setString(2, UUID.randomUUID().toString());
            ps.setString(3, getHashingPersistenceProcessor().getProcessedAccessTokenIdentifier(tokens[0]));
            ps.executeUpdate();

            // To revoke request objects which have been persisted against the access token.
            OAuth2TokenUtil.postUpdateAccessTokens(Arrays.asList(tokens),
                    OAuthConstants.TokenStates.TOKEN_STATE_REVOKED);
            if (isTokenCleanupFeatureEnabled) {
                oldTokenCleanupObject.cleanupTokenByTokenValue(
                        getHashingPersistenceProcessor().getProcessedAccessTokenIdentifier(tokens[0]),
                        connection);
            }

        } catch (SQLException e) {
            // IdentityDatabaseUtil.rollBack(connection);
            throw new IdentityOAuth2Exception(
                    "Error occurred while revoking Access Token : " + Arrays.toString(tokens), e);
        } finally {
            IdentityDatabaseUtil.closeAllConnections(connection, null, ps);
        }
    }
}

From source file:com.oltpbenchmark.benchmarks.auctionmark.AuctionMarkLoader.java

/**
 * Load the tuples for the given table name
 * @param tableName the table to load
 */
protected void generateTableData(String tableName) throws SQLException {
    LOG.info("*** START " + tableName);
    final AbstractTableGenerator generator = this.generators.get(tableName);
    assert (generator != null);

    // Generate Data
    final Table catalog_tbl = benchmark.getCatalog().getTable(tableName);
    assert (catalog_tbl != null) : tableName;
    final List<Object[]> volt_table = generator.getVoltTable();
    final String sql = SQLUtil.getInsertSQL(catalog_tbl);
    final PreparedStatement stmt = conn.prepareStatement(sql);
    final int[] types = catalog_tbl.getColumnTypes();

    while (generator.hasMore()) {
        generator.generateBatch();

        for (Object[] row : volt_table) {
            for (int i = 0; i < row.length; i++) {
                if (row[i] != null) {
                    stmt.setObject(i + 1, row[i]);
                } else {
                    stmt.setNull(i + 1, types[i]);
                }
            } // FOR
            stmt.addBatch();
        } // FOR
        try {
            stmt.executeBatch();
            conn.commit();
            stmt.clearBatch();
        } catch (SQLException ex) {
            if (ex.getNextException() != null)
                ex = ex.getNextException();
            LOG.warn(tableName + " - " + ex.getMessage());
            throw ex;
            // SKIP
        }

        this.tableSizes.put(tableName, volt_table.size());

        // Release anything to the sub-generators if we have it
        // We have to do this to ensure that all of the parent tuples get
        // inserted first for foreign-key relationships
        generator.releaseHoldsToSubTableGenerators();
    } // WHILE
    stmt.close();

    // Mark as finished
    if (!this.fail) {
        generator.markAsFinished();
        synchronized (this) {
            this.finished.add(tableName);
            LOG.info(String.format("*** FINISH %s - %d tuples - [%d / %d]", tableName,
                    this.tableSizes.get(tableName), this.finished.size(), this.generators.size()));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Remaining Tables: "
                        + CollectionUtils.subtract(this.generators.keySet(), this.finished));
            }
        } // SYNCH
    }
}

From source file:mil.army.usace.data.dataquery.rdbms.RdbmsDataQuery.java

private void runDml(DML dmlType, List records, boolean useDeclaredOnly, List<String> includeFields) {
    PreparedStatement st = null;
    boolean inTrans = inTransaction();
    int batchCount = 0;
    String command = null;
    if (!inTrans)
        startTransaction();
    try {
        Object obj = records.get(0);
        Class objClass = obj.getClass();
        String schema = getEntitySchema(objClass);
        Boolean isCamelCased = useCamelCase(objClass);
        HashMap<Method, String> fieldMapping = getFieldMapping(objClass, GET, isCamelCased, useDeclaredOnly);
        HashMap<Integer, Method> indexMapping = new HashMap();
        String tableName = getTableName(objClass);
        if (tableName == null)
            tableName = getDbName(isCamelCased, objClass.getSimpleName(), null);

        if (dmlType == DML.UPDATE)
            command = getUpdateCommand(tableName, schema, fieldMapping, indexMapping, includeFields);
        else if (dmlType == DML.INSERT)
            command = getInsertCommand(tableName, schema, fieldMapping, indexMapping);
        else
            command = getDeleteCommand(tableName, schema, fieldMapping, indexMapping);

        st = conn.prepareStatement(command);

        for (Object record : records) {
            for (int index : indexMapping.keySet()) {
                Object value = indexMapping.get(index).invoke(record);
                if (value instanceof java.util.Date) {
                    value = new java.sql.Date(((java.util.Date) value).getTime());
                }
                st.setObject((Integer) index, value);
            }

            if (useBatch)
                st.addBatch();
            else
                st.executeUpdate();

            if (useBatch == true && ++batchCount % batchSize == 0) {
                st.executeBatch();
            }
        }
        if (useBatch == true)
            st.executeBatch(); //flush out remaining records
        if (!inTrans)
            commitTransaction();
    } catch (Exception ex) {
        ex.printStackTrace();
        if (!inTrans)
            rollbackTransaction();
        throw new DataQueryException(command, "runDml", ex);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (Exception ex) {
            }
        }
    }
}

From source file:org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn
 * @param idPId
 * @param addedRoles
 * @param deletedRoles
 * @param renamedOldRoles
 * @param renamedNewRoles
 * @throws SQLException
 */
private void updateIdPRoles(Connection conn, int idPId, List<String> addedRoles, List<String> deletedRoles,
        List<String> renamedOldRoles, List<String> renamedNewRoles) throws SQLException {

    PreparedStatement prepStmt = null;
    String sqlStmt = null;

    try {

        // Prepare each statement once, outside its loop, so every row accumulates in a single batch
        sqlStmt = IdPManagementConstants.SQLQueries.DELETE_IDP_ROLES_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (String deletedRole : deletedRoles) {
            prepStmt.setInt(1, idPId);
            prepStmt.setString(2, deletedRole);
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();
        prepStmt.clearParameters();
        prepStmt.clearBatch();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);

        sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_ROLES_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (String addedRole : addedRoles) {
            prepStmt.setInt(1, idPId);
            prepStmt.setString(2, CharacterEncoder.getSafeText(addedRole));
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();
        prepStmt.clearParameters();
        prepStmt.clearBatch();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);

        sqlStmt = IdPManagementConstants.SQLQueries.UPDATE_IDP_ROLES_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (int i = 0; i < renamedOldRoles.size(); i++) {
            prepStmt.setString(1, CharacterEncoder.getSafeText(renamedNewRoles.get(i)));
            prepStmt.setInt(2, idPId);
            prepStmt.setString(3, CharacterEncoder.getSafeText(renamedOldRoles.get(i)));
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();

    } finally {
        if (prepStmt != null) {
            prepStmt.clearParameters();
            prepStmt.clearBatch();
            IdentityApplicationManagementUtil.closeStatement(prepStmt);
        }
    }

}