Example usage for java.sql PreparedStatement addBatch

Introduction

This page lists usage examples for the java.sql PreparedStatement addBatch method, collected from open source projects.

Prototype

void addBatch() throws SQLException;

Document

Adds a set of parameters to this PreparedStatement object's batch of commands.
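
The usual pattern is to bind a parameter set, queue it with addBatch(), repeat for each row, and then submit all queued sets with a single executeBatch() call, which returns one update count per parameter set. Below is a minimal sketch of that pattern; the conn variable, the rows list, and the USERS table are hypothetical placeholders, not taken from any example on this page.

// Minimal addBatch/executeBatch sketch (conn, rows, and USERS are hypothetical).
try (PreparedStatement ps = conn.prepareStatement(
        "INSERT INTO USERS (ID, NAME) VALUES (?, ?)")) {
    for (Row row : rows) {
        ps.setInt(1, row.getId());
        ps.setString(2, row.getName());
        ps.addBatch();          // queue this parameter set
    }
    int[] counts = ps.executeBatch();  // one update count per queued set
}

Most of the examples below follow this shape, often combined with setAutoCommit(false) and an explicit commit() so the whole batch succeeds or fails as a unit.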

Usage

From source file: org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn
 * @param idPId
 * @param tenantId
 * @param roleMappings
 * @throws SQLException
 * @throws IdentityProviderManagementException
 */
private void addIdPRoleMappings(Connection conn, int idPId, int tenantId, RoleMapping[] roleMappings)
        throws SQLException, IdentityProviderManagementException {

    Map<String, Integer> roleIdMap = new HashMap<String, Integer>();
    PreparedStatement prepStmt = null;
    ResultSet rs = null;

    // SP_IDP_ROLE_ID, SP_IDP_ROL
    String sqlStmt = IdPManagementConstants.SQLQueries.GET_IDP_ROLES_SQL;

    try {

        prepStmt = conn.prepareStatement(sqlStmt);
        prepStmt.setInt(1, idPId);
        rs = prepStmt.executeQuery();

        while (rs.next()) {
            int idpRoleId = rs.getInt("ID");
            String roleName = rs.getString("ROLE");
            roleIdMap.put(roleName, idpRoleId);
        }

        prepStmt.clearParameters();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);

        if (roleIdMap.isEmpty()) {
            String message = "No Identity Provider roles defined for tenant " + tenantId;
            throw new IdentityProviderManagementException(message);
        }

        sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_ROLE_MAPPINGS_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);

        for (RoleMapping mapping : roleMappings) {
            if (mapping.getRemoteRole() != null && roleIdMap.containsKey(mapping.getRemoteRole())) {

                int idpRoleId = roleIdMap.get(mapping.getRemoteRole());

                String userStoreId = mapping.getLocalRole().getUserStoreId();
                String localRole = mapping.getLocalRole().getLocalRoleName();

                // SP_IDP_ROLE_ID, SP_TENANT_ID, SP_USER_STORE_ID, SP_LOCAL_ROLE
                prepStmt.setInt(1, idpRoleId);
                prepStmt.setInt(2, tenantId);
                prepStmt.setString(3, CharacterEncoder.getSafeText(userStoreId));
                prepStmt.setString(4, CharacterEncoder.getSafeText(localRole));
                prepStmt.addBatch();
            } else {
                throw new IdentityProviderManagementException("Cannot find Identity Provider role "
                        + mapping.getRemoteRole() + " for tenant " + tenantId);
            }
        }

        prepStmt.executeBatch();

    } finally {
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);
    }

}

From source file: org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void updateTopicNamespaces(List<Topic> topics, Connection conn) throws SQLException {
    PreparedStatement stmt = null;
    try {
        stmt = conn.prepareStatement(STATEMENT_UPDATE_TOPIC_NAMESPACE);
        for (Topic topic : topics) {
            stmt.setInt(1, topic.getNamespace().getId());
            stmt.setString(2, topic.getPageName());
            stmt.setString(3, topic.getPageName().toLowerCase());
            stmt.setInt(4, topic.getTopicId());
            stmt.addBatch();
        }
        stmt.executeBatch();
    } finally {
        DatabaseConnection.closeStatement(stmt);
    }
}

From source file: com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema.java

/**
 * Updates the stats for all of the files passed in, and also updates the general job statistics
 * to reflect how many success/failures there were
 *
 * @param files
 *            -
 * @throws DatabaseException
 *             -
 */
@SuppressWarnings({ "ThrowableResultOfMethodCallIgnored" })
public void markFilesProcessed(Collection<FileStats> files) throws DatabaseException {
    synchronized (DatabaseResourceManager.DB_LOCK) {
        PooledDbConnection conn = null;
        try {

            conn = connPool.getConnection();
            PreparedStatement stmt = conn.prepareStatement(MARK_FILE_PROCESSED_STMT_NAME, markFileProcessedSql);
            conn.setAutoCommit(false);
            long totalCnt = 0;
            long totalSize = 0;
            long successCnt = 0;
            long successSize = 0;
            long failCnt = 0;
            long failSize = 0;
            long failDirCnt = 0;
            for (FileStats file : files) {
                FileStatus status = file.getStatus();
                switch (status) {
                case SUCCEEDED: {
                    if (file.includeInStats()) {
                        successCnt++;
                        successSize += file.getSize();
                    }
                    break;
                }
                case FAILED: {
                    if (file.includeInStats()) {
                        failCnt++;
                        failSize += file.getSize();
                        if (file.failedDuringFind()) {
                            // in this case we need to increment the total counts as well
                            totalCnt++;
                            totalSize += file.getSize(); // probably zero since we didn't
                                                         // get that far
                        }
                    }
                    if (file.getArcProcssFile().isDirectory()) {
                        failDirCnt++;
                    }
                    break;
                }
                default:
                    throw new RuntimeException("Unsupported file status: " + status);
                }
                stmt.clearParameters();
                stmt.setInt(1, file.getStatus().ordinal());
                stmt.setInt(2, file.getRetries());
                Date startTime = file.getStartTime();
                if (startTime != null) {
                    stmt.setLong(3, startTime.getTime());
                } else {
                    stmt.setNull(3, java.sql.Types.BIGINT);
                }
                Date endTime = file.getEndTime();
                if (endTime != null) {
                    stmt.setLong(4, endTime.getTime());
                } else {
                    stmt.setNull(4, java.sql.Types.BIGINT);
                }
                stmt.setLong(5, file.getRunTimeMs());

                if (file.getException() == null) {
                    stmt.setNull(6, java.sql.Types.VARCHAR);
                } else {
                    stmt.setString(6, file.getException().getMessage());
                }

                if (file.getStatusCode() == null) {
                    stmt.setNull(7, java.sql.Types.INTEGER);
                } else {
                    stmt.setInt(7, file.getStatusCode());
                }
                stmt.setLong(8, file.getDatabaseRecordId());

                stmt.addBatch();
            }

            // execute the batch statement to update all of the file rows
            stmt.executeBatch();

            // now update overall job stats to reflect these changes
            ManagedJobsSchema.getInstance().updateProcessedFilesStats(conn, jobId, totalCnt, totalSize,
                    successCnt, successSize, failCnt, failSize, failDirCnt);
            conn.commit();

        } catch (Exception e) {
            rollback(conn);
            throw new DatabaseException(DBUtils.getErrorMessage(
                    "An error occurred updating file stats on table " + qualifiedFilesTableName, e), e);
        } finally {
            connPool.returnConnection(conn);
        }
    }
}

From source file: com.flexive.ejb.beans.structure.AssignmentEngineBean.java

private void storeOptions(Connection con, String table, String primaryColumn, long primaryId, Long assignmentId,
        List<FxStructureOption> options) throws SQLException, FxInvalidParameterException {
    PreparedStatement ps = null;
    try {
        if (assignmentId == null) {
            ps = con.prepareStatement(
                    "DELETE FROM " + table + " WHERE " + primaryColumn + "=? AND ASSID IS NULL");
        } else {
            ps = con.prepareStatement("DELETE FROM " + table + " WHERE " + primaryColumn + "=? AND ASSID=?");
            ps.setLong(2, assignmentId);
        }
        ps.setLong(1, primaryId);
        ps.executeUpdate();
        if (options == null || options.size() == 0)
            return;
        ps.close();
        ps = con.prepareStatement("INSERT INTO " + table + " (" + primaryColumn
                + ",ASSID,OPTKEY,MAYOVERRIDE,ISINHERITED,OPTVALUE)VALUES(?,?,?,?,?,?)");
        for (FxStructureOption option : options) {
            ps.setLong(1, primaryId);
            if (assignmentId != null)
                ps.setLong(2, assignmentId);
            else
                ps.setNull(2, java.sql.Types.NUMERIC);
            if (StringUtils.isEmpty(option.getKey()))
                throw new FxInvalidParameterException("key", "ex.structure.option.key.empty",
                        option.getValue());
            ps.setString(3, option.getKey());
            ps.setBoolean(4, option.isOverridable());
            ps.setBoolean(5, option.getIsInherited());
            ps.setString(6, option.getValue());
            ps.addBatch();
        }
        ps.executeBatch();
    } finally {
        if (ps != null)
            ps.close();
    }
}

From source file: gemlite.core.internal.db.DBSynchronizer.java

public boolean processEvents(List<AsyncEvent> events) {
    if (this.shutDown) {
        return false;
    }
    boolean completedSucessfully = false;
    String listOfEventsString = null;
    // The retval will be considered true only if the list was iterated
    // completely. If the List iteration was incomplete we will return
    // false so that the events are not removed during failure.
    // As for individual events, they can get exceptions due to constraint
    // violations etc but will not cause return value to be false.
    Statement stmt = null;
    PreparedStatement ps = null;
    // keep track of the previous prepared statement in case we can optimize
    // by creating a batch when the previous and current statements match
    PreparedStatement prevPS = null;
    AsyncEvent prevEvent = null;
    boolean prevPSHasBatch = false;
    Iterator<AsyncEvent> itr = events.iterator();
    AsyncEvent event = null;
    String eventString = null;
    String prevEventStr = null;
    try {
        while (!(completedSucessfully = !itr.hasNext())) {
            event = itr.next();
            Operation operation = event.getOperation();
            if (logger.isDebugEnabled()) {
                eventString = event.toString();
                if (prevEvent != null) {
                    prevEventStr = prevEvent.toString();
                }
                logger.info("DBSynchronizer::processEvents :processing PK based " + "event=" + eventString
                        + " AsyncEvent Operation=" + operation);
            } else {
                eventString = null;
                prevEventStr = null;
            }
            try {
                if (operation.isPutAll() || operation.isCreate())
                    ps = getExecutableInsertPrepStmntPKBased(event, prevPS);
                else if (operation.isUpdate())
                    ps = getExecutableUpdatePrepStmntPKBased(event, prevPS);
                else if (operation.isDestroy())
                    ps = getExecutableDeletePrepStmntPKBased(event, prevPS);
                else {
                    logger.error("DBSynchronizer::processEvents: unexpected " + "eventType " + operation
                            + " for " + event);
                    continue;
                }
            } catch (SQLException sqle) {
                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__1, null, event,
                        eventString, logger, true);
                if (handler.breakTheLoop()) {
                    break;
                }
            } catch (RegionDestroyedException rde) {
                if (logger.isInfoEnabled()) {
                    logger.info("DBSynchronizer::processEvents: WBCLEvent " + event
                            + " will  be discarded as the underlying region "
                            + "for the table has been destroyed");
                }
                continue;
            }
            if (logger.isDebugEnabled()) {
                if (eventString == null) {
                    eventString = event.toString();
                }
                logger.debug("DBSynchronizer::processEvents: Statement=" + (ps != null ? ps : stmt)
                        + " for event=" + eventString);
            }
            try {
                int num;
                if (prevPS != null && prevPS != ps) {
                    try {
                        if (prevPSHasBatch) {
                            prevPS.addBatch();
                            if (logger.isDebugEnabled()) {
                                logger.info("DBSynchronizer::processEvents executing "
                                        + "batch statement for prepared statement=" + prevPS + " for event="
                                        + prevEventStr);
                            }
                            final int[] res = prevPS.executeBatch();
                            num = res.length;
                            prevPSHasBatch = false;
                        } else {
                            num = prevPS.executeUpdate();
                        }
                        if (logger.isDebugEnabled()) {
                            logger.info("DBSynchronizer::processEvents total num rows " + "modified=" + num
                                    + " for prepared statement=" + prevPS + " for event=" + prevEventStr);
                        }
                        // clear event from failure map if present
                        helper.removeEventFromFailureMap(prevEvent);
                    } catch (SQLException sqle) {
                        if (prevPSHasBatch) {
                            try {
                                prevPS.clearBatch();
                            } catch (SQLException e) {
                                // ignored
                            }
                            prevPSHasBatch = false;
                        }
                        SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__3, prevPS,
                                prevEvent, prevEventStr, logger, false);
                        if (handler.breakTheLoop()) {
                            break;
                        }
                        prevPS = null;
                        prevEvent = null;
                        prevPSHasBatch = false;
                    }
                }
                // in case the previous prepared statement matches the current
                // one, it will already have been added as a batch when setting
                // the arguments via AsyncEventHelper#setColumnInPrepStatement()
                else if (prevPS != null && ps != null) {
                    prevPSHasBatch = true;
                    if (logger.isDebugEnabled()) {
                        logger.info("DBSynchronizer::processEvents added new row "
                                + "as a batch for prepared statement=" + ps + " for event=" + eventString);
                    }
                }

                prevPS = ps;
                prevEvent = event;
            } catch (SQLException sqle) {
                if (prevPS != null && prevPSHasBatch) {
                    try {
                        prevPS.clearBatch();
                    } catch (SQLException e) {
                        // ignored
                    }
                }
                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__3,
                        ps != null ? ps : stmt, event, eventString, logger, false);
                if (handler.breakTheLoop()) {
                    break;
                }
            }
        } // end of while (event list processing loop)

        // now handle the last statement in the above loop since it is still
        // pending due to anticipated batching
        if (completedSucessfully) {
            try {
                if (logger.isInfoEnabled()) {
                    if (listOfEventsString == null) {
                        listOfEventsString = events.toString();
                    }
                    logger.info("DBSynchronizer::processEvents: " + "before commit of events="
                            + listOfEventsString);
                }
                int num;
                // first the case when the previous statement was a batched
                // one, so add the current one as a batch and execute
                if (prevPSHasBatch) {
                    ps.addBatch();
                    if (logger.isDebugEnabled()) {
                        logger.info("DBSynchronizer::processEvents executing batch "
                                + "statement for prepared statement=" + ps + " for event=" + eventString);
                    }
                    final int[] res = ps.executeBatch();
                    num = res.length;
                }
                // next the case of a non-BULK_INSERT operation;
                // BULK_INSERT operations are always executed as a single
                // batch by themselves, so will never reach here
                else if (ps != null) {
                    num = ps.executeUpdate();
                    if (event != null) {
                        // clear event from failure map if present
                        helper.removeEventFromFailureMap(event);
                    }
                } else {
                    num = 0;
                }
                // clear event from failure map if present
                helper.removeEventFromFailureMap(event);
                if (logger.isDebugEnabled()) {
                    if (ps != null) {
                        logger.info("DBSynchronizer::processEvents num rows modified=" + num
                                + " for prepared statement=" + ps + " for event=" + eventString);
                    }
                }
                this.conn.commit();
                if (logger.isInfoEnabled()) {
                    if (listOfEventsString == null) {
                        listOfEventsString = events.toString();
                    }
                    logger.info("DBSynchronizer::processEvents: " + "committed successfully for events="
                            + listOfEventsString);
                }
            } catch (SQLException sqle) {

                if (ps != null && prevPSHasBatch) {
                    try {
                        ps.clearBatch();
                    } catch (SQLException e) {
                        // ignored
                    }
                }

                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__7,
                        ps != null ? ps : stmt, event, eventString, logger, true);
                if (handler != SqlExceptionHandler.IGNORE) {
                    completedSucessfully = false;
                }
            }
        }
    } catch (Exception e) {

        if (logger != null && logger.isErrorEnabled() && !(event != null && helper.skipFailureLogging(event))) {
            StringBuilder sb = new StringBuilder();
            if (event != null) {
                if (eventString == null) {
                    eventString = event.toString();
                }
                sb.append("[FAILED: ").append(eventString).append(" ]");
            }
            while (itr.hasNext()) {
                sb.append("[ ").append(itr.next().toString()).append(" ]");
            }
            helper.logFormat(logger, Level.SEVERE, e, DB_SYNCHRONIZER__2, sb.toString());
        }
        SqlExceptionHandler.CLEANUP.execute(this);
        completedSucessfully = false;
    }

    if (completedSucessfully) {
        // on successful completion, log any pending errors to the XML file;
        // when unsuccessful we know that the batch will be retried, so don't
        // log in that case else it can get logged multiple times
        flushErrorEventsToLog();
    }

    if (logger.isDebugEnabled()) {
        logger.info("DBSynchronizer::processEvents: processed " + events.size() + " events, success="
                + completedSucessfully);
    }

    return completedSucessfully;
}

From source file: org.apache.tajo.catalog.store.AbstractDBStore.java

@Override
public void addPartitions(String databaseName, String tableName,
        List<CatalogProtos.PartitionDescProto> partitions, boolean ifNotExists)
        throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {

    final int databaseId = getDatabaseId(databaseName);
    final int tableId = getTableId(databaseId, databaseName, tableName);
    ensurePartitionTable(tableName, tableId);

    Connection conn = null;
    // To delete existing partition keys
    PreparedStatement pstmt1 = null;
    // To delete existing partitions
    PreparedStatement pstmt2 = null;
    // To insert a partition
    PreparedStatement pstmt3 = null;
    // To insert partition keys
    PreparedStatement pstmt4 = null;

    PartitionDescProto partitionDesc = null;

    try {
        conn = getConnection();
        conn.setAutoCommit(false);

        int currentIndex = 0, lastIndex = 0;

        pstmt1 = conn.prepareStatement(deletePartitionKeysSql);
        pstmt2 = conn.prepareStatement(deletePartitionSql);
        pstmt3 = conn.prepareStatement(insertPartitionSql);
        pstmt4 = conn.prepareStatement(insertPartitionKeysSql);

        // Execute in batches (configurable, default 1000) to avoid out-of-memory issues with very large partition lists.
        int batchSize = conf.getInt(TajoConf.ConfVars.PARTITION_DYNAMIC_BULK_INSERT_BATCH_SIZE.varname, 1000);
        for (currentIndex = 0; currentIndex < partitions.size(); currentIndex++) {
            PartitionDescProto partition = partitions.get(currentIndex);

            try {
                partitionDesc = getPartition(databaseName, tableName, partition.getPartitionName());
                // Delete existing partition and partition keys
                if (ifNotExists) {
                    pstmt1.setInt(1, partitionDesc.getId());
                    pstmt1.addBatch();
                    pstmt1.clearParameters();

                    pstmt2.setInt(1, partitionDesc.getId());
                    pstmt2.addBatch();
                    pstmt2.clearParameters();
                }
            } catch (UndefinedPartitionException e) {
                // the partition does not exist yet, so there is nothing to delete
            }

            // Insert partition
            pstmt3.setInt(1, tableId);
            pstmt3.setString(2, partition.getPartitionName());
            pstmt3.setString(3, partition.getPath());
            pstmt3.setLong(4, partition.getNumBytes());
            pstmt3.addBatch();
            pstmt3.clearParameters();

            // Insert partition keys
            for (int i = 0; i < partition.getPartitionKeysCount(); i++) {
                PartitionKeyProto partitionKey = partition.getPartitionKeys(i);
                pstmt4.setInt(1, tableId);
                pstmt4.setString(2, partition.getPartitionName());
                pstmt4.setInt(3, tableId);
                pstmt4.setString(4, partitionKey.getColumnName());
                pstmt4.setString(5, partitionKey.getPartitionValue());

                pstmt4.addBatch();
                pstmt4.clearParameters();
            }

            // Execute batch
            if (currentIndex >= lastIndex + batchSize && lastIndex != currentIndex) {
                pstmt1.executeBatch();
                pstmt1.clearBatch();
                pstmt2.executeBatch();
                pstmt2.clearBatch();
                pstmt3.executeBatch();
                pstmt3.clearBatch();
                pstmt4.executeBatch();
                pstmt4.clearBatch();
                lastIndex = currentIndex;
            }
        }

        // Execute existing batch queries
        if (lastIndex != currentIndex) {
            pstmt1.executeBatch();
            pstmt2.executeBatch();
            pstmt3.executeBatch();
            pstmt4.executeBatch();
        }

        if (conn != null) {
            conn.commit();
        }
    } catch (SQLException se) {
        if (conn != null) {
            try {
                conn.rollback();
            } catch (SQLException e) {
                LOG.error(e, e);
            }
        }
        throw new TajoInternalError(se);
    } finally {
        CatalogUtil.closeQuietly(pstmt1);
        CatalogUtil.closeQuietly(pstmt2);
        CatalogUtil.closeQuietly(pstmt3);
        CatalogUtil.closeQuietly(pstmt4);
    }
}

From source file: org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn
 * @param idPId
 * @param tenantId
 * @param claimMappings
 * @throws SQLException
 * @throws IdentityProviderManagementException
 */
private void addIdPClaimMappings(Connection conn, int idPId, int tenantId, ClaimMapping[] claimMappings)
        throws SQLException, IdentityProviderManagementException {

    Map<String, Integer> claimIdMap = new HashMap<String, Integer>();
    PreparedStatement prepStmt = null;
    ResultSet rs = null;

    try {

        if (claimMappings == null || claimMappings.length == 0) {
            return;
        }

        String sqlStmt = IdPManagementConstants.SQLQueries.GET_IDP_CLAIMS_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        prepStmt.setInt(1, idPId);
        rs = prepStmt.executeQuery();

        while (rs.next()) {
            int id = rs.getInt("ID");
            String claim = rs.getString("CLAIM");
            claimIdMap.put(claim, id);
        }

        prepStmt.clearParameters();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);

        if (claimIdMap.isEmpty()) {
            String message = "No Identity Provider claim URIs defined for tenant " + tenantId;
            throw new IdentityProviderManagementException(message);
        }

        sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_CLAIM_MAPPINGS_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (ClaimMapping mapping : claimMappings) {
            if (mapping != null && mapping.getRemoteClaim() != null
                    && claimIdMap.containsKey(mapping.getRemoteClaim().getClaimUri())) {

                int idpClaimId = claimIdMap.get(mapping.getRemoteClaim().getClaimUri());
                String localClaimURI = mapping.getLocalClaim().getClaimUri();

                prepStmt.setInt(1, idpClaimId);
                prepStmt.setInt(2, tenantId);
                prepStmt.setString(3, CharacterEncoder.getSafeText(localClaimURI));
                prepStmt.setString(4, CharacterEncoder.getSafeText(mapping.getDefaultValue()));

                if (mapping.isRequested()) {
                    prepStmt.setString(5, "1");
                } else {
                    prepStmt.setString(5, "0");
                }

                prepStmt.addBatch();
            } else {
                throw new IdentityProviderManagementException(
                        "Cannot find Identity Provider claim mapping for tenant " + tenantId);
            }
        }

        prepStmt.executeBatch();

    } finally {
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);
    }
}

From source file: org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void orderTopicVersions(Topic topic, int virtualWikiId, List<Integer> topicVersionIdList)
        throws SQLException {
    Connection conn = null;
    PreparedStatement stmt = null;
    try {
        conn = DatabaseConnection.getConnection();
        conn.setAutoCommit(false);
        stmt = conn.prepareStatement(STATEMENT_UPDATE_TOPIC_VERSION_PREVIOUS_VERSION_ID);
        Integer previousTopicVersionId = null;
        boolean hasBatchData = false;
        for (int topicVersionId : topicVersionIdList) {
            if (previousTopicVersionId != null) {
                stmt.setInt(1, previousTopicVersionId);
                stmt.setInt(2, topicVersionId);
                stmt.addBatch();
                hasBatchData = true;
            }
            previousTopicVersionId = topicVersionId;
        }
        if (hasBatchData) {
            stmt.executeBatch();
        }
        TopicVersion topicVersion = this.lookupTopicVersion(previousTopicVersionId, conn);
        topic.setCurrentVersionId(previousTopicVersionId);
        topic.setTopicContent(topicVersion.getVersionContent());
        this.updateTopic(topic, virtualWikiId, conn);
        conn.commit();
    } catch (SQLException e) {
        if (conn != null) {
            try {
                conn.rollback();
            } catch (Exception ex) {
                // ignore the rollback failure and rethrow the original exception
            }
        }
        throw e;
    } finally {
        DatabaseConnection.closeConnection(conn, stmt);
        // explicitly null the variable to improve garbage collection.
        // with very large loops this can help avoid OOM "GC overhead
        // limit exceeded" errors.
        stmt = null;
        conn = null;
    }
}

From source file: org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void updateConfiguration(Map<String, String> configuration, Connection conn) throws SQLException {
    Statement stmt = null;
    PreparedStatement pstmt = null;
    try {
        stmt = conn.createStatement();
        stmt.executeUpdate(STATEMENT_DELETE_CONFIGURATION);
        pstmt = conn.prepareStatement(STATEMENT_INSERT_CONFIGURATION);
        for (Map.Entry<String, String> entry : configuration.entrySet()) {
            pstmt.setString(1, entry.getKey());
            // FIXME - Oracle cannot store an empty string - it converts them
            // to null - so add a hack to work around the problem.
            String value = entry.getValue();
            if (StringUtils.isBlank(value)) {
                value = " ";
            }
            pstmt.setString(2, value);
            pstmt.addBatch();
        }
        pstmt.executeBatch();
    } finally {
        DatabaseConnection.closeStatement(pstmt);
        DatabaseConnection.closeStatement(stmt);
    }
}

From source file: org.apache.ddlutils.platform.PlatformImplBase.java

/**
 * {@inheritDoc}
 */
public void insert(Connection connection, Database model, Collection dynaBeans)
        throws DatabaseOperationException {
    SqlDynaClass dynaClass = null;
    SqlDynaProperty[] properties = null;
    PreparedStatement statement = null;
    int addedStmts = 0;
    boolean identityWarningPrinted = false;

    for (Iterator it = dynaBeans.iterator(); it.hasNext();) {
        DynaBean dynaBean = (DynaBean) it.next();
        SqlDynaClass curDynaClass = model.getDynaClassFor(dynaBean);

        if (curDynaClass != dynaClass) {
            if (dynaClass != null) {
                executeBatch(statement, addedStmts, dynaClass.getTable());
                addedStmts = 0;
            }

            dynaClass = curDynaClass;
            properties = getPropertiesForInsertion(model, curDynaClass, dynaBean);

            if (properties.length == 0) {
                _log.warn("Cannot insert instances of type " + dynaClass
                        + " because it has no usable properties");
                continue;
            }
            if (!identityWarningPrinted
                    && (getRelevantIdentityColumns(model, curDynaClass, dynaBean).length > 0)) {
                _log.warn(
                        "Updating the bean properties corresponding to auto-increment columns is not supported in batch mode");
                identityWarningPrinted = true;
            }

            String insertSql = createInsertSql(model, dynaClass, properties, null);

            if (_log.isDebugEnabled()) {
                _log.debug("Starting new batch with SQL: " + insertSql);
            }
            try {
                statement = connection.prepareStatement(insertSql);
            } catch (SQLException ex) {
                throw new DatabaseOperationException("Error while preparing insert statement", ex);
            }
        }
        try {
            for (int idx = 0; idx < properties.length; idx++) {
                setObject(statement, idx + 1, dynaBean, properties[idx]);
            }
            statement.addBatch();
            addedStmts++;
        } catch (SQLException ex) {
            throw new DatabaseOperationException("Error while adding batch insert", ex);
        }
    }
    if (dynaClass != null) {
        executeBatch(statement, addedStmts, dynaClass.getTable());
    }
}