Example usage for java.sql PreparedStatement executeBatch

List of usage examples for java.sql PreparedStatement executeBatch

Introduction

On this page you can find example usages of java.sql.PreparedStatement.executeBatch().

Prototype

int[] executeBatch() throws SQLException;

Document

Submits a batch of commands to the database for execution and if all commands execute successfully, returns an array of update counts.
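
Before the project examples below, here is a minimal, self-contained sketch of the typical pattern: bind parameters, queue each command with addBatch(), then submit everything with executeBatch() in a single round trip. The table and column names (my_table, name) and the helper method are placeholders invented for illustration; they are not taken from any of the examples on this page.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;

public class ExecuteBatchSketch {

    /**
     * Queues one INSERT per name and submits them as a single batch.
     * Returns the total number of rows the driver reported as inserted.
     */
    static int insertNames(Connection conn, List<String> names) throws SQLException {
        String sql = "INSERT INTO my_table (name) VALUES (?)";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (String name : names) {
                ps.setString(1, name);
                ps.addBatch(); // queue the command instead of executing it immediately
            }
            int[] counts = ps.executeBatch(); // one update count per queued command
            int total = 0;
            for (int count : counts) {
                // some drivers report Statement.SUCCESS_NO_INFO (-2) instead of an exact count
                if (count != Statement.SUCCESS_NO_INFO) {
                    total += count;
                }
            }
            return total;
        }
    }
}

If any command in the batch fails, the driver throws a BatchUpdateException; its getUpdateCounts() may reflect only the commands processed before the failure, depending on the driver.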

Usage

From source file: org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn
 * @param idPId
 * @param tenantId
 * @param roleMappings
 * @throws SQLException
 * @throws IdentityProviderManagementException
 */
private void addIdPRoleMappings(Connection conn, int idPId, int tenantId, RoleMapping[] roleMappings)
        throws SQLException, IdentityProviderManagementException {

    Map<String, Integer> roleIdMap = new HashMap<String, Integer>();
    PreparedStatement prepStmt = null;
    ResultSet rs = null;

    // SP_IDP_ROLE_ID, SP_IDP_ROL
    String sqlStmt = IdPManagementConstants.SQLQueries.GET_IDP_ROLES_SQL;

    try {

        prepStmt = conn.prepareStatement(sqlStmt);
        prepStmt.setInt(1, idPId);
        rs = prepStmt.executeQuery();

        while (rs.next()) {
            int idpRoleId = rs.getInt("ID");
            String roleName = rs.getString("ROLE");
            roleIdMap.put(roleName, idpRoleId);
        }

        prepStmt.clearParameters();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);

        if (roleIdMap.isEmpty()) {
            String message = "No Identity Provider roles defined for tenant " + tenantId;
            throw new IdentityProviderManagementException(message);
        }

        sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_ROLE_MAPPINGS_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);

        for (RoleMapping mapping : roleMappings) {
            if (mapping.getRemoteRole() != null && roleIdMap.containsKey(mapping.getRemoteRole())) {

                int idpRoleId = roleIdMap.get(mapping.getRemoteRole());

                String userStoreId = mapping.getLocalRole().getUserStoreId();
                String localRole = mapping.getLocalRole().getLocalRoleName();

                // SP_IDP_ROLE_ID, SP_TENANT_ID, SP_USER_STORE_ID, SP_LOCAL_ROLE
                prepStmt.setInt(1, idpRoleId);
                prepStmt.setInt(2, tenantId);
                prepStmt.setString(3, CharacterEncoder.getSafeText(userStoreId));
                prepStmt.setString(4, CharacterEncoder.getSafeText(localRole));
                prepStmt.addBatch();
            } else {
                throw new IdentityProviderManagementException("Cannot find Identity Provider role "
                        + mapping.getRemoteRole() + " for tenant " + tenantId);
            }
        }

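        // Submit all queued role-mapping inserts to the database in a single batch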
        prepStmt.executeBatch();

    } finally {
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);
    }

}

From source file: org.wso2.carbon.apimgt.migration.client.MigrateFrom110to200.java

private boolean updateIdnTableConsumerKeys(Connection connection) throws SQLException {
    log.info("Updating consumer keys in IDN Tables");
    Statement consumerAppsLookup = null;
    PreparedStatement consumerAppsDelete = null;
    PreparedStatement consumerAppsInsert = null;
    PreparedStatement consumerAppsDeleteFailedRecords = null;
    PreparedStatement accessTokenUpdate = null;
    PreparedStatement accessTokenDelete = null;

    ResultSet consumerAppsResultSet = null;
    boolean continueUpdatingDB = true;

    try {
        String consumerAppsQuery = "SELECT * FROM IDN_OAUTH_CONSUMER_APPS";
        consumerAppsLookup = connection.createStatement();
        consumerAppsLookup.setFetchSize(50);
        consumerAppsResultSet = consumerAppsLookup.executeQuery(consumerAppsQuery);

        ArrayList<ConsumerAppsTableDTO> consumerAppsTableDTOs = new ArrayList<>();
        ArrayList<ConsumerAppsTableDTO> consumerAppsTableDTOsFailed = new ArrayList<>();

        while (consumerAppsResultSet.next()) {
            ConsumerKeyDTO consumerKeyDTO = new ConsumerKeyDTO();
            consumerKeyDTO.setEncryptedConsumerKey(consumerAppsResultSet.getString("CONSUMER_KEY"));

            ConsumerAppsTableDTO consumerAppsTableDTO = new ConsumerAppsTableDTO();
            consumerAppsTableDTO.setConsumerKey(consumerKeyDTO);
            consumerAppsTableDTO.setConsumerSecret(consumerAppsResultSet.getString("CONSUMER_SECRET"));
            consumerAppsTableDTO.setUsername(consumerAppsResultSet.getString("USERNAME"));
            consumerAppsTableDTO.setTenantID(consumerAppsResultSet.getInt("TENANT_ID"));
            consumerAppsTableDTO.setAppName(consumerAppsResultSet.getString("APP_NAME"));
            consumerAppsTableDTO.setOauthVersion(consumerAppsResultSet.getString("OAUTH_VERSION"));
            consumerAppsTableDTO.setCallbackURL(consumerAppsResultSet.getString("CALLBACK_URL"));
            consumerAppsTableDTO.setGrantTypes(consumerAppsResultSet.getString("GRANT_TYPES"));
            if (ResourceModifier.decryptConsumerKeyIfEncrypted(consumerKeyDTO)) {
                consumerAppsTableDTOs.add(consumerAppsTableDTO);
                log.debug("Successfully decrypted consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " in IDN_OAUTH_CONSUMER_APPS table");
            } else {
                consumerAppsTableDTOsFailed.add(consumerAppsTableDTO);
                log.error("Cannot decrypt consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " in IDN_OAUTH_CONSUMER_APPS table");
                // If it's not allowed to remove decryption-failed entries from the DB, we will not continue
                // updating tables even with successfully decrypted entries, to maintain DB integrity.
                if (!removeDecryptionFailedKeysFromDB) {
                    continueUpdatingDB = false;
                }
            }
        }

        if (continueUpdatingDB) {
            // Add new entries for decrypted consumer keys into IDN_OAUTH_CONSUMER_APPS
            consumerAppsInsert = connection
                    .prepareStatement("INSERT INTO IDN_OAUTH_CONSUMER_APPS (CONSUMER_KEY, "
                            + "CONSUMER_SECRET, USERNAME, TENANT_ID, APP_NAME, OAUTH_VERSION, "
                            + "CALLBACK_URL, GRANT_TYPES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)");

            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOs) {
                updateIdnConsumerApps(consumerAppsInsert, consumerAppsTableDTO);
            }
            consumerAppsInsert.executeBatch();
            log.info("Inserted entries in IDN_OAUTH_CONSUMER_APPS");

            // Update IDN_OAUTH2_ACCESS_TOKEN foreign key reference to CONSUMER_KEY
            accessTokenUpdate = connection.prepareStatement(
                    "UPDATE IDN_OAUTH2_ACCESS_TOKEN SET CONSUMER_KEY = ? " + "WHERE CONSUMER_KEY = ?");

            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOs) {
                ConsumerKeyDTO consumerKeyDTO = consumerAppsTableDTO.getConsumerKey();
                updateIdnAccessToken(accessTokenUpdate, consumerKeyDTO);
            }
            accessTokenUpdate.executeBatch();
            log.info("Updated entries in IDN_OAUTH2_ACCESS_TOKEN");

            // Remove redundant records in IDN_OAUTH_CONSUMER_APPS
            consumerAppsDelete = connection
                    .prepareStatement("DELETE FROM IDN_OAUTH_CONSUMER_APPS WHERE " + "CONSUMER_KEY = ?");

            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOs) {
                ConsumerKeyDTO consumerKeyDTO = consumerAppsTableDTO.getConsumerKey();
                deleteIdnConsumerApps(consumerAppsDelete, consumerKeyDTO);
            }
            consumerAppsDelete.executeBatch();
            log.info("Removed redundant entries in IDN_OAUTH_CONSUMER_APPS");

            //deleting rows where consumer key decryption was unsuccessful from IDN_OAUTH_CONSUMER_APPS table
            consumerAppsDeleteFailedRecords = connection
                    .prepareStatement("DELETE FROM IDN_OAUTH_CONSUMER_APPS WHERE " + "CONSUMER_KEY = ?");
            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOsFailed) {
                ConsumerKeyDTO consumerKeyDTO = consumerAppsTableDTO.getConsumerKey();
                deleteIdnConsumerApps(consumerAppsDeleteFailedRecords, consumerKeyDTO);
            }
            consumerAppsDeleteFailedRecords.executeBatch();
            log.info("Removed decryption failed entries in IDN_OAUTH_CONSUMER_APPS");

            //deleting rows where consumer key decryption was unsuccessful from IDN_OAUTH2_ACCESS_TOKEN table
            accessTokenDelete = connection
                    .prepareStatement("DELETE FROM IDN_OAUTH2_ACCESS_TOKEN " + "WHERE CONSUMER_KEY = ?");
            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOsFailed) {
                ConsumerKeyDTO consumerKeyDTO = consumerAppsTableDTO.getConsumerKey();
                deleteIdnAccessToken(accessTokenDelete, consumerKeyDTO);
            }
            accessTokenDelete.executeBatch();
            log.info("Removed decryption failed entries in IDN_OAUTH2_ACCESS_TOKEN");
        }
    } finally {
        if (consumerAppsLookup != null)
            consumerAppsLookup.close();
        if (consumerAppsDelete != null)
            consumerAppsDelete.close();
        if (consumerAppsDeleteFailedRecords != null)
            consumerAppsDeleteFailedRecords.close();
        if (consumerAppsInsert != null)
            consumerAppsInsert.close();
        if (accessTokenUpdate != null)
            accessTokenUpdate.close();
        if (accessTokenDelete != null)
            accessTokenDelete.close();
        if (consumerAppsResultSet != null)
            consumerAppsResultSet.close();
    }

    return continueUpdatingDB;
}

From source file: org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void updateConfiguration(Map<String, String> configuration, Connection conn) throws SQLException {
    Statement stmt = null;
    PreparedStatement pstmt = null;
    try {
        stmt = conn.createStatement();
        stmt.executeUpdate(STATEMENT_DELETE_CONFIGURATION);
        pstmt = conn.prepareStatement(STATEMENT_INSERT_CONFIGURATION);
        for (Map.Entry<String, String> entry : configuration.entrySet()) {
            pstmt.setString(1, entry.getKey());
            // FIXME - Oracle cannot store an empty string - it converts them
            // to null - so add a hack to work around the problem.
            String value = entry.getValue();
            if (StringUtils.isBlank(value)) {
                value = " ";
            }
            pstmt.setString(2, value);
            pstmt.addBatch();
        }
        pstmt.executeBatch();
    } finally {
        DatabaseConnection.closeStatement(pstmt);
        DatabaseConnection.closeStatement(stmt);
    }
}

From source file: org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void orderTopicVersions(Topic topic, int virtualWikiId, List<Integer> topicVersionIdList)
        throws SQLException {
    Connection conn = null;
    PreparedStatement stmt = null;
    try {
        conn = DatabaseConnection.getConnection();
        conn.setAutoCommit(false);
        stmt = conn.prepareStatement(STATEMENT_UPDATE_TOPIC_VERSION_PREVIOUS_VERSION_ID);
        Integer previousTopicVersionId = null;
        boolean hasBatchData = false;
        for (int topicVersionId : topicVersionIdList) {
            if (previousTopicVersionId != null) {
                stmt.setInt(1, previousTopicVersionId);
                stmt.setInt(2, topicVersionId);
                stmt.addBatch();
                hasBatchData = true;
            }
            previousTopicVersionId = topicVersionId;
        }
        if (hasBatchData) {
            stmt.executeBatch();
        }
        TopicVersion topicVersion = this.lookupTopicVersion(previousTopicVersionId, conn);
        topic.setCurrentVersionId(previousTopicVersionId);
        topic.setTopicContent(topicVersion.getVersionContent());
        this.updateTopic(topic, virtualWikiId, conn);
        conn.commit();
    } catch (SQLException e) {
        if (conn != null) {
            try {
                conn.rollback();
            } catch (Exception ex) {
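                // ignore rollback failures; the original SQLException is rethrown below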
            }
        }
        throw e;
    } finally {
        DatabaseConnection.closeConnection(conn, stmt);
        // explicitly null the variable to improve garbage collection.
        // with very large loops this can help avoid OOM "GC overhead
        // limit exceeded" errors.
        stmt = null;
        conn = null;
    }
}

From source file: com.flexive.core.storage.genericSQL.GenericHierarchicalStorage.java

/**
 * {@inheritDoc}
 */
@Override
public FxPK contentCreateVersion(Connection con, FxEnvironment env, StringBuilder sql, FxContent content)
        throws FxCreateException, FxInvalidParameterException {
    if (content.getPk().isNew())
        throw new FxInvalidParameterException("content", "ex.content.pk.invalid.newVersion", content.getPk());
    if (content.isForcePkOnCreate()) {
        throw new FxInvalidParameterException("content", "ex.content.save.force.pk.update");
    }

    content.getRootGroup().removeEmptyEntries();
    content.getRootGroup().compactPositions(true);
    content.checkValidity();
    final FxType type = CacheAdmin.getEnvironment().getType(content.getTypeId());

    FxPK pk;
    PreparedStatement ps = null;
    FulltextIndexer ft = null;
    try {
        int new_version = getContentVersionInfo(con, content.getPk().getId()).getMaxVersion() + 1;
        final boolean stepsUpdated = updateStepDependencies(con, content.getPk().getId(), new_version, env,
                env.getType(content.getTypeId()), content.getStepId());
        final boolean keepCreatedLCI = EJBLookup.getConfigurationEngine()
                .get(SystemParameters.STORAGE_KEEP_CREATION_DATES);

        pk = createMainEntry(con, content.getPk().getId(), new_version, content, keepCreatedLCI);

        ft = getFulltextIndexer(pk, con);
        if (sql == null)
            sql = new StringBuilder(2000);
        ps = con.prepareStatement(CONTENT_DATA_INSERT);
        createDetailEntries(con, ps, ft, sql, pk, content.isMaxVersion(), content.isLiveVersion(),
                content.getData("/"));

        ps.executeBatch();

        if (type.isContainsFlatStorageAssignments()) {
            FxFlatStorage flatStorage = FxFlatStorageManager.getInstance();
            flatStorage.setPropertyData(con, pk, content.getTypeId(), content.getStepId(),
                    content.isMaxVersion(), content.isLiveVersion(),
                    flatStorage.getFlatPropertyData(content.getRootGroup()), true);
        }
        checkUniqueConstraints(con, env, sql, pk, content.getTypeId());
        if (content.getBinaryPreviewId() != -1) {
            binaryStorage.updateContentBinaryEntry(con, pk, content.getBinaryPreviewId(),
                    content.getBinaryPreviewACL());
        }
        ft.commitChanges();

        fixContentVersionStats(con, env, type, content.getPk().getId(), true, stepsUpdated);
    } catch (FxApplicationException e) {
        if (e instanceof FxCreateException)
            throw (FxCreateException) e;
        if (e instanceof FxInvalidParameterException)
            throw (FxInvalidParameterException) e;
        throw new FxCreateException(e);
    } catch (SQLException e) {
        throw new FxCreateException(LOG, e, "ex.db.sqlError", e.getMessage());
    } finally {
        Database.closeObjects(GenericHierarchicalStorage.class, ps);
        if (ft != null)
            ft.cleanup();
    }

    final FxContent newVersion;
    try {
        sql.setLength(0);
        newVersion = contentLoad(con, pk, env, sql);
        syncFQNName(con, newVersion, pk, null);
    } catch (FxApplicationException e) {
        throw new FxCreateException(e);
    }

    if (type.isTrackHistory())
        EJBLookup.getHistoryTrackerEngine().track(type, pk, ConversionEngine.getXStream().toXML(newVersion),
                "history.content.created.version", pk.getVersion());
    return pk;
}

From source file: org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn
 * @param idPId
 * @param tenantId
 * @param claimMappings
 * @throws SQLException
 * @throws IdentityProviderManagementException
 */
private void addIdPClaimMappings(Connection conn, int idPId, int tenantId, ClaimMapping[] claimMappings)
        throws SQLException, IdentityProviderManagementException {

    Map<String, Integer> claimIdMap = new HashMap<String, Integer>();
    PreparedStatement prepStmt = null;
    ResultSet rs = null;

    try {

        if (claimMappings == null || claimMappings.length == 0) {
            return;
        }

        String sqlStmt = IdPManagementConstants.SQLQueries.GET_IDP_CLAIMS_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        prepStmt.setInt(1, idPId);
        rs = prepStmt.executeQuery();

        while (rs.next()) {
            int id = rs.getInt("ID");
            String claim = rs.getString("CLAIM");
            claimIdMap.put(claim, id);
        }

        prepStmt.clearParameters();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);

        if (claimIdMap.isEmpty()) {
            String message = "No Identity Provider claim URIs defined for tenant " + tenantId;
            throw new IdentityProviderManagementException(message);
        }

        sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_CLAIM_MAPPINGS_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (ClaimMapping mapping : claimMappings) {
            if (mapping != null && mapping.getRemoteClaim() != null
                    && claimIdMap.containsKey(mapping.getRemoteClaim().getClaimUri())) {

                int idpClaimId = claimIdMap.get(mapping.getRemoteClaim().getClaimUri());
                String localClaimURI = mapping.getLocalClaim().getClaimUri();

                prepStmt.setInt(1, idpClaimId);
                prepStmt.setInt(2, tenantId);
                prepStmt.setString(3, CharacterEncoder.getSafeText(localClaimURI));
                prepStmt.setString(4, CharacterEncoder.getSafeText(mapping.getDefaultValue()));

                if (mapping.isRequested()) {
                    prepStmt.setString(5, "1");
                } else {
                    prepStmt.setString(5, "0");
                }

                prepStmt.addBatch();
            } else {
                throw new IdentityProviderManagementException(
                        "Cannot find Identity Provider claim mapping for tenant " + tenantId);
            }
        }

        prepStmt.executeBatch();

    } finally {
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);
    }
}

From source file: org.wso2.carbon.identity.application.mgt.dao.impl.ApplicationDAOImpl.java

/**
 * @param applicationId
 * @param outBoundProvisioningConfig
 * @param connection
 * @throws SQLException
 */
private void updateOutboundProvisioningConfiguration(int applicationId,
        OutboundProvisioningConfig outBoundProvisioningConfig, Connection connection) throws SQLException {

    int tenantID = CarbonContext.getThreadLocalCarbonContext().getTenantId();
    PreparedStatement outboundProConfigPrepStmt = null;

    IdentityProvider[] proProviders = outBoundProvisioningConfig == null ? null
            : outBoundProvisioningConfig.getProvisioningIdentityProviders();

    try {
        if (proProviders == null || proProviders.length == 0) {
            // no outbound provisioning connectors defined.
            return;
        }

        outboundProConfigPrepStmt = connection.prepareStatement(ApplicationMgtDBQueries.STORE_PRO_CONNECTORS);
        // TENANT_ID, IDP_NAME, CONNECTOR_NAME, APP_ID

        for (IdentityProvider proProvider : proProviders) {
            if (proProvider != null) {
                ProvisioningConnectorConfig proConnector = proProvider.getDefaultProvisioningConnectorConfig();
                if (proConnector == null) {
                    continue;
                }

                String jitEnabled = "0";

                if (proProvider.getJustInTimeProvisioningConfig() != null
                        && proProvider.getJustInTimeProvisioningConfig().isProvisioningEnabled()) {
                    jitEnabled = "1";
                }

                String blocking = "0";

                if (proProvider.getDefaultProvisioningConnectorConfig() != null
                        && proProvider.getDefaultProvisioningConnectorConfig().isBlocking()) {
                    blocking = "1";
                }

                outboundProConfigPrepStmt.setInt(1, tenantID);
                outboundProConfigPrepStmt.setString(2,
                        CharacterEncoder.getSafeText(proProvider.getIdentityProviderName()));
                outboundProConfigPrepStmt.setString(3, CharacterEncoder.getSafeText(proConnector.getName()));
                outboundProConfigPrepStmt.setInt(4, applicationId);
                outboundProConfigPrepStmt.setString(5, CharacterEncoder.getSafeText(jitEnabled));
                outboundProConfigPrepStmt.setString(6, CharacterEncoder.getSafeText(blocking));
                outboundProConfigPrepStmt.addBatch();

            }
        }

        outboundProConfigPrepStmt.executeBatch();

    } finally {
        IdentityApplicationManagementUtil.closeStatement(outboundProConfigPrepStmt);
    }
}

From source file: gemlite.core.internal.db.DBSynchronizer.java

public boolean processEvents(List<AsyncEvent> events) {
    if (this.shutDown) {
        return false;
    }
    boolean completedSucessfully = false;
    String listOfEventsString = null;
    // The retval will be considered true only if the list was iterated
    // completely. If the List iteration was incomplete we will return
    // false so that the events are not removed during failure.
    // As for individual events, they can get exceptions due to constraint
    // violations etc but will not cause return value to be false.
    Statement stmt = null;
    PreparedStatement ps = null;
    // keep track of the previous prepared statement in case we can optimize
    // by creating a batch when the previous and current statements match
    PreparedStatement prevPS = null;
    AsyncEvent prevEvent = null;
    boolean prevPSHasBatch = false;
    Iterator<AsyncEvent> itr = events.iterator();
    AsyncEvent event = null;
    String eventString = null;
    String prevEventStr = null;
    try {
        while (!(completedSucessfully = !itr.hasNext())) {
            event = itr.next();
            Operation operation = event.getOperation();
            if (logger.isDebugEnabled()) {
                eventString = event.toString();
                if (prevEvent != null) {
                    prevEventStr = prevEvent.toString();
                }
                logger.info("DBSynchronizer::processEvents :processing PK based " + "event=" + eventString
                        + " AsyncEvent Operation=" + operation);
            } else {
                eventString = null;
                prevEventStr = null;
            }
            try {
                if (operation.isPutAll() || operation.isCreate())
                    ps = getExecutableInsertPrepStmntPKBased(event, prevPS);
                else if (operation.isUpdate())
                    ps = getExecutableUpdatePrepStmntPKBased(event, prevPS);
                else if (operation.isDestroy())
                    ps = getExecutableDeletePrepStmntPKBased(event, prevPS);
                else {
                    logger.error("DBSynchronizer::processEvents: unexpected " + "eventType " + operation
                            + " for " + event);
                    continue;
                }
            } catch (SQLException sqle) {
                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__1, null, event,
                        eventString, logger, true);
                if (handler.breakTheLoop()) {
                    break;
                }
            } catch (RegionDestroyedException rde) {
                if (logger.isInfoEnabled()) {
                    logger.info("DBSynchronizer::processEvents: WBCLEvent " + event
                            + " will  be discarded as the underlying region "
                            + "for the table has been destroyed");
                }
                continue;
            }
            if (logger.isDebugEnabled()) {
                if (eventString == null) {
                    eventString = event.toString();
                }
                logger.debug("DBSynchronizer::processEvents: Statement=" + (ps != null ? ps : stmt)
                        + " for event=" + eventString);
            }
            try {
                int num;
                if (prevPS != null && prevPS != ps) {
                    try {
                        if (prevPSHasBatch) {
                            prevPS.addBatch();
                            if (logger.isDebugEnabled()) {
                                logger.info("DBSynchronizer::processEvents executing "
                                        + "batch statement for prepared statement=" + prevPS + " for event="
                                        + prevEventStr);
                            }
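                            // the returned array has one update count per batched command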
                            final int[] res = prevPS.executeBatch();
                            num = res.length;
                            prevPSHasBatch = false;
                        } else {
                            num = prevPS.executeUpdate();
                        }
                        if (logger.isDebugEnabled()) {
                            logger.info("DBSynchronizer::processEvents total num rows " + "modified=" + num
                                    + " for prepared statement=" + prevPS + " for event=" + prevEventStr);
                        }
                        // clear event from failure map if present
                        helper.removeEventFromFailureMap(prevEvent);
                    } catch (SQLException sqle) {
                        if (prevPSHasBatch) {
                            try {
                                prevPS.clearBatch();
                            } catch (SQLException e) {
                                // ignored
                            }
                            prevPSHasBatch = false;
                        }
                        SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__3, prevPS,
                                prevEvent, prevEventStr, logger, false);
                        if (handler.breakTheLoop()) {
                            break;
                        }
                        prevPS = null;
                        prevEvent = null;
                        prevPSHasBatch = false;
                    }
                }
                // in case the previous prepared statement matches the current one,
                // it will already be added as a batch when setting the arguments
                // by AsyncEventHelper#setColumnInPrepStatement()
                else if (prevPS != null && ps != null) {
                    prevPSHasBatch = true;
                    if (logger.isDebugEnabled()) {
                        logger.info("DBSynchronizer::processEvents added new row "
                                + "as a batch for prepared statement=" + ps + " for event=" + eventString);
                    }
                }

                prevPS = ps;
                prevEvent = event;
            } catch (SQLException sqle) {
                if (prevPS != null && prevPSHasBatch) {
                    try {
                        prevPS.clearBatch();
                    } catch (SQLException e) {
                        // ignored
                    }
                }
                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__3,
                        ps != null ? ps : stmt, event, eventString, logger, false);
                if (handler.breakTheLoop()) {
                    break;
                }
            }
        } // end of while (event list processing loop)

        // now handle the last statement in the above loop since it is still
        // pending due to anticipated batching
        if (completedSucessfully) {
            try {
                if (logger.isInfoEnabled()) {
                    if (listOfEventsString == null) {
                        listOfEventsString = events.toString();
                    }
                    logger.info("DBSynchronizer::processEvents: " + "before commit of events="
                            + listOfEventsString);
                }
                int num;
                // first the case when the previous statement was a batched one,
                // so add the current one as a batch and execute
                if (prevPSHasBatch) {
                    ps.addBatch();
                    if (logger.isDebugEnabled()) {
                        logger.info("DBSynchronizer::processEvents executing batch "
                                + "statement for prepared statement=" + ps + " for event=" + eventString);
                    }
                    final int[] res = ps.executeBatch();
                    num = res.length;
                }
                // next the case of a non BULK_INSERT operation;
                // BULK_INSERT operations are always executed as a single batch
                // by themselves, so will never reach here
                else if (ps != null) {
                    num = ps.executeUpdate();
                    if (event != null) {
                        // clear event from failure map if present
                        helper.removeEventFromFailureMap(event);
                    }
                } else {
                    num = 0;
                }
                // clear event from failure map if present
                helper.removeEventFromFailureMap(event);
                if (logger.isDebugEnabled()) {
                    if (ps != null) {
                        logger.info("DBSynchronizer::processEvents num rows modified=" + num
                                + " for prepared statement=" + ps + " for event=" + eventString);
                    }
                }
                this.conn.commit();
                if (logger.isInfoEnabled()) {
                    if (listOfEventsString == null) {
                        listOfEventsString = events.toString();
                    }
                    logger.info("DBSynchronizer::processEvents: " + "committed successfully for events="
                            + listOfEventsString);
                }
            } catch (SQLException sqle) {

                if (ps != null && prevPSHasBatch) {
                    try {
                        ps.clearBatch();
                    } catch (SQLException e) {
                        // ignored
                    }
                }

                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__7,
                        ps != null ? ps : stmt, event, eventString, logger, true);
                if (handler != SqlExceptionHandler.IGNORE) {
                    completedSucessfully = false;
                }
            }
        }
    } catch (Exception e) {

        if (logger != null && logger.isErrorEnabled() && !(event != null && helper.skipFailureLogging(event))) {
            StringBuilder sb = new StringBuilder();
            if (event != null) {
                if (eventString == null) {
                    eventString = event.toString();
                }
                sb.append("[FAILED: ").append(eventString).append(" ]");
            }
            while (itr.hasNext()) {
                sb.append("[ ").append(itr.next().toString()).append(" ]");
            }
            helper.logFormat(logger, Level.SEVERE, e, DB_SYNCHRONIZER__2, sb.toString());
        }
        SqlExceptionHandler.CLEANUP.execute(this);
        completedSucessfully = false;
    }

    if (completedSucessfully) {
        // on successful completion, log any pending errors to the XML file; when
        // unsuccessful we know the batch will be retried, so don't log in that
        // case else it can get logged multiple times
        flushErrorEventsToLog();
    }

    if (logger.isDebugEnabled()) {
        logger.info("DBSynchronizer::processEvents: processed " + events.size() + " events, success="
                + completedSucessfully);
    }

    return completedSucessfully;
}

From source file: org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void updateNamespaceTranslations(List<Namespace> namespaces, String virtualWiki, int virtualWikiId,
        Connection conn) throws SQLException {
    PreparedStatement stmt = null;
    try {
        // delete any existing translation then add the new one
        stmt = conn.prepareStatement(STATEMENT_DELETE_NAMESPACE_TRANSLATIONS);
        stmt.setInt(1, virtualWikiId);
        stmt.executeUpdate();
    } finally {
        DatabaseConnection.closeStatement(stmt);
    }
    try {
        stmt = conn.prepareStatement(STATEMENT_INSERT_NAMESPACE_TRANSLATION);
        String translatedNamespace;
        for (Namespace namespace : namespaces) {
            translatedNamespace = namespace.getLabel(virtualWiki);
            if (translatedNamespace.equals(namespace.getDefaultLabel())) {
                continue;
            }
            stmt.setInt(1, namespace.getId());
            stmt.setInt(2, virtualWikiId);
            stmt.setString(3, translatedNamespace);
            stmt.addBatch();
        }
        stmt.executeBatch();
    } finally {
        DatabaseConnection.closeStatement(stmt);
    }
}

From source file: com.flexive.core.storage.genericSQL.GenericHierarchicalStorage.java

/**
 * Update all (multiple) ACL entries for a content instance
 *
 * @param con      an open and valid connection
 * @param content  the content containing the ACL'S
 * @param pk       primary key of the content
 * @param newEntry is this a new entry?
 * @throws SQLException      on errors
 * @throws FxCreateException on errors
 * @throws FxUpdateException on errors
 */
protected void updateACLEntries(Connection con, FxContent content, FxPK pk, boolean newEntry)
        throws SQLException, FxCreateException, FxUpdateException {
    PreparedStatement ps = null;
    try {
        if (content.getAclIds().isEmpty()
                || (content.getAclIds().size() == 1 && content.getAclIds().get(0) == ACL.NULL_ACL_ID)) {
            if (newEntry) {
                throw new FxCreateException(LOG, "ex.content.noACL", pk);
            } else {
                throw new FxUpdateException(LOG, "ex.content.noACL", pk);
            }
        }
        if (!newEntry) {
            // first remove all ACLs, then update them
            ps = con.prepareStatement(CONTENT_ACLS_CLEAR);
            ps.setLong(1, pk.getId());
            ps.setInt(2, pk.getVersion());
            ps.executeUpdate();
        }
        final List<Long> aclIds = content.getAclIds();
        if (aclIds.size() <= 1) {
            return; // ACL saved in main table
        }

        //insert ACLs
        ps = con.prepareStatement(CONTENT_ACL_INSERT);
        for (long aclId : aclIds) {
            ps.setLong(1, pk.getId());
            ps.setInt(2, pk.getVersion());
            ps.setLong(3, aclId);
            ps.addBatch();
        }
        ps.executeBatch();

    } finally {
        Database.closeObjects(GenericHierarchicalStorage.class, null, ps);
    }
}