Example usage for java.sql PreparedStatement executeBatch

List of usage examples for java.sql PreparedStatement executeBatch

Introduction

On this page you can find example usage for java.sql PreparedStatement executeBatch.

Prototype

int[] executeBatch() throws SQLException;

Source Link

Document

Submits a batch of commands to the database for execution and if all commands execute successfully, returns an array of update counts.

Usage

From source file:org.wso2.carbon.apimgt.migration.client.MigrateFrom110to200.java

/**
 * Decrypts the consumer keys stored in AM_APPLICATION_KEY_MAPPING and writes the plain-text
 * values back. Rows whose keys cannot be decrypted are deleted when
 * {@code removeDecryptionFailedKeysFromDB} is set; otherwise the whole update is skipped so
 * the table is never left partially migrated.
 *
 * @param connection open JDBC connection to the AM database; not closed by this method
 * @return {@code true} if the table was updated, {@code false} if the update was aborted
 *         because some keys could not be decrypted and removing them is not allowed
 * @throws SQLException on any database error
 */
private boolean updateAMApplicationKeyMapping(Connection connection) throws SQLException {
    log.info("Updating consumer keys in AM_APPLICATION_KEY_MAPPING");
    PreparedStatement preparedStatementUpdate = null;
    PreparedStatement preparedStatementDelete = null;
    Statement statement = null;
    ResultSet resultSet = null;
    boolean continueUpdatingDB = true;
    long totalRecords = 0;
    long decryptionFailedRecords = 0;

    try {
        String query = "SELECT APPLICATION_ID, CONSUMER_KEY, KEY_TYPE FROM AM_APPLICATION_KEY_MAPPING";
        ArrayList<AppKeyMappingTableDTO> appKeyMappingTableDTOs = new ArrayList<>();
        ArrayList<AppKeyMappingTableDTO> appKeyMappingTableDTOsFailed = new ArrayList<>();

        statement = connection.createStatement();
        statement.setFetchSize(50);
        resultSet = statement.executeQuery(query);

        while (resultSet.next()) {
            ConsumerKeyDTO consumerKeyDTO = new ConsumerKeyDTO();
            consumerKeyDTO.setEncryptedConsumerKey(resultSet.getString("CONSUMER_KEY"));

            AppKeyMappingTableDTO appKeyMappingTableDTO = new AppKeyMappingTableDTO();
            appKeyMappingTableDTO.setApplicationId(resultSet.getString("APPLICATION_ID"));
            appKeyMappingTableDTO.setConsumerKey(consumerKeyDTO);
            appKeyMappingTableDTO.setKeyType(resultSet.getString("KEY_TYPE"));
            totalRecords++;
            if (ResourceModifier.decryptConsumerKeyIfEncrypted(consumerKeyDTO)) {

                appKeyMappingTableDTOs.add(appKeyMappingTableDTO);
                log.debug("Successfully decrypted consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " as : " + consumerKeyDTO.getDecryptedConsumerKey()
                        + " in AM_APPLICATION_KEY_MAPPING table");
            } else {
                log.error("Cannot decrypt consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " in AM_APPLICATION_KEY_MAPPING table");
                decryptionFailedRecords++;
                appKeyMappingTableDTOsFailed.add(appKeyMappingTableDTO);

                //If its not allowed to remove decryption failed entries from DB, we will not continue updating 
                // tables even with successfully decrypted entries to maintain DB integrity
                if (!removeDecryptionFailedKeysFromDB) {
                    continueUpdatingDB = false;
                }
            }
        }

        if (continueUpdatingDB) {
            preparedStatementUpdate = connection
                    .prepareStatement("UPDATE AM_APPLICATION_KEY_MAPPING SET CONSUMER_KEY = ?"
                            + " WHERE APPLICATION_ID = ? AND KEY_TYPE = ?");

            for (AppKeyMappingTableDTO appKeyMappingTableDTO : appKeyMappingTableDTOs) {
                preparedStatementUpdate.setString(1,
                        appKeyMappingTableDTO.getConsumerKey().getDecryptedConsumerKey());
                preparedStatementUpdate.setString(2, appKeyMappingTableDTO.getApplicationId());
                preparedStatementUpdate.setString(3, appKeyMappingTableDTO.getKeyType());
                preparedStatementUpdate.addBatch();
            }
            preparedStatementUpdate.executeBatch();

            //deleting rows where consumer key decryption was unsuccessful
            preparedStatementDelete = connection
                    .prepareStatement("DELETE FROM AM_APPLICATION_KEY_MAPPING WHERE CONSUMER_KEY = ?");

            for (AppKeyMappingTableDTO appKeyMappingTableDTO : appKeyMappingTableDTOsFailed) {
                preparedStatementDelete.setString(1,
                        appKeyMappingTableDTO.getConsumerKey().getEncryptedConsumerKey());
                preparedStatementDelete.addBatch();
            }
            preparedStatementDelete.executeBatch();
            log.info("AM_APPLICATION_KEY_MAPPING table updated with " + decryptionFailedRecords + "/"
                    + totalRecords + " of the CONSUMER_KEY entries deleted as they cannot be decrypted");
        } else {
            log.error("AM_APPLICATION_KEY_MAPPING table not updated as " + decryptionFailedRecords + "/"
                    + totalRecords + " of the CONSUMER_KEY entries cannot be decrypted");
        }
    } finally {
        // Close the ResultSet before its owning Statement, and guard each close so that a
        // failure in one does not leak the remaining resources.
        try {
            if (resultSet != null) {
                resultSet.close();
            }
        } finally {
            try {
                if (statement != null) {
                    statement.close();
                }
            } finally {
                try {
                    if (preparedStatementUpdate != null) {
                        preparedStatementUpdate.close();
                    }
                } finally {
                    if (preparedStatementDelete != null) {
                        preparedStatementDelete.close();
                    }
                }
            }
        }
    }

    return continueUpdatingDB;
}

From source file:org.wso2.carbon.identity.application.mgt.dao.impl.ApplicationDAOImpl.java

/**
 * @param applicationId/*from   w  w w. j a  v a  2  s.  c o m*/
 * @param claimConfiguration
 * @param applicationID
 * @param connection
 * @throws SQLException
 */
private void updateClaimConfiguration(int applicationId, ClaimConfig claimConfiguration, int applicationID,
        Connection connection) throws SQLException {

    int tenantID = CarbonContext.getThreadLocalCarbonContext().getTenantId();

    PreparedStatement storeRoleClaimPrepStmt = null;
    PreparedStatement storeClaimDialectPrepStmt = null;
    PreparedStatement storeSendLocalSubIdPrepStmt = null;

    if (claimConfiguration == null) {
        return;
    }

    try {
        // update the application data
        String roleClaim = claimConfiguration.getRoleClaimURI();
        if (roleClaim != null) {
            storeRoleClaimPrepStmt = connection
                    .prepareStatement(ApplicationMgtDBQueries.UPDATE_BASIC_APPINFO_WITH_ROLE_CLAIM);
            // ROLE_CLAIM=? WHERE TENANT_ID= ? AND ID =
            storeRoleClaimPrepStmt.setString(1, CharacterEncoder.getSafeText(roleClaim));
            storeRoleClaimPrepStmt.setInt(2, tenantID);
            storeRoleClaimPrepStmt.setInt(3, applicationId);
            storeRoleClaimPrepStmt.executeUpdate();
        }

    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeRoleClaimPrepStmt);
    }

    try {
        storeClaimDialectPrepStmt = connection
                .prepareStatement(ApplicationMgtDBQueries.UPDATE_BASIC_APPINFO_WITH_CLAIM_DIALEECT);
        // IS_LOCAL_CLAIM_DIALECT=? WHERE TENANT_ID= ? AND ID = ?
        storeClaimDialectPrepStmt.setString(1, claimConfiguration.isLocalClaimDialect() ? "1" : "0");
        storeClaimDialectPrepStmt.setInt(2, tenantID);
        storeClaimDialectPrepStmt.setInt(3, applicationId);
        storeClaimDialectPrepStmt.executeUpdate();
    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeClaimDialectPrepStmt);
    }

    try {
        storeSendLocalSubIdPrepStmt = connection
                .prepareStatement(ApplicationMgtDBQueries.UPDATE_BASIC_APPINFO_WITH_SEND_LOCAL_SUB_ID);
        // IS_SEND_LOCAL_SUBJECT_ID=? WHERE TENANT_ID= ? AND ID = ?
        storeSendLocalSubIdPrepStmt.setString(1,
                claimConfiguration.isAlwaysSendMappedLocalSubjectId() ? "1" : "0");
        storeSendLocalSubIdPrepStmt.setInt(2, tenantID);
        storeSendLocalSubIdPrepStmt.setInt(3, applicationId);
        storeSendLocalSubIdPrepStmt.executeUpdate();
    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeSendLocalSubIdPrepStmt);
    }

    if (claimConfiguration.getClaimMappings() == null || claimConfiguration.getClaimMappings().length == 0) {
        return;
    }

    List<ClaimMapping> claimMappings = Arrays.asList(claimConfiguration.getClaimMappings());

    if (claimConfiguration == null || claimMappings.isEmpty()) {
        log.debug("No claim mapping found, Skipping ..");
        return;
    }

    PreparedStatement storeClaimMapPrepStmt = null;
    try {
        storeClaimMapPrepStmt = connection.prepareStatement(ApplicationMgtDBQueries.STORE_CLAIM_MAPPING);

        for (ClaimMapping mapping : claimMappings) {
            if (mapping.getLocalClaim() == null || mapping.getLocalClaim().getClaimUri() == null
                    || mapping.getRemoteClaim().getClaimUri() == null || mapping.getRemoteClaim() == null) {
                continue;
            }
            // TENANT_ID, IDP_CLAIM, SP_CLAIM, APP_ID, IS_REQUESTED
            storeClaimMapPrepStmt.setInt(1, tenantID);
            storeClaimMapPrepStmt.setString(2,
                    CharacterEncoder.getSafeText(mapping.getLocalClaim().getClaimUri()));
            storeClaimMapPrepStmt.setString(3,
                    CharacterEncoder.getSafeText(mapping.getRemoteClaim().getClaimUri()));
            storeClaimMapPrepStmt.setInt(4, applicationID);
            if (mapping.isRequested()) {
                storeClaimMapPrepStmt.setString(5, "1");
            } else {
                storeClaimMapPrepStmt.setString(5, "0");
            }
            storeClaimMapPrepStmt.setString(6, CharacterEncoder.getSafeText(mapping.getDefaultValue()));
            storeClaimMapPrepStmt.addBatch();

            if (debugMode) {
                log.debug("Storing Claim Mapping. Local Claim: " + mapping.getLocalClaim().getClaimUri()
                        + " SPClaim: " + mapping.getRemoteClaim().getClaimUri());
            }
        }

        storeClaimMapPrepStmt.executeBatch();
    } finally {
        IdentityApplicationManagementUtil.closeStatement(storeClaimMapPrepStmt);
    }
}

From source file:HSqlManager.java

/**
 * Populates Primerdb.Primers with primers common to every phage of each cluster of the
 * strain containing the "xkcd" phage, for the given primer length.
 *
 * <p>Runs with auto-commit off and HSQLDB file logging disabled for bulk-insert speed;
 * logging is re-enabled and statements are closed in a finally block even on failure.
 *
 * @param bps        primer length in base pairs; selects the per-phage CSV files to read
 * @param connection open JDBC connection to the primer database; not closed by this method
 * @throws SQLException on statement creation or logging-toggle failure (per-row insert
 *                      failures are caught and reported to stdout instead)
 * @throws IOException  if a per-phage primer CSV file cannot be read
 */
@SuppressWarnings("Duplicates")
@Deprecated
private static void mycoCommonInitialize(int bps, Connection connection) throws SQLException, IOException {
    long time = System.currentTimeMillis();
    String base = new File("").getAbsolutePath();
    CSV.makeDirectory(new File(base + "/PhageData"));
    INSTANCE = ImportPhagelist.getInstance();
    written = true;
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    PreparedStatement st = null;
    try {
        stat.execute("SET FILES LOG FALSE\n");
        st = db.prepareStatement("Insert INTO Primerdb.Primers"
                + "(Bp,Sequence, CommonP, UniqueP, Picked, Strain, Cluster)"
                + " Values(?,?,true,false,false,?,?)");
        ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
        List<String[]> phages = new ArrayList<>();
        String strain = "";
        while (call.next()) {
            String[] r = new String[3];
            r[0] = call.getString("Strain");
            r[1] = call.getString("Cluster");
            r[2] = call.getString("Name");
            phages.add(r);
            if (r[2].equals("xkcd")) {
                strain = r[0];
            }
        }
        call.close();
        String x = strain;
        Set<String> clust = phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1])
                .collect(Collectors.toSet());
        // Build cluster -> phage-name lists sequentially: the original used parallelStream()
        // with concurrent puts into a plain HashMap, which is a data race.
        Map<String, List<String>> clusters = new HashMap<>();
        clust.forEach(cluster -> clusters.put(cluster, phages.stream()
                .filter(a -> a[0].equals(x) && a[1].equals(cluster)).map(a -> a[2])
                .collect(Collectors.toList())));
        for (String z : clusters.keySet()) {
            try {
                List<String> clustphages = clusters.get(z);
                // Seed with the first phage's primer set, then intersect with every other
                // phage in the cluster so only primers common to all of them remain.
                Set<String> primers = Collections.synchronizedSet(
                        CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + clustphages.get(0) + ".csv"));
                clustphages.remove(0);
                for (String phage : clustphages) {
                    primers.retainAll(
                            CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv"));
                }
                int i = 0;
                for (String a : primers) {
                    try {
                        st.setInt(1, bps);
                        st.setString(2, a);
                        st.setString(3, x);
                        st.setString(4, z);
                        st.addBatch();
                    } catch (SQLException e) {
                        e.printStackTrace();
                        System.out.println("Error occurred at " + x + " " + z);
                    }
                    i++;
                    // Flush every 1000 rows to keep the batch (and undo log) bounded.
                    if (i == 1000) {
                        i = 0;
                        st.executeBatch();
                        db.commit();
                    }
                }
                if (i > 0) {
                    st.executeBatch();
                    db.commit();
                }
            } catch (SQLException e) {
                e.printStackTrace();
                System.out.println("Error occurred at " + x + " " + z);
            }
            System.out.println(z);
        }
        System.out.println("Common Updated");
        System.out.println((System.currentTimeMillis() - time) / Math.pow(10, 3) / 60);
    } finally {
        try {
            // Always re-enable transaction logging, even if the bulk load failed part way.
            stat.execute("SET FILES LOG TRUE\n");
        } finally {
            try {
                if (st != null) {
                    st.close();
                }
            } finally {
                stat.close();
            }
        }
    }
}

From source file:org.obm.domain.dao.CalendarDaoJdbcImpl.java

@VisibleForTesting
void insertAttendees(AccessToken editor, Event ev, Connection con, List<Attendee> attendees)
        throws SQLException {
    // Column order: event_id, entity_id, state, required, percent, user_create, is_organizer.
    String insertSql =
            "INSERT INTO EventLink (" + ATT_INSERT_FIELDS + ") VALUES (?, ?, ?, ?, ?,?,?)";
    boolean organizerSeen = false;
    PreparedStatement batchStmt = null;

    try {
        batchStmt = con.prepareStatement(insertSql);

        final int eventId = ev.getObmId().getObmId();
        final Set<Attendee> uniqueAttendees = removeDuplicateAttendee(attendees);
        Set<EntityId> insertedEntities = Sets.newHashSet();

        for (final Attendee attendee : uniqueAttendees) {
            boolean organizerFlag = Objects.firstNonNull(attendee.isOrganizer(), false);

            String email = attendee.getEmail();
            EntityId entityId = attendee.getEntityId();

            // An event may carry only one organizer; remember to clear any previous one.
            if (organizerFlag) {
                organizerSeen = true;
            }

            if (insertedEntities.contains(entityId)) {
                logger.info("Attendee {} with entity ID {} already added, skipping.", email,
                        entityId);

                continue;
            }

            batchStmt.setInt(1, eventId);
            batchStmt.setInt(2, entityId.getId());
            batchStmt.setObject(3, getJdbcObjectParticipation(attendee));
            batchStmt.setObject(4, getJdbcObjectParticipationRole(attendee));
            batchStmt.setInt(5, attendee.getPercent());
            batchStmt.setInt(6, editor.getObmId());
            batchStmt.setBoolean(7, organizerFlag);
            batchStmt.addBatch();
            logger.info("Adding " + email + (organizerFlag ? " as organizer" : " as attendee"));

            insertedEntities.add(entityId);
        }

        // Demote the event's previous organizer before the batch installs the new one.
        if (organizerSeen) {
            clearOrganizer(eventId, con);
        }

        batchStmt.executeBatch();
    } finally {
        obmHelper.cleanup(null, batchStmt, null);
    }
}

From source file:org.jamwiki.db.AnsiQueryHandler.java

/**
 * Persists the namespace id, page name, and lower-cased page name of every topic in a
 * single JDBC batch keyed by topic id.
 */
public void updateTopicNamespaces(List<Topic> topics, Connection conn) throws SQLException {
    PreparedStatement pstmt = null;
    try {
        pstmt = conn.prepareStatement(STATEMENT_UPDATE_TOPIC_NAMESPACE);
        for (Topic current : topics) {
            pstmt.setInt(1, current.getNamespace().getId());
            pstmt.setString(2, current.getPageName());
            // Lower-cased copy supports case-insensitive lookups.
            pstmt.setString(3, current.getPageName().toLowerCase());
            pstmt.setInt(4, current.getTopicId());
            pstmt.addBatch();
        }
        pstmt.executeBatch();
    } finally {
        DatabaseConnection.closeStatement(pstmt);
    }
}

From source file:com.globalsight.everest.edit.offline.upload.UploadApi.java

/**
 * Applies edits to a list of issues: the Hibernate-managed issue fields and the latest
 * history entry are updated through the session, while the matching ISSUE_HISTORY rows are
 * updated directly via batched JDBC keyed on reporter and original report date.
 *
 * <p>On failure the Hibernate transaction is rolled back and the error is logged.
 * NOTE(review): the JDBC connection is committed in the finally block even on the failure
 * path, so already-executed batches may still be written — confirm this is intended.
 *
 * @param issuesVos issue edits to apply
 */
private void editIssues(List<IssuesVo> issuesVos) {
    Session session = HibernateUtil.getSession();
    Transaction tx = HibernateUtil.getTransaction();

    Connection conn = null;
    PreparedStatement stmt = null;
    try {

        conn = DbUtil.getConnection();
        conn.setAutoCommit(false);

        String sqlUpdate = "update ISSUE_HISTORY set DESCRIPTION= ? ," + "REPORTED_DATE = ? "
                + " Where REPORTED_BY = ? and REPORTED_DATE = ?";
        stmt = conn.prepareStatement(sqlUpdate);

        int batchUpdate = 0;
        for (IssuesVo vo : issuesVos) {
            // Long.valueOf avoids the deprecated Long(long) constructor.
            IssueImpl issue = (IssueImpl) session.get(IssueImpl.class, Long.valueOf(vo.id));

            issue.setTitle(vo.title);
            issue.setPriority(vo.priority);
            issue.setStatus(vo.commentStatus);
            issue.setCategory(vo.failureType);
            issue.setOverwrite(false);
            issue.setShare(false);

            IssueHistoryImpl ih = (IssueHistoryImpl) issue.getHistory().get(0);

            // Capture the original report date BEFORE overwriting it: it is the key used to
            // locate the row in the JDBC update below.
            Date date = ih.dateReportedAsDate();
            Date currentDate = Calendar.getInstance().getTime();

            ih.dateReported(Calendar.getInstance().getTime());
            ih.setComment(vo.comment);
            session.saveOrUpdate(ih);

            stmt.setString(1, vo.comment);
            stmt.setDate(2, new java.sql.Date(currentDate.getTime()));
            stmt.setString(3, vo.userId);
            stmt.setDate(4, new java.sql.Date(date.getTime()));

            batchUpdate++;
            if (batchUpdate > DbUtil.BATCH_INSERT_UNIT) {
                stmt.executeBatch();
                batchUpdate = 0;
            }
        }

        // Flush any remaining rows from a partially filled batch.
        if (batchUpdate > 0) {
            stmt.executeBatch();
        }

        HibernateUtil.commit(tx);
    } catch (Exception ex) {
        HibernateUtil.rollback(tx);
        CATEGORY.error("Failed to edit issue.", ex);
    } finally {
        // session.close();
        DbUtil.silentClose(stmt);
        if (conn != null) {
            try {
                conn.commit();
            } catch (SQLException e) {
                CATEGORY.error(e);
            }

            DbUtil.silentReturnConnection(conn);
        }
    }
}

From source file:fr.aliacom.obm.common.calendar.CalendarDaoJdbcImpl.java

@VisibleForTesting
void insertAttendees(AccessToken editor, Event ev, Connection con, List<Attendee> attendees)
        throws SQLException {
    // Column order: event_id, entity_id, state, required, percent, user_create, is_organizer.
    String insertSql =
            "INSERT INTO EventLink (" + ATT_INSERT_FIELDS + ") VALUES (?, ?, ?, ?, ?,?,?)";
    boolean organizerEncountered = false;
    PreparedStatement insertStmt = null;

    try {
        insertStmt = con.prepareStatement(insertSql);

        final int eventId = ev.getObmId().getObmId();
        final Set<Attendee> dedupedAttendees = removeDuplicateAttendee(attendees);
        Set<EntityId> seenEntities = Sets.newHashSet();

        for (final Attendee attendee : dedupedAttendees) {
            boolean organizer = Objects.firstNonNull(attendee.isOrganizer(), false);

            String email = attendee.getEmail();
            EntityId entity = attendee.getEntityId();

            // Only one organizer is permitted per event; remember to clear the old one.
            if (organizer) {
                organizerEncountered = true;
            }

            if (seenEntities.contains(entity)) {
                logger.info("Attendee {} with entity ID {} already added, skipping.", email,
                        entity);

                continue;
            }

            insertStmt.setInt(1, eventId);
            insertStmt.setInt(2, entity.getId());
            insertStmt.setObject(3, getJdbcObjectParticipation(attendee));
            insertStmt.setObject(4, getJdbcObjectParticipationRole(attendee));
            insertStmt.setInt(5, attendee.getPercent());
            insertStmt.setInt(6, editor.getObmId());
            insertStmt.setBoolean(7, organizer);
            insertStmt.addBatch();
            logger.info(LogUtils.prefix(editor) + "Adding " + email
                    + (organizer ? " as organizer" : " as attendee"));

            seenEntities.add(entity);
        }

        // Demote the event's previous organizer before the batch installs the new one.
        if (organizerEncountered) {
            clearOrganizer(eventId, con);
        }

        insertStmt.executeBatch();
    } finally {
        obmHelper.cleanup(null, insertStmt, null);
    }
}

From source file:com.flexive.ejb.beans.structure.AssignmentEngineBean.java

/**
 * Replaces the stored structure options for a property or assignment: existing rows for the
 * given id (and assignment, if any) are deleted first, then the supplied options are
 * inserted in one batch.
 */
private void storeOptions(Connection con, String table, String primaryColumn, long primaryId, Long assignmentId,
        List<FxStructureOption> options) throws SQLException, FxInvalidParameterException {
    PreparedStatement stmt = null;
    try {
        // Wipe any previously stored options for this entry first.
        if (assignmentId == null) {
            stmt = con.prepareStatement(
                    "DELETE FROM " + table + " WHERE " + primaryColumn + "=? AND ASSID IS NULL");
        } else {
            stmt = con.prepareStatement("DELETE FROM " + table + " WHERE " + primaryColumn + "=? AND ASSID=?");
            stmt.setLong(2, assignmentId);
        }
        stmt.setLong(1, primaryId);
        stmt.executeUpdate();
        if (options == null || options.size() == 0) {
            return;
        }
        stmt.close();
        stmt = con.prepareStatement("INSERT INTO " + table + " (" + primaryColumn
                + ",ASSID,OPTKEY,MAYOVERRIDE,ISINHERITED,OPTVALUE)VALUES(?,?,?,?,?,?)");
        for (FxStructureOption opt : options) {
            stmt.setLong(1, primaryId);
            if (assignmentId == null) {
                stmt.setNull(2, java.sql.Types.NUMERIC);
            } else {
                stmt.setLong(2, assignmentId);
            }
            // An empty key is invalid; abort before the batch is executed.
            if (StringUtils.isEmpty(opt.getKey())) {
                throw new FxInvalidParameterException("key", "ex.structure.option.key.empty",
                        opt.getValue());
            }
            stmt.setString(3, opt.getKey());
            stmt.setBoolean(4, opt.isOverridable());
            stmt.setBoolean(5, opt.getIsInherited());
            stmt.setString(6, opt.getValue());
            stmt.addBatch();
        }
        stmt.executeBatch();
    } finally {
        if (stmt != null) {
            stmt.close();
        }
    }
}

From source file:com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema.java

/**
 * Updates the per-file rows for all of the files passed in, and then updates the overall job
 * statistics to reflect how many successes and failures there were. Both updates happen in
 * one transaction, which is rolled back on any error.
 *
 * @param files
 *            file stats to record; each must have status SUCCEEDED or FAILED
 * @throws DatabaseException
 *             if the batch update or the job-stats update fails (the transaction is rolled
 *             back first)
 */
@SuppressWarnings({ "ThrowableResultOfMethodCallIgnored" })
public void markFilesProcessed(Collection<FileStats> files) throws DatabaseException {
    // All database access is serialized through this global lock.
    synchronized (DatabaseResourceManager.DB_LOCK) {
        PooledDbConnection conn = null;
        try {

            // NOTE(review): prepareStatement takes a statement name — presumably the pooled
            // connection caches it, which would be why it is not closed here; confirm.
            conn = connPool.getConnection();
            PreparedStatement stmt = conn.prepareStatement(MARK_FILE_PROCESSED_STMT_NAME, markFileProcessedSql);
            conn.setAutoCommit(false);
            // Aggregate deltas applied to the job-level stats in a single update below.
            long totalCnt = 0;
            long totalSize = 0;
            long successCnt = 0;
            long successSize = 0;
            long failCnt = 0;
            long failSize = 0;
            long failDirCnt = 0;
            for (FileStats file : files) {
                FileStatus status = file.getStatus();
                switch (status) {
                case SUCCEEDED: {
                    if (file.includeInStats()) {
                        successCnt++;
                        successSize += file.getSize();
                    }
                    break;
                }
                case FAILED: {
                    if (file.includeInStats()) {
                        failCnt++;
                        failSize += file.getSize();
                        if (file.failedDuringFind()) {
                            // in this case we need to increment the total counts as well
                            totalCnt++;
                            totalSize += file.getSize(); // probably zero since we didn't
                                                         // get that far
                        }
                    }
                    if (file.getArcProcssFile().isDirectory()) {
                        failDirCnt++;
                    }
                    break;
                }
                default:
                    // Only SUCCEEDED/FAILED are expected once a file is "processed".
                    throw new RuntimeException("Unsupported file status: " + status);
                }
                stmt.clearParameters();
                stmt.setInt(1, file.getStatus().ordinal());
                stmt.setInt(2, file.getRetries());
                // Start/end times are stored as epoch millis; NULL when never started/ended.
                Date startTime = file.getStartTime();
                if (startTime != null) {
                    stmt.setLong(3, startTime.getTime());
                } else {
                    stmt.setNull(3, java.sql.Types.BIGINT);
                }
                Date endTime = file.getEndTime();
                if (endTime != null) {
                    stmt.setLong(4, endTime.getTime());
                } else {
                    stmt.setNull(4, java.sql.Types.BIGINT);
                }
                stmt.setLong(5, file.getRunTimeMs());

                if (file.getException() == null) {
                    stmt.setNull(6, java.sql.Types.VARCHAR);
                } else {
                    stmt.setString(6, file.getException().getMessage());
                }

                if (file.getStatusCode() == null) {
                    stmt.setNull(7, java.sql.Types.INTEGER);
                } else {
                    stmt.setInt(7, file.getStatusCode());
                }
                stmt.setLong(8, file.getDatabaseRecordId());

                stmt.addBatch();
            }

            // execute the batch statment to update all of the file rows
            stmt.executeBatch();

            // now update overall job stats to reflect these changes
            ManagedJobsSchema.getInstance().updateProcessedFilesStats(conn, jobId, totalCnt, totalSize,
                    successCnt, successSize, failCnt, failSize, failDirCnt);
            conn.commit();

        } catch (Exception e) {
            rollback(conn);
            throw new DatabaseException(DBUtils.getErrorMessage(
                    "An error occurred updating file stats on table " + qualifiedFilesTableName, e), e);
        } finally {
            connPool.returnConnection(conn);
        }
    }
}

From source file:org.wso2.carbon.apimgt.migration.client.MigrateFrom19to110.java

/**
 * Decrypts the encrypted CONSUMER_KEY values in IDN_OAUTH_CONSUMER_APPS and re-keys the
 * related IDN tables to use the plain-text keys.
 * <p>
 * Steps, in order:
 * <ol>
 *   <li>Read every row of IDN_OAUTH_CONSUMER_APPS and attempt to decrypt its consumer key.</li>
 *   <li>Insert new rows carrying the decrypted keys into IDN_OAUTH_CONSUMER_APPS.</li>
 *   <li>Re-point IDN_OAUTH2_ACCESS_TOKEN rows at the decrypted keys.</li>
 *   <li>Delete the now-redundant encrypted-key rows from IDN_OAUTH_CONSUMER_APPS.</li>
 *   <li>Delete rows whose keys could not be decrypted from both tables.</li>
 * </ol>
 * If any key fails to decrypt and {@code removeDecryptionFailedKeysFromDB} is false, no table
 * is modified at all (to preserve referential integrity) and {@code false} is returned.
 *
 * @param connection open JDBC connection to the identity database; not closed by this method
 * @return {@code true} if the tables were updated, {@code false} if the update was skipped
 *         because of undecryptable keys
 * @throws SQLException on any database error
 */
private boolean updateIdnTableConsumerKeys(Connection connection) throws SQLException {
    log.info("Updating consumer keys in IDN Tables");

    boolean continueUpdatingDB = true;
    ArrayList<ConsumerAppsTableDTO> consumerAppsTableDTOs = new ArrayList<>();
    ArrayList<ConsumerAppsTableDTO> consumerAppsTableDTOsFailed = new ArrayList<>();

    // Phase 1: load every consumer-app row and partition by decryption success.
    // try-with-resources guarantees the statement/result set close even on error
    // (the original sequential close() calls in finally could leak later resources
    // if an earlier close() threw).
    try (Statement consumerAppsLookup = connection.createStatement()) {
        consumerAppsLookup.setFetchSize(50); // stream rows instead of loading the whole table
        try (ResultSet consumerAppsResultSet = consumerAppsLookup
                .executeQuery("SELECT * FROM IDN_OAUTH_CONSUMER_APPS")) {
            while (consumerAppsResultSet.next()) {
                ConsumerKeyDTO consumerKeyDTO = new ConsumerKeyDTO();
                consumerKeyDTO.setEncryptedConsumerKey(consumerAppsResultSet.getString("CONSUMER_KEY"));

                ConsumerAppsTableDTO consumerAppsTableDTO = new ConsumerAppsTableDTO();
                consumerAppsTableDTO.setConsumerKey(consumerKeyDTO);
                consumerAppsTableDTO.setConsumerSecret(consumerAppsResultSet.getString("CONSUMER_SECRET"));
                consumerAppsTableDTO.setUsername(consumerAppsResultSet.getString("USERNAME"));
                consumerAppsTableDTO.setTenantID(consumerAppsResultSet.getInt("TENANT_ID"));
                consumerAppsTableDTO.setAppName(consumerAppsResultSet.getString("APP_NAME"));
                consumerAppsTableDTO.setOauthVersion(consumerAppsResultSet.getString("OAUTH_VERSION"));
                consumerAppsTableDTO.setCallbackURL(consumerAppsResultSet.getString("CALLBACK_URL"));
                consumerAppsTableDTO.setGrantTypes(consumerAppsResultSet.getString("GRANT_TYPES"));
                if (ResourceModifier.decryptConsumerKeyIfEncrypted(consumerKeyDTO)) {
                    consumerAppsTableDTOs.add(consumerAppsTableDTO);
                    if (log.isDebugEnabled()) {
                        log.debug("Successfully decrypted consumer key : "
                                + consumerKeyDTO.getEncryptedConsumerKey() + " in IDN_OAUTH_CONSUMER_APPS table");
                    }
                } else {
                    consumerAppsTableDTOsFailed.add(consumerAppsTableDTO);
                    log.error("Cannot decrypt consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                            + " in IDN_OAUTH_CONSUMER_APPS table");
                    //If its not allowed to remove decryption failed entries from DB, we will not continue updating
                    // tables even with successfully decrypted entries to maintain DB integrity
                    if (!removeDecryptionFailedKeysFromDB) {
                        continueUpdatingDB = false;
                    }
                }
            }
        }
    }

    if (continueUpdatingDB) {
        // Phase 2: add new entries for decrypted consumer keys into IDN_OAUTH_CONSUMER_APPS
        try (PreparedStatement consumerAppsInsert = connection
                .prepareStatement("INSERT INTO IDN_OAUTH_CONSUMER_APPS (CONSUMER_KEY, "
                        + "CONSUMER_SECRET, USERNAME, TENANT_ID, APP_NAME, OAUTH_VERSION, "
                        + "CALLBACK_URL, GRANT_TYPES) VALUES (?, ?, ?, ?, ?, ?, ?, ?)")) {
            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOs) {
                updateIdnConsumerApps(consumerAppsInsert, consumerAppsTableDTO);
            }
            consumerAppsInsert.executeBatch();
        }
        log.info("Inserted entries in IDN_OAUTH_CONSUMER_APPS");

        // Phase 3: update IDN_OAUTH2_ACCESS_TOKEN foreign key reference to CONSUMER_KEY
        try (PreparedStatement accessTokenUpdate = connection.prepareStatement(
                "UPDATE IDN_OAUTH2_ACCESS_TOKEN SET CONSUMER_KEY = ? " + "WHERE CONSUMER_KEY = ?")) {
            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOs) {
                updateIdnAccessToken(accessTokenUpdate, consumerAppsTableDTO.getConsumerKey());
            }
            accessTokenUpdate.executeBatch();
        }
        log.info("Updated entries in IDN_OAUTH2_ACCESS_TOKEN");

        // Phase 4: remove the redundant encrypted-key rows from IDN_OAUTH_CONSUMER_APPS
        try (PreparedStatement consumerAppsDelete = connection
                .prepareStatement("DELETE FROM IDN_OAUTH_CONSUMER_APPS WHERE " + "CONSUMER_KEY = ?")) {
            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOs) {
                deleteIdnConsumerApps(consumerAppsDelete, consumerAppsTableDTO.getConsumerKey());
            }
            consumerAppsDelete.executeBatch();
        }
        log.info("Removed redundant entries in IDN_OAUTH_CONSUMER_APPS");

        // Phase 5a: delete rows whose consumer key decryption failed from IDN_OAUTH_CONSUMER_APPS
        try (PreparedStatement consumerAppsDeleteFailedRecords = connection
                .prepareStatement("DELETE FROM IDN_OAUTH_CONSUMER_APPS WHERE " + "CONSUMER_KEY = ?")) {
            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOsFailed) {
                deleteIdnConsumerApps(consumerAppsDeleteFailedRecords, consumerAppsTableDTO.getConsumerKey());
            }
            consumerAppsDeleteFailedRecords.executeBatch();
        }
        log.info("Removed decryption failed entries in IDN_OAUTH_CONSUMER_APPS");

        // Phase 5b: delete rows whose consumer key decryption failed from IDN_OAUTH2_ACCESS_TOKEN.
        // BUG FIX: the original code passed consumerAppsDeleteFailedRecords (already executed
        // and targeting IDN_OAUTH_CONSUMER_APPS) into deleteIdnAccessToken here, so
        // accessTokenDelete.executeBatch() ran on an EMPTY batch and the failed access-token
        // rows were never deleted. The batch must be accumulated on accessTokenDelete itself.
        try (PreparedStatement accessTokenDelete = connection
                .prepareStatement("DELETE FROM IDN_OAUTH2_ACCESS_TOKEN " + "WHERE CONSUMER_KEY = ?")) {
            for (ConsumerAppsTableDTO consumerAppsTableDTO : consumerAppsTableDTOsFailed) {
                deleteIdnAccessToken(accessTokenDelete, consumerAppsTableDTO.getConsumerKey());
            }
            accessTokenDelete.executeBatch();
        }
        log.info("Removed decryption failed entries in IDN_OAUTH2_ACCESS_TOKEN");
    }

    return continueUpdatingDB;
}