Example usage for java.sql PreparedStatement executeBatch

Introduction

On this page you can find example usages of java.sql PreparedStatement.executeBatch().

Prototype

int[] executeBatch() throws SQLException;

Document

Submits a batch of commands to the database for execution and, if all commands execute successfully, returns an array of update counts.
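
For orientation, here is a minimal sketch of the addBatch()/executeBatch() pattern that the examples below all follow. The table, column, and variable names (APP_RATING, dataSource, ratings) are hypothetical placeholders, not taken from any of the projects listed.

private void insertRatings(DataSource dataSource, Map<String, Integer> ratings) throws SQLException {
    String sql = "INSERT INTO APP_RATING (CONTEXT_ID, RATING) VALUES (?, ?)";
    try (Connection connection = dataSource.getConnection();
            PreparedStatement statement = connection.prepareStatement(sql)) {
        connection.setAutoCommit(false);
        for (Map.Entry<String, Integer> entry : ratings.entrySet()) {
            statement.setString(1, entry.getKey());
            statement.setInt(2, entry.getValue());
            statement.addBatch();                      // queue one parameter set
        }
        int[] updateCounts = statement.executeBatch(); // one update count per queued command
        connection.commit();
    }
}

Several of the examples below (for instance the ArchivalDAOImpl methods) also flush the batch every batchSize rows with an intermediate executeBatch() call, which keeps the driver's pending batch small when copying large tables.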

Usage

From source file:org.wso2.carbon.appmgt.migration.client.MigrationClientImpl.java

private void migrateMobileAppRatings(Map<String, Float> appRating, String tenantDomain)
        throws APPMMigrationException {
    Connection connection = null;
    PreparedStatement statement = null;

    try {
        if (log.isDebugEnabled()) {
            log.debug("Executing: " + Constants.INSERT_SOCIAL_CACHE);
        }
        connection = getConnection(Constants.SOCIAL_DB_NAME);
        statement = connection.prepareStatement(Constants.INSERT_SOCIAL_CACHE);
        for (String contextId : appRating.keySet()) {
            statement.setString(1, contextId);
            Float rating = appRating.get(contextId);
            statement.setInt(2, rating.intValue());
            statement.setInt(3, 1);
            statement.setDouble(4, rating.doubleValue());
            statement.setString(5, tenantDomain);
            statement.addBatch();
        }
        statement.executeBatch();
    } catch (SQLException e) {
        handleException("Error occurred while migrating mobile application ratings for tenant " + tenantDomain,
                e);
    } catch (DataSourceException e) {
        handleException("Error occurred while obtaining datasource connection for " + Constants.SOCIAL_DB_NAME
                + " during mobile application ratings migration for tenant " + tenantDomain, e);
    } finally {
        closeConnection(connection);
        if (statement != null) {
            try {
                statement.close();
            } catch (SQLException e) {
                handleException(
                        "Error occurred while closing prepared statement for Mobile app Social Cache update "
                                + "for tenant " + tenantDomain,
                        e);
            }
        }
    }

}

From source file:com.alfaariss.oa.engine.user.provisioning.storage.internal.jdbc.JDBCInternalStorage.java

/**
 * Update the supplied user profile in the profile table.
 * @param oConnection the connection
 * @param user the user that must be updated
 * @throws UserException if update fails
 */
private void updateProfile(Connection oConnection, ProvisioningUser user) throws UserException {
    ResultSet oResultSet = null;
    PreparedStatement psRetrieve = null;
    PreparedStatement psInsert = null;

    try {
        Vector<String> vExistingMethodIDs = new Vector<String>();
        psRetrieve = oConnection.prepareStatement(_sProfileSelect);
        psRetrieve.setString(1, user.getID());
        oResultSet = psRetrieve.executeQuery();

        String sUserID = user.getID();
        while (oResultSet.next()) {
            sUserID = oResultSet.getString(COLUMN_PROFILE_ID);
            String sMethodID = oResultSet.getString(COLUMN_PROFILE_AUTHSPID);
            vExistingMethodIDs.add(sMethodID);
        }

        psInsert = oConnection.prepareStatement(_sProfileInsert);
        psInsert.setString(1, sUserID);
        for (String sMethod : user.getAuthenticationMethods()) {
            if (!vExistingMethodIDs.contains(sMethod)) {
                psInsert.setString(2, sMethod);
                psInsert.setBoolean(3, user.isAuthenticationRegistered(sMethod));
                psInsert.addBatch();
            }
        }
        int[] iInserts = psInsert.executeBatch();

        _logger.debug("Total number of update queries performed in batch: " + iInserts.length);
    } catch (SQLException e) {
        _logger.error("Could not update profile for user with id: " + user.getID(), e);
        throw new UserException(SystemErrors.ERROR_RESOURCE_UPDATE);
    } catch (Exception e) {
        _logger.fatal("Could not update profile", e);
        throw new UserException(SystemErrors.ERROR_INTERNAL);
    } finally {
        try {
            if (psRetrieve != null)
                psRetrieve.close();
        } catch (Exception e) {
            _logger.error("Could not close retrieve statement", e);
        }
        try {
            if (psInsert != null)
                psInsert.close();
        } catch (Exception e) {
            _logger.error("Could not close insert statement", e);
        }
    }
}

From source file:com.pactera.edg.am.metamanager.extractor.dao.helper.ExtractorLogHelper.java

public Object doInPreparedStatement(PreparedStatement ps) throws SQLException, DataAccessException {
    String taskInstanceId = AdapterExtractorContext.getInstance().getTaskInstanceId();

    for (ExtractorLog extractorLog : extractorLogs) {
        ExtractorLogLevel level = extractorLog.getLevel();
        if (!TaskInstance.STATE_IMPORTED_ERROR
                .equals(AdapterExtractorContext.getInstance().getReturnStatus())) {
            // escalate the return status when an ERROR or WARN entry is seen
            if (level == ExtractorLogLevel.ERROR) {
                AdapterExtractorContext.getInstance().setReturnStatus(TaskInstance.STATE_IMPORTED_ERROR);
            } else if (level == ExtractorLogLevel.WARN) {
                AdapterExtractorContext.getInstance().setReturnStatus(TaskInstance.STATE_IMPORTED_WARN);
            }
        }
        if (extractorLog.getMessage() == null) // skip log entries without a message
            continue;

        ps.setString(1, taskInstanceId);
        ps.setString(2, String.valueOf(level.ordinal()));
        ps.setLong(3, extractorLog.getLogTime());
        // truncate messages that exceed the configured maximum log size
        boolean msgTooLength = extractorLog.getMessage().length() > AdapterExtractorContext.getInstance()
                .getMaxLogSize();
        String message = msgTooLength
                ? extractorLog.getMessage().substring(0, AdapterExtractorContext.getInstance().getMaxLogSize())
                : extractorLog.getMessage();
        ps.setString(4, message);
        ps.setString(5, extractorLog.getLogType());

        ps.addBatch();
        ps.clearParameters();
    }

    ps.executeBatch();
    ps.clearBatch();

    return null;
}

From source file:com.mmnaseri.dragonfly.data.impl.DefaultDataAccess.java

private synchronized List<Integer> endBatch() {
    if (!isInBatchMode()) {
        throw new NoBatchOperationError();
    }
    localCounts.get().clear();
    final List<BatchOperationDescriptor> descriptors = batchOperation.get();
    batchOperation.remove();
    batch.set(false);
    final ArrayList<Integer> result = new ArrayList<Integer>();
    if (descriptors == null) {
        return result;
    }
    log.info("There are " + descriptors.size() + " operation stack(s) to perform");
    while (!descriptors.isEmpty()) {
        final BatchOperationDescriptor descriptor = descriptors.get(0);
        descriptors.remove(0);
        final int[] batchResult;
        log.info("Executing batch operation for statement: " + descriptor.getSql());
        final PreparedStatement preparedStatement = descriptor.getPreparedStatement();
        final Connection connection;
        try {
            connection = preparedStatement.getConnection();
            long time = System.nanoTime();
            batchResult = preparedStatement.executeBatch();
            connection.commit();
            log.info(batchResult.length + " operation(s) completed successfully in "
                    + (System.nanoTime() - time) + "ns");
        } catch (SQLException e) {
            throw new BatchOperationExecutionError("Failed to execute operation batch", e);
        }
        if (StatementType.getStatementType(descriptor.getSql()).equals(StatementType.INSERT)) {
            try {
                final List<Object> deferredEntities = deferredKeys.get();
                final ResultSet generatedKeys = preparedStatement.getGeneratedKeys();
                while (generatedKeys.next()) {
                    final Object entity = deferredEntities.get(0);
                    deferredEntities.remove(0);
                    final EntityHandler<Object> entityHandler = entityHandlerContext.getHandler(entity);
                    entityHandler.setKey(entity, session.getDatabaseDialect().retrieveKey(generatedKeys));
                }
            } catch (SQLException e) {
                throw new BatchOperationExecutionError("Failed to retrieve generated keys", e);
            }
        }
        for (int i : batchResult) {
            result.add(i);
        }
        cleanUpStatement(preparedStatement);
    }
    return result;
}

From source file:org.wso2.carbon.device.mgt.core.archival.dao.impl.ArchivalDAOImpl.java

@Override
public void moveOperationResponses() throws ArchivalDAOException {
    Statement stmt = null;
    PreparedStatement stmt2 = null;
    Statement stmt3 = null;
    ResultSet rs = null;
    try {
        Connection conn = ArchivalSourceDAOFactory.getConnection();
        String sql = "SELECT * FROM DM_DEVICE_OPERATION_RESPONSE WHERE OPERATION_ID IN "
                + "(SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt = this.createMemoryEfficientStatement(conn);
        rs = stmt.executeQuery(sql);

        Connection conn2 = ArchivalDestinationDAOFactory.getConnection();
        sql = "INSERT INTO DM_DEVICE_OPERATION_RESPONSE_ARCH VALUES(?, ?, ?, ?, ?,?,?)";
        stmt2 = conn2.prepareStatement(sql);

        int count = 0;
        while (rs.next()) {
            stmt2.setInt(1, rs.getInt("ID"));
            stmt2.setInt(2, rs.getInt("ENROLMENT_ID"));
            stmt2.setInt(3, rs.getInt("OPERATION_ID"));
            stmt2.setInt(4, rs.getInt("EN_OP_MAP_ID"));
            stmt2.setBytes(5, rs.getBytes("OPERATION_RESPONSE"));
            stmt2.setTimestamp(6, rs.getTimestamp("RECEIVED_TIMESTAMP"));
            stmt2.setTimestamp(7, this.currentTimestamp);
            stmt2.addBatch();

            if (++count % batchSize == 0) {
                stmt2.executeBatch();
                if (log.isDebugEnabled()) {
                    log.debug("Executing batch " + count);
                }
            }
        }
        stmt2.executeBatch();
        if (log.isDebugEnabled()) {
            log.debug(count + " [OPERATION_RESPONSES] Records copied to the archival table. Starting deletion");
        }
        //try the deletion now
        sql = "DELETE FROM DM_DEVICE_OPERATION_RESPONSE WHERE OPERATION_ID IN ("
                + " SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt3 = conn.createStatement();
        int affected = stmt3.executeUpdate(sql);
        if (log.isDebugEnabled()) {
            log.debug(affected + " Rows deleted");
        }
    } catch (SQLException e) {
        throw new ArchivalDAOException("Error occurred while moving operations ", e);
    } finally {
        ArchivalDAOUtil.cleanupResources(stmt, rs);
        ArchivalDAOUtil.cleanupResources(stmt2);
        ArchivalDAOUtil.cleanupResources(stmt3);
    }
}

From source file:org.wso2.carbon.device.mgt.core.archival.dao.impl.ArchivalDAOImpl.java

@Override
public void moveNotifications() throws ArchivalDAOException {
    Statement stmt = null;
    PreparedStatement stmt2 = null;
    Statement stmt3 = null;
    ResultSet rs = null;
    try {
        Connection conn = ArchivalSourceDAOFactory.getConnection();
        String sql = "SELECT * FROM DM_NOTIFICATION WHERE OPERATION_ID IN (SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt = this.createMemoryEfficientStatement(conn);
        rs = stmt.executeQuery(sql);

        //            ArchivalDestinationDAOFactory.beginTransaction();
        Connection conn2 = ArchivalDestinationDAOFactory.getConnection();

        sql = "INSERT INTO DM_NOTIFICATION_ARCH VALUES(?, ?, ?, ?, ?, ?, ?)";
        stmt2 = conn2.prepareStatement(sql);

        int count = 0;
        while (rs.next()) {
            stmt2.setInt(1, rs.getInt("NOTIFICATION_ID"));
            stmt2.setInt(2, rs.getInt("DEVICE_ID"));
            stmt2.setInt(3, rs.getInt("OPERATION_ID"));
            stmt2.setInt(4, rs.getInt("TENANT_ID"));
            stmt2.setString(5, rs.getString("STATUS"));
            stmt2.setString(6, rs.getString("DESCRIPTION"));
            stmt2.setTimestamp(7, this.currentTimestamp);
            stmt2.addBatch();

            if (++count % batchSize == 0) {
                stmt2.executeBatch();
            }
        }
        stmt2.executeBatch();
        //            ArchivalDestinationDAOFactory.commitTransaction();
        if (log.isDebugEnabled()) {
            log.debug(count + " [NOTIFICATIONS] Records copied to the archival table. Starting deletion");
        }
        sql = "DELETE FROM DM_NOTIFICATION" + "  WHERE OPERATION_ID IN (SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt3 = conn.createStatement();
        int affected = stmt3.executeUpdate(sql);
        if (log.isDebugEnabled()) {
            log.debug(affected + " Rows deleted");
        }
    } catch (SQLException e) {
        throw new ArchivalDAOException("Error occurred while moving notifications ", e);
    } finally {
        ArchivalDAOUtil.cleanupResources(stmt, rs);
        ArchivalDAOUtil.cleanupResources(stmt2);
        ArchivalDAOUtil.cleanupResources(stmt3);
    }
}

From source file:com.archivas.clienttools.arcutils.utils.database.ManagedJobSchema.java

public void markFilesReadyToProcess(Collection<ArcProcessFile> filesReadyToProcess) throws DatabaseException {
    synchronized (DatabaseResourceManager.DB_LOCK) {
        PooledDbConnection conn = null;
        try {
            conn = connPool.getConnection();
            conn.setAutoCommit(false);
            PreparedStatement stmt = conn.prepareStatement(MARK_FILES_READY_TO_PROCESS_STMT_NAME,
                    markFilesReadyToProcessSql);
            long sizeToAdd = 0;
            for (ArcProcessFile file : filesReadyToProcess) {
                long fileSize = file.getSourceFile().getSize();
                sizeToAdd += fileSize;
                stmt.clearParameters();
                stmt.setLong(1, fileSize);
                stmt.setLong(2, file.getDatabaseRecordId());
                stmt.addBatch();
            }
            stmt.executeBatch();

            // now update overall job stats to reflect these changes
            ManagedJobsSchema.getInstance().updateTotalFilesStats(conn, jobId, filesReadyToProcess.size(),
                    sizeToAdd);

            conn.commit();
        } catch (Exception e) {
            rollback(conn);
            throw new DatabaseException(
                    DBUtils.getErrorMessage("An error occurred marking files ready to process", e), e);
        } finally {
            connPool.returnConnection(conn);
        }
    }
}

From source file:HSqlManager.java

public static void commonClusterNewPhages(Connection connection, int bps) throws SQLException, IOException,
        ClassNotFoundException, IllegalAccessException, InstantiationException {
    Connection db = connection;
    String base = new File("").getAbsolutePath();
    db.setAutoCommit(false);
    PreparedStatement st = db.prepareStatement("UPDATE Primerdb.Primers SET CommonP = False,"
            + " UniqueP = False" + " WHERE Cluster = ? and " + "Strain = ? and Sequence = ? and Bp =?");
    Statement stat = db.createStatement();
    if (newPhages != null) {
        List<String[]> phages = newPhages;
        phages.forEach(x -> {
            try {
                CSV.writeDataCSV(x[0], Fasta.process(x[0], bps), bps);
                CSV.writeDataCSV(x[0], Fasta.processPrimers(x[0], bps), bps);
            } catch (IOException e) {
                e.printStackTrace();
            }
            Set<CharSequence> primers = new HashSet<>();
            try {
                ResultSet rs = stat.executeQuery(
                        "SELECT * FROM Primerdb.Primers WHERE" + " Sequence = '" + x[1] + "' and Clusters = '"
                                + x[2] + "' and CommonP = True" + " and Bp = " + Integer.valueOf(bps));
                while (rs.next()) {
                    primers.add((CharSequence) rs.getString("Sequence"));
                }
                primers.removeAll(CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + x[0] + ".csv"));
                if (primers.size() != 0) {
                    primers.forEach(y -> {
                        try {
                            //finish update
                            st.setString(1, x[1]);
                            st.setString(2, x[2]);
                            st.setString(3, y.toString());
                            st.setInt(4, bps);
                            st.addBatch();
                        } catch (SQLException e) {
                            e.printStackTrace();
                        }
                    });
                    st.executeBatch();
                    db.commit();
                }
            } catch (SQLException e) {
                e.printStackTrace();
            }
        });
    }
    System.out.println("Common Updated");
    st.close();
}

From source file:org.cartoweb.stats.imports.Import.java

/**
 * Imports one file into the DB.
 */
private void convertFile(final Connection con, File file) throws IOException, SQLException {
    try {
        final String query = "INSERT INTO " + tableName + " (" + MAPPER.getFieldNames() + ") VALUES ("
                + MAPPER.getInsertPlaceHolders() + ")";
        final PreparedStatement layerStmt = wantLayers
                ? con.prepareStatement("INSERT INTO " + tableName + "_layers (id, layer) VALUES (?,?)")
                : null;

        StatsReader reader = createReader(file);

        JdbcUtilities.runInsertQuery("inserting stats", query, con, reader, 500,
                new JdbcUtilities.InsertTask<StatsRecord>() {
                    private int cptLayers = 0;

                    public boolean marshall(PreparedStatement stmt, StatsRecord item) throws SQLException {
                        if (item != null) {
                            item.setId(curId++);
                            MAPPER.saveToDb(stmt, item, 1);

                            if (wantLayers && item.getLayerArray() != null) {
                                for (int i = 0; i < item.getLayerArray().size(); i++) {
                                    Integer val = item.getLayerArray().get(i);
                                    layerStmt.setLong(1, item.getId());
                                    layerStmt.setInt(2, val);
                                    layerStmt.addBatch();
                                    if ((++cptLayers % 500) == 0) {
                                        layerStmt.executeBatch();
                                    }
                                }
                            }
                            return true;
                        } else {
                            return false;
                        }
                    }
                });

        if (layerStmt != null) {
            layerStmt.executeBatch();
            layerStmt.close();
        }
    } catch (BatchUpdateException ex) {
        ex.getNextException().printStackTrace();
        throw ex;
    }
}

From source file:org.wso2.carbon.device.mgt.core.archival.dao.impl.ArchivalDAOImpl.java

@Override
public void moveEnrolmentMappings() throws ArchivalDAOException {
    Statement stmt = null;
    PreparedStatement stmt2 = null;
    Statement stmt3 = null;
    ResultSet rs = null;
    try {
        Connection conn = ArchivalSourceDAOFactory.getConnection();
        String sql = "SELECT * FROM DM_ENROLMENT_OP_MAPPING WHERE OPERATION_ID IN "
                + "(SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt = this.createMemoryEfficientStatement(conn);
        rs = stmt.executeQuery(sql);

        Connection conn2 = ArchivalDestinationDAOFactory.getConnection();

        sql = "INSERT INTO DM_ENROLMENT_OP_MAPPING_ARCH VALUES(?, ?, ?, ?, ?, ?, ?,?)";
        stmt2 = conn2.prepareStatement(sql);

        int count = 0;
        while (rs.next()) {
            stmt2.setInt(1, rs.getInt("ID"));
            stmt2.setInt(2, rs.getInt("ENROLMENT_ID"));
            stmt2.setInt(3, rs.getInt("OPERATION_ID"));
            stmt2.setString(4, rs.getString("STATUS"));
            stmt2.setString(5, rs.getString("PUSH_NOTIFICATION_STATUS"));
            stmt2.setInt(6, rs.getInt("CREATED_TIMESTAMP"));
            stmt2.setInt(7, rs.getInt("UPDATED_TIMESTAMP"));
            stmt2.setTimestamp(8, this.currentTimestamp);
            stmt2.addBatch();

            if (++count % batchSize == 0) {
                stmt2.executeBatch();
                if (log.isDebugEnabled()) {
                    log.debug("Executing batch " + count);
                }
            }
        }
        stmt2.executeBatch();
        if (log.isDebugEnabled()) {
            log.debug(
                    count + " [ENROLMENT_OP_MAPPING] Records copied to the archival table. Starting deletion");
        }
        sql = "DELETE FROM DM_ENROLMENT_OP_MAPPING WHERE OPERATION_ID IN ("
                + "SELECT ID FROM DM_ARCHIVED_OPERATIONS)";
        stmt3 = conn.createStatement();
        int affected = stmt3.executeUpdate(sql);
        if (log.isDebugEnabled()) {
            log.debug(affected + " Rows deleted");
        }
    } catch (SQLException e) {
        throw new ArchivalDAOException("Error occurred while moving enrolment mappings", e);
    } finally {
        ArchivalDAOUtil.cleanupResources(stmt, rs);
        ArchivalDAOUtil.cleanupResources(stmt2);
        ArchivalDAOUtil.cleanupResources(stmt3);
    }
}