Example usage for java.sql PreparedStatement executeBatch

Introduction

This page lists example usages of java.sql.PreparedStatement#executeBatch(), collected from open-source projects.

Prototype

int[] executeBatch() throws SQLException;

Documentation

Submits a batch of commands to the database for execution and if all commands execute successfully, returns an array of update counts.
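
Before the project examples, here is a minimal sketch of the typical addBatch()/executeBatch() cycle. The person table, the people list, and the dataSource are illustrative stand-ins, not taken from any of the source files below.

try (Connection conn = dataSource.getConnection();
        PreparedStatement ps = conn.prepareStatement("INSERT INTO person (id, name) VALUES (?, ?)")) {
    conn.setAutoCommit(false); // run the whole batch in a single transaction
    for (Person p : people) {
        ps.setLong(1, p.getId());
        ps.setString(2, p.getName());
        ps.addBatch(); // queue the command; nothing is sent to the database yet
    }
    int[] counts = ps.executeBatch(); // one update count per queued command
    conn.commit();
    // Drivers may report Statement.SUCCESS_NO_INFO where an exact count is unavailable.
}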

Usage

From source file: org.wso2.carbon.device.mgt.core.archival.dao.impl.DataDeletionDAOImpl.java

@Override
public void deleteOperationResponses() throws ArchivalDAOException {
    PreparedStatement stmt = null;
    try {
        Connection conn = ArchivalDestinationDAOFactory.getConnection();
        conn.setAutoCommit(false);
        String sql = "DELETE FROM DM_DEVICE_OPERATION_RESPONSE_ARCH "
                + "WHERE ARCHIVED_AT < DATE_SUB(NOW(), INTERVAL ? DAY)";
        stmt = conn.prepareStatement(sql);
        stmt.setInt(1, this.retentionPeriod);
        stmt.addBatch();
        stmt.executeBatch();
        conn.commit();
    } catch (SQLException e) {
        throw new ArchivalDAOException("Error occurred while deleting operation responses", e);
    } finally {
        ArchivalDAOUtil.cleanupResources(stmt);
    }
}

From source file: com.chenxin.authority.common.logback.DBAppender.java

protected void insertThrowable(IThrowableProxy tp, Connection connection, long eventId) throws SQLException {

    PreparedStatement exceptionStatement = connection.prepareStatement(insertExceptionSQL);

    short baseIndex = 0;
    while (tp != null) {
        baseIndex = buildExceptionStatement(tp, baseIndex, exceptionStatement, eventId);
        tp = tp.getCause();
    }

    if (cnxSupportsBatchUpdates) {
        exceptionStatement.executeBatch();
    }
    exceptionStatement.close();
}

From source file: mayoapp.migrations.V0300_1005__extract_image_metadata_retroactively.java

@Override
public void migrate(Connection connection) throws Exception {
    ImageProcessor imageProcessor = new DefaultImageProcessor();
    ImageDimensionsMetadataExtractor extractor = new ImageDimensionsMetadataExtractor(imageProcessor);

    StatementContext context = new StatementContextStub();
    connection.setAutoCommit(false);
    Statement countStatement = connection.createStatement();

    Integer count = 0;
    ResultSet res = countStatement
            .executeQuery("SELECT COUNT(*) FROM attachment JOIN entity on attachment.entity_id = entity.id"); //WHERE parent_id is not null
    while (res.next()) {
        count = res.getInt(1);
    }
    countStatement.close();

    Integer i = 0;

    Map<UUID, Object> toSave = new HashMap<>();

    for (int offset = 0; offset < count; offset += 50) {
        Statement queryStatement = connection.createStatement();
        ResultSet data = queryStatement.executeQuery(
                "SELECT * from attachment JOIN entity on attachment.entity_id = entity.id LIMIT 50 OFFSET "
                        + offset);

        while (data.next()) {
            LoadedAttachmentMapper mapper = new LoadedAttachmentMapper();
            LoadedAttachment attachment = mapper.map(0, data, context);

            logger.info("Processing attachment " + i + " : " + attachment.getFilename());

            Optional<Map<String, Object>> metadata = extractor.extractMetadata(attachment);

            if (metadata.isPresent()) {
                Map<String, Map<String, Object>> meta = new HashMap<>(attachment.getMetadata());
                meta.put("imageDimensions", metadata.get());
                toSave.put(attachment.getId(), meta);
            }

            i++;
        }

        queryStatement.close();
    }

    ObjectMapper mapper = new ObjectMapper();
    PreparedStatement statement = connection
            .prepareStatement("UPDATE attachment SET metadata = CAST (? AS json) WHERE entity_id =?");

    for (UUID attachment : toSave.keySet()) {
        statement.setObject(2, new PG_UUID(attachment));
        statement.setObject(1, mapper.writeValueAsString(toSave.get(attachment)));
        statement.addBatch();
        logger.info("Adding image to batch " + i + " : " + attachment.toString());
    }

    statement.executeBatch();
}

From source file: com.pactera.edg.am.metamanager.extractor.dao.helper.CreateDependencyHelper.java

public Object doInPreparedStatement(PreparedStatement ps) throws SQLException {
    String logMsg = "";
    // Queue one batch entry per dependency; full batches are flushed as they fill.

    long sysTime = AdapterExtractorContext.getInstance().getGlobalTime();
    for (Iterator<MMDDependency> dependencies = dependenciesList.iterator(); dependencies.hasNext();) {
        MMDDependency dependency = dependencies.next();
        myDoInPreparedStatement(sysTime, dependency, ps);
    }

    if (super.count % super.batchSize != 0) {
        ps.executeBatch(); // flush the final partial batch
        ps.clearBatch();
    }
    logMsg = "Saved dependencies: " + dependenciesList.size();
    log.info(logMsg);
    AdapterExtractorContext.addExtractorLog(ExtractorLogLevel.INFO, logMsg);
    return null;

}

From source file: com.pactera.edg.am.metamanager.extractor.dao.helper.DeleteDependencyHelper.java

public Object doInPreparedStatement(PreparedStatement ps) throws SQLException, DataAccessException {
    String logMsg = "";
    // Queue one delete per dependency; full batches are flushed as they fill.
    Iterator<MMDDependency> dependencies = dependenciesList.iterator();

    long sysTime = AdapterExtractorContext.getInstance().getGlobalTime();
    while (dependencies.hasNext()) {
        MMDDependency dependency = dependencies.next();
        doInPreparedStatement(sysTime, dependency, ps);
    }
    if (super.count % super.batchSize != 0) {
        ps.executeBatch(); // flush the final partial batch
        ps.clearBatch();
    }
    logMsg = "Deleted dependencies: " + dependenciesList.size();
    log.info(logMsg);
    AdapterExtractorContext.addExtractorLog(ExtractorLogLevel.INFO, logMsg);
    return null;

}

From source file: org.h2gis.drivers.osm.OSMParser.java

private int insertBatch(PreparedStatement st, int batchSize, int maxBatchSize) throws SQLException {
    if (batchSize >= maxBatchSize) {
        st.executeBatch();
        return 0;
    } else {
        return batchSize;
    }
}
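
The helper above flushes the statement only once the pending count reaches maxBatchSize, returning 0 so the caller can reset its counter; otherwise it echoes the count back. A hypothetical calling loop (the nodes iterator, nodeStatement, and bindNodeColumns helper are illustrative, not part of the parser) could drive it like this:

int batchSize = 0;
while (nodes.hasNext()) {
    bindNodeColumns(nodeStatement, nodes.next()); // hypothetical: binds the ? parameters
    nodeStatement.addBatch();
    batchSize = insertBatch(nodeStatement, ++batchSize, maxBatchSize);
}
if (batchSize > 0) {
    nodeStatement.executeBatch(); // flush the final partial batch
}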

From source file: org.schedoscope.metascope.tasks.repository.mysql.impl.TransformationEntityMySQLRepository.java

@Override
public void insertOrUpdate(Connection connection, List<TransformationEntity> tes) {
    String insertTeSql = "insert into transformation_entity ("
            + JDBCUtil.getDatabaseColumnsForClass(TransformationEntity.class) + ") values ("
            + JDBCUtil.getValuesCountForClass(TransformationEntity.class) + ") " + "on duplicate key update "
            + MySQLUtil.getOnDuplicateKeyString(TransformationEntity.class);
    PreparedStatement stmt = null;
    try {
        int batch = 0;
        connection.setAutoCommit(false);
        stmt = connection.prepareStatement(insertTeSql);
        for (TransformationEntity te : tes) {
            stmt.setString(1, te.getFqdn());
            stmt.setString(2, te.getTransformationKey());
            stmt.setString(3, te.getTransformationValue());
            stmt.setString(4, te.getFqdn());
            stmt.addBatch();
            batch++;
            if (batch % 1024 == 0) {
                stmt.executeBatch();
            }
        }
        stmt.executeBatch();
        connection.commit();
        connection.setAutoCommit(true);
    } catch (SQLException e) {
        LOG.error("Could not save transformation property", e);
    } finally {
        DbUtils.closeQuietly(stmt);
    }
}

From source file: chh.utils.db.source.common.JdbcClient.java

public void executeInsertQuery(String query, List<List<Column>> columnLists) {
    Connection connection = null;
    try {
        connection = connectionProvider.getConnection();
        boolean autoCommit = connection.getAutoCommit();
        if (autoCommit) {
            connection.setAutoCommit(false);
        }

        LOG.debug("Executing query {}", query);

        PreparedStatement preparedStatement = connection.prepareStatement(query);
        if (queryTimeoutSecs > 0) {
            preparedStatement.setQueryTimeout(queryTimeoutSecs);
        }

        for (List<Column> columnList : columnLists) {
            setPreparedStatementParams(preparedStatement, columnList);
            preparedStatement.addBatch();
        }

        int[] results = preparedStatement.executeBatch();
        // Arrays.asList(int[]) wraps the whole array as a single element, so inspect each count directly.
        boolean batchFailed = false;
        for (int result : results) {
            batchFailed |= (result == Statement.EXECUTE_FAILED);
        }
        if (batchFailed) {
            connection.rollback();
            throw new RuntimeException(
                    "failed at least one sql statement in the batch, operation rolled back.");
        } else {
            try {
                connection.commit();
            } catch (SQLException e) {
                throw new RuntimeException("Failed to commit insert query " + query, e);
            }
        }
    } catch (SQLException e) {
        throw new RuntimeException("Failed to execute insert query " + query, e);
    } finally {
        closeConnection(connection);
    }
}

From source file: org.plista.kornakapi.core.storage.MySqlMaxPersistentStorage.java

@Override
public void batchSetPreferences(Iterator<Preference> preferences, int batchSize) throws IOException {
    Connection conn = null;
    PreparedStatement stmt = null;

    try {
        conn = dataSource.getConnection();
        stmt = conn.prepareStatement(IMPORT_QUERY_MAX);

        int recordsQueued = 0;

        while (preferences.hasNext()) {
            Preference preference = preferences.next();
            stmt.setLong(1, preference.getUserID());
            stmt.setLong(2, preference.getItemID());
            stmt.setFloat(3, preference.getValue());
            stmt.addBatch();

            if (++recordsQueued % batchSize == 0) {
                stmt.executeBatch();
                log.info("imported {} records in batch", recordsQueued);
            }
        }

        if (recordsQueued % batchSize != 0) {
            stmt.executeBatch();
            log.info("imported {} records in batch. done.", recordsQueued);
        }

    } catch (SQLException e) {
        throw new IOException(e);
    } finally {
        IOUtils.quietClose(stmt);
        IOUtils.quietClose(conn);
    }
}

From source file: com.tacitknowledge.util.migration.jdbc.SqlLoadMigrationTask.java

/**
 * {@inheritDoc}
 */
public void migrate(MigrationContext ctx) throws MigrationException {
    DataSourceMigrationContext context = (DataSourceMigrationContext) ctx;

    Connection conn = null;
    PreparedStatement stmt = null;
    try {
        conn = context.getConnection();
        stmt = conn.prepareStatement(getStatmentSql());
        List rows = getData(getResourceAsStream());
        int rowCount = rows.size();
        for (int i = 0; i < rowCount; i++) {
            String data = (String) rows.get(i);
            boolean loadRowFlag = insert(data, stmt);
            if (loadRowFlag) {
                stmt.addBatch();
                if (i % 50 == 0) {
                    stmt.executeBatch();
                }
            }
        }
        stmt.executeBatch();
        context.commit();
    } catch (Exception e) {
        String message = getName() + ": Error running SQL \"" + getStatmentSql() + "\"";
        log.error(message, e);
        if (e instanceof SQLException) {
            if (((SQLException) e).getNextException() != null) {
                log.error("Chained SQL Exception", ((SQLException) e).getNextException());
            }
        }

        context.rollback();

        throw new MigrationException(message, e);
    } finally {
        SqlUtil.close(conn, stmt, null);
    }
}