Example usage for java.sql PreparedStatement clearBatch

List of usage examples for java.sql PreparedStatement clearBatch

Introduction

On this page you can find example usages of java.sql PreparedStatement.clearBatch.

Prototype

void clearBatch() throws SQLException;

Document

Empties this Statement object's current list of SQL commands.
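
The usual pattern pairs addBatch() with periodic executeBatch() calls, invoking clearBatch() before the statement is reused so already-executed commands are not replayed. A minimal sketch of that pattern, assuming an illustrative table my_table with a single name column (not part of any example below):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

public class ClearBatchSketch {
    private static final int BATCH_SIZE = 1000;

    static void insertAll(Connection conn, List<String> names) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(
                "INSERT INTO my_table (name) VALUES (?)")) {
            int batched = 0;
            for (String name : names) {
                ps.setString(1, name);
                ps.addBatch();
                if (++batched == BATCH_SIZE) {
                    ps.executeBatch();
                    ps.clearBatch(); // empty the command list before reusing the statement
                    batched = 0;
                }
            }
            if (batched > 0) {
                ps.executeBatch(); // flush the final partial chunk
            }
        }
    }
}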

Usage

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

public synchronized void flushBatch() throws SQLException {
    if (batchCommit && tripleBatch != null) {
        requireJDBCConnection();

        commitLock.lock();
        try {
            RetryExecution execution = new RetryExecution("FLUSH BATCH");
            execution.setUseSavepoint(true);
            execution.execute(connection, new RetryCommand<Void>() {
                @Override
                public Void run() throws SQLException {
                    PreparedStatement insertTriple = getPreparedStatement("store.triple");
                    insertTriple.clearParameters();
                    insertTriple.clearBatch();

                    synchronized (tripleBatch) {
                        for (KiWiTriple triple : tripleBatch) {
                            // retrieve a new triple ID and set it in the object
                            if (triple.getId() < 0) {
                                triple.setId(getNextSequence());
                            }

                            insertTriple.setLong(1, triple.getId());
                            insertTriple.setLong(2, triple.getSubject().getId());
                            insertTriple.setLong(3, triple.getPredicate().getId());
                            insertTriple.setLong(4, triple.getObject().getId());
                            if (triple.getContext() != null) {
                                insertTriple.setLong(5, triple.getContext().getId());
                            } else {
                                insertTriple.setNull(5, Types.BIGINT);
                            }
                            insertTriple.setBoolean(6, triple.isInferred());
                            insertTriple.setTimestamp(7, new Timestamp(triple.getCreated().getTime()));

                            insertTriple.addBatch();
                        }
                    }
                    insertTriple.executeBatch();

                    tripleBatch.clear();

                    return null;
                }
            });

        } finally {
            commitLock.unlock();
        }

    }

}
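
Note that the example clears both the parameters and the batch before rebuilding it: getPreparedStatement("store.triple") returns a cached statement, so commands left over from an aborted earlier flush would otherwise be replayed. A compact sketch of that defensive reset, using a hypothetical statement cache keyed by name:

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;
import java.util.Map;

class CachedStatementFlush {
    // Hypothetical cache of named prepared statements, mirroring the role of
    // KiWiConnection.getPreparedStatement("store.triple").
    static void flushIds(Map<String, PreparedStatement> cache, List<Long> ids) throws SQLException {
        PreparedStatement ps = cache.get("store.triple");
        ps.clearParameters(); // drop bindings left from a previous use
        ps.clearBatch();      // drop commands left from an aborted flush
        for (long id : ids) {
            ps.setLong(1, id);
            ps.addBatch();
        }
        ps.executeBatch();
    }
}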

From source file:com.oltpbenchmark.benchmarks.auctionmark.AuctionMarkLoader.java

/**
 * Load the tuples for the given table name
 * @param tableName the name of the table to load
 */
protected void generateTableData(String tableName) throws SQLException {
    LOG.info("*** START " + tableName);
    final AbstractTableGenerator generator = this.generators.get(tableName);
    assert (generator != null);

    // Generate Data
    final Table catalog_tbl = benchmark.getCatalog().getTable(tableName);
    assert (catalog_tbl != null) : tableName;
    final List<Object[]> volt_table = generator.getVoltTable();
    final String sql = SQLUtil.getInsertSQL(catalog_tbl);
    final PreparedStatement stmt = conn.prepareStatement(sql);
    final int types[] = catalog_tbl.getColumnTypes();

    while (generator.hasMore()) {
        generator.generateBatch();

        //            StringBuilder sb = new StringBuilder();
        //            if (tableName.equalsIgnoreCase("USER_FEEDBACK")) { //  || tableName.equalsIgnoreCase("USER_ATTRIBUTES")) {
        //                sb.append(tableName + "\n");
        //                for (int i = 0; i < volt_table.size(); i++) {
        //                    sb.append(String.format("[%03d] %s\n", i, StringUtil.abbrv(Arrays.toString(volt_table.get(i)), 100)));
        //                }
        //                LOG.info(sb.toString() + "\n");
        //            }

        for (Object row[] : volt_table) {
            for (int i = 0; i < row.length; i++) {
                if (row[i] != null) {
                    stmt.setObject(i + 1, row[i]);
                } else {
                    stmt.setNull(i + 1, types[i]);
                }
            } // FOR
            stmt.addBatch();
        } // FOR
        try {
            stmt.executeBatch();
            conn.commit();
            stmt.clearBatch();
        } catch (SQLException ex) {
            if (ex.getNextException() != null)
                ex = ex.getNextException();
            LOG.warn(tableName + " - " + ex.getMessage());
            throw ex;
            // SKIP
        }

        this.tableSizes.put(tableName, volt_table.size());

        // Release anything to the sub-generators if we have it
        // We have to do this to ensure that all of the parent tuples get
        // inserted first for foreign-key relationships
        generator.releaseHoldsToSubTableGenerators();
    } // WHILE
    stmt.close();

    // Mark as finished
    if (this.fail == false) {
        generator.markAsFinished();
        synchronized (this) {
            this.finished.add(tableName);
            LOG.info(String.format("*** FINISH %s - %d tuples - [%d / %d]", tableName,
                    this.tableSizes.get(tableName), this.finished.size(), this.generators.size()));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Remaining Tables: "
                        + CollectionUtils.subtract(this.generators.keySet(), this.finished));
            }
        } // SYNCH
    }
}
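
When the batch fails, the example above prefers ex.getNextException(), since drivers frequently attach the underlying cause as a chained exception on a BatchUpdateException. A minimal sketch of that unwrapping in isolation (stmt, conn, and LOG are assumed from the surrounding context):

try {
    stmt.executeBatch();
    conn.commit();
    stmt.clearBatch();
} catch (SQLException ex) {
    // Batch failures often chain the real cause; prefer it when present.
    SQLException cause = (ex.getNextException() != null) ? ex.getNextException() : ex;
    LOG.warn("batch failed: " + cause.getMessage());
    throw cause;
}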

From source file:com.mirth.connect.donkey.server.data.jdbc.JdbcDao.java

@Override
public void executeBatchInsertMessageContent(String channelId) {
    logger.debug(channelId + ": executing batch message content insert");

    try {
        /*
         * MIRTH-3597 Batch statements need to have their own prepared statement object due to a
         * bug? in the Oracle 12 JDBC driver. Therefore we need to have both
         * insertMessageContent and insertMessageContentBatch even though they are the same
         * query. We should also call clearBatch() after each batch execution to verify that all
         * internal buffers are cleared.
         */
        PreparedStatement statement = prepareStatement("batchInsertMessageContent", channelId);
        statement.executeBatch();
        statement.clearBatch();
    } catch (SQLException e) {
        throw new DonkeyDaoException(e);
    }
}
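
The comment above recommends calling clearBatch() after every executeBatch() rather than trusting the driver to reset its internal buffers. One way to make the pairing hard to forget is a small helper that clears in a finally block, so the statement stays reusable even when the batch fails. A minimal sketch (an assumed helper, not Mirth code):

import java.sql.PreparedStatement;
import java.sql.SQLException;

final class BatchUtil {
    // Execute the pending batch and always clear it, even on failure, so the
    // (possibly cached) statement can be safely reused afterwards.
    static int[] executeAndClear(PreparedStatement ps) throws SQLException {
        try {
            return ps.executeBatch();
        } finally {
            ps.clearBatch();
        }
    }
}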

From source file:org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn database connection
 * @param idPId identity provider ID
 * @param addedRoles roles to add for the identity provider
 * @param deletedRoles roles to delete
 * @param renamedOldRoles old names of the renamed roles
 * @param renamedNewRoles new names of the renamed roles
 * @throws SQLException
 */
private void updateIdPRoles(Connection conn, int idPId, List<String> addedRoles, List<String> deletedRoles,
        List<String> renamedOldRoles, List<String> renamedNewRoles) throws SQLException {

    PreparedStatement prepStmt = null;
    String sqlStmt = null;

    try {

        // Prepare each statement once and batch all rows; re-preparing inside
        // the loop would discard previously batched commands and leak statements.
        sqlStmt = IdPManagementConstants.SQLQueries.DELETE_IDP_ROLES_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (String deletedRole : deletedRoles) {
            prepStmt.setInt(1, idPId);
            prepStmt.setString(2, deletedRole);
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();
        prepStmt.clearParameters();
        prepStmt.clearBatch();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);

        sqlStmt = IdPManagementConstants.SQLQueries.ADD_IDP_ROLES_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (String addedRole : addedRoles) {
            prepStmt.setInt(1, idPId);
            prepStmt.setString(2, CharacterEncoder.getSafeText(addedRole));
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();
        prepStmt.clearParameters();
        prepStmt.clearBatch();
        IdentityApplicationManagementUtil.closeStatement(prepStmt);

        sqlStmt = IdPManagementConstants.SQLQueries.UPDATE_IDP_ROLES_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (int i = 0; i < renamedOldRoles.size(); i++) {
            prepStmt.setString(1, CharacterEncoder.getSafeText(renamedNewRoles.get(i)));
            prepStmt.setInt(2, idPId);
            prepStmt.setString(3, CharacterEncoder.getSafeText(renamedOldRoles.get(i)));
            prepStmt.addBatch();
        }

        prepStmt.executeBatch();

    } finally {
        if (prepStmt != null) {
            prepStmt.clearParameters();
            prepStmt.clearBatch();
            IdentityApplicationManagementUtil.closeStatement(prepStmt);
        }
    }

}

From source file:com.pactera.edg.am.metamanager.extractor.dao.helper.CreateMetadataHelper.java

public Object doInPreparedStatement(PreparedStatement ps) throws SQLException {
    // Batch-insert the metadata instances defined for this metamodel.

    Map<String, String> mAttrs = metaModel.getMAttrs();
    boolean hasChildMetaModel = metaModel.isHasChildMetaModel();

    // the metadata instances to persist
    List<AbstractMetadata> metadatas = metaModel.getMetadatas();
    int size = metadatas.size();
    String code = "";
    String metaModelCode = "";
    MMMetadata parentMetadata = null;
    String logMsg = "";
    try {
        for (int i = 0; i < size; i++) {

            MMMetadata metadata = (MMMetadata) metadatas.get(i);
            if (metadata.isHasExist()) {
                // already persisted, skip it
                continue;
            }

            parentMetadata = metadata.getParentMetadata();
            if (parentMetadata == null) {
                String error = new StringBuilder("Metadata: ").append(metadata.getCode())
                        .append(" has no parent metadata!").toString();
                log.error(error);
                throw new SQLException(error);
            }
            String metadataNamespace = genNamespace(parentMetadata, metadata.getId(), hasChildMetaModel);

            // metadata ID
            ps.setString(1, metadata.getId());
            code = metadata.getCode();
            // metadata code
            ps.setString(2, code);
            // metadata name (falls back to the code when empty)
            ps.setString(3,
                    (metadata.getName() == null || metadata.getName().equals("")) ? code : metadata.getName());
            // metamodel (classifier) ID
            metaModelCode = metaModel.getCode();
            ps.setString(4, metaModelCode);

            // namespaceID
            ps.setString(5, metadataNamespace);
            ps.setString(6, parentMetadata.getId());
            // START_TIME: global extraction start time
            ps.setLong(7, this.getGlobalTime());

            int index = setAttrs(ps, metadata, mAttrs);

            setPs(ps, metadata, index + 7);

            if (log.isDebugEnabled()) {
                log.debug(new StringBuilder().append(":parent_id:").append(parentMetadata.getId())
                        .append(",parent_code:").append(parentMetadata.getCode()).append(",instance_code:")
                        .append(code).append(",classifier_id:").append(metaModelCode).toString());
            }
            ps.addBatch();
            // clear parameter values before binding the next row
            ps.clearParameters();

            if (++super.count % super.batchSize == 0) {
                ps.executeBatch();
                ps.clearBatch();
            }
        }

        if (super.count % super.batchSize != 0) {
            ps.executeBatch();
            ps.clearBatch();

        }
    } catch (SQLException e) {
        logMsg = new StringBuilder().append("Failed to save metadata: parent_id:")
                .append(parentMetadata == null ? null : parentMetadata.getId()).append(",parent_code:")
                .append(parentMetadata == null ? null : parentMetadata.getCode())
                .append(",instance_code:").append(code).append(",classifier_id:").append(metaModelCode)
                .append(", error: ").append(e.getLocalizedMessage()).toString();
        log.error(logMsg);
        AdapterExtractorContext.addExtractorLog(ExtractorLogLevel.ERROR, logMsg);
        throw e;
    }
    return null;
    // test for callback
    // throw new SQLException();
}

From source file:org.wso2.carbon.registry.core.jdbc.dao.JDBCResourceDAO.java

public void removeProperties(ResourceDO resourceDO) throws RegistryException {
    JDBCDatabaseTransaction.ManagedRegistryConnection conn = JDBCDatabaseTransaction.getConnection();

    PreparedStatement ps1 = null, ps2 = null;

    Integer[] propertyIds = getPropertyIds(resourceDO);
    if (propertyIds == null) {
        return;
    }
    try {

        String sql = "DELETE FROM REG_RESOURCE_PROPERTY WHERE REG_PROPERTY_ID= ? " + "AND REG_TENANT_ID=?";
        ps1 = conn.prepareStatement(sql);

        sql = "DELETE FROM REG_PROPERTY WHERE REG_ID= ? AND REG_TENANT_ID=?";
        ps2 = conn.prepareStatement(sql);

        for (Integer propertyId : propertyIds) {
            ps1.setInt(1, propertyId);
            ps1.setInt(2, CurrentSession.getTenantId());
            ps2.setInt(1, propertyId);
            ps2.setInt(2, CurrentSession.getTenantId());
            ps1.addBatch();
            ps2.addBatch();
        }

        if (propertyIds.length > 0) {
            try {
                ps1.executeBatch();
                ps2.executeBatch();
            } catch (SQLException e) {
                ps1.clearBatch();
                ps2.clearBatch();
                // the exception will be handled in the next catch block
                throw e;
            }
        }
    } catch (SQLException e) {

        String msg = "Failed to remove properties from resource version " + resourceDO.getVersion() + ". "
                + e.getMessage();
        log.error(msg, e);
        throw new RegistryException(msg, e);
    } finally {
        try {
            try {
                if (ps1 != null) {
                    ps1.close();
                }
            } finally {
                if (ps2 != null) {
                    ps2.close();
                }
            }
        } catch (SQLException ex) {
            String msg = RegistryConstants.RESULT_SET_PREPARED_STATEMENT_CLOSE_ERROR;
            log.error(msg, ex);
        }
    }
}

From source file:org.wso2.carbon.repository.core.jdbc.dao.JDBCResourceDAO.java

public void removeProperties(ResourceDO resourceDO) throws RepositoryException {
    JDBCDatabaseTransaction.ManagedRegistryConnection conn = JDBCDatabaseTransaction.getConnection();

    PreparedStatement ps1 = null, ps2 = null;

    Integer[] propertyIds = getPropertyIds(resourceDO);
    if (propertyIds == null) {
        return;
    }
    try {

        String sql = "DELETE FROM REG_RESOURCE_PROPERTY WHERE REG_PROPERTY_ID= ? AND REG_TENANT_ID=?";
        ps1 = conn.prepareStatement(sql);

        sql = "DELETE FROM REG_PROPERTY WHERE REG_ID= ? AND REG_TENANT_ID=?";
        ps2 = conn.prepareStatement(sql);

        for (Integer propertyId : propertyIds) {
            ps1.setInt(1, propertyId);
            ps1.setInt(2, CurrentContext.getTenantId());
            ps2.setInt(1, propertyId);
            ps2.setInt(2, CurrentContext.getTenantId());
            ps1.addBatch();
            ps2.addBatch();
        }

        if (propertyIds.length > 0) {
            try {
                ps1.executeBatch();
                ps2.executeBatch();
            } catch (SQLException e) {
                ps1.clearBatch();
                ps2.clearBatch();
                throw e;
            }
        }
    } catch (SQLException e) {

        String msg = "Failed to remove properties from resource version " + resourceDO.getVersion() + ". "
                + e.getMessage();
        log.error(msg, e);
        throw new RepositoryDBException(msg, e);
    } finally {
        try {
            try {
                if (ps1 != null) {
                    ps1.close();
                }
            } finally {
                if (ps2 != null) {
                    ps2.close();
                }
            }
        } catch (SQLException ex) {
            String msg = InternalConstants.RESULT_SET_PREPARED_STATEMENT_CLOSE_ERROR;
            log.error(msg, ex);
        }
    }
}

From source file:org.forgerock.openidm.repo.jdbc.impl.GenericTableHandler.java

/**
 * Internal recursive function to add/write properties.
 * If batching is enabled, prepared statements are added to the batch and only executed if they hit the max limit.
 * After completion returns the number of properties that have only been added to the batch but not yet executed.
 * The caller is responsible for executing the batch on remaining items when it deems the batch complete.
 *
 * If batching is not enabled, prepared statements are immediately executed.
 *
 * @param fullId the full URI of the resource the properties belong to
 * @param dbId the generated identifier to link the properties table with the main table (foreign key)
 * @param localId the local identifier of the resource these properties belong to
 * @param value the JSON value with the properties to write
 * @param connection the DB connection
 * @param propCreateStatement the prepared properties insert statement
 * @param batchingCount the current number of statements that have been batched and not yet executed on the prepared statement
 * @return status of the current batchingCount, i.e. how many statements are not yet executed in the PreparedStatement
 * @throws SQLException if the insert failed
 */
private int writeValueProperties(String fullId, long dbId, String localId, JsonValue value,
        Connection connection, PreparedStatement propCreateStatement, int batchingCount) throws SQLException {

    for (JsonValue entry : value) {
        JsonPointer propPointer = entry.getPointer();
        if (cfg.isSearchable(propPointer)) {
            String propkey = propPointer.toString();
            if (entry.isMap() || entry.isList()) {
                batchingCount = writeValueProperties(fullId, dbId, localId, entry, connection,
                        propCreateStatement, batchingCount);
            } else {
                String propvalue = null;
                Object val = entry.getObject();
                if (val != null) {
                    propvalue = StringUtils.left(val.toString(), getSearchableLength());
                }
                String proptype = null;
                if (propvalue != null) {
                    proptype = entry.getObject().getClass().getName(); // TODO: proper type info
                }
                if (logger.isTraceEnabled()) {
                    logger.trace("Populating statement {} with params {}, {}, {}, {}, {}",
                            queryMap.get(QueryDefinition.PROPCREATEQUERYSTR), dbId, localId, propkey, proptype,
                            propvalue);
                }
                propCreateStatement.setLong(1, dbId);
                propCreateStatement.setString(2, propkey);
                propCreateStatement.setString(3, proptype);
                propCreateStatement.setString(4, propvalue);
                logger.debug("Executing: {}", propCreateStatement);
                if (enableBatching) {
                    propCreateStatement.addBatch();
                    batchingCount++;
                } else {
                    propCreateStatement.executeUpdate();
                }
                if (logger.isTraceEnabled()) {
                    logger.trace("Inserting objectproperty id: {} propkey: {} proptype: {}, propvalue: {}",
                            fullId, propkey, proptype, propvalue);
                }
            }
            if (enableBatching && batchingCount >= maxBatchSize) {
                int[] numUpdates = propCreateStatement.executeBatch();
                if (logger.isDebugEnabled()) {
                    logger.debug("Batch limit reached, update of objectproperties updated: {}",
                            Arrays.asList(numUpdates));
                }
                propCreateStatement.clearBatch();
                batchingCount = 0;
            }
        }
    }

    return batchingCount;
}
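
Per the Javadoc, the method returns how many statements are still sitting unexecuted in the batch, leaving the final flush to the caller. A hypothetical caller fragment (variable names follow the method's parameters):

int pending = writeValueProperties(fullId, dbId, localId, value, connection,
        propCreateStatement, 0);
if (enableBatching && pending > 0) {
    propCreateStatement.executeBatch(); // flush the statements left under maxBatchSize
    propCreateStatement.clearBatch();
}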

From source file:com.oltpbenchmark.benchmarks.tpcc.TPCCLoader.java

protected int loadItem(int itemKount) {

    int k = 0;
    int t = 0;
    int randPct = 0;
    int len = 0;
    int startORIGINAL = 0;

    try {
        PreparedStatement itemPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_ITEM);

        now = new java.util.Date();
        t = itemKount;
        LOG.debug("\nStart Item Load for " + t + " Items @ " + now + " ...");

        if (outputFiles == true) {
            out = new PrintWriter(new FileOutputStream(fileLocation + "item.csv"));
            LOG.debug("\nWriting Item file to: " + fileLocation + "item.csv");
        }

        Item item = new Item();

        for (int i = 1; i <= itemKount; i++) {

            item.i_id = i;
            item.i_name = TPCCUtil.randomStr(TPCCUtil.randomNumber(14, 24, gen));
            item.i_price = (float) (TPCCUtil.randomNumber(100, 10000, gen) / 100.0);

            // i_data
            randPct = TPCCUtil.randomNumber(1, 100, gen);
            len = TPCCUtil.randomNumber(26, 50, gen);
            if (randPct > 10) {
                // 90% of the time i_data is a random string of length [26 .. 50]
                item.i_data = TPCCUtil.randomStr(len);
            } else {
                // 10% of time i_data has "ORIGINAL" crammed somewhere in
                // middle
                startORIGINAL = TPCCUtil.randomNumber(2, (len - 8), gen);
                item.i_data = TPCCUtil.randomStr(startORIGINAL - 1) + "ORIGINAL"
                        + TPCCUtil.randomStr(len - startORIGINAL - 9);
            }

            item.i_im_id = TPCCUtil.randomNumber(1, 10000, gen);

            k++;

            if (outputFiles == false) {
                itemPrepStmt.setLong(1, item.i_id);
                itemPrepStmt.setString(2, item.i_name);
                itemPrepStmt.setDouble(3, item.i_price);
                itemPrepStmt.setString(4, item.i_data);
                itemPrepStmt.setLong(5, item.i_im_id);
                itemPrepStmt.addBatch();

                if ((k % configCommitCount) == 0) {
                    long tmpTime = new java.util.Date().getTime();
                    String etStr = "  Elapsed Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000)
                            + "                    ";
                    LOG.debug(etStr.substring(0, 30) + "  Writing record " + k + " of " + t);
                    lastTimeMS = tmpTime;
                    itemPrepStmt.executeBatch();
                    itemPrepStmt.clearBatch();
                    transCommit();
                }
            } else {
                String str = "";
                str = str + item.i_id + ",";
                str = str + item.i_name + ",";
                str = str + item.i_price + ",";
                str = str + item.i_data + ",";
                str = str + item.i_im_id;
                out.println(str);

                if ((k % configCommitCount) == 0) {
                    long tmpTime = new java.util.Date().getTime();
                    String etStr = "  Elapsed Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000)
                            + "                    ";
                    LOG.debug(etStr.substring(0, 30) + "  Writing record " + k + " of " + t);
                    lastTimeMS = tmpTime;
                }
            }

        } // end for

        long tmpTime = new java.util.Date().getTime();
        String etStr = "  Elapsed Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + "                    ";
        LOG.debug(etStr.substring(0, 30) + "  Writing record " + k + " of " + t);
        lastTimeMS = tmpTime;

        if (outputFiles == false) {
            itemPrepStmt.executeBatch();
        }

        transCommit();
        now = new java.util.Date();
        LOG.debug("End Item Load @  " + now);

    } catch (SQLException se) {
        LOG.debug(se.getMessage());
        transRollback();
    } catch (Exception e) {
        e.printStackTrace();
        transRollback();
    }

    return (k);

}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private boolean setStateMergeDB(Map<String, String> nameStates, int epoch, RCStates state,
        Set<NodeIDType> newActives) {
    String updateCmd = "update " + getRCRecordTable() + " set " + Columns.RC_GROUP_NAME.toString() + "=?, "
            + Columns.STRINGIFIED_RECORD.toString() + "=? where " + Columns.SERVICE_NAME.toString() + "=?";

    PreparedStatement updateRC = null;
    Connection conn = null;
    boolean updatedAll = true;
    try {
        if (conn == null) {
            conn = this.getDefaultConn();
            conn.setAutoCommit(false);
            updateRC = conn.prepareStatement(updateCmd);
        }
        assert (nameStates != null && !nameStates.isEmpty());
        String rcGroupName = this.getRCGroupName(nameStates.keySet().iterator().next());
        int i = 0;
        long t1 = System.currentTimeMillis();
        for (String name : nameStates.keySet()) {
            ReconfigurationRecord<NodeIDType> record = new ReconfigurationRecord<NodeIDType>(name, 0,
                    newActives);
            record.setState(name, 0, state/* RCStates.READY_READY */).setActivesToNewActives();
            updateRC.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                updateRC.setClob(2, new StringReader(record.toString()));
            else
                updateRC.setString(2, record.toString());
            updateRC.setString(3, name);
            updateRC.addBatch();
            i++;
            // i rows are batched at this point; flush on a full chunk or after the last row
            if (i % MAX_DB_BATCH_SIZE == 0 || i == nameStates.size()) {
                int[] executed = updateRC.executeBatch();
                conn.commit();
                updateRC.clearBatch();
                for (int j : executed)
                    updatedAll = updatedAll && (j > 0);
                if (updatedAll)
                    log.log(Level.FINE, "{0} successfully logged the last {1} messages in {2} ms",
                            new Object[] { this, executed.length, (System.currentTimeMillis() - t1) });
                t1 = System.currentTimeMillis();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting batched RC records using " + updateCmd);
        sqle.printStackTrace();
    } finally {
        cleanup(updateRC);
        cleanup(conn);
    }
    return updatedAll;
}
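
A closing note on the chunk-boundary test in loops like the one above: counting rows immediately after each addBatch() keeps the condition simple and guarantees the final partial chunk is flushed. A generic sketch (Item, items, bind(), ps, conn, and MAX_DB_BATCH_SIZE stand in for the surrounding code):

int processed = 0;
for (Item item : items) {
    bind(ps, item); // hypothetical per-row parameter binding
    ps.addBatch();
    processed++;
    // flush on a full chunk or after the last row
    if (processed % MAX_DB_BATCH_SIZE == 0 || processed == items.size()) {
        ps.executeBatch();
        conn.commit();
        ps.clearBatch();
    }
}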