Example usage for java.sql PreparedStatement addBatch

List of usage examples for java.sql PreparedStatement addBatch

Introduction

In this page you can find the example usage for java.sql PreparedStatement addBatch.

Prototype

void addBatch() throws SQLException;

Source Link

Document

Adds a set of parameters to this PreparedStatement object's batch of commands.

Usage

From source file:db.IitbInfo.java

public void populateTable(Connection connection)
        throws SQLException, UnsupportedEncodingException, FileNotFoundException, IOException {
    // Bulk-loads the LDAP dump file into the table, flushing the insert
    // batch every BATCH_MAX_SIZE rows to bound memory and round trips.
    // FIX: try-with-resources — previously neither the statement nor the
    // reader was closed when an exception was thrown mid-load.
    try (PreparedStatement preparedStatement = connection.prepareStatement(INSERT_TABLE_SQL);
            BufferedReader br = new BufferedReader(
                    new InputStreamReader(new FileInputStream(Config.LDAP_DUMP_FILE), "UTF-8"))) {
        int batchCounter = 0;
        String line;
        while ((line = br.readLine()) != null) {
            // Expected tab-separated layout:
            // id, ldapId, rollNo, employeeType, name, department
            String[] fields = line.split("\\t");
            Integer id = Integer.parseInt(fields[0]);
            String ldapId = fields[1];
            String rollNo = fields[2];
            String employeeType = fields[3];
            String name = WordUtils.capitalizeFully(fields[4]);
            String department = LdapSearch.getDepartment(fields[5]);

            preparedStatement.setInt(1, id);
            preparedStatement.setString(2, ldapId);
            preparedStatement.setString(3, rollNo);
            preparedStatement.setString(4, employeeType);
            preparedStatement.setString(5, name);
            preparedStatement.setString(6, department);
            preparedStatement.addBatch();
            ++batchCounter;
            if (batchCounter >= BATCH_MAX_SIZE) {
                batchCounter = 0;
                preparedStatement.executeBatch();
            }
        }
        // Flush any remaining rows that did not fill a complete batch.
        preparedStatement.executeBatch();
    }
}

From source file:org.obm.push.backend.obm22.mail.EmailCacheStorage.java

private void updateDbCache(BackendSession bs, Integer devId, Integer collectionId, final Set<Long> data)
        throws SQLException {
    // Diffs the cached mail UIDs stored in opush_sync_mail against 'data'
    // (the current UID set for this device/collection) and applies the
    // difference inside one user transaction: stale UIDs are deleted, new
    // UIDs are inserted, each phase as a single JDBC batch.
    Set<Long> toRemove = new HashSet<Long>();
    Set<Long> oldUids = loadMailFromDb(bs, devId, collectionId);
    if (oldUids != null) {
        // toRemove = oldUids - data: UIDs cached in DB but no longer present.
        toRemove.addAll(oldUids);
        toRemove.removeAll(data);
    }

    // toInsert = data - oldUids: UIDs present now but not yet cached.
    Set<Long> toInsert = new HashSet<Long>();
    toInsert.addAll(data);
    if (oldUids != null) {
        toInsert.removeAll(oldUids);
    }

    // No difference: skip opening a transaction entirely.
    if (toRemove.size() == 0 && toInsert.size() == 0) {
        return;
    }

    PreparedStatement del = null;
    PreparedStatement insert = null;

    if (logger.isDebugEnabled()) {
        logger.debug(debugName + " should run a batch with " + toRemove.size() + " deletions & "
                + toInsert.size() + " insertions.");
    }
    Connection con = null;
    UserTransaction ut = getUserTransaction();
    try {
        ut.begin();
        con = OBMPoolActivator.getDefault().getConnection();
        del = con.prepareStatement(
                "DELETE FROM opush_sync_mail WHERE collection_id=? AND device_id=? AND mail_uid=?");
        for (Long l : toRemove) {
            del.setInt(1, collectionId);
            del.setInt(2, devId);
            // NOTE(review): UIDs are narrowed from long to int here — assumes
            // mail UIDs fit in 32 bits; confirm against the column type.
            del.setInt(3, l.intValue());
            del.addBatch();
        }
        del.executeBatch();

        insert = con.prepareStatement(
                "INSERT INTO opush_sync_mail (collection_id, device_id, mail_uid) VALUES (?, ?, ?)");
        for (Long l : toInsert) {
            insert.setInt(1, collectionId);
            insert.setInt(2, devId);
            insert.setInt(3, l.intValue());
            insert.addBatch();
        }
        insert.executeBatch();
        ut.commit();
    } catch (Throwable e) {
        // Any failure (including from ut.begin, when con is still null) is
        // logged and the JDBC connection rolled back; the error is
        // deliberately swallowed — callers see a stale cache, not an exception.
        logger.error(e.getMessage(), e);
        JDBCUtils.rollback(con);
    } finally {
        // First call releases only the delete statement; the second releases
        // the insert statement together with the connection.
        JDBCUtils.cleanup(null, del, null);
        JDBCUtils.cleanup(con, insert, null);
    }
}

From source file:org.sonar.server.db.migrations.MassUpdater.java

public <S> void execute(InputLoader<S> inputLoader, InputConverter<S> converter) {
    // Streams rows selected by inputLoader over one connection and applies
    // converter's update SQL on a second connection, executing the batch and
    // committing every GROUP_SIZE rows so each transaction stays small.
    long count = 0;
    Connection readConnection = null;
    Statement stmt = null;
    ResultSet rs = null;
    Connection writeConnection = null;
    PreparedStatement writeStatement = null;
    try {
        writeConnection = db.getDataSource().getConnection();
        writeConnection.setAutoCommit(false);
        writeStatement = writeConnection.prepareStatement(converter.updateSql());

        readConnection = db.getDataSource().getConnection();
        readConnection.setAutoCommit(false);

        stmt = readConnection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
        // FIX: removed the unconditional setFetchSize(GROUP_SIZE) that was
        // immediately overwritten by the dialect-specific branch below.
        if (db.getDialect().getId().equals(MySql.ID)) {
            // MySQL Connector/J only streams rows (instead of buffering the
            // whole result set in memory) when fetch size is Integer.MIN_VALUE.
            stmt.setFetchSize(Integer.MIN_VALUE);
        } else {
            stmt.setFetchSize(GROUP_SIZE);
        }
        rs = stmt.executeQuery(convertSelectSql(inputLoader.selectSql(), db));

        int cursor = 0;
        while (rs.next()) {
            // converter decides per row whether an update is needed.
            if (converter.convert(inputLoader.load(rs), writeStatement)) {
                writeStatement.addBatch();
                cursor++;
                count++;
            }

            if (cursor == GROUP_SIZE) {
                writeStatement.executeBatch();
                writeConnection.commit();
                cursor = 0;
            }
        }
        // Flush the last, partially filled group.
        if (cursor > 0) {
            writeStatement.executeBatch();
            writeConnection.commit();
        }

    } catch (SQLException e) {
        // SQL errors get extra diagnostics logged before being wrapped.
        SqlUtil.log(LOGGER, e);
        throw processError(e);
    } catch (Exception e) {
        throw processError(e);
    } finally {
        DbUtils.closeQuietly(writeStatement);
        DbUtils.closeQuietly(writeConnection);
        DbUtils.closeQuietly(readConnection, stmt, rs);

        LOGGER.info("{} rows have been updated", count);
    }
}

From source file:com.nabla.wapp.server.auth.UserManager.java

public boolean updateUserRoleTable() throws SQLException {
    // Rebuilds the user_role table from scratch: wipe it, then bulk-insert
    // one row per (object, user, role) combination from loadUserRoles().
    final Map<Integer, Map<Integer, Set<Integer>>> userRoles = loadUserRoles();
    Database.executeUpdate(conn, "DELETE FROM user_role;");
    final PreparedStatement stmt = conn
            .prepareStatement("INSERT INTO user_role (object_id, user_id, role_id) VALUES(?,?,?);");
    try {
        stmt.clearBatch();
        for (final Map.Entry<Integer, Map<Integer, Set<Integer>>> objectEntry : userRoles.entrySet()) {
            final Integer objectId = objectEntry.getKey();
            // A null key means the role grant is not tied to a specific object.
            if (objectId == null) {
                stmt.setNull(1, Types.BIGINT);
            } else {
                stmt.setInt(1, objectId);
            }
            for (final Map.Entry<Integer, Set<Integer>> userEntry : objectEntry.getValue().entrySet()) {
                stmt.setInt(2, userEntry.getKey());
                for (final Integer roleId : userEntry.getValue()) {
                    stmt.setInt(3, roleId);
                    stmt.addBatch();
                }
            }
        }
        return Database.isBatchCompleted(stmt.executeBatch());
    } finally {
        stmt.close();
    }
}

From source file:com.ibm.bluemix.samples.PostgreSQLReportedErrors.java

/**
 * Insert text into PostgreSQL/*www.  j  a  va 2s  .c o m*/
 * 
 * @param files 
 *           List of Strings of text to insert
 * 
 * @return number of rows affected
 * 
 * @throws Exception TODO describe exception
 */
public int addFile(String action_number, String make, String model, String year, String compname,
        String mfr_name, String odate, String cdate, String campno, String subject, String summary)
        throws Exception {

    String sql = "INSERT INTO reportedErrors (action_number, make, model, year, compname, mfr_name, odate, cdate, campno, subject, summary) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,)";
    Connection connection = null;
    PreparedStatement statement = null;

    try {
        connection = getConnection();
        connection.setAutoCommit(false);
        statement = connection.prepareStatement(sql);

        statement.setString(1, action_number);
        statement.setString(2, make);
        statement.setString(3, model);
        statement.setString(4, year);
        statement.setString(5, compname);
        statement.setString(6, mfr_name);
        statement.setString(7, odate);
        statement.setString(8, cdate);
        statement.setString(9, campno);
        statement.setString(10, subject);
        statement.setString(11, summary);

        statement.addBatch();

        int[] rows = statement.executeBatch();
        connection.commit();

        return rows.length;

    } catch (SQLException e) {
        SQLException next = e.getNextException();

        if (next != null) {
            throw next;
        }

        throw e;
    } finally {
        if (statement != null) {
            statement.close();
        }

        if (connection != null) {
            connection.close();
        }
    }
}

From source file:org.wso2.carbon.policy.mgt.core.dao.impl.feature.GenericFeatureDAOImpl.java

@Override
public List<ProfileFeature> addProfileFeatures(List<ProfileFeature> features, int profileId)
        throws FeatureManagerDAOException {
    // Inserts one DM_PROFILE_FEATURES row per feature for the given profile,
    // scoped to the current tenant, then copies the generated "id" keys back
    // onto the ProfileFeature objects. Returns the same (mutated) list.

    Connection conn;
    PreparedStatement stmt = null;
    ResultSet generatedKeys = null;
    // Tenant id comes from the caller's thread-local Carbon context.
    int tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId();

    try {
        conn = this.getConnection();
        String query = "INSERT INTO DM_PROFILE_FEATURES (PROFILE_ID, FEATURE_CODE, DEVICE_TYPE, CONTENT, "
                + "TENANT_ID) VALUES (?, ?, ?, ?, ?)";
        // Request the generated "id" column so the keys can be read back.
        stmt = conn.prepareStatement(query, new String[] { "id" });

        for (ProfileFeature feature : features) {
            stmt.setInt(1, profileId);
            stmt.setString(2, feature.getFeatureCode());
            stmt.setString(3, feature.getDeviceType());
            // FIX: removed a commented-out H2 branch whose two arms were
            // identical — content is serialized to bytes for every driver.
            stmt.setBytes(4, PolicyManagerUtil.getBytes(feature.getContent()));
            stmt.setInt(5, tenantId);
            stmt.addBatch();
            // NOTE(review): rows are not flushed in chunks; very large
            // feature lists accumulate in a single batch.
        }
        stmt.executeBatch();

        // NOTE(review): assumes the driver returns generated keys in the
        // same order the batch rows were added — confirm for the target DB.
        generatedKeys = stmt.getGeneratedKeys();
        int i = 0;

        while (generatedKeys.next()) {
            features.get(i).setId(generatedKeys.getInt(1));
            i++;
        }

    } catch (SQLException | IOException e) {
        throw new FeatureManagerDAOException("Error occurred while adding the feature list to the database.",
                e);
    } finally {
        PolicyManagementDAOUtil.cleanupResources(stmt, generatedKeys);
    }
    return features;
}

From source file:org.wso2.carbon.policy.mgt.core.dao.impl.feature.SQLServerFeatureDAOImpl.java

@Override
public List<ProfileFeature> addProfileFeatures(List<ProfileFeature> features, int profileId)
        throws FeatureManagerDAOException {
    // SQL Server variant: inserts one DM_PROFILE_FEATURES row per feature for
    // the given profile, scoped to the current tenant, as a single JDBC batch.
    // Unlike the generic DAO, generated ids are NOT copied back (see below).

    Connection conn;
    PreparedStatement stmt = null;
    ResultSet generatedKeys = null;
    // Tenant id comes from the caller's thread-local Carbon context.
    int tenantId = PrivilegedCarbonContext.getThreadLocalCarbonContext().getTenantId();

    try {
        conn = this.getConnection();
        String query = "INSERT INTO DM_PROFILE_FEATURES (PROFILE_ID, FEATURE_CODE, DEVICE_TYPE, CONTENT, "
                + "TENANT_ID) VALUES (?, ?, ?, ?, ?)";
        // The generated "id" column is requested for parity with the generic
        // implementation, even though it is never read back here.
        stmt = conn.prepareStatement(query, new String[] { "id" });

        for (ProfileFeature feature : features) {
            stmt.setInt(1, profileId);
            stmt.setString(2, feature.getFeatureCode());
            stmt.setString(3, feature.getDeviceType());
            // Content is serialized to a byte array for the CONTENT column.
            stmt.setBytes(4, PolicyManagerUtil.getBytes(feature.getContent()));
            stmt.setInt(5, tenantId);
            stmt.addBatch();
            // NOTE(review): rows are not flushed in chunks; very large
            // feature lists accumulate in a single batch.
        }
        stmt.executeBatch();
        // Generated keys are intentionally not read back: getGeneratedKeys is
        // not supported for batch inserts on MSSQL, so the ids of 'features'
        // are left unset by this implementation. 'generatedKeys' stays null
        // and is passed to cleanupResources only for symmetry.

    } catch (SQLException | IOException e) {
        throw new FeatureManagerDAOException("Error occurred while adding the feature list to the database.",
                e);
    } finally {
        PolicyManagementDAOUtil.cleanupResources(stmt, generatedKeys);
    }
    return features;
}

From source file:com.redhat.victims.database.VictimsSqlDB.java

/**
 * Update all records in the given {@link RecordStream}. This will remove
 * the record if it already exists (matched by hash) and then add it.
 * Otherwise, it just adds it, together with its file hashes, metadata
 * key-value pairs and CVEs.
 *
 * @param connection open database connection the statements run on
 * @param recordStream stream of records to upsert
 * @return the number of records processed
 * @throws SQLException
 * @throws IOException
 */
protected int update(Connection connection, RecordStream recordStream) throws SQLException, IOException {
    int count = 0;
    PreparedStatement insertFileHash = statement(connection, Query.INSERT_FILEHASH);
    PreparedStatement insertMeta = statement(connection, Query.INSERT_META);
    PreparedStatement insertCVE = statement(connection, Query.INSERT_CVES);
    while (recordStream.hasNext()) {
        VictimsRecord vr = recordStream.getNext();
        String hash = vr.hash.trim();

        // remove if already present
        deleteRecord(connection, hash);

        // add the new/updated hash
        int id = insertRecord(connection, hash);

        // insert file hashes
        for (String filehash : vr.getHashes(Algorithms.SHA512).keySet()) {
            setObjects(insertFileHash, id, filehash.trim());
            insertFileHash.addBatch();
        }

        // insert metadata key-value pairs
        // FIX: iterate entries instead of keySet() + get(key), avoiding a
        // second hash lookup per key. Fully qualified to avoid a new import.
        for (java.util.Map.Entry<String, String> md : vr.getFlattenedMetaData().entrySet()) {
            setObjects(insertMeta, id, md.getKey(), md.getValue());
            insertMeta.addBatch();
        }

        // insert cves
        for (String cve : vr.cves) {
            setObjects(insertCVE, id, cve.trim());
            insertCVE.addBatch();
        }

        count++;
    }
    // All three batches are executed and their statements closed together.
    executeBatchAndClose(insertFileHash, insertMeta, insertCVE);
    return count;
}

From source file:org.schedoscope.metascope.tasks.repository.mysql.impl.TableEntityMySQLRepository.java

public void insertOrUpdatePartial(Connection connection, List<TableEntity> tables) {
    // Upserts the partial table metadata via MySQL's
    // "insert ... on duplicate key update", batching 1024 rows at a time
    // inside a single transaction. Errors are logged, not rethrown.
    String insertTableSql = "insert into table_entity (table_fqdn, table_name, database_name, url_path_prefix, external_table, "
            + "table_description, storage_format, materialize_once, transformation_type, status) "
            + "values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) "
            + "on duplicate key update table_fqdn=values(table_fqdn), table_name=values(table_name), database_name=values(database_name), "
            + "url_path_prefix=values(url_path_prefix),external_table=values(external_table), table_description=values(table_description), "
            + "storage_format=values(storage_format), materialize_once=values(materialize_once), transformation_type=values(transformation_type), "
            + "status=values(status)";
    PreparedStatement stmt = null;
    try {
        int batch = 0;
        connection.setAutoCommit(false);
        stmt = connection.prepareStatement(insertTableSql);
        for (TableEntity tableEntity : tables) {
            stmt.setString(1, tableEntity.getFqdn());
            stmt.setString(2, tableEntity.getTableName());
            stmt.setString(3, tableEntity.getDatabaseName());
            stmt.setString(4, tableEntity.getUrlPathPrefix());
            stmt.setBoolean(5, tableEntity.isExternalTable());
            stmt.setString(6, tableEntity.getTableDescription());
            stmt.setString(7, tableEntity.getStorageFormat());
            stmt.setBoolean(8, tableEntity.isMaterializeOnce());
            stmt.setString(9, tableEntity.getTransformationType());
            stmt.setString(10, tableEntity.getStatus());
            stmt.addBatch();
            batch++;
            // Flush every 1024 rows to keep the batch buffer bounded.
            if (batch % 1024 == 0) {
                stmt.executeBatch();
            }
        }
        stmt.executeBatch();
        connection.commit();
    } catch (SQLException e) {
        LOG.error("Could not save table", e);
    } finally {
        DbUtils.closeQuietly(stmt);
        // FIX: restore auto-commit even when the batch fails; previously an
        // exception left the connection with auto-commit disabled, which
        // leaks transactional state to the connection's next user.
        try {
            connection.setAutoCommit(true);
        } catch (SQLException e) {
            LOG.error("Could not restore auto-commit", e);
        }
    }
}

From source file:org.bidtime.dbutils.QueryRunnerEx.java

/**
 * Executes the given batch of INSERT SQL statements and hands the
 * auto-generated keys to the supplied handler.
 *
 * @param conn The connection to use for the query call.
 * @param closeConn True if the connection should be closed, false otherwise.
 * @param sql The SQL statement to execute.
 * @param rsh The handler used to create the result object from
 * the <code>ResultSet</code> of auto-generated keys.
 * @param params The query replacement parameters, one row per batch entry.
 * @return The result generated by the handler.
 * @throws SQLException If there are database or parameter errors.
 * @since 1.6
 */
private <T> T insertBatch(Connection conn, boolean closeConn, String sql, ResultSetHandler<T> rsh,
        Object[][] params) throws SQLException {
    // Guard clauses: reject null inputs up front, closing the connection
    // where the caller asked us to own it.
    if (conn == null) {
        throw new SQLException("Null connection");
    }
    if (sql == null) {
        if (closeConn) {
            close(conn);
        }
        throw new SQLException("Null SQL statement");
    }
    if (params == null) {
        if (closeConn) {
            close(conn);
        }
        throw new SQLException("Null parameters. If parameters aren't need, pass an empty array.");
    }

    final long startTime = System.currentTimeMillis();
    PreparedStatement batchStmt = null;
    T handled = null;
    try {
        batchStmt = this.prepareStatement(conn, sql, Statement.RETURN_GENERATED_KEYS);
        batchStmt.setQueryTimeout(StmtParams.getInstance().getStmtBatchTimeOut());

        // Queue one batch entry per parameter row, then run them all at once.
        for (Object[] row : params) {
            this.fillStatement(batchStmt, row);
            batchStmt.addBatch();
        }
        batchStmt.executeBatch();
        ResultSet keys = batchStmt.getGeneratedKeys();
        handled = rsh.handle(keys);
    } catch (SQLException e) {
        // rethrow decorates the exception with the SQL and parameters.
        this.rethrow(e, sql, (Object[]) params);
    } finally {
        close(batchStmt);
        if (closeConn) {
            close(conn);
        }
        if (LogInsertSql.logInfoOrDebug()) {
            LogInsertSql.logFormatTimeNow(startTime, sql, params);
        }
    }
    return handled;
}