Example usage for java.sql PreparedStatement addBatch

List of usage examples for java.sql PreparedStatement addBatch

Introduction

On this page you can find example usage for java.sql PreparedStatement addBatch.

Prototype

void addBatch() throws SQLException;

Source Link

Document

Adds a set of parameters to this PreparedStatement object's batch of commands.

Usage

From source file:com.drevelopment.couponcodes.bukkit.coupon.BukkitCouponHandler.java

@Override
public boolean addCouponToDatabase(Coupon coupon) {
    // Never insert a second row for a coupon that already exists.
    if (couponExists(coupon))
        return false;
    try {
        Connection con = databaseHandler.getConnection();
        PreparedStatement p = prepareCouponInsert(con, coupon);
        if (p == null) {
            // Unknown Coupon subtype: no column mapping exists for it.
            // (Previously this fell through to a NullPointerException.)
            return false;
        }
        try {
            p.addBatch();
            // Run the batch inside a transaction; restore auto-commit even if
            // executeBatch() throws, so the shared connection is left usable.
            con.setAutoCommit(false);
            try {
                p.executeBatch();
            } finally {
                con.setAutoCommit(true);
            }
            return true;
        } finally {
            p.close(); // previously leaked
        }
    } catch (SQLException e) {
        // Contract: persistence failures are reported via the boolean return.
        return false;
    }
}

/**
 * Builds an INSERT statement for the concrete coupon subtype with all
 * parameters bound, or returns {@code null} when the subtype is unknown.
 * Columns 1-4 (name, ctype, usetimes, usedplayers) are common to all types;
 * the remaining columns depend on the subtype.
 */
private PreparedStatement prepareCouponInsert(Connection con, Coupon coupon) throws SQLException {
    PreparedStatement p = null;
    if (coupon instanceof ItemCoupon) {
        ItemCoupon c = (ItemCoupon) coupon;
        p = con.prepareStatement(
                "INSERT INTO couponcodes (name, ctype, usetimes, usedplayers, ids, timeuse) "
                        + "VALUES (?, ?, ?, ?, ?, ?)");
        p.setString(1, c.getName());
        p.setString(2, c.getType());
        p.setInt(3, c.getUseTimes());
        p.setString(4, playerHashToString(c.getUsedPlayers()));
        p.setString(5, itemHashToString(c.getIDs()));
        p.setInt(6, c.getTime());
    } else if (coupon instanceof EconomyCoupon) {
        EconomyCoupon c = (EconomyCoupon) coupon;
        p = con.prepareStatement(
                "INSERT INTO couponcodes (name, ctype, usetimes, usedplayers, money, timeuse) "
                        + "VALUES (?, ?, ?, ?, ?, ?)");
        p.setString(1, c.getName());
        p.setString(2, c.getType());
        p.setInt(3, c.getUseTimes());
        p.setString(4, playerHashToString(c.getUsedPlayers()));
        p.setInt(5, c.getMoney());
        p.setInt(6, c.getTime());
    } else if (coupon instanceof RankCoupon) {
        RankCoupon c = (RankCoupon) coupon;
        p = con.prepareStatement(
                "INSERT INTO couponcodes (name, ctype, usetimes, usedplayers, groupname, timeuse) "
                        + "VALUES (?, ?, ?, ?, ?, ?)");
        p.setString(1, c.getName());
        p.setString(2, c.getType());
        p.setInt(3, c.getUseTimes());
        p.setString(4, playerHashToString(c.getUsedPlayers()));
        p.setString(5, c.getGroup());
        p.setInt(6, c.getTime());
    } else if (coupon instanceof XpCoupon) {
        XpCoupon c = (XpCoupon) coupon;
        p = con.prepareStatement(
                "INSERT INTO couponcodes (name, ctype, usetimes, usedplayers, timeuse, xp) "
                        + "VALUES (?, ?, ?, ?, ?, ?)");
        p.setString(1, c.getName());
        p.setString(2, c.getType());
        p.setInt(3, c.getUseTimes());
        p.setString(4, playerHashToString(c.getUsedPlayers()));
        p.setInt(5, c.getTime());
        p.setInt(6, c.getXp());
    } else if (coupon instanceof CommandCoupon) {
        CommandCoupon c = (CommandCoupon) coupon;
        p = con.prepareStatement(
                "INSERT INTO couponcodes (name, ctype, usetimes, usedplayers, timeuse, command) "
                        + "VALUES (?, ?, ?, ?, ?, ?)");
        p.setString(1, c.getName());
        p.setString(2, c.getType());
        p.setInt(3, c.getUseTimes());
        p.setString(4, playerHashToString(c.getUsedPlayers()));
        p.setInt(5, c.getTime());
        p.setString(6, c.getCmd());
    }
    return p;
}

From source file:org.wso2.carbon.apimgt.migration.client.MigrateFrom110to200.java

/**
 * Replaces every encrypted CONSUMER_KEY in AM_APP_KEY_DOMAIN_MAPPING with its
 * decrypted value: reads all rows, decrypts the keys in memory, then deletes
 * the table contents and batch-inserts the decrypted mappings.
 *
 * @param connection open connection; caller owns transaction handling
 * @return {@code true} if the table was updated, {@code false} if decryption
 *         failures prevented the update
 * @throws SQLException on any database error
 */
private boolean updateAMAppKeyDomainMapping(Connection connection) throws SQLException {
    log.info("Updating consumer keys in AM_APP_KEY_DOMAIN_MAPPING");
    Statement selectStatement = null;
    Statement deleteStatement = null;
    PreparedStatement preparedStatement = null;
    ResultSet resultSet = null;
    boolean continueUpdatingDB = true;
    long totalRecords = 0;
    long decryptionFailedRecords = 0;

    try {
        ArrayList<KeyDomainMappingTableDTO> keyDomainMappingTableDTOs = new ArrayList<>();
        String query = "SELECT * FROM AM_APP_KEY_DOMAIN_MAPPING";

        selectStatement = connection.createStatement();
        selectStatement.setFetchSize(50); // stream large tables instead of loading all rows at once
        resultSet = selectStatement.executeQuery(query);
        while (resultSet.next()) {
            ConsumerKeyDTO consumerKeyDTO = new ConsumerKeyDTO();
            consumerKeyDTO.setEncryptedConsumerKey(resultSet.getString("CONSUMER_KEY"));
            totalRecords++;
            if (ResourceModifier.decryptConsumerKeyIfEncrypted(consumerKeyDTO)) {
                KeyDomainMappingTableDTO keyDomainMappingTableDTO = new KeyDomainMappingTableDTO();
                keyDomainMappingTableDTO.setConsumerKey(consumerKeyDTO);
                keyDomainMappingTableDTO.setAuthzDomain(resultSet.getString("AUTHZ_DOMAIN"));

                keyDomainMappingTableDTOs.add(keyDomainMappingTableDTO);
            } else {
                log.error("Cannot decrypt consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " in AM_APP_KEY_DOMAIN_MAPPING table");
                decryptionFailedRecords++;
                //If its not allowed to remove decryption failed entries from DB, we will not continue updating 
                // tables even with successfully decrypted entries to maintain DB integrity
                if (!removeDecryptionFailedKeysFromDB) {
                    continueUpdatingDB = false;
                }
            }
        }

        if (continueUpdatingDB) { // Modify table only if decryption is successful
            preparedStatement = connection.prepareStatement(
                    "INSERT INTO AM_APP_KEY_DOMAIN_MAPPING " + "(CONSUMER_KEY, AUTHZ_DOMAIN) VALUES (?, ?)");

            for (KeyDomainMappingTableDTO keyDomainMappingTableDTO : keyDomainMappingTableDTOs) {
                preparedStatement.setString(1,
                        keyDomainMappingTableDTO.getConsumerKey().getDecryptedConsumerKey());
                preparedStatement.setString(2, keyDomainMappingTableDTO.getAuthzDomain());
                preparedStatement.addBatch();
            }

            // Wipe the old (encrypted) rows before replaying the decrypted batch.
            deleteStatement = connection.createStatement();
            deleteStatement.execute("DELETE FROM AM_APP_KEY_DOMAIN_MAPPING");

            preparedStatement.executeBatch();
            log.info("AM_APP_KEY_DOMAIN_MAPPING table updated with " + decryptionFailedRecords + "/"
                    + totalRecords + " of the CONSUMER_KEY entries deleted as they cannot be decrypted");
        } else {
            log.error("AM_APP_KEY_DOMAIN_MAPPING table not updated as " + decryptionFailedRecords + "/"
                    + totalRecords + " of the CONSUMER_KEY entries" + " cannot be decrypted");
        }
    } finally {
        // Close the ResultSet before the Statement that produced it, and guard
        // each close() so a failure in one does not leak the remaining resources.
        if (resultSet != null) {
            try {
                resultSet.close();
            } catch (SQLException e) {
                log.warn("Failed to close result set", e);
            }
        }
        if (selectStatement != null) {
            try {
                selectStatement.close();
            } catch (SQLException e) {
                log.warn("Failed to close select statement", e);
            }
        }
        if (deleteStatement != null) {
            try {
                deleteStatement.close();
            } catch (SQLException e) {
                log.warn("Failed to close delete statement", e);
            }
        }
        if (preparedStatement != null) {
            try {
                preparedStatement.close();
            } catch (SQLException e) {
                log.warn("Failed to close insert statement", e);
            }
        }
    }

    return continueUpdatingDB;
}

From source file:org.cartoweb.stats.imports.Import.java

/**
 * Back-fills statistics columns on records that were served from cache
 * (general_cache_hit set) by copying the values from the cached record.
 * VACUUMs the table first so the subsequent scans are efficient.
 */
private void fillCacheHits(Connection con) throws SQLException {
    // VACUUM cannot run inside a transaction: commit, switch to auto-commit,
    // vacuum, then return to transactional mode.
    con.commit();
    con.setAutoCommit(true);
    JdbcUtilities.runDeleteQuery("vacuuming " + tableName, "VACUUM ANALYZE " + tableName, con, null);
    con.setAutoCommit(false);

    if (DB_SOLVE_HITS) {
        //take around 55m for 4M records and is not greate for incremental updates...
        JdbcUtilities.runDeleteQuery("solving cache hits", "UPDATE " + tableName
                + " f SET general_elapsed_time=s.general_elapsed_time, images_mainmap_width=s.images_mainmap_width, images_mainmap_height=s.images_mainmap_height, layers=s.layers, layers_switch_id=s.layers_switch_id, bbox_minx=s.bbox_minx, bbox_miny=s.bbox_miny, bbox_maxx=s.bbox_maxx, bbox_maxy=s.bbox_maxy, location_scale=s.location_scale, query_results_count=s.query_results_count, query_results_table_count=s.query_results_table_count FROM "
                + tableName
                + " s WHERE s.general_cache_id=f.general_cache_hit AND f.general_cache_hit IS NOT NULL AND f.general_elapsed_time IS NULL AND f.layers IS NULL",
                con, null);
    } else {
        //takes around 21m for the same 4M records and is optimal for incremental updates...

        // Nothing to resolve: bail out before preparing the statement.
        // (Previously the statement was prepared first and leaked on this path.)
        if (hits.size() == 0) {
            return;
        }
        try {
            final PreparedStatement updateStmt = con.prepareStatement("UPDATE " + tableName
                    + " SET general_elapsed_time=?, images_mainmap_width=?, images_mainmap_height=?, layers=?, layers_switch_id=?, bbox_minx=?, bbox_miny=?, bbox_maxx=?, bbox_maxy=?, location_scale=?, query_results_count=?, query_results_table_count=? WHERE general_cache_hit=?");
            try {
                JdbcUtilities.runSelectQuery("reading cached values",
                        "SELECT general_cache_id, general_elapsed_time, images_mainmap_width, images_mainmap_height, layers, layers_switch_id, bbox_minx, bbox_miny, bbox_maxx, bbox_maxy, location_scale, query_results_count, query_results_table_count FROM "
                                + tableName + " WHERE general_cache_id IS NOT NULL",
                        con, new JdbcUtilities.SelectTask() {
                            private int cpt = 0; // number of cached values consumed

                            public void setupStatement(PreparedStatement stmt) throws SQLException {
                            }

                            public void run(ResultSet rs) throws SQLException {
                                int count = 0;
                                final int todo = hits.size();
                                Progress progress = new Progress(10 * 1000, todo, "Cache hit record updating",
                                        LOGGER);
                                while (rs.next()) {
                                    String cacheId = rs.getString(1);
                                    //We can have the same general_cache_id multiple times.
                                    //So we have to remove it from the set.
                                    if (hits.remove(cacheId)) {
                                        StatementUtils.copyFloat(rs, 2, updateStmt, 1);
                                        StatementUtils.copyInt(rs, 3, updateStmt, 2);
                                        StatementUtils.copyInt(rs, 4, updateStmt, 3);
                                        StatementUtils.copyString(rs, 5, updateStmt, 4);
                                        StatementUtils.copyInt(rs, 6, updateStmt, 5);
                                        StatementUtils.copyFloat(rs, 7, updateStmt, 6);
                                        StatementUtils.copyFloat(rs, 8, updateStmt, 7);
                                        StatementUtils.copyFloat(rs, 9, updateStmt, 8);
                                        StatementUtils.copyFloat(rs, 10, updateStmt, 9);
                                        StatementUtils.copyFloat(rs, 11, updateStmt, 10);
                                        StatementUtils.copyInt(rs, 12, updateStmt, 11);
                                        StatementUtils.copyString(rs, 13, updateStmt, 12);
                                        updateStmt.setString(13, cacheId);
                                        updateStmt.addBatch();

                                        // Flush every 50 batched updates to bound memory use.
                                        if (++cpt % 50 == 0) {
                                            int[] counts = updateStmt.executeBatch();
                                            for (int i = 0; i < counts.length; ++i) {
                                                count += counts[i];
                                            }
                                        }

                                        progress.update(todo - hits.size());
                                    }
                                }
                                ++cpt;
                                // Flush whatever remains in the last partial batch.
                                int[] counts = updateStmt.executeBatch();
                                for (int i = 0; i < counts.length; ++i) {
                                    count += counts[i];
                                }

                                LOGGER.info(count + " cache hit records updated from " + cpt + " cached values");
                            }
                        });
            } finally {
                updateStmt.close(); // close even if the select/update fails
            }
        } catch (BatchUpdateException ex) {
            // Surface the underlying per-statement failure, which is otherwise hidden.
            LOGGER.error(ex.getNextException());
            throw ex;
        }
    }
    con.commit();
}

From source file:org.wso2.carbon.idp.mgt.dao.IdPManagementDAO.java

/**
 * @param conn/*from  w  ww .j  a v  a 2s.  co  m*/
 * @param idPId
 * @param tenantId
 * @param claimMappings
 * @throws SQLException
 * @throws IdentityProviderManagementException
 */
private void addDefaultClaimValuesForLocalIdP(Connection conn, int idPId, int tenantId,
        ClaimMapping[] claimMappings) throws SQLException, IdentityProviderManagementException {

    PreparedStatement prepStmt = null;
    ResultSet rs = null;
    String sqlStmt;

    try {

        if (claimMappings == null || claimMappings.length == 0) {
            return;
        }

        sqlStmt = IdPManagementConstants.SQLQueries.ADD_LOCAL_IDP_DEFAULT_CLAIM_VALUES_SQL;
        prepStmt = conn.prepareStatement(sqlStmt);
        for (ClaimMapping mapping : claimMappings) {
            if (mapping != null && mapping.getLocalClaim() != null
                    && mapping.getLocalClaim().getClaimUri() != null) {

                prepStmt.setInt(1, idPId);
                prepStmt.setString(2, CharacterEncoder.getSafeText(mapping.getLocalClaim().getClaimUri()));
                prepStmt.setString(3, CharacterEncoder.getSafeText(mapping.getDefaultValue()));
                prepStmt.setInt(4, tenantId);
                if (mapping.isRequested()) {
                    prepStmt.setString(5, "1");
                } else {
                    prepStmt.setString(5, "0");
                }
                prepStmt.addBatch();
            }
        }

        prepStmt.executeBatch();

    } finally {
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
        IdentityApplicationManagementUtil.closeResultSet(rs);
        IdentityApplicationManagementUtil.closeStatement(prepStmt);
    }
}

From source file:fr.aliacom.obm.common.calendar.CalendarDaoJdbcImpl.java

/**
 * Records one DeletedEvent tombstone row per attendee for the given event,
 * stamping each row with the current database time.
 */
private void insertIntoDeletedEvent(Connection con, AccessToken token, Event event, EventType eventType,
        Collection<Integer> attendeeIds) throws SQLException {
    PreparedStatement ps = null;
    try {
        ps = con.prepareStatement("INSERT INTO DeletedEvent (deletedevent_event_id, deletedevent_user_id, "
                + "deletedevent_origin, deletedevent_type, deletedevent_timestamp, deletedevent_event_ext_id) "
                + "VALUES (?, ?, ?, ?, now(), ?)");
        EventObmId eventId = event.getObmId();
        for (int attendeeId : attendeeIds) {
            ps.setInt(1, eventId.getObmId());
            ps.setInt(2, attendeeId);
            ps.setString(3, token.getOrigin());
            // The event type column is a database-specific VCOMPONENT value.
            ps.setObject(4,
                    obmHelper.getDBCP().getJdbcObject(ObmHelper.VCOMPONENT, eventType.toString()));
            ps.setString(5, event.getExtId().getExtId());
            ps.addBatch();
        }
        ps.executeBatch();
    } finally {
        obmHelper.cleanup(null, ps, null);
    }
}

From source file:com.alfaariss.oa.engine.session.jdbc.JDBCSessionFactory.java

/**
 * Uses a batch update to persist all supplied sessions: new sessions (no id yet)
 * are inserted, expired sessions are deleted, and all others are updated. The
 * three batches run in a single transaction that is rolled back on failure.
 *
 * @param sessions The sessions to persist.
 * @throws PersistenceException If persistence fails.
 *
 * @see IEntityManager#persist(IEntity[])
 * @see PreparedStatement#addBatch()
 */
public void persist(JDBCSession[] sessions) throws PersistenceException {
    if (sessions == null)
        throw new IllegalArgumentException("Supplied session array is empty or invalid");

    Connection connection = null;
    PreparedStatement psInsert = null;
    PreparedStatement psDelete = null;
    PreparedStatement psUpdate = null;
    try {
        connection = _oDataSource.getConnection(); //Manage connection
        connection.setAutoCommit(false);

        psInsert = connection.prepareStatement(_sInsertQuery);
        psDelete = connection.prepareStatement(_sRemoveQuery);
        psUpdate = connection.prepareStatement(_sUpdateQuery);

        for (JDBCSession session : sessions) {
            String id = session.getId();
            if (id == null) { // New session: generate a unique id and insert
                byte[] baId = new byte[ISession.ID_BYTE_LENGTH];
                do {
                    _random.nextBytes(baId);
                    try {
                        id = ModifiedBase64.encode(baId);
                    } catch (UnsupportedEncodingException e) {
                        _logger.error("Could not create id for byte[]: " + baId, e);
                        throw new PersistenceException(SystemErrors.ERROR_INTERNAL);
                    }
                } while (exists(id)); //Key already exists

                session.setId(id);
                //Update expiration time
                long expiration = System.currentTimeMillis() + _lExpiration;
                session.setTgtExpTime(expiration);
                psInsert.setString(1, id);
                psInsert.setString(2, session.getTGTId());
                psInsert.setInt(3, session.getState().ordinal());
                psInsert.setString(4, session.getRequestorId());
                psInsert.setString(5, session.getProfileURL());
                psInsert.setBytes(6, Serialize.encode(session.getUser()));
                psInsert.setTimestamp(7, new Timestamp(expiration));
                psInsert.setBoolean(8, session.isForcedAuthentication());
                psInsert.setBoolean(9, session.isPassive());
                psInsert.setBytes(10, Serialize.encode(session.getAttributes()));
                psInsert.setString(11, session.getForcedUserID());
                psInsert.setBytes(12, Serialize.encode(session.getLocale()));
                psInsert.setBytes(13, Serialize.encode(session.getSelectedAuthNProfile()));
                psInsert.setBytes(14, Serialize.encode(session.getAuthNProfiles()));
                psInsert.addBatch();
            } else if (session.isExpired()) //Expired
            {
                _logger.info("Session Expired: " + id);

                _eventLogger.info(new UserEventLogItem(session, null, UserEvent.SESSION_EXPIRED, this, null));

                psDelete.setString(1, id);
                psDelete.addBatch();
            } else //Update
            {
                //Update expiration time
                long expiration = System.currentTimeMillis() + _lExpiration;
                session.setTgtExpTime(expiration);
                psUpdate.setString(1, session.getTGTId());
                psUpdate.setInt(2, session.getState().ordinal());
                psUpdate.setString(3, session.getRequestorId());
                psUpdate.setString(4, session.getProfileURL());
                psUpdate.setBytes(5, Serialize.encode(session.getUser()));
                psUpdate.setTimestamp(6, new Timestamp(expiration));
                psUpdate.setBoolean(7, session.isForcedAuthentication());
                // BUG FIX: parameter 8 was previously set on psInsert, leaving the
                // update statement's isPassive parameter unbound.
                psUpdate.setBoolean(8, session.isPassive());
                psUpdate.setBytes(9, Serialize.encode(session.getAttributes()));
                psUpdate.setString(10, session.getForcedUserID());
                psUpdate.setBytes(11, Serialize.encode(session.getLocale()));
                psUpdate.setBytes(12, Serialize.encode(session.getSelectedAuthNProfile()));
                psUpdate.setBytes(13, Serialize.encode(session.getAuthNProfiles()));
                psUpdate.setString(14, id);
                psUpdate.addBatch();
            }
        }
        try {
            int[] iResult = psInsert.executeBatch();
            // NOTE(review): guarded by isDebugEnabled but logged at info — confirm intended.
            if (_logger.isDebugEnabled()) {
                int iTotalAdded = 0;
                for (int i : iResult)
                    iTotalAdded += i;

                _logger.info(iTotalAdded + " new session(s) added by batch");
            }
        } catch (SQLException e) {
            _logger.error("Could not execute insert batch", e);
            throw new PersistenceException(SystemErrors.ERROR_RESOURCE_INSERT);
        }
        try {
            int[] iResult = psDelete.executeBatch();
            if (_logger.isDebugEnabled()) {
                int iTotalDeleted = 0;
                for (int i : iResult)
                    iTotalDeleted += i;

                _logger.info(iTotalDeleted + " session(s) deleted by batch");
            }

        } catch (SQLException e) {
            _logger.error("Could not execute delete batch", e);
            throw new PersistenceException(SystemErrors.ERROR_RESOURCE_REMOVE);
        }
        try {
            int[] iResult = psUpdate.executeBatch();
            if (_logger.isDebugEnabled()) {
                int iTotalUpdated = 0;
                for (int i : iResult)
                    iTotalUpdated += i;

                _logger.info(iTotalUpdated + " session(s) updated by batch");
            }
        } catch (SQLException e) {
            _logger.error("Could not execute update batch", e);
            throw new PersistenceException(SystemErrors.ERROR_RESOURCE_UPDATE);
        }

        connection.commit();
    } catch (SQLException e) {
        _logger.error("Could not execute batch", e);
        try {
            if (connection != null)
                connection.rollback();
        } catch (SQLException e1) {
            // Log the rollback failure itself (previously the outer exception was logged).
            _logger.warn("Could not rollback batch", e1);
        }

        throw new PersistenceException(SystemErrors.ERROR_INTERNAL);
    } catch (PersistenceException e) {
        try {
            if (connection != null)
                connection.rollback();
        } catch (SQLException e1) {
            _logger.warn("Could not rollback batch", e1);
        }
        throw e;
    } catch (Exception e) {
        _logger.error("Internal error during session persist", e);
        throw new PersistenceException(SystemErrors.ERROR_RESOURCE_CONNECT);
    } finally {
        try {
            if (psInsert != null)
                psInsert.close();
        } catch (SQLException e) {
            _logger.debug("Could not close insert statement", e);
        }
        try {
            if (psDelete != null)
                psDelete.close();
        } catch (SQLException e) {
            _logger.debug("Could not close delete statement", e);
        }
        try {
            if (psUpdate != null)
                psUpdate.close();
        } catch (SQLException e) {
            _logger.debug("Could not close update statement", e);
        }
        try {
            if (connection != null)
                connection.close();
        } catch (SQLException e) {
            _logger.debug("Could not close connection", e);
        }
    }
}

From source file:org.apache.ctakes.jdl.data.loader.CsvLoader.java

/**
 * @param jdlConnection/*from   w  w w .ja v a  2 s. c o  m*/
 *            the jdlConnection to manage
 */
@Override
public final void dataInsert(final JdlConnection jdlConnection) {
    String sql = getSqlInsert(loader);
    if (log.isInfoEnabled())
        log.info(sql);
    Number ncommit = loader.getCommit();
    int rs = (loader.getSkip() == null) ? 0 : loader.getSkip().intValue();
    PreparedStatement preparedStatement = null;
    try {
        jdlConnection.setAutoCommit(false);
        // String[][] values = parser.getAllValues();
        preparedStatement = jdlConnection.getOpenConnection().prepareStatement(sql);
        boolean leftoversToCommit = false;
        // for (int r = rs; r < values.length; r++) {
        String[] row = null;
        int r = 0;
        do {
            row = parser.getLine();
            if (row == null)
                break;
            if (r < rs) {
                r++;
                continue;
            }
            r++;
            try {
                int cs = 0; // columns to skip
                int ce = 0; // columns from external
                int c = 0;
                // PreparedStatement preparedStatement = jdlConnection
                // .getOpenConnection().prepareStatement(sql);
                // if (ncommit == null) {
                // jdlConnection.setAutoCommit(true);
                // } else {
                // jdlConnection.setAutoCommit(false);
                // }
                for (Column column : loader.getColumn()) {
                    if (BooleanUtils.isTrue(column.isSkip())) {
                        cs++;
                    } else {
                        c++;
                        Object value = column.getConstant();
                        ce++;
                        if (value == null) {
                            if (column.getSeq() != null) {
                                value = r + column.getSeq().intValue();
                            } else {
                                // value = values[r][c + cs - ce];
                                value = row[c + cs - ce];
                                ce--;
                            }
                        }
                        if (value == null || (value instanceof String && ((String) value).length() == 0))
                            preparedStatement.setObject(c, null);
                        else {
                            // if there is a formatter, parse the string
                            if (this.formatMap.containsKey(column.getName())) {
                                try {
                                    preparedStatement.setObject(c,
                                            this.formatMap.get(column.getName()).parseObject((String) value));
                                } catch (Exception e) {
                                    System.err.println("Could not format '" + value + "' for column "
                                            + column.getName() + " on line " + r);
                                    e.printStackTrace(System.err);
                                    throw new RuntimeException(e);
                                }
                            } else {
                                preparedStatement.setObject(c, value);
                            }
                        }
                    }
                }
                preparedStatement.addBatch();
                leftoversToCommit = true;
                // preparedStatement.executeBatch();
                // executeBatch(preparedStatement);
                // if (!jdlConnection.isAutoCommit()
                // && (r % ncommit.intValue() == 0)) {
                if (r % ncommit.intValue() == 0) {
                    preparedStatement.executeBatch();
                    jdlConnection.commitConnection();
                    leftoversToCommit = false;
                    log.info("inserted " + ncommit.intValue() + " rows");
                }
            } catch (SQLException e) {
                // e.printStackTrace();
                throw new RuntimeException(e);
            }
        } while (row != null);
        if (leftoversToCommit) {
            preparedStatement.executeBatch();
            jdlConnection.commitConnection();
            leftoversToCommit = false;
        }
        log.info("inserted " + (r - rs) + " rows total");
    } catch (InstantiationException e) {
        log.error("", e);
    } catch (IllegalAccessException e) {
        log.error("", e);
    } catch (ClassNotFoundException e) {
        log.error("", e);
    } catch (IOException e) {
        log.error("", e);
    } catch (SQLException e) {
        throw new RuntimeException(e);
    } finally {
        if (preparedStatement != null) {
            try {
                preparedStatement.close();
            } catch (Exception e) {
            }
        }
    }
    // try {
    // if (!jdlConnection.isAutoCommit()) {
    // jdlConnection.commitConnection();
    // }
    // jdlConnection.closeConnection();
    // } catch (SQLException e) {
    // // TODO Auto-generated catch block
    // e.printStackTrace();
    // }
}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

/**
 * Batch-updates the RC records for all names in {@code nameStates}, setting
 * each record's group name, state, and new actives. Batches are flushed and
 * committed every MAX_DB_BATCH_SIZE records and once after the final record.
 *
 * @return true if every batched update reported at least one affected row
 */
private boolean setStateMergeDB(Map<String, String> nameStates, int epoch, RCStates state,
        Set<NodeIDType> newActives) {
    String updateCmd = "update " + getRCRecordTable() + " set " + Columns.RC_GROUP_NAME.toString() + "=?, "
            + Columns.STRINGIFIED_RECORD.toString() + "=? where " + Columns.SERVICE_NAME.toString() + "=?";

    PreparedStatement updateRC = null;
    Connection conn = null;
    boolean updatedAll = true;
    try {
        conn = this.getDefaultConn();
        conn.setAutoCommit(false); // commit per batch, not per statement
        updateRC = conn.prepareStatement(updateCmd);
        assert (nameStates != null && !nameStates.isEmpty());
        // All names in one call belong to the same RC group.
        String rcGroupName = this.getRCGroupName(nameStates.keySet().iterator().next());
        int i = 0;
        long t1 = System.currentTimeMillis();
        for (String name : nameStates.keySet()) {
            ReconfigurationRecord<NodeIDType> record = new ReconfigurationRecord<NodeIDType>(name, 0,
                    newActives);
            record.setState(name, 0, state/* RCStates.READY_READY */).setActivesToNewActives();
            updateRC.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                updateRC.setClob(2, new StringReader(record.toString()));
            else
                updateRC.setString(2, record.toString());
            updateRC.setString(3, name);
            updateRC.addBatch();
            i++;
            // Flush when the batch is full or after the last record. The previous
            // (i + 1) test could leave the final record's batch un-executed.
            if (i % MAX_DB_BATCH_SIZE == 0 || i == nameStates.size()) {
                int[] executed = updateRC.executeBatch();
                conn.commit();
                updateRC.clearBatch();
                for (int j : executed)
                    updatedAll = updatedAll && (j > 0);
                if (updatedAll)
                    log.log(Level.FINE, "{0} successfully logged the last {1} messages in {2} ms",
                            new Object[] { this, (i + 1), (System.currentTimeMillis() - t1) });
                t1 = System.currentTimeMillis();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting batched RC records using " + updateCmd);
        sqle.printStackTrace();
        updatedAll = false; // do not report success after a failed batch
    } finally {
        cleanup(updateRC);
        cleanup(conn);
    }
    return updatedAll;
}

From source file:fr.aliacom.obm.common.calendar.CalendarDaoJdbcImpl.java

/**
 * Synchronizes the EventLink rows for every attendee of {@code event}: all
 * attendees are updated in a single JDBC batch, and any attendee whose update
 * matched no existing row is then inserted via {@code insertAttendees}.
 *
 * @param token the access token identifying the user performing the update
 * @param con   an open database connection; not closed by this method
 * @param event the event whose attendee links are being synchronized
 * @throws SQLException if preparing or executing the batch update fails
 */
private void updateAttendees(AccessToken token, Connection con, Event event) throws SQLException {
    String q = "update EventLink set eventlink_state=?, eventlink_required=?, eventlink_userupdate=?, eventlink_percent=?, eventlink_is_organizer=? "
            + "where eventlink_event_id = ? AND eventlink_entity_id = ?";
    List<Attendee> batchOrder = new LinkedList<Attendee>();
    List<Attendee> missingRows = new LinkedList<Attendee>();
    int[] updateCounts;
    PreparedStatement ps = null;

    try {
        ps = con.prepareStatement(q);

        for (Attendee attendee : event.getAttendees()) {
            // Bind columns at explicit 1-based positions.
            ps.setObject(1, obmHelper.getDBCP().getJdbcObject(ObmHelper.VPARTSTAT,
                    attendee.getParticipation().getState().toString()));
            ps.setObject(2, obmHelper.getDBCP().getJdbcObject(ObmHelper.VROLE,
                    attendee.getParticipationRole().toString()));
            ps.setInt(3, token.getObmId());
            ps.setInt(4, attendee.getPercent());
            ps.setBoolean(5, attendee.isOrganizer());
            ps.setInt(6, event.getObmId().getObmId());
            ps.setInt(7, attendee.getEntityId().getId());
            ps.addBatch();
            // Track batch order so update counts can be mapped back to attendees.
            batchOrder.add(attendee);
        }
        updateCounts = ps.executeBatch();
    } finally {
        obmHelper.cleanup(null, ps, null);
    }

    // An update count of zero means no EventLink row existed for that
    // attendee; it must be inserted instead of updated.
    for (int row = 0; row < updateCounts.length; row++) {
        if (updateCounts[row] == 0) {
            missingRows.add(batchOrder.get(row));
        }
    }

    logger.info("event modification needs to add " + missingRows.size() + " attendees.");
    insertAttendees(token, event, con, missingRows);
}

From source file:org.jamwiki.db.CacheQueryHandler.java

/**
 * Inserts the supplied topic versions, using a JDBC batch when more than one
 * version is given. Topic version IDs are assigned either from a database
 * sequence (batch mode, or when the database lacks auto-increment keys) or
 * read back from driver-generated keys (single insert with auto-increment).
 * Each {@link TopicVersion} has its topicVersionId populated as a side effect.
 */
@Override
public void insertTopicVersions(List<TopicVersion> topicVersions) {
    Connection conn = null;
    PreparedStatement stmt = null;
    ResultSet rs = null;
    // Batching only pays off for multiple rows, and generated keys are not
    // available in batch mode, so the strategy is chosen once up front.
    boolean useBatch = (topicVersions.size() > 1);
    try {
        conn = DatabaseConnection.getConnection();
        if (!this.autoIncrementPrimaryKeys()) {
            stmt = conn.prepareStatement(STATEMENT_INSERT_TOPIC_VERSION);
        } else if (useBatch) {
            // generated keys don't work in batch mode
            stmt = conn.prepareStatement(STATEMENT_INSERT_TOPIC_VERSION_AUTO_INCREMENT);
        } else {
            stmt = conn.prepareStatement(STATEMENT_INSERT_TOPIC_VERSION_AUTO_INCREMENT,
                    Statement.RETURN_GENERATED_KEYS);
        }
        int topicVersionId = -1;
        if (!this.autoIncrementPrimaryKeys() || useBatch) {
            // manually retrieve next topic version id when using batch
            // mode or when the database doesn't support generated keys.
            topicVersionId = DatabaseConnection.executeSequenceQuery(STATEMENT_SELECT_TOPIC_VERSION_SEQUENCE);
        }
        for (TopicVersion topicVersion : topicVersions) {
            if (!this.autoIncrementPrimaryKeys() || useBatch) {
                // FIXME - if two threads update the database simultaneously then
                // it is possible that this code could set the topic version ID
                // to a value that is different from what the database ends up
                // using.
                topicVersion.setTopicVersionId(topicVersionId++);
            }
            StringReader sr = null;
            try {
                int index = 1;
                stmt.setInt(index++, topicVersion.getTopicVersionId());
                // Default the edit date to "now" when the caller left it unset.
                if (topicVersion.getEditDate() == null) {
                    topicVersion.setEditDate(new Timestamp(System.currentTimeMillis()));
                }
                stmt.setInt(index++, topicVersion.getTopicId());
                stmt.setString(index++, topicVersion.getEditComment());
                // Pass the content as a character stream rather than a plain
                // string (Cach\u00e9-specific handling of large text columns).
                sr = new StringReader(topicVersion.getVersionContent());
                stmt.setCharacterStream(index++, sr, topicVersion.getVersionContent().length());
                if (topicVersion.getAuthorId() == null) {
                    stmt.setNull(index++, Types.INTEGER);
                } else {
                    stmt.setInt(index++, topicVersion.getAuthorId());
                }
                stmt.setInt(index++, topicVersion.getEditType());
                stmt.setString(index++, topicVersion.getAuthorDisplay());
                stmt.setTimestamp(index++, topicVersion.getEditDate());
                if (topicVersion.getPreviousTopicVersionId() == null) {
                    stmt.setNull(index++, Types.INTEGER);
                } else {
                    stmt.setInt(index++, topicVersion.getPreviousTopicVersionId());
                }
                stmt.setInt(index++, topicVersion.getCharactersChanged());
                stmt.setString(index++, topicVersion.getVersionParamString());
            } finally {
                if (sr != null) {
                    // NOTE(review): the reader is closed here, before
                    // addBatch()/executeUpdate() runs below; this assumes the
                    // driver consumed the stream during setCharacterStream --
                    // confirm against the target JDBC driver.
                    sr.close();
                }
            }
            if (useBatch) {
                stmt.addBatch();
            } else {
                stmt.executeUpdate();
            }
            // Single-insert auto-increment path: read the key the database
            // actually assigned and copy it back into the domain object.
            if (this.autoIncrementPrimaryKeys() && !useBatch) {
                rs = stmt.getGeneratedKeys();
                if (!rs.next()) {
                    throw new SQLException("Unable to determine auto-generated ID for database record");
                }
                topicVersion.setTopicVersionId(rs.getInt(1));
            }
        }
        if (useBatch) {
            stmt.executeBatch();
        }
    } catch (SQLException e) {
        throw new UncategorizedSQLException("insertTopicVersions", null, e);
    } finally {
        DatabaseConnection.closeConnection(conn, stmt, rs);
    }
}