Example usage for java.sql PreparedStatement executeBatch

List of usage examples for java.sql PreparedStatement executeBatch

Introduction

This page collects example usages of java.sql PreparedStatement executeBatch from open-source projects.

Prototype

int[] executeBatch() throws SQLException;

Document

Submits a batch of commands to the database for execution and, if all commands execute successfully, returns an array of update counts.
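
Before the project examples below, here is a minimal, self-contained sketch of the basic pattern: set parameters, queue each command with addBatch, then submit them all with executeBatch inside a transaction. The in-memory H2 JDBC URL, the accounts table, and the values are hypothetical stand-ins; any JDBC data source works the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class ExecuteBatchSketch {
    public static void main(String[] args) throws SQLException {
        // Hypothetical in-memory H2 database (requires the H2 driver on the classpath).
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo")) {
            try (Statement ddl = conn.createStatement()) {
                ddl.execute("CREATE TABLE accounts (id BIGINT PRIMARY KEY, balance BIGINT)");
                ddl.execute("INSERT INTO accounts VALUES (1, 0), (2, 0), (3, 0)");
            }
            conn.setAutoCommit(false); // commit the whole batch atomically
            try (PreparedStatement ps = conn.prepareStatement(
                    "UPDATE accounts SET balance = ? WHERE id = ?")) {
                long[][] rows = { { 100, 1 }, { 200, 2 }, { 300, 3 } };
                for (long[] row : rows) {
                    ps.setLong(1, row[0]); // balance
                    ps.setLong(2, row[1]); // id
                    ps.addBatch(); // queue one command
                }
                // One update count per queued command; on failure a
                // BatchUpdateException reports the counts of the commands
                // that completed before the error.
                int[] counts = ps.executeBatch();
                conn.commit();
                System.out.println(counts.length + " commands executed");
            }
        }
    }
}

As several of the examples below show, production code often flushes the batch in fixed-size chunks and commits per chunk to bound memory use.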

Usage

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private Set<String> putReconfigurationRecordDB(Map<String, ReconfigurationRecord<NodeIDType>> toCommit) {
    String updateCmd = "update " + getRCRecordTable() + " set " + Columns.RC_GROUP_NAME.toString() + "=?, "
            + Columns.STRINGIFIED_RECORD.toString() + "=? where " + Columns.SERVICE_NAME.toString() + "=?";
    String cmd = updateCmd;

    PreparedStatement pstmt = null;
    Connection conn = null;
    Set<String> committed = new HashSet<String>();
    String[] keys = toCommit.keySet().toArray(new String[0]);
    try {
        ArrayList<String> batch = new ArrayList<String>();
        for (int i = 0; i < keys.length; i++) {
            String name = keys[i];
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                pstmt = conn.prepareStatement(updateCmd);
            }
            // removal
            if (toCommit.get(name) == null) {
                this.deleteReconfigurationRecordDB(name);
                log.log(Level.INFO, "{0} deleted RC record {1}", new Object[] { this, name });
                committed.add(name);
                continue;
            }
            // else update/insert
            String rcGroupName = toCommit.get(name).getRCGroupName();
            if (rcGroupName == null)
                rcGroupName = this.getRCGroupName(name);
            pstmt.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                pstmt.setClob(2, new StringReader((toCommit.get(name)).toString()));
            else
                pstmt.setString(2, (toCommit.get(name)).toString());
            pstmt.setString(3, name);
            pstmt.addBatch();
            batch.add(name);

            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == toCommit.size()) {
                int[] executed = pstmt.executeBatch();
                assert (executed.length == batch.size());
                conn.commit();
                pstmt.clearBatch();
                for (int j = 0; j < executed.length; j++) {
                    if (executed[j] > 0) {
                        log.log(Level.FINE, "{0} updated RC DB record to {1}",
                                new Object[] { this, toCommit.get(batch.get(j)).getSummary() });
                        committed.add(batch.get(j));
                    } else
                        log.log(Level.FINE,
                                "{0} unable to update RC record {1} (executed={2}), will try insert",
                                new Object[] { this, batch.get(j), executed[j] });
                }
                batch.clear();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting RC record using " + cmd);
        sqle.printStackTrace();
    } finally {
        cleanup(pstmt);
        cleanup(conn);
    }

    log.log(Level.FINE, "{0} batch-committed {1}({2}) out of {3}({4})",
            new Object[] { this, committed.size(), committed, toCommit.size(), toCommit.keySet() });
    committed.addAll(this.putReconfigurationRecordIndividually(this.diff(toCommit, committed)));
    log.log(Level.FINE, "{0} committed {1}({2}) out of {3}({4})",
            new Object[] { this, committed.size(), committed, toCommit.size(), toCommit.keySet() });
    return committed;
}

From source file:org.wso2.carbon.apimgt.migration.client.MigrateFrom19to110.java

private boolean updateAMApplicationKeyMapping(Connection connection) throws SQLException {
    log.info("Updating consumer keys in AM_APPLICATION_KEY_MAPPING");

    PreparedStatement preparedStatementUpdate = null;
    PreparedStatement preparedStatementDelete = null;
    Statement statement = null;
    ResultSet resultSet = null;
    boolean continueUpdatingDB = true;
    long totalRecords = 0;
    long decryptionFailedRecords = 0;

    try {
        String query = "SELECT APPLICATION_ID, CONSUMER_KEY, KEY_TYPE FROM AM_APPLICATION_KEY_MAPPING";
        ArrayList<AppKeyMappingTableDTO> appKeyMappingTableDTOs = new ArrayList<>();
        ArrayList<AppKeyMappingTableDTO> appKeyMappingTableDTOsFailed = new ArrayList<>();

        statement = connection.createStatement();
        statement.setFetchSize(50);
        resultSet = statement.executeQuery(query);

        while (resultSet.next()) {
            ConsumerKeyDTO consumerKeyDTO = new ConsumerKeyDTO();
            consumerKeyDTO.setEncryptedConsumerKey(resultSet.getString("CONSUMER_KEY"));

            AppKeyMappingTableDTO appKeyMappingTableDTO = new AppKeyMappingTableDTO();
            appKeyMappingTableDTO.setApplicationId(resultSet.getString("APPLICATION_ID"));
            appKeyMappingTableDTO.setConsumerKey(consumerKeyDTO);
            appKeyMappingTableDTO.setKeyType(resultSet.getString("KEY_TYPE"));
            totalRecords++;
            if (ResourceModifier.decryptConsumerKeyIfEncrypted(consumerKeyDTO)) {

                appKeyMappingTableDTOs.add(appKeyMappingTableDTO);
                log.debug("Successfully decrypted consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " as : " + consumerKeyDTO.getDecryptedConsumerKey()
                        + " in AM_APPLICATION_KEY_MAPPING table");
            } else {
                log.error("Cannot decrypt consumer key : " + consumerKeyDTO.getEncryptedConsumerKey()
                        + " in AM_APPLICATION_KEY_MAPPING table");
                decryptionFailedRecords++;
                appKeyMappingTableDTOsFailed.add(appKeyMappingTableDTO);

                // If it's not allowed to remove decryption-failed entries from the DB, we do not continue
                // updating tables even with successfully decrypted entries, to maintain DB integrity
                if (!removeDecryptionFailedKeysFromDB) {
                    continueUpdatingDB = false;
                }
            }
        }

        if (continueUpdatingDB) {
            preparedStatementUpdate = connection
                    .prepareStatement("UPDATE AM_APPLICATION_KEY_MAPPING SET CONSUMER_KEY = ?"
                            + " WHERE APPLICATION_ID = ? AND KEY_TYPE = ?");

            for (AppKeyMappingTableDTO appKeyMappingTableDTO : appKeyMappingTableDTOs) {
                preparedStatementUpdate.setString(1,
                        appKeyMappingTableDTO.getConsumerKey().getDecryptedConsumerKey());
                preparedStatementUpdate.setString(2, appKeyMappingTableDTO.getApplicationId());
                preparedStatementUpdate.setString(3, appKeyMappingTableDTO.getKeyType());
                preparedStatementUpdate.addBatch();
            }
            preparedStatementUpdate.executeBatch();

            //deleting rows where consumer key decryption was unsuccessful
            preparedStatementDelete = connection
                    .prepareStatement("DELETE FROM AM_APPLICATION_KEY_MAPPING WHERE CONSUMER_KEY = ?");

            for (AppKeyMappingTableDTO appKeyMappingTableDTO : appKeyMappingTableDTOsFailed) {
                preparedStatementDelete.setString(1,
                        appKeyMappingTableDTO.getConsumerKey().getEncryptedConsumerKey());
                preparedStatementDelete.addBatch();
            }
            preparedStatementDelete.executeBatch();

            log.info("AM_APPLICATION_KEY_MAPPING table updated with " + decryptionFailedRecords + "/"
                    + totalRecords + " of the CONSUMER_KEY entries deleted as they cannot be decrypted");
        } else {
            log.error("AM_APPLICATION_KEY_MAPPING table not updated as " + decryptionFailedRecords + "/"
                    + totalRecords + " of the CONSUMER_KEY entries cannot be decrypted");
        }
    } finally {
        APIMgtDBUtil.closeAllConnections(null, null, resultSet);
        APIMgtDBUtil.closeAllConnections(preparedStatementUpdate, null, null);
        APIMgtDBUtil.closeAllConnections(preparedStatementDelete, null, null);

        if (statement != null) {
            try {
                statement.close();
            } catch (SQLException e) {
                log.error("Unable to close the statement", e);
            }
        }
    }
    return continueUpdatingDB;
}

From source file:com.nextep.designer.dbgm.services.impl.DataService.java

private void saveDataLinesToRepository(IDataSet dataSet, IDataSet dataSetContents, DeltaType deltaType,
        IProgressMonitor monitor) {
    final SubMonitor m = SubMonitor.convert(monitor, 10000);
    m.subTask(MessageFormat.format(DBGMMessages.getString("service.data.dataSetSaveInit"), dataSet.getName())); //$NON-NLS-1$
    IStorageHandle handle = dataSetContents.getStorageHandle();
    if (handle == null) {
        handle = storageService.createDataSetStorage(dataSet);
    }

    Connection derbyConn = null;
    Statement stmt = null;
    ResultSet rset = null;
    Connection repoConn = null;
    PreparedStatement insertStmt = null;
    Session s = null;
    Transaction t = null;
    long rowid = dataSet.getCurrentRowId() + 1;
    try {
        repoConn = getRepositoryConnection();
        repoConn.setAutoCommit(false);

        // We handle the Hibernate session specifically to boost the import process
        s = HibernateUtil.getInstance().getSandBoxSession();
        s.clear();
        t = s.beginTransaction();
        // Our prepared INSERT rows statement
        insertStmt = repoConn.prepareStatement("INSERT INTO dbgm_dset_row_values ( " //$NON-NLS-1$
                + "  drow_id, column_refid, column_value " //$NON-NLS-1$
                + ") VALUES ( " //$NON-NLS-1$
                + "  ?, ?, ? " //$NON-NLS-1$
                + ") "); //$NON-NLS-1$

        // Getting our local derby connection
        derbyConn = storageService.getLocalConnection();
        stmt = derbyConn.createStatement();

        // Selecting data from derby local storage
        String selectStmt = handle.getSelectStatement();
        selectStmt = selectStmt.replace("SELECT", "SELECT " + IStorageService.ROWID_COLUMN_NAME //$NON-NLS-1$ //$NON-NLS-2$
                + ","); //$NON-NLS-1$
        rset = stmt.executeQuery(selectStmt);
        final List<IReference> colRefs = dataSet.getColumnsRef();

        int lineBufferCount = 0;
        long counter = 0;
        while (rset.next()) {
            final IDataLine line = typedObjectFactory.create(IDataLine.class);
            line.setDataSet(dataSet);
            // If we got a repository rowid we use it, otherwise we assign the next available rowid
            final long selectedRowId = rset.getLong(1);
            if (selectedRowId != 0) {
                line.setRowId(selectedRowId);
            } else {
                line.setRowId(rowid++);
            }
            // Persisting line so that columns can use its ID
            s.save(line);
            if (deltaType != DeltaType.DELETE) {
                for (int i = 2; i < colRefs.size() + 2; i++) {
                    final Object val = rset.getObject(i);
                    // The first result column is the rowid, so result column i maps to colRefs.get(i - 2)
                    final IReference colRef = colRefs.get(i - 2);
                    final IColumnValue colValue = typedObjectFactory.create(IColumnValue.class);
                    colValue.setDataLine(line);
                    colValue.setColumnRef(colRef);
                    colValue.setValue(val);
                    line.addColumnValue(colValue);
                    insertStmt.setLong(1, line.getUID().rawId());
                    insertStmt.setLong(2, colRef.getUID().rawId());
                    insertStmt.setString(3, colValue.getStringValue());
                    insertStmt.addBatch();
                }
            }
            if (lineBufferCount++ >= LINE_BUFFER_SIZE) {
                t.commit();
                insertStmt.executeBatch();
                s.clear();
                t = s.beginTransaction();
                counter += lineBufferCount;
                m.subTask(MessageFormat.format(DBGMMessages.getString("service.data.savedLines"), //$NON-NLS-1$
                        dataSet.getName(), counter));
                m.worked(500);
                lineBufferCount = 0;
            }
        }
        if (lineBufferCount > 0) {
            t.commit();
            insertStmt.executeBatch();
            s.clear();
            lineBufferCount = 0;
        }
        repoConn.commit();
        dataSet.setCurrentRowId(rowid);
    } catch (SQLException e) {
        throw new ErrorException(DBGMMessages.getString("service.data.saveDatalineFailed") + e.getMessage(), e); //$NON-NLS-1$
    } finally {
        safeClose(rset, stmt, derbyConn, false);
        safeClose(null, insertStmt, repoConn, true);
    }
}

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

public synchronized void flushBatch() throws SQLException {
    if (batchCommit && tripleBatch != null) {
        requireJDBCConnection();

        commitLock.lock();
        try {
            RetryExecution execution = new RetryExecution("FLUSH BATCH");
            execution.setUseSavepoint(true);
            execution.execute(connection, new RetryCommand<Void>() {
                @Override
                public Void run() throws SQLException {
                    PreparedStatement insertTriple = getPreparedStatement("store.triple");
                    insertTriple.clearParameters();
                    insertTriple.clearBatch();

                    synchronized (tripleBatch) {
                        for (KiWiTriple triple : tripleBatch) {
                            // retrieve a new triple ID and set it in the object
                            if (triple.getId() < 0) {
                                triple.setId(getNextSequence());
                            }

                            insertTriple.setLong(1, triple.getId());
                            insertTriple.setLong(2, triple.getSubject().getId());
                            insertTriple.setLong(3, triple.getPredicate().getId());
                            insertTriple.setLong(4, triple.getObject().getId());
                            if (triple.getContext() != null) {
                                insertTriple.setLong(5, triple.getContext().getId());
                            } else {
                                insertTriple.setNull(5, Types.BIGINT);
                            }
                            insertTriple.setBoolean(6, triple.isInferred());
                            insertTriple.setTimestamp(7, new Timestamp(triple.getCreated().getTime()));

                            insertTriple.addBatch();
                        }
                    }
                    insertTriple.executeBatch();

                    tripleBatch.clear();

                    return null;
                }
            });

        } finally {
            commitLock.unlock();
        }

    }

}

From source file:com.flexive.core.storage.genericSQL.GenericHierarchicalStorage.java

/**
 * {@inheritDoc}
 */
@Override
public FxPK contentCreate(Connection con, FxEnvironment env, StringBuilder sql, long newId, FxContent content)
        throws FxCreateException, FxInvalidParameterException {
    content.getRootGroup().removeEmptyEntries();
    content.getRootGroup().compactPositions(true);
    content.checkValidity();

    final Integer contentVersionValue = content.getValue(FxNumber.class, "/VERSION").getBestTranslation();
    final Long contentIdValue = content.getValue(FxLargeNumber.class, "/ID").getBestTranslation();
    final int version = content.isForcePkOnCreate() && contentVersionValue != -1 ? contentVersionValue : 1;

    boolean stepsUpdated = false;
    if (content.isForcePkOnCreate() && contentIdValue != -1) {
        // if a specific ID was set, other versions of this content may exist and we need to process the
        // workflow as if creating a new version of an existing content
        try {
            stepsUpdated = updateStepDependencies(con, contentIdValue, version, env,
                    env.getType(content.getTypeId()), content.getStepId());
        } catch (FxApplicationException e) {
            throw new FxCreateException(LOG, e);
        }
    }

    FxPK pk = createMainEntry(con, newId, version, content, false);
    FxType type = env.getType(content.getTypeId());
    PreparedStatement ps = null;
    FulltextIndexer ft = getFulltextIndexer(pk, con);
    try {
        if (sql == null)
            sql = new StringBuilder(2000);
        ps = con.prepareStatement(CONTENT_DATA_INSERT);
        createDetailEntries(con, ps, ft, sql, pk, content.isMaxVersion(), content.isLiveVersion(),
                content.getData("/"));

        ps.executeBatch();

        ft.commitChanges();
        if (CacheAdmin.getEnvironment().getType(content.getTypeId()).isContainsFlatStorageAssignments()) {
            FxFlatStorage flatStorage = FxFlatStorageManager.getInstance();
            flatStorage.setPropertyData(con, pk, content.getTypeId(), content.getStepId(),
                    content.isMaxVersion(), content.isLiveVersion(),
                    flatStorage.getFlatPropertyData(content.getRootGroup()), true);
        }

        if (content.isForcePkOnCreate()) {
            // we must fix the MAX_VER/LIVE_VER columns now, since they may not have been set correctly in the insert
            fixContentVersionStats(con, env, type, pk.getId(), true, stepsUpdated);
        }

        checkUniqueConstraints(con, env, sql, pk, content.getTypeId());
        content.resolveBinaryPreview();
        if (content.getBinaryPreviewId() != -1) {
            binaryStorage.updateContentBinaryEntry(con, pk, content.getBinaryPreviewId(),
                    content.getBinaryPreviewACL());
        }
        if (type.isTrackHistory())
            EJBLookup.getHistoryTrackerEngine().track(type, pk, ConversionEngine.getXStream().toXML(content),
                    "history.content.created");
    } catch (FxApplicationException e) {
        if (e instanceof FxCreateException)
            throw (FxCreateException) e;
        if (e instanceof FxInvalidParameterException)
            throw (FxInvalidParameterException) e;
        throw new FxCreateException(LOG, e);
    } catch (SQLException e) {
        throw new FxCreateException(LOG, e, "ex.db.sqlError", e.getMessage());
    } finally {
        Database.closeObjects(GenericHierarchicalStorage.class, ps);
        ft.cleanup();
    }
    return pk;
}

From source file:HSqlManager.java

@SuppressWarnings("Duplicates")
@Deprecated
public static void mycoUniqueDB(Connection connection, int bps) throws ClassNotFoundException, SQLException,
        InstantiationException, IllegalAccessException, IOException {
    long time = System.currentTimeMillis();
    DpalLoad.main(new String[1]);
    HSqlPrimerDesign.Dpal_Inst = DpalLoad.INSTANCE_WIN64;
    String base = new File("").getAbsolutePath();
    if (!written) {
        CSV.makeDirectory(new File(base + "/PhageData"));
        INSTANCE.parseAllPhages(bps);
    }
    Connection db = connection;
    db.setAutoCommit(false);
    Statement stat = db.createStatement();
    PrintWriter log = new PrintWriter(new File("javalog.log"));
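    // temporarily disable HSQLDB's file log so the bulk updates below run faster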
    stat.execute("SET FILES LOG FALSE;\n");
    PreparedStatement st = db
            .prepareStatement("UPDATE Primerdb.Primers" + " SET UniqueP = true, Tm = ?, GC =?, Hairpin =?"
                    + "WHERE Cluster = ? and Strain = ? and " + "Sequence = ? and Bp = ?");
    ResultSet call = stat.executeQuery("Select * From Primerdb.Phages;");
    List<String[]> phages = new ArrayList<>();
    String strain = "";
    while (call.next()) {
        String[] r = new String[3];
        r[0] = call.getString("Strain");
        r[1] = call.getString("Cluster");
        r[2] = call.getString("Name");
        phages.add(r);
        if (r[2].equals("xkcd")) {
            strain = r[0];
        }
    }
    call.close();
    String x = strain;
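    // collect the clusters of all phages belonging to the selected strain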
    Set<String> clust = phages.stream().filter(y -> y[0].equals(x)).map(y -> y[1]).collect(Collectors.toSet());
    String[] clusters = clust.toArray(new String[clust.size()]);
    for (String z : clusters) {
        try {
            Set<String> nonclustphages = phages.stream().filter(a -> a[0].equals(x) && !a[1].equals(z))
                    .map(a -> a[2]).collect(Collectors.toSet());
            ResultSet resultSet = stat.executeQuery(
                    "Select Sequence from primerdb.primers" + " where Strain ='" + x + "' and Cluster ='" + z
                            + "' and CommonP = true" + " and Bp = " + Integer.valueOf(bps) + " ");
            Set<CharSequence> primers = Collections.synchronizedSet(new HashSet<>());
            while (resultSet.next()) {
                primers.add(resultSet.getString("Sequence"));
            }
            resultSet.close();
            for (String phage : nonclustphages) {
                //                    String[] seqs = Fasta.parse(base + "/Fastas/" + phage + ".fasta");
                //                    String sequence =seqs[0]+seqs[1];
                //                        Map<String, List<Integer>> seqInd = new HashMap<>();
                //                        for (int i = 0; i <= sequence.length()-bps; i++) {
                //                            String sub=sequence.substring(i,i+bps);
                //                            if(seqInd.containsKey(sub)){
                //                                seqInd.get(sub).add(i);
                //                            }else {
                //                                List<Integer> list = new ArrayList<>();
                //                                list.add(i);
                //                                seqInd.put(sub,list);
                //                            }
                //                        }
                //                    primers = primers.stream().filter(primer->!seqInd.containsKey(primer)).collect(Collectors.toSet());
                //                    primers =Sets.difference(primers,CSV.readCSV(base + "/PhageData/"+Integer.toString(bps)
                //                                    + phage + ".csv"));
                CSV.readCSV(base + "/PhageData/" + Integer.toString(bps) + phage + ".csv").stream()
                        .filter(primers::contains).forEach(primers::remove);
                //                    System.gc();

            }
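            // compute Tm, GC content and hairpin for each remaining unique primer, flushing every 1000 rows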
            int i = 0;
            for (CharSequence a : primers) {
                try {
                    st.setDouble(1, HSqlPrimerDesign.primerTm(a, 0, 800, 1.5, 0.2));
                    st.setDouble(2, HSqlPrimerDesign.gcContent(a));
                    st.setBoolean(3, HSqlPrimerDesign.calcHairpin((String) a, 4));
                    st.setString(4, z);
                    st.setString(5, x);
                    st.setString(6, a.toString());
                    st.setInt(7, bps);
                    st.addBatch();
                } catch (SQLException e) {
                    e.printStackTrace();
                    System.out.println("Error occurred at " + x + " " + z);
                }
                i++;
                if (i == 1000) {
                    i = 0;
                    st.executeBatch();
                    db.commit();
                }
            }
            if (i > 0) {
                st.executeBatch();
                db.commit();
            }
        } catch (SQLException e) {
            e.printStackTrace();
            System.out.println("Error occurred at " + x + " " + z);
        }
        log.println(z);
        log.flush();
        System.gc();
    }
    stat.execute("SET FILES LOG TRUE\n");
    st.close();
    stat.close();
    System.out.println("Unique Updated");
    System.out.println((System.currentTimeMillis() - time) / Math.pow(10, 3) / 60);
}

From source file:org.jamwiki.db.CacheQueryHandler.java

/**
 *
 */
@Override
public void insertTopicVersions(List<TopicVersion> topicVersions) {
    Connection conn = null;
    PreparedStatement stmt = null;
    ResultSet rs = null;
    boolean useBatch = (topicVersions.size() > 1);
    try {
        conn = DatabaseConnection.getConnection();
        if (!this.autoIncrementPrimaryKeys()) {
            stmt = conn.prepareStatement(STATEMENT_INSERT_TOPIC_VERSION);
        } else if (useBatch) {
            // generated keys don't work in batch mode
            stmt = conn.prepareStatement(STATEMENT_INSERT_TOPIC_VERSION_AUTO_INCREMENT);
        } else {
            stmt = conn.prepareStatement(STATEMENT_INSERT_TOPIC_VERSION_AUTO_INCREMENT,
                    Statement.RETURN_GENERATED_KEYS);
        }
        int topicVersionId = -1;
        if (!this.autoIncrementPrimaryKeys() || useBatch) {
            // manually retrieve next topic version id when using batch
            // mode or when the database doesn't support generated keys.
            topicVersionId = DatabaseConnection.executeSequenceQuery(STATEMENT_SELECT_TOPIC_VERSION_SEQUENCE);
        }
        for (TopicVersion topicVersion : topicVersions) {
            if (!this.autoIncrementPrimaryKeys() || useBatch) {
                // FIXME - if two threads update the database simultaneously then
                // it is possible that this code could set the topic version ID
                // to a value that is different from what the database ends up
                // using.
                topicVersion.setTopicVersionId(topicVersionId++);
            }
            StringReader sr = null;
            try {
                int index = 1;
                stmt.setInt(index++, topicVersion.getTopicVersionId());
                if (topicVersion.getEditDate() == null) {
                    topicVersion.setEditDate(new Timestamp(System.currentTimeMillis()));
                }
                stmt.setInt(index++, topicVersion.getTopicId());
                stmt.setString(index++, topicVersion.getEditComment());
                // pass the content into a stream to be passed to Caché
                sr = new StringReader(topicVersion.getVersionContent());
                stmt.setCharacterStream(index++, sr, topicVersion.getVersionContent().length());
                if (topicVersion.getAuthorId() == null) {
                    stmt.setNull(index++, Types.INTEGER);
                } else {
                    stmt.setInt(index++, topicVersion.getAuthorId());
                }
                stmt.setInt(index++, topicVersion.getEditType());
                stmt.setString(index++, topicVersion.getAuthorDisplay());
                stmt.setTimestamp(index++, topicVersion.getEditDate());
                if (topicVersion.getPreviousTopicVersionId() == null) {
                    stmt.setNull(index++, Types.INTEGER);
                } else {
                    stmt.setInt(index++, topicVersion.getPreviousTopicVersionId());
                }
                stmt.setInt(index++, topicVersion.getCharactersChanged());
                stmt.setString(index++, topicVersion.getVersionParamString());
            } finally {
                if (sr != null) {
                    sr.close();
                }
            }
            if (useBatch) {
                stmt.addBatch();
            } else {
                stmt.executeUpdate();
            }
            if (this.autoIncrementPrimaryKeys() && !useBatch) {
                rs = stmt.getGeneratedKeys();
                if (!rs.next()) {
                    throw new SQLException("Unable to determine auto-generated ID for database record");
                }
                topicVersion.setTopicVersionId(rs.getInt(1));
            }
        }
        if (useBatch) {
            stmt.executeBatch();
        }
    } catch (SQLException e) {
        throw new UncategorizedSQLException("insertTopicVersions", null, e);
    } finally {
        DatabaseConnection.closeConnection(conn, stmt, rs);
    }
}

From source file:com.jabyftw.lobstercraft.player.PlayerHandlerService.java

/**
 * This should run on server close, so we don't need to synchronize, as every player join has already been denied.
 *
 * @param connection MySQL connection
 * @throws SQLException in case of something going wrong
 */
private void saveChangedPlayerNames(@NotNull Connection connection) throws SQLException {
    long start = System.nanoTime();
    int numberOfEntriesUpdated = 0, numberOfEntriesInserted = 0;

    // Prepare statements
    PreparedStatement updateStatement = connection.prepareStatement(
            "UPDATE `minecraft`.`player_name_changes` SET `oldPlayerName` = ?, `changeDate` = ? WHERE `user_playerId` = ?;");
    PreparedStatement insertStatement = connection.prepareStatement(
            "INSERT INTO `minecraft`.`player_name_changes` (`user_playerId`, `oldPlayerName`, `changeDate`) VALUES  (?, ?, ?);");

    // Iterate through all entries
    for (NameChangeEntry nameChangeEntry : nameChangeEntries.values()) {
        if (nameChangeEntry.databaseState == DatabaseState.UPDATE_DATABASE) {
            // Set variables
            updateStatement.setString(1, nameChangeEntry.getOldPlayerName());
            updateStatement.setLong(2, nameChangeEntry.getChangeDate());
            updateStatement.setInt(3, nameChangeEntry.getPlayerId());

            // Add batch
            updateStatement.addBatch();
            numberOfEntriesUpdated++;
        } else if (nameChangeEntry.databaseState == DatabaseState.INSERT_TO_DATABASE) {
            // Set variables
            insertStatement.setInt(1, nameChangeEntry.getPlayerId());
            insertStatement.setString(2, nameChangeEntry.getOldPlayerName());
            insertStatement.setLong(3, nameChangeEntry.getChangeDate());

            // Add batch
            insertStatement.addBatch();
            numberOfEntriesInserted++;
        } else {
            // Lets not change their database state
            continue;
        }

        // Update their database state
        nameChangeEntry.databaseState = DatabaseState.ON_DATABASE;
    }

    // Delete the rows that weren't updated
    PreparedStatement deleteStatement = connection
            .prepareStatement("DELETE FROM `minecraft`.`player_name_changes` WHERE `changeDate` > ?;");
    deleteStatement.setLong(1, System.currentTimeMillis() + REQUIRED_TIME_TO_ALLOW_NAME);
    deleteStatement.execute();
    deleteStatement.close();

    // Delete from cache too
    Iterator<NameChangeEntry> iterator = nameChangeEntries.values().iterator();
    while (iterator.hasNext()) {
        NameChangeEntry next = iterator.next();

        if (next.isNameAvailable())
            iterator.remove();
    }

    // Execute and announce if needed
    if (numberOfEntriesUpdated > 0)
        updateStatement.executeBatch();
    if (numberOfEntriesInserted > 0)
        insertStatement.executeBatch();
    if (numberOfEntriesUpdated > 0 || numberOfEntriesInserted > 0)
        LobsterCraft.logger.info(Util.appendStrings("Took us ",
                Util.formatDecimal(
                        (double) (System.nanoTime() - start) / (double) TimeUnit.MILLISECONDS.toNanos(1)),
                "ms to clean old, insert ", numberOfEntriesInserted, " and update ", numberOfEntriesUpdated,
                " name changes."));

    // Close statement
    updateStatement.close();
    insertStatement.close();
}

From source file:org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void insertCategories(List<Category> categoryList, int virtualWikiId, int topicId, Connection conn)
        throws SQLException {
    if (topicId == -1) {
        throw new SQLException("Invalid topicId passed to method AnsiQueryHandler.insertCategories");
    }
    PreparedStatement stmt = null;
    try {
        stmt = conn.prepareStatement(STATEMENT_INSERT_CATEGORY);
        for (Category category : categoryList) {
            stmt.setInt(1, topicId);
            stmt.setString(2, category.getName());
            stmt.setString(3, category.getSortKey());
            stmt.addBatch();
        }
        stmt.executeBatch();
    } finally {
        DatabaseConnection.closeStatement(stmt);
    }
}

From source file:org.jamwiki.db.AnsiQueryHandler.java

/**
 *
 */
public void insertTopicLinks(List<Topic> topicLinks, int topicId, Connection conn) throws SQLException {
    if (topicId == -1) {
        throw new SQLException("Invalid topicId passed to method AnsiQueryHandler.insertTopicLinks");
    }
    PreparedStatement stmt = null;
    try {
        stmt = conn.prepareStatement(STATEMENT_INSERT_TOPIC_LINKS);
        for (Topic topicLink : topicLinks) {
            stmt.setInt(1, topicId);
            stmt.setInt(2, topicLink.getNamespace().getId());
            stmt.setString(3, topicLink.getPageName());
            stmt.addBatch();
        }
        stmt.executeBatch();
    } finally {
        DatabaseConnection.closeStatement(stmt);
    }
}