Example usage for java.sql PreparedStatement clearBatch

Introduction

On this page you can find example usages of java.sql PreparedStatement.clearBatch, collected from open-source projects.

Prototype

void clearBatch() throws SQLException;

Document

Empties this Statement object's current list of SQL commands.
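
Before the project examples, here is a minimal sketch of the usual pattern in plain JDBC (not taken from the examples below; the table and column names are placeholders): commands are queued with addBatch, flushed with executeBatch, committed, and the statement's pending command list is emptied with clearBatch before the statement is reused for the next chunk.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

public class ClearBatchSketch {
    // Insert rows in fixed-size chunks; clearBatch() empties the statement's
    // pending command list after each executeBatch() so the same statement
    // can be reused for the next chunk.
    static void insertInChunks(Connection conn, List<String> names, int chunkSize) throws SQLException {
        conn.setAutoCommit(false);
        try (PreparedStatement ps = conn.prepareStatement(
                "insert into demo_table (name) values (?)")) {
            int queued = 0;
            for (String name : names) {
                ps.setString(1, name);
                ps.addBatch();
                if (++queued % chunkSize == 0) {
                    ps.executeBatch();
                    conn.commit();
                    ps.clearBatch(); // discard the already-executed commands
                }
            }
            ps.executeBatch(); // flush any remaining commands
            conn.commit();
        }
    }
}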

Usage

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private boolean createReconfigurationRecordsDB(Map<String, String> nameStates, Set<NodeIDType> newActives) {
    String insertCmd = "insert into " + getRCRecordTable() + " (" + Columns.RC_GROUP_NAME.toString() + ", "
            + Columns.STRINGIFIED_RECORD.toString() + ", " + Columns.SERVICE_NAME.toString()
            + " ) values (?,?,?)";

    PreparedStatement insertRC = null;
    Connection conn = null;
    boolean insertedAll = true;
    Set<String> batch = new HashSet<String>();
    Set<String> committed = new HashSet<String>();
    try {
        if (conn == null) {
            conn = this.getDefaultConn();
            conn.setAutoCommit(false);
            insertRC = conn.prepareStatement(insertCmd);
        }
        assert (nameStates != null && !nameStates.isEmpty());
        String rcGroupName = this.getRCGroupName(nameStates.keySet().iterator().next());
        int i = 0;
        long t1 = System.currentTimeMillis();
        for (String name : nameStates.keySet()) {
            ReconfigurationRecord<NodeIDType> record = new ReconfigurationRecord<NodeIDType>(name, -1,
                    newActives);
            /* We just directly initialize with WAIT_ACK_STOP:-1 instead of
             * starting with READY:-1 and pretending to go through the whole
             * reconfiguration protocol sequence. */
            record.setState(name, -1, RCStates.WAIT_ACK_STOP);
            insertRC.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                insertRC.setClob(2, new StringReader(record.toString()));
            else
                insertRC.setString(2, record.toString());
            insertRC.setString(3, name);
            insertRC.addBatch();
            batch.add(name);
            i++;
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == nameStates.size()) {
                int[] executed = insertRC.executeBatch();
                conn.commit();
                insertRC.clearBatch();
                committed.addAll(batch);
                batch.clear();
                for (int j : executed)
                    insertedAll = insertedAll && (j > 0);
                if (insertedAll)
                    log.log(Level.FINE, "{0} successfully logged the last {1} messages in {2} ms",
                            new Object[] { this, (i + 1), (System.currentTimeMillis() - t1) });
                t1 = System.currentTimeMillis();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting batched RC records using " + insertCmd);
        sqle.printStackTrace();
    } finally {
        cleanup(insertRC);
        cleanup(conn);
    }

    // rollback
    if (!insertedAll) {
        for (String name : nameStates.keySet())
            if (committed.contains(name))
                this.deleteReconfigurationRecord(name, 0);
    }

    return insertedAll;
}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private Set<String> putReconfigurationRecordDB(Map<String, ReconfigurationRecord<NodeIDType>> toCommit) {
    String updateCmd = "update " + getRCRecordTable() + " set " + Columns.RC_GROUP_NAME.toString() + "=?, "
            + Columns.STRINGIFIED_RECORD.toString() + "=? where " + Columns.SERVICE_NAME.toString() + "=?";
    String cmd = updateCmd;

    PreparedStatement pstmt = null;
    Connection conn = null;
    Set<String> committed = new HashSet<String>();
    String[] keys = toCommit.keySet().toArray(new String[0]);
    try {
        ArrayList<String> batch = new ArrayList<String>();
        for (int i = 0; i < keys.length; i++) {
            String name = keys[i];
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                pstmt = conn.prepareStatement(updateCmd);
            }
            // removal
            if (toCommit.get(name) == null) {
                this.deleteReconfigurationRecordDB(name);
                log.log(Level.INFO, "{0} deleted RC record {1}", new Object[] { this, name });
                committed.add(name);
                continue;
            }
            // else update/insert
            String rcGroupName = toCommit.get(name).getRCGroupName();
            if (rcGroupName == null)
                rcGroupName = this.getRCGroupName(name);
            pstmt.setString(1, rcGroupName);
            if (RC_RECORD_CLOB_OPTION)
                pstmt.setClob(2, new StringReader((toCommit.get(name)).toString()));
            else
                pstmt.setString(2, (toCommit.get(name)).toString());
            pstmt.setString(3, name);
            pstmt.addBatch();
            batch.add(name);

            int[] executed = new int[batch.size()];
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == toCommit.size()) {
                executed = pstmt.executeBatch();
                assert (executed.length == batch.size());
                conn.commit();
                pstmt.clearBatch();
                for (int j = 0; j < executed.length; j++) {
                    if (executed[j] > 0) {
                        log.log(Level.FINE, "{0} updated RC DB record to {1}",
                                new Object[] { this, toCommit.get(batch.get(j)).getSummary() });
                        committed.add(batch.get(j));
                    } else
                        log.log(Level.FINE,
                                "{0} unable to update RC record {1} (executed={2}), will try insert",
                                new Object[] { this, batch.get(j), executed[j] });
                }
                batch.clear();
            }
        }
    } catch (SQLException sqle) {
        log.severe("SQLException while inserting RC record using " + cmd);
        sqle.printStackTrace();
    } finally {
        cleanup(pstmt);
        cleanup(conn);
    }

    log.log(Level.FINE, "{0} batch-committed {1}({2}) out of {3}({4})",
            new Object[] { this, committed.size(), committed, toCommit.size(), toCommit.keySet() });
    committed.addAll(this.putReconfigurationRecordIndividually(this.diff(toCommit, committed)));
    log.log(Level.FINE, "{0} committed {1}({2}) out of {3}({4})",
            new Object[] { this, committed.size(), committed, toCommit.size(), toCommit.keySet() });
    return committed;
}

From source file:hoot.services.db.DbUtils.java

public static void batchRecordsDirectNodes(final long mapId, final List<?> records,
        final RecordBatchType recordBatchType, Connection conn, int maxRecordBatchSize) throws Exception {
    PreparedStatement ps = null;
    try {
        String sql = null;
        long execResult = -1;
        //conn.setAutoCommit(false);
        int count = 0;

        switch (recordBatchType) {
        case INSERT:

            sql = "insert into current_nodes_" + mapId + " (id, latitude, "
                    + "longitude, changeset_id, visible, \"timestamp\", tile, version, tags) "
                    + "values (?, ?, ?, ?, ?, ?, ?, ?, ?)";

            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentNodes node = (CurrentNodes) o;

                ps.setLong(1, node.getId());
                ps.setInt(2, node.getLatitude());
                ps.setInt(3, node.getLongitude());
                ps.setLong(4, node.getChangesetId());
                ps.setBoolean(5, node.getVisible());
                ps.setTimestamp(6, node.getTimestamp());
                ps.setLong(7, node.getTile());
                ps.setLong(8, node.getVersion());

                Map<String, String> tags = (Map<String, String>) node.getTags();

                String hstoreStr = "";
                Iterator it = tags.entrySet().iterator();
                while (it.hasNext()) {
                    Map.Entry pairs = (Map.Entry) it.next();
                    if (hstoreStr.length() > 0) {
                        hstoreStr += ",";
                    }
                    hstoreStr += "\"" + pairs.getKey() + "\"=>\"" + pairs.getValue() + "\"";
                }
                ps.setObject(9, hstoreStr, Types.OTHER);
                ps.addBatch();

                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();

                    }
                }

            }

            break;

        case UPDATE:

            sql = "update current_nodes_" + mapId + " set  latitude=?, "
                    + "longitude=?, changeset_id=?, visible=?, \"timestamp\"=?, tile=?, version=?, tags=? "
                    + "where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentNodes node = (CurrentNodes) o;

                ps.setInt(1, node.getLatitude());
                ps.setInt(2, node.getLongitude());
                ps.setLong(3, node.getChangesetId());
                ps.setBoolean(4, node.getVisible());
                ps.setTimestamp(5, node.getTimestamp());
                ps.setLong(6, node.getTile());
                ps.setLong(7, node.getVersion());

                Map<String, String> tags = (Map<String, String>) node.getTags();

                String hstoreStr = "";
                Iterator it = tags.entrySet().iterator();
                while (it.hasNext()) {
                    Map.Entry pairs = (Map.Entry) it.next();
                    if (hstoreStr.length() > 0) {
                        hstoreStr += ",";
                    }
                    hstoreStr += "\"" + pairs.getKey() + "\"=>\"" + pairs.getValue() + "\"";
                }
                ps.setObject(8, hstoreStr, Types.OTHER);

                ps.setLong(9, node.getId());

                ps.addBatch();

                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                        ps.clearBatch();
                    }
                }
            }

            break;

        case DELETE:

            sql = "delete from current_nodes_" + mapId + " where id=?";
            ps = conn.prepareStatement(sql);
            for (Object o : records) {
                CurrentNodes node = (CurrentNodes) o;

                ps.setLong(1, node.getId());

                ps.addBatch();
                if (maxRecordBatchSize > -1) {
                    if (++count % maxRecordBatchSize == 0) {
                        ps.executeBatch();
                        //conn.commit();
                        ps.clearBatch();
                    }
                }
            }

            break;

        default:
            throw new Exception("");
        }

        ps.executeBatch();
        //conn.commit();
    } catch (Exception e) {
        //conn.rollback();
        String msg = "Error executing batch query.";
        msg += "  " + e.getMessage();
        msg += " Cause:" + e.getCause().toString();
        throw new Exception(msg);
    } finally {
        if (ps != null) {
            ps.close();
        }
        //conn.setAutoCommit(true);
    }
}

From source file:org.pentaho.di.core.database.Database.java

public void clearBatch(PreparedStatement preparedStatement) throws KettleDatabaseException {
    try {
        preparedStatement.clearBatch();
    } catch (SQLException e) {
        throw new KettleDatabaseException("Unable to clear batch for prepared statement", e);
    }
}
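
A hypothetical caller of the wrapper above might look like the following; the BatchFlusher class, the flushBatch helper, and its error message are illustrative only and not part of the Pentaho source, while Database#clearBatch is the method shown above.

import java.sql.PreparedStatement;
import java.sql.SQLException;
import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.exception.KettleDatabaseException;

class BatchFlusher {
    // Execute the pending batch and, on failure, use the Database#clearBatch
    // wrapper above to discard the pending commands before rethrowing.
    void flushBatch(Database db, PreparedStatement ps) throws KettleDatabaseException {
        try {
            ps.executeBatch();
        } catch (SQLException e) {
            db.clearBatch(ps);
            throw new KettleDatabaseException("Error executing batch", e);
        }
    }
}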

From source file:com.oltpbenchmark.benchmarks.tpcc.TPCCLoader.java

protected int loadStock(int whseKount, int itemKount) {

    int k = 0;
    int t = 0;
    int randPct = 0;
    int len = 0;
    int startORIGINAL = 0;

    try {

        PreparedStatement stckPrepStmt = getInsertStatement(TPCCConstants.TABLENAME_STOCK);

        now = new java.util.Date();
        t = (whseKount * itemKount);
        LOG.debug("\nStart Stock Load for " + t + " units @ " + now + " ...");

        if (outputFiles == true) {
            out = new PrintWriter(new FileOutputStream(fileLocation + "stock.csv"));
            LOG.debug("\nWriting Stock file to: " + fileLocation + "stock.csv");
        }

        Stock stock = new Stock();

        for (int i = 1; i <= itemKount; i++) {

            for (int w = 1; w <= whseKount; w++) {

                stock.s_i_id = i;
                stock.s_w_id = w;
                stock.s_quantity = TPCCUtil.randomNumber(10, 100, gen);
                stock.s_ytd = 0;
                stock.s_order_cnt = 0;
                stock.s_remote_cnt = 0;

                // s_data
                randPct = TPCCUtil.randomNumber(1, 100, gen);
                len = TPCCUtil.randomNumber(26, 50, gen);
                if (randPct > 10) {
                    // 90% of the time i_data is a random string of length [26..50]
                    stock.s_data = TPCCUtil.randomStr(len);
                } else {
                    // 10% of time i_data has "ORIGINAL" crammed somewhere
                    // in middle
                    startORIGINAL = TPCCUtil.randomNumber(2, (len - 8), gen);
                    stock.s_data = TPCCUtil.randomStr(startORIGINAL - 1) + "ORIGINAL"
                            + TPCCUtil.randomStr(len - startORIGINAL - 9);
                }

                stock.s_dist_01 = TPCCUtil.randomStr(24);
                stock.s_dist_02 = TPCCUtil.randomStr(24);
                stock.s_dist_03 = TPCCUtil.randomStr(24);
                stock.s_dist_04 = TPCCUtil.randomStr(24);
                stock.s_dist_05 = TPCCUtil.randomStr(24);
                stock.s_dist_06 = TPCCUtil.randomStr(24);
                stock.s_dist_07 = TPCCUtil.randomStr(24);
                stock.s_dist_08 = TPCCUtil.randomStr(24);
                stock.s_dist_09 = TPCCUtil.randomStr(24);
                stock.s_dist_10 = TPCCUtil.randomStr(24);

                k++;
                if (outputFiles == false) {
                    stckPrepStmt.setLong(1, stock.s_w_id);
                    stckPrepStmt.setLong(2, stock.s_i_id);
                    stckPrepStmt.setLong(3, stock.s_quantity);
                    stckPrepStmt.setDouble(4, stock.s_ytd);
                    stckPrepStmt.setLong(5, stock.s_order_cnt);
                    stckPrepStmt.setLong(6, stock.s_remote_cnt);
                    stckPrepStmt.setString(7, stock.s_data);
                    stckPrepStmt.setString(8, stock.s_dist_01);
                    stckPrepStmt.setString(9, stock.s_dist_02);
                    stckPrepStmt.setString(10, stock.s_dist_03);
                    stckPrepStmt.setString(11, stock.s_dist_04);
                    stckPrepStmt.setString(12, stock.s_dist_05);
                    stckPrepStmt.setString(13, stock.s_dist_06);
                    stckPrepStmt.setString(14, stock.s_dist_07);
                    stckPrepStmt.setString(15, stock.s_dist_08);
                    stckPrepStmt.setString(16, stock.s_dist_09);
                    stckPrepStmt.setString(17, stock.s_dist_10);
                    stckPrepStmt.addBatch();
                    if ((k % configCommitCount) == 0) {
                        long tmpTime = new java.util.Date().getTime();
                        String etStr = "  Elasped Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000)
                                + "                    ";
                        LOG.debug(etStr.substring(0, 30) + "  Writing record " + k + " of " + t);
                        lastTimeMS = tmpTime;
                        stckPrepStmt.executeBatch();
                        stckPrepStmt.clearBatch();
                        transCommit();
                    }
                } else {
                    String str = "";
                    str = str + stock.s_i_id + ",";
                    str = str + stock.s_w_id + ",";
                    str = str + stock.s_quantity + ",";
                    str = str + stock.s_ytd + ",";
                    str = str + stock.s_order_cnt + ",";
                    str = str + stock.s_remote_cnt + ",";
                    str = str + stock.s_data + ",";
                    str = str + stock.s_dist_01 + ",";
                    str = str + stock.s_dist_02 + ",";
                    str = str + stock.s_dist_03 + ",";
                    str = str + stock.s_dist_04 + ",";
                    str = str + stock.s_dist_05 + ",";
                    str = str + stock.s_dist_06 + ",";
                    str = str + stock.s_dist_07 + ",";
                    str = str + stock.s_dist_08 + ",";
                    str = str + stock.s_dist_09 + ",";
                    str = str + stock.s_dist_10;
                    out.println(str);

                    if ((k % configCommitCount) == 0) {
                        long tmpTime = new java.util.Date().getTime();
                        String etStr = "  Elasped Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000)
                                + "                    ";
                        LOG.debug(etStr.substring(0, 30) + "  Writing record " + k + " of " + t);
                        lastTimeMS = tmpTime;
                    }
                }

            } // end for [w]

        } // end for [i]

        long tmpTime = new java.util.Date().getTime();
        String etStr = "  Elasped Time(ms): " + ((tmpTime - lastTimeMS) / 1000.000) + "                    ";
        LOG.debug(etStr.substring(0, 30) + "  Writing final records " + k + " of " + t);
        lastTimeMS = tmpTime;
        if (outputFiles == false) {
            stckPrepStmt.executeBatch();
        }
        transCommit();

        now = new java.util.Date();
        LOG.debug("End Stock Load @  " + now);

    } catch (SQLException se) {
        LOG.debug(se.getMessage());
        transRollback();

    } catch (Exception e) {
        e.printStackTrace();
        transRollback();
    }

    return (k);

}

From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java

public/* synchronized */Map<String, HotRestoreInfo> pause(Map<String, HotRestoreInfo> hriMap) {
    if (isClosed())
        return null;
    if (!USE_CHECKPOINTS_AS_PAUSE_TABLE)
        return pauseBatchIndividually(hriMap);

    String updateCmdNoLogIndex = "update " + (USE_CHECKPOINTS_AS_PAUSE_TABLE ? getCTable() : getPTable())
            + " set serialized=?, has_serialized=true where  paxos_id=?";

    Map<String, HotRestoreInfo> paused = new HashMap<String, HotRestoreInfo>();
    HotRestoreInfo[] hris = hriMap.values().toArray(new HotRestoreInfo[0]);
    PreparedStatement pstmt = null;
    Connection conn = null;
    try {
        Map<String, HotRestoreInfo> batch = new HashMap<String, HotRestoreInfo>();
        for (int i = 0; i < hris.length; i++) {
            String paxosID = hris[i].paxosID;
            if (conn == null) {
                conn = this.getDefaultConn();
                conn.setAutoCommit(false);
                pstmt = conn.prepareStatement(updateCmdNoLogIndex);
            }
            pstmt.setString(1, hriMap.get(paxosID).toString());
            pstmt.setString(2, paxosID);

            pstmt.addBatch();
            batch.put(paxosID, hris[i]);
            if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == hriMap.size()) {
                pstmt.executeBatch();
                conn.commit();
                pstmt.clearBatch();
                paused.putAll(batch);

                log.log(Level.FINE, "{0} paused [{1}] ,[{2}]",
                        new Object[] { this, Util.truncatedLog(batch.keySet(), 16) });
                batch.clear();
            }
        }
    } catch (SQLException e) {
        log.severe(this + " failed to pause batch " + Util.truncatedLog(hriMap.keySet(), 10));
        e.printStackTrace();
    } finally {
        cleanup(pstmt);
        cleanup(conn);
    }
    paused.putAll(this.pauseBatchIndividually(this.diffHRI(hriMap, paused)));
    return paused;
}

From source file:gemlite.core.internal.db.DBSynchronizer.java

public boolean processEvents(List<AsyncEvent> events) {
    if (this.shutDown) {
        return false;
    }
    boolean completedSucessfully = false;
    String listOfEventsString = null;
    // The retval will be considered true only if the list was iterated
    // completely. If the List iteration was incomplete we will return
    // false so that the events are not removed during failure.
    // As for individual events, they can get exceptions due to constraint
    // violations etc but will not cause return value to be false.
    Statement stmt = null;
    PreparedStatement ps = null;
    // keep track of the previous prepared statement in case we can optimize
    // by creating a batch when the previous and current statements match
    PreparedStatement prevPS = null;
    AsyncEvent prevEvent = null;
    boolean prevPSHasBatch = false;
    Iterator<AsyncEvent> itr = events.iterator();
    AsyncEvent event = null;
    String eventString = null;
    String prevEventStr = null;
    try {
        while (!(completedSucessfully = !itr.hasNext())) {
            event = itr.next();
            Operation operation = event.getOperation();
            if (logger.isDebugEnabled()) {
                eventString = event.toString();
                if (prevEvent != null) {
                    prevEventStr = prevEvent.toString();
                }
                logger.info("DBSynchronizer::processEvents :processing PK based " + "event=" + eventString
                        + " AsyncEvent Operation=" + operation);
            } else {
                eventString = null;
                prevEventStr = null;
            }
            try {
                if (operation.isPutAll() || operation.isCreate())
                    ps = getExecutableInsertPrepStmntPKBased(event, prevPS);
                else if (operation.isUpdate())
                    ps = getExecutableUpdatePrepStmntPKBased(event, prevPS);
                else if (operation.isDestroy())
                    ps = getExecutableDeletePrepStmntPKBased(event, prevPS);
                else {
                    logger.error("DBSynchronizer::processEvents: unexpected " + "eventType " + operation
                            + " for " + event);
                    continue;
                }
            } catch (SQLException sqle) {
                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__1, null, event,
                        eventString, logger, true);
                if (handler.breakTheLoop()) {
                    break;
                }
            } catch (RegionDestroyedException rde) {
                if (logger.isInfoEnabled()) {
                    logger.info("DBSynchronizer::processEvents: WBCLEvent " + event
                            + " will  be discarded as the underlying region "
                            + "for the table has been destroyed");
                }
                continue;
            }
            if (logger.isDebugEnabled()) {
                if (eventString == null) {
                    eventString = event.toString();
                }
                logger.debug("DBSynchronizer::processEvents: Statement=" + (ps != null ? ps : stmt)
                        + " for event=" + eventString);
            }
            try {
                int num;
                if (prevPS != null && prevPS != ps) {
                    try {
                        if (prevPSHasBatch) {
                            prevPS.addBatch();
                            if (logger.isDebugEnabled()) {
                                logger.info("DBSynchronizer::processEvents executing "
                                        + "batch statement for prepared statement=" + prevPS + " for event="
                                        + prevEventStr);
                            }
                            final int[] res = prevPS.executeBatch();
                            num = res.length;
                            prevPSHasBatch = false;
                        } else {
                            num = prevPS.executeUpdate();
                        }
                        if (logger.isDebugEnabled()) {
                            logger.info("DBSynchronizer::processEvents total num rows " + "modified=" + num
                                    + " for prepared statement=" + prevPS + " for event=" + prevEventStr);
                        }
                        // clear event from failure map if present
                        helper.removeEventFromFailureMap(prevEvent);
                    } catch (SQLException sqle) {
                        if (prevPSHasBatch) {
                            try {
                                prevPS.clearBatch();
                            } catch (SQLException e) {
                                // ignored
                            }
                            prevPSHasBatch = false;
                        }
                        SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__3, prevPS,
                                prevEvent, prevEventStr, logger, false);
                        if (handler.breakTheLoop()) {
                            break;
                        }
                        prevPS = null;
                        prevEvent = null;
                        prevPSHasBatch = false;
                    }
                }
                // in case previous prepared statement matches the current
                // one,
                // it will already be added as a batch when setting the
                // arguments
                // by AsyncEventHelper#setColumnInPrepStatement()
                else if (prevPS != null && ps != null) {
                    prevPSHasBatch = true;
                    if (logger.isDebugEnabled()) {
                        logger.info("DBSynchronizer::processEvents added new row "
                                + "as a batch for prepared statement=" + ps + " for event=" + eventString);
                    }
                }

                prevPS = ps;
                prevEvent = event;
            } catch (SQLException sqle) {
                if (prevPS != null && prevPSHasBatch) {
                    try {
                        prevPS.clearBatch();
                    } catch (SQLException e) {
                        // ignored
                    }
                }
                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__3,
                        ps != null ? ps : stmt, event, eventString, logger, false);
                if (handler.breakTheLoop()) {
                    break;
                }
            }
        } // end of while (event list processing loop)

        // now handle the last statement in the above loop since it is still
        // pending due to anticipated batching
        if (completedSucessfully) {
            try {
                if (logger.isInfoEnabled()) {
                    if (listOfEventsString == null) {
                        listOfEventsString = events.toString();
                    }
                    logger.info("DBSynchronizer::processEvents: " + "before commit of events="
                            + listOfEventsString);
                }
                int num;
                // first the case when the previous statement was a batched
                // one
                // so add current one as batch and execute
                if (prevPSHasBatch) {
                    ps.addBatch();
                    if (logger.isDebugEnabled()) {
                        logger.info("DBSynchronizer::processEvents executing batch "
                                + "statement for prepared statement=" + ps + " for event=" + eventString);
                    }
                    final int[] res = ps.executeBatch();
                    num = res.length;
                }
                // next the case of a non BULK_INSERT operation;
                // BULK_INSERT operations are always executed as a single
                // batch
                // by itself, so will never reach here
                else if (ps != null) {
                    num = ps.executeUpdate();
                    if (event != null) {
                        // clear event from failure map if present
                        helper.removeEventFromFailureMap(event);
                    }
                } else {
                    num = 0;
                }
                // clear event from failure map if present
                helper.removeEventFromFailureMap(event);
                if (logger.isDebugEnabled()) {
                    if (ps != null) {
                        logger.info("DBSynchronizer::processEvents num rows modified=" + num
                                + " for prepared statement=" + ps + " for event=" + eventString);
                    }
                }
                this.conn.commit();
                if (logger.isInfoEnabled()) {
                    if (listOfEventsString == null) {
                        listOfEventsString = events.toString();
                    }
                    logger.info("DBSynchronizer::processEvents: " + "committed successfully for events="
                            + listOfEventsString);
                }
            } catch (SQLException sqle) {

                if (ps != null && prevPSHasBatch) {
                    try {
                        ps.clearBatch();
                    } catch (SQLException e) {
                        // ignored
                    }
                }

                SqlExceptionHandler handler = handleSQLException(sqle, DB_SYNCHRONIZER__7,
                        ps != null ? ps : stmt, event, eventString, logger, true);
                if (handler != SqlExceptionHandler.IGNORE) {
                    completedSucessfully = false;
                }
            }
        }
    } catch (Exception e) {

        if (logger != null && logger.isErrorEnabled() && !(event != null && helper.skipFailureLogging(event))) {
            StringBuilder sb = new StringBuilder();
            if (event != null) {
                if (eventString == null) {
                    eventString = event.toString();
                }
                sb.append("[FAILED: ").append(eventString).append(" ]");
            }
            while (itr.hasNext()) {
                sb.append("[ ").append(itr.next().toString()).append(" ]");
            }
            helper.logFormat(logger, Level.SEVERE, e, DB_SYNCHRONIZER__2, sb.toString());
        }
        SqlExceptionHandler.CLEANUP.execute(this);
        completedSucessfully = false;
    }

    if (completedSucessfully) {
        // on successful completion, log any pending errors to XML file;
        // when
        // unsuccessful then we know that batch will be retried so don't log
        // in
        // that case else it can get logged multiple times
        // clear event from failure map if present
        flushErrorEventsToLog();
    }

    if (logger.isDebugEnabled()) {
        logger.info("DBSynchronizer::processEvents: processed " + events.size() + " events, success="
                + completedSucessfully);
    }

    return completedSucessfully;
}

From source file:org.pentaho.di.core.database.Database.java

/**
 * Close the prepared statement of the insert statement.
 *
 * @param ps
 *          The prepared statement to empty and close.
 * @param batch
 *          true if you are using batch processing
 * @param batchCounter
 *          The number of rows on the batch queue
 * @throws KettleDatabaseException
 */
public void emptyAndCommit(PreparedStatement ps, boolean batch, int batchCounter)
        throws KettleDatabaseException {
    boolean isBatchUpdate = false;
    try {
        if (ps != null) {
            if (!isAutoCommit()) {
                // Execute the batch or just perform a commit.
                if (batch && getDatabaseMetaData().supportsBatchUpdates() && batchCounter > 0) {
                    // The problem with the batch counters is that you can't just
                    // execute the current batch.
                    // Certain databases have a problem if you execute the batch and if
                    // there are no statements in it.
                    // You can't just catch the exception either because you would have
                    // to roll back on certain databases before you can then continue to
                    // do anything.
                    // That leaves the task of keeping track of the number of rows up to
                    // our responsibility.
                    isBatchUpdate = true;
                    ps.executeBatch();
                    commit();
                    ps.clearBatch();
                } else {
                    commit();
                }
            }

            // Let's not forget to close the prepared statement.
            //
            ps.close();
        }
    } catch (BatchUpdateException ex) {
        throw createKettleDatabaseBatchException("Error updating batch", ex);
    } catch (SQLException ex) {
        if (isBatchUpdate) {
            throw createKettleDatabaseBatchException("Error updating batch", ex);
        } else {
            throw new KettleDatabaseException("Unable to empty ps and commit connection.", ex);
        }
    }
}

From source file:org.apache.tajo.catalog.store.AbstractDBStore.java

@Override
public void addPartitions(String databaseName, String tableName,
        List<CatalogProtos.PartitionDescProto> partitions, boolean ifNotExists)
        throws UndefinedDatabaseException, UndefinedTableException, UndefinedPartitionMethodException {

    final int databaseId = getDatabaseId(databaseName);
    final int tableId = getTableId(databaseId, databaseName, tableName);
    ensurePartitionTable(tableName, tableId);

    Connection conn = null;
    // To delete existing partition keys
    PreparedStatement pstmt1 = null;
    // To delete existing partition;
    PreparedStatement pstmt2 = null;
    // To insert a partition
    PreparedStatement pstmt3 = null;
    // To insert partition keys
    PreparedStatement pstmt4 = null;

    PartitionDescProto partitionDesc = null;

    try {
        conn = getConnection();
        conn.setAutoCommit(false);

        int currentIndex = 0, lastIndex = 0;

        pstmt1 = conn.prepareStatement(deletePartitionKeysSql);
        pstmt2 = conn.prepareStatement(deletePartitionSql);
        pstmt3 = conn.prepareStatement(insertPartitionSql);
        pstmt4 = conn.prepareStatement(insertPartitionKeysSql);

        // Set a batch size like 1000. This avoids SQL injection and also takes care of out of memory issue.
        int batchSize = conf.getInt(TajoConf.ConfVars.PARTITION_DYNAMIC_BULK_INSERT_BATCH_SIZE.varname, 1000);
        for (currentIndex = 0; currentIndex < partitions.size(); currentIndex++) {
            PartitionDescProto partition = partitions.get(currentIndex);

            try {
                partitionDesc = getPartition(databaseName, tableName, partition.getPartitionName());
                // Delete existing partition and partition keys
                if (ifNotExists) {
                    pstmt1.setInt(1, partitionDesc.getId());
                    pstmt1.addBatch();
                    pstmt1.clearParameters();

                    pstmt2.setInt(1, partitionDesc.getId());
                    pstmt2.addBatch();
                    pstmt2.clearParameters();
                }
            } catch (UndefinedPartitionException e) {
            }

            // Insert partition
            pstmt3.setInt(1, tableId);
            pstmt3.setString(2, partition.getPartitionName());
            pstmt3.setString(3, partition.getPath());
            pstmt3.setLong(4, partition.getNumBytes());
            pstmt3.addBatch();
            pstmt3.clearParameters();

            // Insert partition keys
            for (int i = 0; i < partition.getPartitionKeysCount(); i++) {
                PartitionKeyProto partitionKey = partition.getPartitionKeys(i);
                pstmt4.setInt(1, tableId);
                pstmt4.setString(2, partition.getPartitionName());
                pstmt4.setInt(3, tableId);
                pstmt4.setString(4, partitionKey.getColumnName());
                pstmt4.setString(5, partitionKey.getPartitionValue());

                pstmt4.addBatch();
                pstmt4.clearParameters();
            }

            // Execute batch
            if (currentIndex >= lastIndex + batchSize && lastIndex != currentIndex) {
                pstmt1.executeBatch();
                pstmt1.clearBatch();
                pstmt2.executeBatch();
                pstmt2.clearBatch();
                pstmt3.executeBatch();
                pstmt3.clearBatch();
                pstmt4.executeBatch();
                pstmt4.clearBatch();
                lastIndex = currentIndex;
            }
        }

        // Execute existing batch queries
        if (lastIndex != currentIndex) {
            pstmt1.executeBatch();
            pstmt2.executeBatch();
            pstmt3.executeBatch();
            pstmt4.executeBatch();
        }

        if (conn != null) {
            conn.commit();
        }
    } catch (SQLException se) {
        if (conn != null) {
            try {
                conn.rollback();
            } catch (SQLException e) {
                LOG.error(e, e);
            }
        }
        throw new TajoInternalError(se);
    } finally {
        CatalogUtil.closeQuietly(pstmt1);
        CatalogUtil.closeQuietly(pstmt2);
        CatalogUtil.closeQuietly(pstmt3);
        CatalogUtil.closeQuietly(pstmt4);
    }
}

From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java

private/* synchronized */Set<String> pauseLogIndex(Map<String, LogIndex> toCommit) {
    if (isClosed())
        return null;
    if (!USE_CHECKPOINTS_AS_PAUSE_TABLE)
        return this.pauseLogIndexIndividually(toCommit);
    String updateCmd = "update " + (USE_CHECKPOINTS_AS_PAUSE_TABLE ? getCTable() : getPTable())
            + " set logindex=? where  paxos_id=?";

    PreparedStatement pstmt = null;
    Connection conn = null;
    Set<String> paused = new HashSet<String>();
    Set<String> batch = new HashSet<String>();
    synchronized (this.messageLog) {
        try {
            int i = 0;
            for (String paxosID : toCommit.keySet()) {
                LogIndex logIndex = toCommit.get(paxosID);
                if (conn == null) {
                    conn = this.getDefaultConn();
                    conn.setAutoCommit(false);
                    pstmt = conn.prepareStatement(updateCmd);
                }

                byte[] logIndexBytes = logIndex != null ? deflate(logIndex.toString().getBytes(CHARSET)) : null;
                if (logIndexBytes != null && ENABLE_INSTRUMENTATION && Util.oneIn(Integer.MAX_VALUE))
                    DelayProfiler.updateMovAvg("logindex_size", logIndexBytes.length);
                Blob blob = conn.createBlob();
                if (logIndexBytes != null)
                    blob.setBytes(1, logIndexBytes);
                pstmt.setBlob(1, logIndexBytes != null ? blob : null);
                pstmt.setString(2, paxosID);
                pstmt.addBatch();
                batch.add(paxosID);
                if ((i + 1) % MAX_DB_BATCH_SIZE == 0 || (i + 1) == toCommit.size()) {
                    pstmt.executeBatch();
                    conn.commit();
                    pstmt.clearBatch();
                    paused.addAll(batch);

                    log.log(Level.FINE, "{0} paused logIndex batch {1}",
                            new Object[] { this, Util.truncatedLog(batch, 16) });
                    batch.clear();
                }
                i++;
            }
        } catch (SQLException | IOException sqle) {
            log.severe(this + " failed to pause logIndex batch");
            sqle.printStackTrace();
        } finally {
            cleanup(pstmt);
            cleanup(conn);
        }
        // free up memory
        for (String paxosID : paused)
            this.messageLog.uncache(paxosID);
    }
    if (paused.size() != toCommit.size())
        paused.addAll(this.pauseLogIndexIndividually(diffLI(toCommit, paused)));
    return paused;
}