Example usage for java.sql BatchUpdateException getUpdateCounts

List of usage examples for java.sql BatchUpdateException getUpdateCounts

Introduction

On this page you can find example usage for java.sql BatchUpdateException getUpdateCounts.

Prototype

public int[] getUpdateCounts() 

Document

Retrieves the update count for each update statement in the batch update that executed successfully before this exception occurred.
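
A minimal consumption sketch (the class and method names here are illustrative, not taken from the sources below). The counts use the constants defined on java.sql.Statement: SUCCESS_NO_INFO (-2) for a statement that succeeded with an unknown row count, and EXECUTE_FAILED (-3) for one that failed. Per the JDBC specification, a driver may either stop at the first failure (the returned array is shorter than the batch) or continue (one entry per statement, with failures marked EXECUTE_FAILED).

import java.sql.BatchUpdateException;
import java.sql.Statement;

public class UpdateCountsDemo {

    // Reports the per-statement outcome of a failed batch of batchSize statements.
    static void report(BatchUpdateException bux, int batchSize) {
        int[] counts = bux.getUpdateCounts();
        for (int i = 0; i < counts.length; i++) {
            if (counts[i] == Statement.EXECUTE_FAILED) {
                System.out.println("statement " + i + " failed");
            } else if (counts[i] == Statement.SUCCESS_NO_INFO) {
                System.out.println("statement " + i + " succeeded, row count unknown");
            } else {
                System.out.println("statement " + i + " updated " + counts[i] + " rows");
            }
        }
        // A driver that stopped early never executed the remaining statements.
        if (counts.length < batchSize) {
            System.out.println("driver stopped early; statements " + counts.length
                    + " to " + (batchSize - 1) + " were not executed");
        }
    }
}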

Usage

From source file:com.google.enterprise.connector.sharepoint.dao.SimpleSharePointDAO.java

/**
 * <p>
 * Analyzes the batch exception, identifies the return values from various
 * driver implementations during a {@link BatchUpdateException}, and falls
 * back to individual query execution.
 * </p>
 * <p>
 * In the case of MS SQL, the driver implementation never tries to execute the
 * insert query if at least one row matches the given memberships, and always
 * returns -3.
 * </p>
 * <p>
 * In the case of MySQL, the driver attempts to insert all new records and
 * returns -3 for existing memberships.
 * </p>
 *
 * @param batchUpdateException the exception to be handled
 * @param params an array of {@link SqlParameterSource}; each representing the
 *          parameters to construct one SQL query. Hence, the length of the
 *          array will indicate the number of SQL queries executed in batch.
 * @param query query to be executed specified as {@link Query}
 * @return status of each query execution (=no. of rows updated) in the same
 *         order in which the queries were specified
 */
public int[] handleBatchUpdateExceptionForMSSQLAndMySQL(BatchUpdateException batchUpdateException, Query query,
        SqlParameterSource[] params) {
    boolean fallBack = false;
    // In case of MS SQL need to analyze the batch status array as it
    // returns -3 always and then fall back to individual query execution.
    int[] optimisticBatchStatus = batchUpdateException.getUpdateCounts();
    if (null != optimisticBatchStatus) {
        for (int i = 0; i < optimisticBatchStatus.length; i++) {
            if (optimisticBatchStatus[i] == SPConstants.MINUS_THREE) {
                fallBack = true;
            } else {
                LOGGER.info("Fall back set to false.");
                fallBack = false;
                break;
            }
        }
        // getUpdateCounts() should return an array of length params.length.
        if (optimisticBatchStatus.length == params.length && !fallBack) {
            // For MySQL, all queries were attempted and some or all failed, so
            // executing them again individually makes no sense; they would
            // fail with the same error. For MS SQL, however, the connector
            // should retry.
            LOGGER.log(Level.FINE,
                    "Not all the queries executed successfully however, the attempt for execution was made for all of them.",
                    batchUpdateException);
            return optimisticBatchStatus;
        } else {
            if (fallBack) {
                LOGGER.log(Level.WARNING, "Falling back to individual query execution.");
                for (int i = 0; i < optimisticBatchStatus.length; i++) {
                    try {
                        optimisticBatchStatus[i] = update(query, params[i]);
                    } catch (SharepointException e) {
                        LOGGER.log(Level.WARNING,
                                "Failed to add record to user data store with the group name ["
                                        + params[i].getValue(SPConstants.GROUP_NAME) + "], user name ["
                                        + params[i].getValue(SPConstants.USERNAME) + "]",
                                e);
                    }
                }
            }
            return optimisticBatchStatus;
        }
    }
    return null;
}
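
The fall-back idea above, shown in isolation as a sketch with plain JDBC (the SQL string and parameter rows are placeholders, not from the connector): when the batch result cannot tell us which rows were applied, re-run each statement individually and record the per-row outcome.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;

public class IndividualRetry {

    // Re-executes each parameter row on its own after a failed batch,
    // recording Statement.EXECUTE_FAILED for rows that still fail.
    static int[] retryIndividually(Connection conn, String sql, Object[][] rows)
            throws SQLException {
        int[] counts = new int[rows.length];
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            for (int i = 0; i < rows.length; i++) {
                for (int p = 0; p < rows[i].length; p++) {
                    ps.setObject(p + 1, rows[i][p]);
                }
                try {
                    counts[i] = ps.executeUpdate();
                } catch (SQLException e) {
                    counts[i] = Statement.EXECUTE_FAILED; // record and keep going
                }
            }
        }
        return counts;
    }
}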

From source file:ConsumerServer.java

public void processMessages() {
    // for this exercise start from offset 0
    // produce batches of n size for jdbc and insert

    // for this table
    // char(10), char(20), long
    String sqlInsert = "INSERT INTO kblog.BLOGDATA VALUES (?,?,?,?,?)";
    String sqlUpsert = "UPSERT INTO kblog.BLOGDATA VALUES (?,?,?,?,?)";
    final String UDFCALL = " select * from udf(kblog.kaf3('nap007:9092'," + " 'gid'," + " 'blogit'," + "  0,"
            + " 'null'," + " 'C10C20IC55C55'," + " '|'," + "  -1," + "  1000 ))";
    final String SQLUPSERT = "upsert using load into kblog.blogdata ";
    final String SQLINSERT = "insert into kblog.blogdata ";
    final String SQLUPSERT2 = "upsert into kblog.blogdata ";

    try {
        if (t2Connect) {
            // T2
            Class.forName("org.apache.trafodion.jdbc.t2.T2Driver");
            conn = DriverManager.getConnection("jdbc:t2jdbc:");
        } else {
            // T4
            Class.forName("org.trafodion.jdbc.t4.T4Driver");
            conn = DriverManager.getConnection("jdbc:t4jdbc://nap007:23400/:", "trafodion", "passw");
        }
        conn.setAutoCommit(autoCommit);
    } catch (SQLException sx) {
        System.out.println("SQL error: " + sx.getMessage());
        System.exit(1);
    } catch (ClassNotFoundException cx) {
        System.out.println("Driver class not found: " + cx.getMessage());
        System.exit(2);

    }

    // message processing loop
    String[] msgFields;
    long numRows = 0;
    long totalRows = 0;
    int[] batchResult;

    if (udfMode == 0 && insMode == 0) {
        // missing cmd line setting
        System.out.println("*** Neither UDF nor INSERT mode specified - aborting ***");
        System.exit(2);
    }

    try {
        if (udfMode > 0) {
            long diff = 0;

            long startTime = System.currentTimeMillis();
            switch (udfMode) {
            case 1: // upsert using load
                pStmt = conn.prepareStatement(SQLUPSERT + UDFCALL);
                totalRows = pStmt.executeUpdate();
                diff = (System.currentTimeMillis() - startTime);
                System.out.println("Upsert loaded row count: " + totalRows + " in " + diff + " ms");
                break;

            case 2: // insert 
                pStmt = conn.prepareStatement(SQLINSERT + UDFCALL);
                totalRows = pStmt.executeUpdate();
                if (!autoCommit) {
                    conn.commit();
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Insert row count (autocommit off): " + totalRows + " in " + diff + " ms");
                } else {
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Insert row count (autocommit on): " + totalRows + " in " + diff + " ms");
                }
                break;

            case 3: // upsert 
                pStmt = conn.prepareStatement(SQLUPSERT2 + UDFCALL);
                totalRows = pStmt.executeUpdate();
                if (!autoCommit) {
                    conn.commit();
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Upsert row count (autocommit off): " + totalRows + " in " + diff + " ms");
                } else {
                    diff = (System.currentTimeMillis() - startTime);
                    System.out
                            .println("Upsert row count (autocommit on): " + totalRows + " in " + diff + " ms");
                }
                break;

            default: // illegal value
                System.out.println("*** Only udf values 1,2,3 allowed; found: " + udfMode);
                System.exit(2);

            } // switch

        } // udfMode
        else { // iterative insert/upsert

            switch (insMode) {
            case 1: // insert
                pStmt = conn.prepareStatement(sqlInsert);
                break;
            case 2: //upsert
                pStmt = conn.prepareStatement(sqlUpsert);
                break;
            default: // illegal
                System.out.println("*** Only insert values 1,2 allowed; found: " + insMode);
                System.exit(2);
            } // switch

            kafka.subscribe(Arrays.asList(topic));
            // priming poll
            kafka.poll(100);
            // always start from beginning
            kafka.seekToBeginning(Arrays.asList(new TopicPartition(topic, 0)));

            // enable autocommit and singleton inserts for comparative timings

            long startTime = System.currentTimeMillis();
            while (true) {
                // note that we don't commitSync to kafka - though we should
                ConsumerRecords<String, String> records = kafka.poll(streamTO);
                if (records.isEmpty())
                    break; // timed out
                for (ConsumerRecord<String, String> msg : records) {
                    msgFields = msg.value().split("\\" + Character.toString(delimiter));

                    // position info for this message
                    long offset = msg.offset();
                    int partition = msg.partition();
                    String topic = msg.topic();

                    pStmt.setString(1, msgFields[0]);
                    pStmt.setString(2, msgFields[1]);
                    pStmt.setLong(3, Long.parseLong(msgFields[2]));
                    pStmt.setString(4, msgFields[3]);
                    pStmt.setString(5, msgFields[4]);
                    numRows++;
                    totalRows++;
                    if (autoCommit) {
                        // single ins/up sert
                        pStmt.executeUpdate();
                    } else {
                        pStmt.addBatch();
                        if ((numRows % commitCount) == 0) {
                            numRows = 0;
                            batchResult = pStmt.executeBatch();
                            conn.commit();
                        }
                    }

                } // for each msg

            } // while true

            // get here when poll returns no records
            if (numRows > 0 && !autoCommit) {
                // remaining rows
                batchResult = pStmt.executeBatch();
                conn.commit();
            }
            long diff = (System.currentTimeMillis() - startTime);
            if (autoCommit)
                System.out.println("Total rows: " + totalRows + " in " + diff + " ms");
            else
                System.out.println(
                        "Total rows: " + totalRows + " in " + diff + " ms; batch size = " + commitCount);

            kafka.close();
        } // else

    } // try
    catch (ConsumerTimeoutException to) {
        System.out.println("consumer time out; " + to.getMessage());
        System.exit(1);
    } catch (BatchUpdateException bx) {
        int[] insertCounts = bx.getUpdateCounts();
        int count = 1;
        for (int i : insertCounts) {
            if (i == Statement.EXECUTE_FAILED)
                System.out.println("Error on request #" + count + ": Execute failed");
            // increment unconditionally so the request number stays aligned
            count++;
        }
        System.out.println(bx.getMessage());
        System.exit(1);

    } catch (SQLException sx) {
        System.out.println("SQL error: " + sx.getMessage());
        System.exit(1);
    }

}

From source file:lib.JdbcTemplate.java

@Override
public int[] batchUpdate(final String... sql) throws DataAccessException {
    Assert.notEmpty(sql, "SQL array must not be empty");
    if (logger.isDebugEnabled()) {
        logger.debug("Executing SQL batch update of " + sql.length + " statements");
    }

    class BatchUpdateStatementCallback implements StatementCallback<int[]>, SqlProvider {

        private String currSql;

        @Override
        public int[] doInStatement(Statement stmt) throws SQLException, DataAccessException {
            int[] rowsAffected = new int[sql.length];
            if (JdbcUtils.supportsBatchUpdates(stmt.getConnection())) {
                for (String sqlStmt : sql) {
                    this.currSql = appendSql(this.currSql, sqlStmt);
                    stmt.addBatch(sqlStmt);
                }
                try {
                    rowsAffected = stmt.executeBatch();
                } catch (BatchUpdateException ex) {
                    String batchExceptionSql = null;
                    for (int i = 0; i < ex.getUpdateCounts().length; i++) {
                        if (ex.getUpdateCounts()[i] == Statement.EXECUTE_FAILED) {
                            batchExceptionSql = appendSql(batchExceptionSql, sql[i]);
                        }
                    }
                    if (StringUtils.hasLength(batchExceptionSql)) {
                        this.currSql = batchExceptionSql;
                    }
                    throw ex;
                }
            } else {
                for (int i = 0; i < sql.length; i++) {
                    this.currSql = sql[i];
                    if (!stmt.execute(sql[i])) {
                        rowsAffected[i] = stmt.getUpdateCount();
                    } else {
                        throw new InvalidDataAccessApiUsageException("Invalid batch SQL statement: " + sql[i]);
                    }
                }
            }
            return rowsAffected;
        }

        private String appendSql(String sql, String statement) {
            return (StringUtils.isEmpty(sql) ? statement : sql + "; " + statement);
        }

        @Override
        public String getSql() {
            return this.currSql;
        }
    }

    return execute(new BatchUpdateStatementCallback());
}
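
A usage sketch for the override above (the statements and table are invented; lib.JdbcTemplate is assumed to behave like the Spring class it mirrors). If the batch fails, the BatchUpdateException is rethrown and translated by execute(), and getSql() narrows the reported SQL to the statements whose update count was Statement.EXECUTE_FAILED.

import lib.JdbcTemplate;

public class BatchUpdateUsage {

    // Runs two statements as one batch; returns the per-statement row counts.
    static int[] transfer(JdbcTemplate jdbcTemplate) {
        return jdbcTemplate.batchUpdate(
                "UPDATE accounts SET balance = balance - 10 WHERE id = 1",
                "UPDATE accounts SET balance = balance + 10 WHERE id = 2");
    }
}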

From source file:jade.domain.DFDBKB.java

/**
 * Builds an error message for a <code>BatchUpdateException</code>
 */
private String getBatchUpdateErroMsg(BatchUpdateException e) {
    StringBuffer msg = new StringBuffer("SQLException: " + e.getMessage() + "\n");
    msg.append("SQLState:  " + e.getSQLState() + "\n");
    msg.append("Message:  " + e.getMessage() + "\n");
    msg.append("Vendor:  " + e.getErrorCode() + "\n");
    msg.append("Update counts: ");

    int[] updateCounts = e.getUpdateCounts();
    for (int i = 0; i < updateCounts.length; i++) {
        msg.append(updateCounts[i] + "   ");
    }
    return msg.toString();
}
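
The formatting loop can also be written with java.util.Arrays.toString; a drop-in sketch (not part of the original JADE source):

    // Equivalent formatting of the counts in one call:
    msg.append("Update counts: ").append(java.util.Arrays.toString(e.getUpdateCounts()));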

From source file:org.apache.ojb.broker.util.ExceptionHelper.java

/**
 * Method which support the conversion of {@link java.sql.SQLException} to
 * OJB's runtime exception (with additional message details).
 *
 * @param message The error message to use, if <em>null</em> a standard message is used.
 * @param ex The exception to convert (mandatory).
 * @param sql The used sql-statement or <em>null</em>.
 * @param cld The {@link org.apache.ojb.broker.metadata.ClassDescriptor} of the target object or <em>null</em>.
 * @param values The values set in prepared statement or <em>null</em>.
 * @param logger The {@link org.apache.ojb.broker.util.logging.Logger} to log a detailed message
 * to, or <em>null</em> to skip logging the message.
 * @param obj The target object or <em>null</em>.
 * @return A new created {@link org.apache.ojb.broker.PersistenceBrokerSQLException} based on the specified
 *         arguments.
 */
public static PersistenceBrokerSQLException generateException(String message, SQLException ex, String sql,
        ClassDescriptor cld, ValueContainer[] values, Logger logger, Object obj) {
    /*
    X/OPEN codes within class 23:
    23000   INTEGRITY CONSTRAINT VIOLATION
    23001   RESTRICT VIOLATION
    23502   NOT NULL VIOLATION
    23503   FOREIGN KEY VIOLATION
    23505   UNIQUE VIOLATION
    23514   CHECK VIOLATION
    */
    String eol = SystemUtils.LINE_SEPARATOR;
    StringBuffer msg = new StringBuffer(eol);
    eol += "* ";

    if (ex instanceof BatchUpdateException) {
        BatchUpdateException tmp = (BatchUpdateException) ex;
        if (message != null) {
            msg.append("* ").append(message);
        } else {
            msg.append("* BatchUpdateException during execution of sql-statement:");
        }
        msg.append(eol).append("Batch update count is '").append(tmp.getUpdateCounts()).append("'");
    } else if (ex instanceof SQLWarning) {
        if (message != null) {
            msg.append("* ").append(message);
        } else {
            msg.append("* SQLWarning during execution of sql-statement:");
        }
    } else {
        if (message != null) {
            msg.append("* ").append(message);
        } else {
            msg.append("* SQLException during execution of sql-statement:");
        }
    }

    if (sql != null) {
        msg.append(eol).append("sql statement was '").append(sql).append("'");
    }
    String stateCode = null;
    if (ex != null) {
        msg.append(eol).append("Exception message is [").append(ex.getMessage()).append("]");
        msg.append(eol).append("Vendor error code [").append(ex.getErrorCode()).append("]");
        msg.append(eol).append("SQL state code [");

        stateCode = ex.getSQLState();
        if ("23000".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=INTEGRITY CONSTRAINT VIOLATION");
        else if ("23001".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=RESTRICT VIOLATION");
        else if ("23502".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=NOT NULL VIOLATION");
        else if ("23503".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=FOREIGN KEY VIOLATION");
        else if ("23505".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=UNIQUE VIOLATION");
        else if ("23514".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=CHECK VIOLATION");
        else
            msg.append(stateCode);
        msg.append("]");
    }

    if (cld != null) {
        msg.append(eol).append("Target class is '").append(cld.getClassNameOfObject()).append("'");
        FieldDescriptor[] fields = cld.getPkFields();
        msg.append(eol).append("PK of the target object is [");
        for (int i = 0; i < fields.length; i++) {
            try {
                if (i > 0)
                    msg.append(", ");
                msg.append(fields[i].getPersistentField().getName());
                if (obj != null) {
                    msg.append("=");
                    msg.append(fields[i].getPersistentField().get(obj));
                }
            } catch (Exception ignore) {
                msg.append(" PK field build FAILED! ");
            }
        }
        msg.append("]");
    }
    if (values != null) {
        msg.append(eol).append(values.length).append(" values performed in statement: ").append(eol);
        for (int i = 0; i < values.length; i++) {
            ValueContainer value = values[i];
            msg.append("[");
            msg.append("jdbcType=").append(JdbcTypesHelper.getSqlTypeAsString(value.getJdbcType().getType()));
            msg.append(", value=").append(value.getValue());
            msg.append("]");
        }
    }
    if (obj != null) {
        msg.append(eol).append("Source object: ");
        try {
            msg.append(obj.toString());
        } catch (Exception e) {
            msg.append(obj.getClass());
        }
    }

    // message string for PB exception
    String shortMsg = msg.toString();

    if (ex != null) {
        // add causing stack trace
        Throwable rootCause = ExceptionUtils.getRootCause(ex);
        if (rootCause == null)
            rootCause = ex;
        msg.append(eol).append("The root stack trace is --> ");
        String rootStack = ExceptionUtils.getStackTrace(rootCause);
        msg.append(eol).append(rootStack);
    }
    msg.append(SystemUtils.LINE_SEPARATOR).append("**");

    // log error message
    if (logger != null)
        logger.error(msg.toString());

    // throw a specific type of runtime exception for a key constraint.
    if ("23000".equals(stateCode) || "23505".equals(stateCode)) {
        throw new KeyConstraintViolatedException(shortMsg, ex);
    } else {
        throw new PersistenceBrokerSQLException(shortMsg, ex);
    }
}

From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java

/**
 * This method inserts data into the data table. If any data points in the
 * list fail to be added (e.g. because of a constraint violation), they will
 * be returned in the result list.
 * 
 * @param conn The connection.
 * @param data The data points to insert.
 * @param continueOnSQLException <code>true</code> to continue inserting the
 *        rest of the data points even after a <code>SQLException</code>
 *        occurs; <code>false</code> to throw the <code>SQLException</code>.
 * @return The list of data points that were not inserted.
 * @throws SQLException only if there is an exception for one of the data
 *         point batch inserts and <code>continueOnSQLException</code> is
 *         set to <code>false</code>.
 */
private List<DataPoint> insertData(Connection conn, List<DataPoint> data, boolean continueOnSQLException)
        throws SQLException {
    PreparedStatement stmt = null;
    final List<DataPoint> left = new ArrayList<DataPoint>();
    final Map<String, List<DataPoint>> buckets = MeasRangeObj.getInstance().bucketData(data);
    final HQDialect dialect = measurementDAO.getHQDialect();
    final boolean supportsDupInsStmt = dialect.supportsDuplicateInsertStmt();
    final boolean supportsPLSQL = dialect.supportsPLSQL();
    final StringBuilder buf = new StringBuilder();
    for (final Entry<String, List<DataPoint>> entry : buckets.entrySet()) {
        buf.setLength(0);
        final String table = entry.getKey();
        final List<DataPoint> dpts = entry.getValue();
        try {
            if (supportsDupInsStmt) {
                stmt = conn.prepareStatement(buf.append("INSERT INTO ").append(table)
                        .append(" (measurement_id, timestamp, value) VALUES (?, ?, ?)")
                        .append(" ON DUPLICATE KEY UPDATE value = ?").toString());
            } else if (supportsPLSQL) {
                final String sql = PLSQL.replaceAll(":table", table);
                stmt = conn.prepareStatement(sql);
            } else {
                stmt = conn.prepareStatement(buf.append("INSERT INTO ").append(table)
                        .append(" (measurement_id, timestamp, value) VALUES (?, ?, ?)").toString());
            }
            // TODO need to set synchronous commit to off
            for (DataPoint pt : dpts) {
                Integer metricId = pt.getMeasurementId();
                MetricValue val = pt.getMetricValue();
                BigDecimal bigDec;
                bigDec = new BigDecimal(val.getValue());
                stmt.setInt(1, metricId.intValue());
                stmt.setLong(2, val.getTimestamp());
                stmt.setBigDecimal(3, getDecimalInRange(bigDec, metricId));
                if (supportsDupInsStmt) {
                    stmt.setBigDecimal(4, getDecimalInRange(bigDec, metricId));
                } else if (supportsPLSQL) {
                    stmt.setBigDecimal(4, getDecimalInRange(bigDec, metricId));
                    stmt.setLong(5, val.getTimestamp());
                    stmt.setInt(6, metricId.intValue());
                }
                stmt.addBatch();
            }
            int[] execInfo = stmt.executeBatch();
            left.addAll(getRemainingDataPoints(dpts, execInfo));
        } catch (BatchUpdateException e) {
            if (!continueOnSQLException) {
                throw e;
            }
            left.addAll(getRemainingDataPointsAfterBatchFail(dpts, e.getUpdateCounts()));
        } catch (SQLException e) {
            if (!continueOnSQLException) {
                throw e;
            }
            // If the batch insert is not within a transaction, then we
            // don't know which of the inserts completed successfully.
            // Assume they all failed.
            left.addAll(dpts);
            if (log.isDebugEnabled()) {
                log.debug("A general SQLException occurred during the insert. " + "Assuming that none of the "
                        + dpts.size() + " data points were inserted.", e);
            }
        } finally {
            DBUtil.closeStatement(LOG_CTX, stmt);
        }
    }
    return left;
}
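
getRemainingDataPointsAfterBatchFail is not shown in this excerpt. A plausible reconstruction under the JDBC contract (an assumption, not the project's actual code) keeps every point whose batch entry failed or was never reported:

    // Hypothetical sketch: a driver that stops at the first failure returns
    // fewer counts than statements, so anything past the end of the array is
    // treated as not inserted.
    private List<DataPoint> getRemainingDataPointsAfterBatchFail(List<DataPoint> dpts, int[] counts) {
        final List<DataPoint> left = new ArrayList<DataPoint>();
        for (int i = 0; i < dpts.size(); i++) {
            if (i >= counts.length || counts[i] == Statement.EXECUTE_FAILED) {
                left.add(dpts.get(i));
            }
        }
        return left;
    }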

From source file:org.hyperic.hq.measurement.server.session.DataManagerImpl.java

/**
 * This method is called to perform 'updates' for any inserts that failed.
 *
 * @return The data insert result containing the data points that were not
 *         updated.
 */
private List<DataPoint> updateData(Connection conn, List<DataPoint> data) {
    PreparedStatement stmt = null;
    List<DataPoint> left = new ArrayList<DataPoint>();
    Map<String, List<DataPoint>> buckets = MeasRangeObj.getInstance().bucketData(data);

    for (Entry<String, List<DataPoint>> entry : buckets.entrySet()) {
        String table = entry.getKey();
        List<DataPoint> dpts = entry.getValue();

        try {
            // TODO need to set synchronous commit to off
            stmt = conn.prepareStatement(
                    "UPDATE " + table + " SET value = ? WHERE timestamp = ? AND measurement_id = ?");
            for (DataPoint pt : dpts) {
                Integer metricId = pt.getMeasurementId();
                MetricValue val = pt.getMetricValue();
                BigDecimal bigDec;
                bigDec = new BigDecimal(val.getValue());
                stmt.setBigDecimal(1, getDecimalInRange(bigDec, metricId));
                stmt.setLong(2, val.getTimestamp());
                stmt.setInt(3, metricId.intValue());
                stmt.addBatch();
            }

            int[] execInfo = stmt.executeBatch();
            left.addAll(getRemainingDataPoints(dpts, execInfo));
        } catch (BatchUpdateException e) {
            left.addAll(getRemainingDataPointsAfterBatchFail(dpts, e.getUpdateCounts()));
        } catch (SQLException e) {
            // If the batch update is not within a transaction, then we
            // don't know which of the updates completed successfully.
            // Assume they all failed.
            left.addAll(dpts);

            if (log.isDebugEnabled()) {
                log.debug("A general SQLException occurred during the update. " + "Assuming that none of the "
                        + dpts.size() + " data points were updated.", e);
            }
        } finally {
            DBUtil.closeStatement(LOG_CTX, stmt);
        }
    }
    return left;
}

From source file:org.jumpmind.db.sql.JdbcSqlTransaction.java

protected void removeMarkersThatWereSuccessful(BatchUpdateException ex) {
    int[] updateCounts = ex.getUpdateCounts();
    Iterator<Object> it = markers.iterator();
    int index = 0;
    while (it.hasNext()) {
        it.next();
        if (updateCounts.length > index && normalizeUpdateCount(updateCounts[index]) > 0) {
            it.remove();
        }
        index++;
    }
}
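
normalizeUpdateCount is not shown above; a minimal sketch consistent with the JDBC constants (an assumption about the real implementation):

    protected int normalizeUpdateCount(int updateCount) {
        // Statement.SUCCESS_NO_INFO (-2) means the statement succeeded but the
        // driver could not report a row count; treat it as one affected row so
        // its marker is removed.
        return updateCount == Statement.SUCCESS_NO_INFO ? 1 : updateCount;
    }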

From source file:org.openmrs.util.databasechange.AddConceptMapTypesChangeset.java

/**
 * Inserts the concept map types as a batch update.
 *
 * @param connection The database connection
 */
private void runBatchInsert(JdbcConnection connection) throws CustomChangeException {
    PreparedStatement pStmt = null;
    ResultSet rs = null;
    try {
        connection.setAutoCommit(false);

        Integer userId = DatabaseUpdater.getAuthenticatedUserId();
        //if we have no authenticated user(for API users), set as Daemon
        if (userId == null || userId < 1) {
            userId = getInt(connection, "SELECT min(user_id) FROM users");
            //leave it as null rather than setting it to 0
            if (userId < 1) {
                userId = null;
            }
        }

        //userId is not a param, because it's easier this way if it's null
        pStmt = connection.prepareStatement("INSERT INTO concept_map_type "
                + "(concept_map_type_id, name, is_hidden, retired, creator, date_created, uuid) VALUES(?,?,?,?,"
                + userId + ",?,?)");

        int mapTypeId = 1;

        for (String map : visibleConceptMapTypeArray) {
            String[] mapTypeAndUuid = map.trim().split("\\|");
            String mapType = mapTypeAndUuid[0];
            String mapUuid = mapTypeAndUuid[1];

            pStmt.setInt(1, mapTypeId);
            pStmt.setString(2, mapType);
            pStmt.setBoolean(3, false);
            pStmt.setBoolean(4, false);
            pStmt.setDate(5, new Date(Calendar.getInstance().getTimeInMillis()));
            pStmt.setString(6, mapUuid);
            pStmt.addBatch();

            mapTypeId++;
        }

        for (String map : hiddenConceptMapTypeArray) {
            String[] mapTypeAndUuid = map.trim().split("\\|");
            String mapType = mapTypeAndUuid[0];
            String mapUuid = mapTypeAndUuid[1];

            pStmt.setInt(1, mapTypeId);
            pStmt.setString(2, mapType);
            pStmt.setBoolean(3, true);
            pStmt.setBoolean(4, false);
            pStmt.setDate(5, new Date(Calendar.getInstance().getTimeInMillis()));
            pStmt.setString(6, mapUuid);
            pStmt.addBatch();

            mapTypeId++;
        }

        try {
            int[] updateCounts = pStmt.executeBatch();
            for (int i = 0; i < updateCounts.length; i++) {
                if (updateCounts[i] > -1) {
                    log.debug("Successfully executed: updateCount=" + updateCounts[i]);
                } else if (updateCounts[i] == Statement.SUCCESS_NO_INFO) {
                    log.debug("Successfully executed; No Success info");
                } else if (updateCounts[i] == Statement.EXECUTE_FAILED) {
                    log.warn("Failed to execute insert");
                }
            }

            log.debug("Committing inserts...");
            connection.commit();
        } catch (BatchUpdateException be) {
            log.warn("Error generated while processsing batch insert", be);
            int[] updateCounts = be.getUpdateCounts();

            for (int i = 0; i < updateCounts.length; i++) {
                if (updateCounts[i] > -1) {
                    log.warn("Executed with exception: insertCount=" + updateCounts[i]);
                } else if (updateCounts[i] == Statement.SUCCESS_NO_INFO) {
                    log.warn("Executed with exception; No Success info");
                } else if (updateCounts[i] == Statement.EXECUTE_FAILED) {
                    log.warn("Failed to execute insert with exception");
                }
            }

            try {
                log.debug("Rolling back batch", be);
                connection.rollback();
            } catch (Exception rbe) {
                log.warn("Error generated while rolling back batch insert", be);
            }

            //marks the changeset as a failed one
            throw new CustomChangeException("Failed to insert one or more concept map types", be);
        }
    } catch (DatabaseException e) {
        throw new CustomChangeException("Failed to insert one or more concept map types:", e);
    } catch (SQLException e) {
        throw new CustomChangeException("Failed to insert one or more concept map types:", e);
    } finally {
        //reset to auto commit mode
        try {
            connection.setAutoCommit(true);
        } catch (DatabaseException e) {
            log.warn("Failed to reset auto commit back to true", e);
        }
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException e) {
                log.warn("Failed to close the resultset object");
            }
        }

        if (pStmt != null) {
            try {
                pStmt.close();
            } catch (SQLException e) {
                log.warn("Failed to close the prepared statement object");
            }
        }
    }
}

From source file:org.openmrs.util.databasechange.ConceptReferenceTermChangeSet.java

/**
 * Convenience method that inserts rows into the concept reference term table. The
 * concept_map_id values become the concept_reference_term_id values.
 *
 * @param connection the current database connection
 * @param listOfPropertyValueMaps a list of property and value maps for the objects to insert
 * @throws CustomChangeException
 */
private void insertRows(JdbcConnection connection, List<Map<String, Object>> listOfPropertyValueMaps)
        throws CustomChangeException {
    if (CollectionUtils.isNotEmpty(listOfPropertyValueMaps)) {
        PreparedStatement pStmt = null;
        try {
            connection.setAutoCommit(false);
            pStmt = connection.prepareStatement("INSERT INTO concept_reference_term"
                    + "(concept_reference_term_id, concept_source_id, code, description, creator, date_created, retired, uuid) "
                    + "VALUES(?, ?, ?, ?, ?, ?, ?, ?)");

            for (Map<String, Object> propertyValueMap : listOfPropertyValueMaps) {
                pStmt.setInt(1, (Integer) propertyValueMap.get("termId"));
                pStmt.setInt(2, (Integer) propertyValueMap.get("sourceId"));
                pStmt.setString(3, propertyValueMap.get("code").toString());
                pStmt.setString(4, (propertyValueMap.get("description") == null) ? null
                        : propertyValueMap.get("description").toString());
                pStmt.setInt(5, (Integer) propertyValueMap.get("creator"));
                pStmt.setDate(6, (Date) propertyValueMap.get("dateCreated"));
                pStmt.setBoolean(7, false);
                pStmt.setString(8, propertyValueMap.get("uuid").toString());

                pStmt.addBatch();
            }

            try {
                int[] updateCounts = pStmt.executeBatch();
                for (int i = 0; i < updateCounts.length; i++) {
                    if (updateCounts[i] > -1) {
                        log.debug("Successfully executed: updateCount=" + updateCounts[i]);
                    } else if (updateCounts[i] == Statement.SUCCESS_NO_INFO) {
                        log.debug("Successfully executed; No Success info");
                    } else if (updateCounts[i] == Statement.EXECUTE_FAILED) {
                        log.warn("Failed to execute update");
                    }
                }

                log.debug("Committing updates...");
                connection.commit();
            } catch (BatchUpdateException be) {
                log.warn("Error generated while processsing batch update", be);
                int[] updateCounts = be.getUpdateCounts();

                for (int i = 0; i < updateCounts.length; i++) {
                    if (updateCounts[i] > -1) {
                        log.warn("Executed with exception: updateCount=" + updateCounts[i]);
                    } else if (updateCounts[i] == Statement.SUCCESS_NO_INFO) {
                        log.warn("Executed with exception; No Success info");
                    } else if (updateCounts[i] == Statement.EXECUTE_FAILED) {
                        log.warn("Failed to execute update with exception");
                    }
                }

                try {
                    log.warn("Rolling back batch", be);
                    connection.rollback();
                } catch (Exception rbe) {
                    log.warn("Error generated while rolling back batch update", be);
                }

                //marks the changeset as a failed one
                throw new CustomChangeException(
                        "Failed to generate concept reference terms from existing concept mappings.");
            }
        } catch (DatabaseException e) {
            throw new CustomChangeException("Error generated", e);
        } catch (SQLException e) {
            throw new CustomChangeException("Error generated", e);
        } finally {
            //reset to auto commit mode
            try {
                connection.setAutoCommit(true);
            } catch (DatabaseException e) {
                log.warn("Failed to reset auto commit back to true", e);
            }

            if (pStmt != null) {
                try {
                    pStmt.close();
                } catch (SQLException e) {
                    log.warn("Failed to close the prepared statement object");
                }
            }
        }
    } else
        log.error("List of property value maps is null or empty");
}