Example usage for java.sql BatchUpdateException getNextException

Introduction

This page lists example usages of java.sql.BatchUpdateException.getNextException, collected from open-source projects.

Prototype

public SQLException getNextException() 

Document

Retrieves the exception chained to this SQLException object by setNextException(SQLException ex).
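
BatchUpdateException extends SQLException, so its chain is walked the same way as for any other SQLException. A minimal sketch of a handler (the logging target is illustrative):

import java.sql.BatchUpdateException;
import java.sql.SQLException;

class BatchErrorLogging {
    // Logs the batch outcome and every exception linked via getNextException().
    static void logBatchFailure(BatchUpdateException bue) {
        // getUpdateCounts() reports the per-statement results accumulated
        // before (or, depending on the driver, including) the failure.
        System.err.println("batch entries reported: " + bue.getUpdateCounts().length);
        // The first link usually carries the driver's root error, but the
        // chain may hold several entries, so walk it to the end.
        for (SQLException e = bue.getNextException(); e != null; e = e.getNextException()) {
            System.err.println("chained: " + e.getMessage());
        }
    }
}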

Usage

From source file: org.cartoweb.stats.imports.Import.java

/**
 * Imports one file into the DB.
 */
private void convertFile(final Connection con, File file) throws IOException, SQLException {
    try {
        final String query = "INSERT INTO " + tableName + " (" + MAPPER.getFieldNames() + ") VALUES ("
                + MAPPER.getInsertPlaceHolders() + ")";
        final PreparedStatement layerStmt = wantLayers
                ? con.prepareStatement("INSERT INTO " + tableName + "_layers (id, layer) VALUES (?,?)")
                : null;

        StatsReader reader = createReader(file);

        JdbcUtilities.runInsertQuery("inserting stats", query, con, reader, 500,
                new JdbcUtilities.InsertTask<StatsRecord>() {
                    private int cptLayers = 0;

                    public boolean marshall(PreparedStatement stmt, StatsRecord item) throws SQLException {
                        if (item != null) {
                            item.setId(curId++);
                            MAPPER.saveToDb(stmt, item, 1);

                            if (wantLayers && item.getLayerArray() != null) {
                                for (int i = 0; i < item.getLayerArray().size(); i++) {
                                    Integer val = item.getLayerArray().get(i);
                                    layerStmt.setLong(1, item.getId());
                                    layerStmt.setInt(2, val);
                                    layerStmt.addBatch();
                                    if ((++cptLayers % 500) == 0) {
                                        layerStmt.executeBatch();
                                    }
                                }
                            }
                            return true;
                        } else {
                            return false;
                        }
                    }
                });

        if (layerStmt != null) {
            layerStmt.executeBatch();
            layerStmt.close();
        }
    } catch (BatchUpdateException ex) {
        ex.getNextException().printStackTrace();
        throw ex;
    }
}
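
The catch block above prints only the first chained exception, which is where drivers such as the PostgreSQL JDBC driver put the root error. Since JDBC 4.0, SQLException also implements Iterable<Throwable>, so a small helper can print every cause and chained exception in one pass (a sketch, not part of the original project):

import java.sql.SQLException;

class ChainIteration {
    // SQLException implements Iterable<Throwable> (JDBC 4.0+): iterating
    // visits the cause chain and the getNextException() chain together.
    static void printAllLinked(SQLException ex) {
        for (Throwable t : ex) {
            t.printStackTrace();
        }
    }
}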

From source file: org.cartoweb.stats.imports.Import.java

private void fillCacheHits(Connection con) throws SQLException {
    con.commit();
    con.setAutoCommit(true);
    JdbcUtilities.runDeleteQuery("vacuuming " + tableName, "VACUUM ANALYZE " + tableName, con, null);
    con.setAutoCommit(false);

    if (DB_SOLVE_HITS) {
        //takes around 55m for 4M records and is not great for incremental updates...
        JdbcUtilities.runDeleteQuery("solving cache hits", "UPDATE " + tableName
                + " f SET general_elapsed_time=s.general_elapsed_time, images_mainmap_width=s.images_mainmap_width, images_mainmap_height=s.images_mainmap_height, layers=s.layers, layers_switch_id=s.layers_switch_id, bbox_minx=s.bbox_minx, bbox_miny=s.bbox_miny, bbox_maxx=s.bbox_maxx, bbox_maxy=s.bbox_maxy, location_scale=s.location_scale, query_results_count=s.query_results_count, query_results_table_count=s.query_results_table_count FROM "
                + tableName
                + " s WHERE s.general_cache_id=f.general_cache_hit AND f.general_cache_hit IS NOT NULL AND f.general_elapsed_time IS NULL AND f.layers IS NULL",
                con, null);
    } else {
        //takes around 21m for the same 4M records and is optimal for incremental updates...
        try {
            final PreparedStatement updateStmt = con.prepareStatement("UPDATE " + tableName
                    + " SET general_elapsed_time=?, images_mainmap_width=?, images_mainmap_height=?, layers=?, layers_switch_id=?, bbox_minx=?, bbox_miny=?, bbox_maxx=?, bbox_maxy=?, location_scale=?, query_results_count=?, query_results_table_count=? WHERE general_cache_hit=?");
            if (hits.size() == 0) {
                return;
            }

            JdbcUtilities.runSelectQuery("reading cached values",
                    "SELECT general_cache_id, general_elapsed_time, images_mainmap_width, images_mainmap_height, layers, layers_switch_id, bbox_minx, bbox_miny, bbox_maxx, bbox_maxy, location_scale, query_results_count, query_results_table_count FROM "
                            + tableName + " WHERE general_cache_id IS NOT NULL",
                    con, new JdbcUtilities.SelectTask() {
                        private int cpt = 0;

                        public void setupStatement(PreparedStatement stmt) throws SQLException {
                        }

                        public void run(ResultSet rs) throws SQLException {
                            int count = 0;
                            final int todo = hits.size();
                            Progress progress = new Progress(10 * 1000, todo, "Cache hit record updating",
                                    LOGGER);
                            while (rs.next()) {
                                String cacheId = rs.getString(1);
                                //We can have the same general_cache_id multiple times.
                                //So we have to remove it from the set.
                                if (hits.remove(cacheId)) {
                                    StatementUtils.copyFloat(rs, 2, updateStmt, 1);
                                    StatementUtils.copyInt(rs, 3, updateStmt, 2);
                                    StatementUtils.copyInt(rs, 4, updateStmt, 3);
                                    StatementUtils.copyString(rs, 5, updateStmt, 4);
                                    StatementUtils.copyInt(rs, 6, updateStmt, 5);
                                    StatementUtils.copyFloat(rs, 7, updateStmt, 6);
                                    StatementUtils.copyFloat(rs, 8, updateStmt, 7);
                                    StatementUtils.copyFloat(rs, 9, updateStmt, 8);
                                    StatementUtils.copyFloat(rs, 10, updateStmt, 9);
                                    StatementUtils.copyFloat(rs, 11, updateStmt, 10);
                                    StatementUtils.copyInt(rs, 12, updateStmt, 11);
                                    StatementUtils.copyString(rs, 13, updateStmt, 12);
                                    updateStmt.setString(13, cacheId);
                                    updateStmt.addBatch();

                                    if (++cpt % 50 == 0) {
                                        int[] counts = updateStmt.executeBatch();
                                        for (int i = 0; i < counts.length; ++i) {
                                            count += counts[i];
                                        }
                                    }

                                    progress.update(todo - hits.size());
                                }
                            }
                            ++cpt;
                            int[] counts = updateStmt.executeBatch();
                            for (int i = 0; i < counts.length; ++i) {
                                count += counts[i];
                            }

                            LOGGER.info(count + " cache hit records updated from " + cpt + " cached values");
                        }
                    });

            updateStmt.close();
        } catch (BatchUpdateException ex) {
            LOGGER.error(ex.getNextException());
            throw ex;
        }
    }
    con.commit();
}
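
A side note on the summing loops above: on Java 8+ they can be collapsed into a stream, assuming the driver returns real row counts rather than Statement.SUCCESS_NO_INFO entries:

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Arrays;

class BatchCounting {
    // Java 8+ equivalent of the manual summing loops above; drivers may
    // report Statement.SUCCESS_NO_INFO (-2) per entry, which would skew the sum.
    static int executeBatchAndCount(PreparedStatement stmt) throws SQLException {
        return Arrays.stream(stmt.executeBatch()).sum();
    }
}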

From source file: com.wavemaker.runtime.data.spring.SpringDataServiceManager.java

private Object runInTx(Task task, Object... input) {
    HibernateCallback action = new RunInHibernate(task, input);
    TransactionTemplate txTemplate = new TransactionTemplate(this.txMgr);
    boolean rollbackOnly = task instanceof DefaultRollback && !isTxRunning();
    RunInTx tx = new RunInTx(action, rollbackOnly);
    if (txLogger.isInfoEnabled()) {
        if (isTxRunning()) {
            txLogger.info("tx is running executing \"" + task.getName() + "\" in current tx");
        } else {
            txLogger.info("no tx running, wrapping execution of \"" + task.getName() + "\" in tx");
            if (rollbackOnly) {
                txLogger.info("rollback enabled for \"" + task.getName() + "\"");
            }
        }
    }
    Object rtn = null;
    try {
        rtn = txTemplate.execute(tx);
    } catch (Throwable ex) {
        //The following logic intends to display a sensible message for the user when a column contains a value whose length
        //exceeds the maximum length allowed in the database.  The logic has been tested on MySQL, Postgres, Oracle and
        //SQLServer so far.
        if (ex.getCause() instanceof java.sql.BatchUpdateException) { //Oracle
            String msg = ((java.sql.BatchUpdateException) ex.getCause()).getNextException().getMessage();
            if (msg != null) {
                ex.printStackTrace();
                throw new WMRuntimeException(msg);
            }
        } else if (ex.getCause() != null && ex.getCause().getCause() instanceof java.sql.BatchUpdateException) { //Postgres
            java.sql.BatchUpdateException e = (java.sql.BatchUpdateException) ex.getCause().getCause();
            if (e.getNextException() != null && e.getNextException().getMessage() != null) {
                ex.printStackTrace();
                throw new WMRuntimeException(e.getNextException().getMessage());
            }
        } else if (ex.getCause() != null && ex.getCause().getCause() != null) { //MySQL, SQLServer
            String msg = ex.getCause().getCause().getMessage();
            if (msg != null) {
                ex.printStackTrace();
                throw new WMRuntimeException(msg);
            }
        } else {
            throw new WMRuntimeException(ex);
        }
    }
    if (txLogger.isInfoEnabled()) {
        if (isTxRunning()) {
            txLogger.info("tx is running after execution of \"" + task.getName() + "\"");
        } else {
            txLogger.info("tx is not running after execution of \"" + task.getName() + "\"");
        }

    }
    return rtn;
}
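
The handler above hard-codes the wrapping depth it observed per database: Oracle surfaces the BatchUpdateException one getCause() level down, Postgres two. A generic walk over the cause chain covers any depth; findBatchUpdateMessage is a hypothetical helper, not part of WaveMaker:

import java.sql.BatchUpdateException;
import java.sql.SQLException;

class CauseUnwrapping {
    // Hypothetical helper: walks the cause chain to any depth and returns the
    // most specific message carried by a BatchUpdateException, or null if
    // no such exception is found.
    static String findBatchUpdateMessage(Throwable ex) {
        for (Throwable t = ex; t != null; t = t.getCause()) {
            if (t instanceof BatchUpdateException) {
                SQLException next = ((BatchUpdateException) t).getNextException();
                return next != null ? next.getMessage() : t.getMessage();
            }
        }
        return null;
    }
}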

From source file: org.openbravo.service.json.DefaultJsonDataService.java

public String update(Map<String, String> parameters, String content) {
    try {
        final boolean sendOriginalIdBack = "true".equals(parameters.get(JsonConstants.SEND_ORIGINAL_ID_BACK));

        final JsonToDataConverter fromJsonConverter = OBProvider.getInstance().get(JsonToDataConverter.class);

        String localContent = content;
        if (parameters.containsKey(ADD_FLAG)) {
            localContent = doPreAction(parameters, content, DataSourceAction.ADD);
        } else {
            localContent = doPreAction(parameters, content, DataSourceAction.UPDATE);
        }

        final Object jsonContent = getContentAsJSON(localContent);
        final List<BaseOBObject> bobs;
        final List<JSONObject> originalData = new ArrayList<JSONObject>();
        if (jsonContent instanceof JSONArray) {
            bobs = fromJsonConverter.toBaseOBObjects((JSONArray) jsonContent);
            final JSONArray jsonArray = (JSONArray) jsonContent;
            for (int i = 0; i < jsonArray.length(); i++) {
                originalData.add(jsonArray.getJSONObject(i));
            }
        } else {
            final JSONObject jsonObject = (JSONObject) jsonContent;
            originalData.add(jsonObject);
            // now set the id and entityname from the parameters if they were set
            if (!jsonObject.has(JsonConstants.ID) && parameters.containsKey(JsonConstants.ID)) {
                jsonObject.put(JsonConstants.ID, parameters.get(JsonConstants.ID));
            }
            if (!jsonObject.has(JsonConstants.ENTITYNAME) && parameters.containsKey(JsonConstants.ENTITYNAME)) {
                jsonObject.put(JsonConstants.ENTITYNAME, parameters.get(JsonConstants.ENTITYNAME));
            }

            bobs = Collections.singletonList(fromJsonConverter.toBaseOBObject((JSONObject) jsonContent));
        }

        if (fromJsonConverter.hasErrors()) {
            OBDal.getInstance().rollbackAndClose();
            // report the errors
            final JSONObject jsonResult = new JSONObject();
            final JSONObject jsonResponse = new JSONObject();
            jsonResponse.put(JsonConstants.RESPONSE_STATUS, JsonConstants.RPCREQUEST_STATUS_VALIDATION_ERROR);
            final JSONObject errorsObject = new JSONObject();
            for (JsonConversionError error : fromJsonConverter.getErrors()) {
                errorsObject.put(error.getProperty().getName(), error.getThrowable().getMessage());
            }
            jsonResponse.put(JsonConstants.RESPONSE_ERRORS, errorsObject);
            jsonResult.put(JsonConstants.RESPONSE_RESPONSE, jsonResponse);
            return jsonResult.toString();
        } else {
            for (BaseOBObject bob : bobs) {
                OBDal.getInstance().save(bob);
            }
            OBDal.getInstance().flush();

            // business event handlers can change the data
            // flush again before refreshing, refreshing can
            // potentially remove any in-memory changes
            int countFlushes = 0;
            while (OBDal.getInstance().getSession().isDirty()) {
                OBDal.getInstance().flush();
                countFlushes++;
                // arbitrary point to give up...
                if (countFlushes > 100) {
                    throw new OBException("Infinite loop in flushing when persisting json: " + content);
                }
            }

            // refresh the objects from the db as they can have changed
            for (BaseOBObject bob : bobs) {
                OBDal.getInstance().getSession().refresh(bob);
                // if object has computed columns refresh from the database too
                if (bob.getEntity().hasComputedColumns()) {
                    OBDal.getInstance().getSession().refresh(bob.get(Entity.COMPUTED_COLUMNS_PROXY_PROPERTY));
                }
            }

            // almost successful, now create the response
            // needs to be done before the close of the session
            final DataToJsonConverter toJsonConverter = OBProvider.getInstance().get(DataToJsonConverter.class);
            toJsonConverter.setAdditionalProperties(JsonUtils.getAdditionalProperties(parameters));
            final List<JSONObject> jsonObjects = toJsonConverter.toJsonObjects(bobs);

            if (sendOriginalIdBack) {
                // now it is assumed that the jsonObjects are the same size and the same location
                // in the array
                if (jsonObjects.size() != originalData.size()) {
                    throw new OBException("Unequal sizes in json data processed " + jsonObjects.size() + " "
                            + originalData.size());
                }

                // now add the old id back
                for (int i = 0; i < originalData.size(); i++) {
                    final JSONObject original = originalData.get(i);
                    final JSONObject ret = jsonObjects.get(i);
                    if (original.has(JsonConstants.ID) && original.has(JsonConstants.NEW_INDICATOR)) {
                        ret.put(JsonConstants.ORIGINAL_ID, original.get(JsonConstants.ID));
                    }
                }
            }

            final JSONObject jsonResult = new JSONObject();
            final JSONObject jsonResponse = new JSONObject();
            jsonResponse.put(JsonConstants.RESPONSE_STATUS, JsonConstants.RPCREQUEST_STATUS_SUCCESS);
            jsonResponse.put(JsonConstants.RESPONSE_DATA, new JSONArray(jsonObjects));
            jsonResult.put(JsonConstants.RESPONSE_RESPONSE, jsonResponse);

            final String result;
            if (parameters.containsKey(ADD_FLAG)) {
                result = doPostAction(parameters, jsonResult.toString(), DataSourceAction.ADD, content);
            } else {
                result = doPostAction(parameters, jsonResult.toString(), DataSourceAction.UPDATE, content);
            }

            OBDal.getInstance().commitAndClose();

            return result;
        }
    } catch (Throwable t) {
        Throwable localThrowable = t;
        if (localThrowable.getCause() instanceof BatchUpdateException) {
            final BatchUpdateException batchException = (BatchUpdateException) localThrowable.getCause();
            localThrowable = batchException.getNextException();
        }
        log.error(localThrowable.getMessage(), localThrowable);
        return JsonUtils.convertExceptionToJson(localThrowable);
    }

}
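
When the chained message is destined for a client, as in the JSON conversion above, it can help to collect every message in the chain rather than only the first. chainMessages below is a hypothetical helper, not Openbravo API:

import java.sql.SQLException;

class ChainMessages {
    // Collects every message in the SQLException chain into one string,
    // e.g. for returning a readable error to an API client.
    static String chainMessages(SQLException ex) {
        StringBuilder sb = new StringBuilder();
        for (SQLException e = ex; e != null; e = e.getNextException()) {
            if (sb.length() > 0) {
                sb.append("; ");
            }
            sb.append(e.getMessage());
        }
        return sb.toString();
    }
}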

From source file: org.nuxeo.ecm.core.storage.sql.jdbc.JDBCRowMapper.java

/**
 * Inserts multiple rows, all for the same table.
 */
protected void insertSimpleRows(String tableName, List<Row> rows) throws StorageException {
    if (rows.isEmpty()) {
        return;
    }
    String sql = sqlInfo.getInsertSql(tableName);
    if (sql == null) {
        throw new StorageException("Unknown table: " + tableName);
    }
    String loggedSql = supportsBatchUpdates && rows.size() > 1 ? sql + " -- BATCHED" : sql;
    List<Column> columns = sqlInfo.getInsertColumns(tableName);
    try {
        PreparedStatement ps = connection.prepareStatement(sql);
        try {
            int batch = 0;
            for (Row row : rows) {
                batch++;
                if (logger.isLogEnabled()) {
                    logger.logSQL(loggedSql, columns, row);
                }
                int i = 1;
                for (Column column : columns) {
                    column.setToPreparedStatement(ps, i++, row.get(column.getKey()));
                }
                if (supportsBatchUpdates) {
                    ps.addBatch();
                    if (batch % UPDATE_BATCH_SIZE == 0) {
                        ps.executeBatch();
                        countExecute();
                    }
                } else {
                    ps.execute();
                    countExecute();
                }
            }
            if (supportsBatchUpdates) {
                ps.executeBatch();
                countExecute();
            }
        } finally {
            closeStatement(ps);
        }
    } catch (Exception e) {
        checkConnectionReset(e);
        if (e instanceof BatchUpdateException) {
            BatchUpdateException bue = (BatchUpdateException) e;
            if (e.getCause() == null && bue.getNextException() != null) {
                // provide a readable cause in the stack trace
                e.initCause(bue.getNextException());
            }
        }
        checkConcurrentUpdate(e);
        throw new StorageException("Could not insert: " + sql, e);
    }
}
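
The initCause trick in the catch block above deserves a note: getNextException() links are not part of the Throwable cause chain, so they never appear in stack traces unless attached explicitly. A minimal sketch of the same idea:

import java.sql.BatchUpdateException;

class ReadableTraces {
    // Promote the chained exception to the cause so standard stack traces
    // and logging frameworks show the driver's real error.
    static void attachChainAsCause(BatchUpdateException bue) {
        // initCause() refuses to overwrite an existing cause, hence the guard.
        if (bue.getCause() == null && bue.getNextException() != null) {
            bue.initCause(bue.getNextException());
        }
    }
}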