Example usage for java.sql SQLException getSQLState

List of usage examples for java.sql SQLException getSQLState

Introduction

On this page you can find example usage for java.sql SQLException getSQLState.

Prototype

public String getSQLState() 

Document

Retrieves the SQLState for this SQLException object.
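
As a minimal sketch of the typical pattern (the connection URL and table name below are assumptions for illustration, and an H2 driver on the classpath is assumed), the SQLState is usually read in a catch block together with getErrorCode() and getMessage() for logging or vendor-neutral branching:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class SQLStateExample {
    public static void main(String[] args) {
        String url = "jdbc:h2:mem:demo"; // hypothetical in-memory database URL
        try (Connection conn = DriverManager.getConnection(url);
                Statement stmt = conn.createStatement()) {
            // Querying a table that does not exist forces an SQLException
            stmt.executeQuery("SELECT * FROM no_such_table");
        } catch (SQLException ex) {
            // getSQLState() returns the standard five-character code, or null
            System.err.println("SQLState: " + ex.getSQLState());
            System.err.println("Vendor error code: " + ex.getErrorCode());
            System.err.println("Message: " + ex.getMessage());
        }
    }
}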

Usage

From source file:org.forgerock.openidm.repo.jdbc.impl.JDBCRepoService.java

/**
 * Gets an object from the repository by identifier. The returned object is not validated
 * against the current schema and may need processing to conform to an updated schema.
 * <p/>
 * The object will contain metadata properties, including object identifier {@code _id},
 * and object version {@code _rev} to enable optimistic concurrency supported by OrientDB and OpenIDM.
 *
 * @param fullId the identifier of the object to retrieve from the object set.
 * @return the requested object.
 * @throws NotFoundException   if the specified object could not be found.
 * @throws ForbiddenException  if access to the object is forbidden.
 * @throws BadRequestException if the passed identifier is invalid
 */
public Map<String, Object> read(String fullId) throws ObjectSetException {
    String localId = getLocalId(fullId);
    String type = getObjectType(fullId);

    if (fullId == null || localId == null) {
        throw new NotFoundException(
                "The repository requires clients to supply an identifier for the object to create. Full identifier: "
                        + fullId + " local identifier: " + localId);
    } else if (type == null) {
        throw new NotFoundException(
                "The object identifier did not include sufficient information to determine the object type: "
                        + fullId);
    }

    Connection connection = null;
    Map<String, Object> result = null;
    try {
        connection = getConnection();
        connection.setAutoCommit(true); // Ensure this does not get transaction isolation handling
        TableHandler handler = getTableHandler(type);
        if (handler == null) {
            throw new ObjectSetException("No handler configured for resource type " + type);
        }
        result = handler.read(fullId, type, localId, connection);
    } catch (SQLException ex) {
        if (logger.isDebugEnabled()) {
            logger.debug("SQL Exception in read of {} with error code {}, sql state {}",
                    new Object[] { fullId, ex.getErrorCode(), ex.getSQLState(), ex });
        }
        throw new InternalServerErrorException("Reading object failed " + ex.getMessage(), ex);
    } catch (ObjectSetException ex) {
        logger.debug("ObjectSetException in read of {}", fullId, ex);
        throw ex;
    } catch (IOException ex) {
        logger.debug("IO Exception in read of {}", fullId, ex);
        throw new InternalServerErrorException("Conversion of read object failed", ex);
    } finally {
        CleanupHelper.loggedClose(connection);
    }

    return result;
}

From source file:org.forgerock.openidm.repo.jdbc.impl.JDBCRepoService.java

/**
 * Performs the query on the specified object and returns the associated results.
 * <p/>
 * Queries are parametric; a set of named parameters is provided as the query criteria.
 * The query result is a JSON object structure composed of basic Java types.
 * <p/>
 * The returned map is structured as follows:
 * - The top level map contains meta-data about the query, plus an entry with the actual result records.
 * - The <code>QueryConstants</code> defines the map keys, including the result records (QUERY_RESULT)
 *
 * @param fullId identifies the object to query.
 * @param params the parameters of the query to perform.
 * @return the query results, which includes meta-data and the result records in JSON object structure format.
 * @throws NotFoundException   if the specified object could not be found.
 * @throws BadRequestException if the specified params contain invalid arguments, e.g. a query id that is not
 *                             configured, a query expression that is invalid, or missing query substitution tokens.
 * @throws ForbiddenException  if access to the object or specified query is forbidden.
 */
public Map<String, Object> query(String fullId, Map<String, Object> params) throws ObjectSetException {
    // TODO: replace with common utility
    String type = fullId;
    logger.trace("Full id: {} Extracted type: {}", fullId, type);

    Map<String, Object> result = new HashMap<String, Object>();
    Connection connection = null;
    try {
        TableHandler handler = getTableHandler(type);
        if (handler == null) {
            throw new ObjectSetException("No handler configured for resource type " + type);
        }
        connection = getConnection();
        connection.setAutoCommit(true); // Ensure we do not implicitly start transaction isolation

        long start = System.currentTimeMillis();
        List<Map<String, Object>> docs = handler.query(type, params, connection);
        long end = System.currentTimeMillis();
        result.put(QueryConstants.QUERY_RESULT, docs);
        // TODO: split out conversion time
        //result.put(QueryConstants.STATISTICS_CONVERSION_TIME, Long.valueOf(convEnd-convStart));

        result.put(QueryConstants.STATISTICS_QUERY_TIME, Long.valueOf(end - start));

        if (logger.isDebugEnabled()) {
            logger.debug("Query result contains {} records, took {} ms and took {} ms to convert result.",
                    new Object[] { ((List) result.get(QueryConstants.QUERY_RESULT)).size(),
                            result.get(QueryConstants.STATISTICS_QUERY_TIME),
                            result.get(QueryConstants.STATISTICS_CONVERSION_TIME) });
        }
    } catch (SQLException ex) {
        if (logger.isDebugEnabled()) {
            logger.debug("SQL Exception in query of {} with error code {}, sql state {}",
                    new Object[] { fullId, ex.getErrorCode(), ex.getSQLState(), ex });
        }
        throw new InternalServerErrorException("Querying failed: " + ex.getMessage(), ex);
    } catch (ObjectSetException ex) {
        logger.debug("ObjectSetException in query of {}", fullId, ex);
        throw ex;
    } finally {
        CleanupHelper.loggedClose(connection);
    }
    return result;
}

From source file:org.apache.hadoop.chukwa.analysis.salsa.visualization.Heatmap.java

/**
 * Interfaces with database to get data and 
 * populate data structures for rendering
 */
public HeatmapData getData() {
    // preliminary setup
    OfflineTimeHandler time_offline;
    TimeHandler time_online;
    long start, end, min, max;

    if (offline_use) {
        time_offline = new OfflineTimeHandler(param_map, this.timezone);
        start = time_offline.getStartTime();
        end = time_offline.getEndTime();
    } else {
        time_online = new TimeHandler(this.request, this.timezone);
        start = time_online.getStartTime();
        end = time_online.getEndTime();
    }

    DatabaseWriter dbw = new DatabaseWriter(this.cluster);

    // setup query
    String query;
    if (this.query_state != null && this.query_state.equals("read")) {
        query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["
                + table
                + "] where finish_time between '[start]' and '[end]' and (state_name like 'read_local' or state_name like 'read_remote')";
    } else if (this.query_state != null && this.query_state.equals("write")) {
        query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["
                + table
                + "] where finish_time between '[start]' and '[end]' and (state_name like 'write_local' or state_name like 'write_remote' or state_name like 'write_replicated')";
    } else {
        query = "select block_id,start_time,finish_time,start_time_millis,finish_time_millis,status,state_name,hostname,other_host,bytes from ["
                + table + "] where finish_time between '[start]' and '[end]' and state_name like '"
                + query_state + "'";
    }
    Macro mp = new Macro(start, end, query);
    query = mp.toString() + " order by start_time";

    ArrayList<HashMap<String, Object>> events = new ArrayList<HashMap<String, Object>>();

    ResultSet rs = null;

    log.debug("Query: " + query);
    // run query, extract results
    try {
        rs = dbw.query(query);
        ResultSetMetaData rmeta = rs.getMetaData();
        int col = rmeta.getColumnCount();
        while (rs.next()) {
            HashMap<String, Object> event = new HashMap<String, Object>();
            long event_time = 0;
            for (int i = 1; i <= col; i++) {
                if (rmeta.getColumnType(i) == java.sql.Types.TIMESTAMP) {
                    event.put(rmeta.getColumnName(i), rs.getTimestamp(i).getTime());
                } else {
                    event.put(rmeta.getColumnName(i), rs.getString(i));
                }
            }
            events.add(event);
        }
    } catch (SQLException ex) {
        // handle any errors
        log.error("SQLException: " + ex.getMessage());
        log.error("SQLState: " + ex.getSQLState());
        log.error("VendorError: " + ex.getErrorCode());
    } finally {
        dbw.close();
    }
    SimpleDateFormat format = new SimpleDateFormat("MMM dd yyyy HH:mm:ss");

    log.info(events.size() + " results returned.");

    HashSet<String> host_set = new HashSet<String>();
    HashMap<String, Integer> host_indices = new HashMap<String, Integer>();
    HashMap<Integer, String> host_rev_indices = new HashMap<Integer, String>();

    // collect hosts, name unique hosts
    for (int i = 0; i < events.size(); i++) {
        HashMap<String, Object> event = events.get(i);
        String curr_host = (String) event.get("hostname");
        String other_host = (String) event.get("other_host");
        host_set.add(curr_host);
        host_set.add(other_host);
    }
    int num_hosts = host_set.size();

    Iterator<String> host_iter = host_set.iterator();
    for (int i = 0; i < num_hosts && host_iter.hasNext(); i++) {
        String curr_host = host_iter.next();
        host_indices.put(curr_host, new Integer(i));
        host_rev_indices.put(new Integer(i), curr_host);
    }

    System.out.println("Number of hosts: " + num_hosts);
    long stats[][] = new long[num_hosts][num_hosts];
    long count[][] = new long[num_hosts][num_hosts]; // used for averaging

    int start_millis = 0, end_millis = 0;

    // deliberate design choice to duplicate code PER possible operation
    // otherwise we have to do the mode check N times, for N states returned
    //
    // compute aggregate statistics
    log.info("Query statistic type: " + this.query_stat_type);
    if (this.query_stat_type.equals("transaction_count")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            // to, from
            stats[other_host_idx][this_host_idx] += 1;
        }
    } else if (this.query_stat_type.equals("avg_duration")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            long curr_val = end_millis - start_millis + ((end - start) * 1000);

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
            count[other_host_idx][this_host_idx] += 1;
        }
        for (int i = 0; i < num_hosts; i++) {
            for (int j = 0; j < num_hosts; j++) {
                if (count[i][j] > 0)
                    stats[i][j] = stats[i][j] / count[i][j];
            }
        }
    } else if (this.query_stat_type.equals("avg_volume")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            long curr_val = Long.parseLong((String) event.get("bytes"));

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
            count[other_host_idx][this_host_idx] += 1;
        }
        for (int i = 0; i < num_hosts; i++) {
            for (int j = 0; j < num_hosts; j++) {
                if (count[i][j] > 0)
                    stats[i][j] = stats[i][j] / count[i][j];
            }
        }
    } else if (this.query_stat_type.equals("total_duration")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            double curr_val = end_millis - start_millis + ((end - start) * 1000);

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
        }
    } else if (this.query_stat_type.equals("total_volume")) {
        for (int i = 0; i < events.size(); i++) {
            HashMap<String, Object> event = events.get(i);
            start = (Long) event.get("start_time");
            end = (Long) event.get("finish_time");
            start_millis = Integer.parseInt(((String) event.get("start_time_millis")));
            end_millis = Integer.parseInt(((String) event.get("finish_time_millis")));
            String cell = (String) event.get("state_name");
            String this_host = (String) event.get("hostname");
            String other_host = (String) event.get("other_host");
            int this_host_idx = host_indices.get(this_host).intValue();
            int other_host_idx = host_indices.get(other_host).intValue();

            long curr_val = Long.parseLong((String) event.get("bytes"));

            // to, from
            stats[other_host_idx][this_host_idx] += curr_val;
        }
    }

    int[] permute = null;
    if (sort_nodes) {
        permute = hClust(stats);
        stats = doPermute(stats, permute);
    }

    Table agg_tab = new Table();
    agg_tab.addColumn("stat", long.class);
    min = Long.MAX_VALUE;
    max = Long.MIN_VALUE;
    agg_tab.addRows(num_hosts * num_hosts);

    // row-wise placement (row1, followed by row2, etc.)
    for (int i = 0; i < num_hosts; i++) {
        for (int j = 0; j < num_hosts; j++) {
            agg_tab.setLong((i * num_hosts) + j, "stat", stats[i][j]);
            if (stats[i][j] > max)
                max = stats[i][j];
            if (stats[i][j] > 0 && stats[i][j] < min)
                min = stats[i][j];
        }
    }
    if (min == Long.MAX_VALUE)
        min = 0;

    log.info(agg_tab);

    // collate data
    HeatmapData hd = new HeatmapData();
    hd.stats = new long[num_hosts][num_hosts];
    hd.stats = stats;
    hd.min = min;
    hd.max = max;
    hd.num_hosts = num_hosts;
    hd.agg_tab = agg_tab;

    this.add_info_extra = new String("\nState: " + this.prettyStateNames.get(this.query_state) + " ("
            + events.size() + " " + this.query_state + "'s [" + this.query_stat_type + "])\n"
            + "Plotted value range: [" + hd.min + "," + hd.max + "] (Zeros in black)");

    hd.hostnames = new String[num_hosts];
    for (int i = 0; i < num_hosts; i++) {
        // permute is only populated when sort_nodes is true, so fall back to
        // the original host order when no clustering permutation exists
        String curr_host;
        if (sort_nodes) {
            curr_host = host_rev_indices.get(new Integer(permute[i]));
        } else {
            curr_host = host_rev_indices.get(new Integer(i));
        }
        hd.hostnames[i] = new String(curr_host);
    }

    return hd;
}

From source file:org.executequery.gui.importexport.AbstractImportExportWorker.java

protected void outputExceptionError(String message, Throwable e) {
    if (message != null) {
        outputBuffer.append(message);
    }
    outputBuffer.append("\n[ ");
    outputBuffer.append(MiscUtils.getExceptionName(e));
    outputBuffer.append(" ] ");

    if (e instanceof DataSourceException) {
        outputBuffer.append(e.getMessage());
        outputBuffer.append(((DataSourceException) e).getExtendedMessage());
    } else if (e instanceof SQLException) {
        outputBuffer.append(e.getMessage());
        SQLException _e = (SQLException) e;
        outputBuffer.append(getBundle().getString("AbstractImportExportWorker.errorCode",
                String.valueOf(_e.getErrorCode())));

        String state = _e.getSQLState();
        if (state != null) {
            outputBuffer.append(getBundle().getString("AbstractImportExportWorker.stateCode", state));
        }

    } else {
        String exceptionMessage = e.getMessage();
        if (StringUtils.isNotBlank(exceptionMessage)) {
            outputBuffer.append(exceptionMessage);
        }
    }

    appendProgressErrorText(outputBuffer);
}

From source file:com.googlecode.fascinator.portal.services.impl.DatabaseServicesImpl.java

/**
 * Tapestry notification that server is shutting down
 */
@Override
public void registryDidShutdown() {
    log.info("Database services shutting down...");

    // Release all our queries
    for (String key : statements.keySet()) {
        close(statements.get(key));
    }

    // Shutdown database connections
    for (String key : dbConnections.keySet()) {
        try {
            dbConnections.get(key).close();
        } catch (SQLException ex) {
            log.error("Error closing database: ", ex);
        }
    }

    // Shutdown database engine
    // Derby can only be shutdown from one thread,
    // we'll catch errors from the rest.
    String threadedShutdownMessage = DERBY_DRIVER + " is not registered with the JDBC driver manager";
    try {
        // Tell the database to close
        DriverManager.getConnection(DERBY_PROTOCOL + ";shutdown=true");
    } catch (SQLException ex) {
        if (ex.getErrorCode() == 50000 && ex.getSQLState().equals("XJ015")) {
            // Valid response: Derby signals a clean shutdown this way
        } else {
            // Error response
            // Make sure we ignore simple thread issues
            if (!ex.getMessage().equals(threadedShutdownMessage)) {
                log.warn("Error during database shutdown:", ex);
            }
        }
    }
}

From source file:org.forgerock.openidm.repo.jdbc.impl.JDBCRepoService.java

/**
 * Updates the specified object in the object set.
 * <p/>
 * This implementation requires MVCC and hence enforces that clients state what revision they expect
 * to be updating
 * <p/>
 * If successful, this method updates metadata properties within the passed object,
 * including: a new {@code _rev} value for the revised object's version
 *
 * @param fullId the identifier of the object to be put, or {@code null} to request a generated identifier.
 * @param rev    the version of the object to update; or {@code null} if not provided.
 * @param obj    the contents of the object to put in the object set.
 * @throws ConflictException           if version is required but is {@code null}.
 * @throws ForbiddenException          if access to the object is forbidden.
 * @throws NotFoundException           if the specified object could not be found.
 * @throws PreconditionFailedException if version did not match the existing object in the set.
 * @throws BadRequestException         if the passed identifier is invalid
 */
public void update(String fullId, String rev, Map<String, Object> obj) throws ObjectSetException {

    String localId = getLocalId(fullId);
    String type = getObjectType(fullId);

    if (rev == null) {
        throw new ConflictException("Object passed into update does not have revision it expects set.");
    }

    Connection connection = null;
    Integer previousIsolationLevel = null;
    boolean retry = false;
    int tryCount = 0;
    do {
        TableHandler handler = getTableHandler(type);
        if (handler == null) {
            throw new ObjectSetException("No handler configured for resource type " + type);
        }
        retry = false;
        ++tryCount;
        try {
            connection = getConnection();
            previousIsolationLevel = new Integer(connection.getTransactionIsolation());
            connection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            connection.setAutoCommit(false);

            handler.update(fullId, type, localId, rev, obj, connection);

            connection.commit();
            logger.debug("Commited updated object for id: {}", fullId);
        } catch (SQLException ex) {
            if (logger.isDebugEnabled()) {
                logger.debug("SQL Exception in update of {} with error code {}, sql state {}",
                        new Object[] { fullId, ex.getErrorCode(), ex.getSQLState(), ex });
            }
            rollback(connection);
            if (handler.isRetryable(ex, connection)) {
                if (tryCount <= maxTxRetry) {
                    retry = true;
                    logger.debug("Retryable exception encountered, retry {}", ex.getMessage());
                }
            }
            if (!retry) {
                throw new InternalServerErrorException("Updating object failed " + ex.getMessage(), ex);
            }
        } catch (ObjectSetException ex) {
            logger.debug("ObjectSetException in update of {}", fullId, ex);
            rollback(connection);
            throw ex;
        } catch (java.io.IOException ex) {
            logger.debug("IO Exception in update of {}", fullId, ex);
            rollback(connection);
            throw new InternalServerErrorException("Conversion of object to update failed", ex);
        } catch (RuntimeException ex) {
            logger.debug("Runtime Exception in update of {}", fullId, ex);
            rollback(connection);
            throw new InternalServerErrorException(
                    "Updating object failed with unexpected failure: " + ex.getMessage(), ex);
        } finally {
            if (connection != null) {
                try {
                    if (previousIsolationLevel != null) {
                        connection.setTransactionIsolation(previousIsolationLevel.intValue());
                    }
                } catch (SQLException ex) {
                    logger.warn("Failure in resetting connection isolation level ", ex);
                }
                CleanupHelper.loggedClose(connection);
            }
        }
    } while (retry);
}

From source file:org.forgerock.openidm.repo.jdbc.impl.JDBCRepoService.java

/**
 * Creates a new object in the object set.
 * <p/>
 * This method sets the {@code _id} property to the assigned identifier for the object,
 * and the {@code _rev} property to the revised object version (For optimistic concurrency)
 *
 * @param fullId the client-generated identifier to use, or {@code null} if server-generated identifier is requested.
 * @param obj    the contents of the object to create in the object set.
 * @throws NotFoundException           if the specified id could not be resolved.
 * @throws ForbiddenException          if access to the object or object set is forbidden.
 * @throws PreconditionFailedException if an object with the same ID already exists.
 */
public void create(String fullId, Map<String, Object> obj) throws ObjectSetException {
    String localId = getLocalId(fullId);
    String type = getObjectType(fullId);

    if (fullId == null || localId == null) {
        throw new NotFoundException(
                "The repository requires clients to supply an identifier for the object to create. Full identifier: "
                        + fullId + " local identifier: " + localId);
    } else if (type == null) {
        throw new NotFoundException(
                "The object identifier did not include sufficient information to determine the object type: "
                        + fullId);
    }

    Connection connection = null;
    boolean retry = false;
    int tryCount = 0;
    do {
        TableHandler handler = getTableHandler(type);
        if (handler == null) {
            throw new ObjectSetException("No handler configured for resource type " + type);
        }
        retry = false;
        ++tryCount;
        try {
            connection = getConnection();
            connection.setAutoCommit(false);

            handler.create(fullId, type, localId, obj, connection);

            connection.commit();
            logger.debug("Commited created object for id: {}", fullId);

        } catch (SQLException ex) {
            if (logger.isDebugEnabled()) {
                logger.debug("SQL Exception in create of {} with error code {}, sql state {}",
                        new Object[] { fullId, ex.getErrorCode(), ex.getSQLState(), ex });
            }
            rollback(connection);
            boolean alreadyExisted = handler.isErrorType(ex, ErrorType.DUPLICATE_KEY);
            if (alreadyExisted) {
                throw new PreconditionFailedException(
                        "Create rejected as Object with same ID already exists and was detected. " + "("
                                + ex.getErrorCode() + "-" + ex.getSQLState() + ")" + ex.getMessage(),
                        ex);
            }
            if (handler.isRetryable(ex, connection)) {
                if (tryCount <= maxTxRetry) {
                    retry = true;
                    logger.debug("Retryable exception encountered, retry {}", ex.getMessage());
                }
            }
            if (!retry) {
                throw new InternalServerErrorException("Creating object failed " + "(" + ex.getErrorCode() + "-"
                        + ex.getSQLState() + ")" + ex.getMessage(), ex);
            }
        } catch (ObjectSetException ex) {
            logger.debug("ObjectSetException in create of {}", fullId, ex);
            rollback(connection);
            throw ex;
        } catch (java.io.IOException ex) {
            logger.debug("IO Exception in create of {}", fullId, ex);
            rollback(connection);
            throw new InternalServerErrorException("Conversion of object to create failed", ex);
        } catch (RuntimeException ex) {
            logger.debug("Runtime Exception in create of {}", fullId, ex);
            rollback(connection);
            throw new InternalServerErrorException(
                    "Creating object failed with unexpected failure: " + ex.getMessage(), ex);
        } finally {
            CleanupHelper.loggedClose(connection);
        }
    } while (retry);
}

From source file:org.apache.ojb.broker.util.ExceptionHelper.java

/**
 * Method which support the conversion of {@link java.sql.SQLException} to
 * OJB's runtime exception (with additional message details).
 *
 * @param message The error message to use, if <em>null</em> a standard message is used.
 * @param ex The exception to convert (mandatory).
 * @param sql The used sql-statement or <em>null</em>.
 * @param cld The {@link org.apache.ojb.broker.metadata.ClassDescriptor} of the target object or <em>null</em>.
 * @param values The values set in prepared statement or <em>null</em>.
 * @param logger The {@link org.apache.ojb.broker.util.logging.Logger} to log a detailed message
 * to the specified {@link org.apache.ojb.broker.util.logging.Logger} or <em>null</em> to skip logging message.
 * @param obj The target object or <em>null</em>.
 * @return A new created {@link org.apache.ojb.broker.PersistenceBrokerSQLException} based on the specified
 *         arguments.
 */
public static PersistenceBrokerSQLException generateException(String message, SQLException ex, String sql,
        ClassDescriptor cld, ValueContainer[] values, Logger logger, Object obj) {
    /*
    X/OPEN codes within class 23:
    23000   INTEGRITY CONSTRAINT VIOLATION
    23001   RESTRICT VIOLATION
    23502   NOT NULL VIOLATION
    23503   FOREIGN KEY VIOLATION
    23505   UNIQUE VIOLATION
    23514   CHECK VIOLATION
    */
    String eol = SystemUtils.LINE_SEPARATOR;
    StringBuffer msg = new StringBuffer(eol);
    eol += "* ";

    if (ex instanceof BatchUpdateException) {
        BatchUpdateException tmp = (BatchUpdateException) ex;
        if (message != null) {
            msg.append("* ").append(message);
        } else {
            msg.append("* BatchUpdateException during execution of sql-statement:");
        }
        msg.append(eol).append("Batch update count is '").append(tmp.getUpdateCounts()).append("'");
    } else if (ex instanceof SQLWarning) {
        if (message != null) {
            msg.append("* ").append(message);
        } else {
            msg.append("* SQLWarning during execution of sql-statement:");
        }
    } else {
        if (message != null) {
            msg.append("* ").append(message);
        } else {
            msg.append("* SQLException during execution of sql-statement:");
        }
    }

    if (sql != null) {
        msg.append(eol).append("sql statement was '").append(sql).append("'");
    }
    String stateCode = null;
    if (ex != null) {
        msg.append(eol).append("Exception message is [").append(ex.getMessage()).append("]");
        msg.append(eol).append("Vendor error code [").append(ex.getErrorCode()).append("]");
        msg.append(eol).append("SQL state code [");

        stateCode = ex.getSQLState();
        if ("23000".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=INTEGRITY CONSTRAINT VIOLATION");
        else if ("23001".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=RESTRICT VIOLATION");
        else if ("23502".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=NOT NULL VIOLATION");
        else if ("23503".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=FOREIGN KEY VIOLATION");
        else if ("23505".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=UNIQUE VIOLATION");
        else if ("23514".equalsIgnoreCase(stateCode))
            msg.append(stateCode).append("=CHECK VIOLATION");
        else
            msg.append(stateCode);
        msg.append("]");
    }

    if (cld != null) {
        msg.append(eol).append("Target class is '").append(cld.getClassNameOfObject()).append("'");
        FieldDescriptor[] fields = cld.getPkFields();
        msg.append(eol).append("PK of the target object is [");
        for (int i = 0; i < fields.length; i++) {
            try {
                if (i > 0)
                    msg.append(", ");
                msg.append(fields[i].getPersistentField().getName());
                if (obj != null) {
                    msg.append("=");
                    msg.append(fields[i].getPersistentField().get(obj));
                }
            } catch (Exception ignore) {
                msg.append(" PK field build FAILED! ");
            }
        }
        msg.append("]");
    }
    if (values != null) {
        msg.append(eol).append(values.length).append(" values performed in statement: ").append(eol);
        for (int i = 0; i < values.length; i++) {
            ValueContainer value = values[i];
            msg.append("[");
            msg.append("jdbcType=").append(JdbcTypesHelper.getSqlTypeAsString(value.getJdbcType().getType()));
            msg.append(", value=").append(value.getValue());
            msg.append("]");
        }
    }
    if (obj != null) {
        msg.append(eol).append("Source object: ");
        try {
            msg.append(obj.toString());
        } catch (Exception e) {
            msg.append(obj.getClass());
        }
    }

    // message string for PB exception
    String shortMsg = msg.toString();

    if (ex != null) {
        // add causing stack trace
        Throwable rootCause = ExceptionUtils.getRootCause(ex);
        if (rootCause == null)
            rootCause = ex;
        msg.append(eol).append("The root stack trace is --> ");
        String rootStack = ExceptionUtils.getStackTrace(rootCause);
        msg.append(eol).append(rootStack);
    }
    msg.append(SystemUtils.LINE_SEPARATOR).append("**");

    // log error message
    if (logger != null)
        logger.error(msg.toString());

    // throw a specific type of runtime exception for a key constraint.
    if ("23000".equals(stateCode) || "23505".equals(stateCode)) {
        throw new KeyConstraintViolatedException(shortMsg, ex);
    } else {
        throw new PersistenceBrokerSQLException(shortMsg, ex);
    }
}

From source file:QueryRunner.java

/**
 * Throws a new exception with a more informative error message.
 *
 * @param cause The original exception that will be chained to the new 
 * exception when it's rethrown. 
 * 
 * @param sql The query that was executing when the exception happened.
 * 
 * @param params The query replacement parameters; <code>null</code> is a 
 * valid value to pass in.
 * 
 * @throws SQLException if a database access error occurs
 */
protected void rethrow(SQLException cause, String sql, Object[] params) throws SQLException {

    String causeMessage = cause.getMessage();
    if (causeMessage == null) {
        causeMessage = "";
    }
    StringBuffer msg = new StringBuffer(causeMessage);

    msg.append(" Query: ");
    msg.append(sql);
    msg.append(" Parameters: ");

    if (params == null) {
        msg.append("[]");
    } else {
        msg.append(Arrays.asList(params));
    }

    SQLException e = new SQLException(msg.toString(), cause.getSQLState(), cause.getErrorCode());
    e.setNextException(cause);

    throw e;
}

From source file:com.openddal.test.BaseTestCase.java

/**
 * Check that a given exception is not an unexpected 'general error'
 * exception.
 *
 * @param message the message
 * @param e the exception
 */
protected void assertKnownException(String message, SQLException e) {
    if (e != null && e.getSQLState().startsWith("HY000")) {
        BaseTestCase.logError("Unexpected General error " + message, e);
    }
}
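
Note that getSQLState() may return null when the driver does not supply a state code, so checks like e.getSQLState().startsWith(...) in the example above are not null-safe. A minimal null-safe sketch of such a guard (SQLStateUtil and hasStateClass are hypothetical names, not part of any project quoted above):

import java.sql.SQLException;

public final class SQLStateUtil {

    private SQLStateUtil() {
    }

    // Returns true when the exception carries the given SQLState class prefix,
    // e.g. "23" for integrity constraint violations or "HY" for general errors.
    public static boolean hasStateClass(SQLException ex, String prefix) {
        String state = (ex == null) ? null : ex.getSQLState();
        return state != null && state.startsWith(prefix);
    }
}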