Example usage for java.sql SQLException getSQLState

Introduction

This page lists example usages of java.sql.SQLException#getSQLState collected from open-source projects.

Prototype

public String getSQLState() 

Document

Retrieves the SQLState for this SQLException object.
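
Before the project-specific examples, here is a minimal, self-contained sketch of how getSQLState() is typically consumed. The JDBC URL, credentials, table name, and the "23" state-class check are illustrative assumptions, not taken from the examples below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class SqlStateDemo {
    public static void main(String[] args) {
        // Hypothetical JDBC URL and table; substitute your own driver, credentials and schema.
        String url = "jdbc:postgresql://localhost:5432/testdb";
        try (Connection conn = DriverManager.getConnection(url, "user", "secret");
                Statement stmt = conn.createStatement()) {
            stmt.executeUpdate("INSERT INTO accounts (id) VALUES (1)");
        } catch (SQLException e) {
            // SQLSTATE is a five-character code whose first two characters identify the
            // error class, e.g. "23" = integrity constraint violation, "42" = syntax or access error.
            String state = e.getSQLState(); // may be null for some drivers
            if (state != null && state.startsWith("23")) {
                System.err.println("Constraint violation, SQLState=" + state);
            } else {
                System.err.println("SQL error, SQLState=" + state + ", vendor code=" + e.getErrorCode());
            }
        }
    }
}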

Usage

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private boolean createTable(Statement stmt, String cmd, String table) {
    boolean created = false;
    try {
        stmt.execute(cmd);
        created = true;
    } catch (SQLException sqle) {
        if (SQL.DUPLICATE_TABLE.contains(sqle.getSQLState())) {
            log.log(Level.INFO, "{0}{1}{2}", new Object[] { "Table ", table, " already exists" });
            created = true;
        } else {
            log.severe("Could not create table: " + table);
            sqle.printStackTrace();
        }
    }
    return created;
}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private boolean dropTable(String table) {
    boolean dropped = false;
    String cmd = "drop table " + table;
    Connection conn = null;
    PreparedStatement stmt = null;
    try {
        conn = this.getDefaultConn();
        stmt = conn.prepareStatement(cmd);
        stmt.execute();
        dropped = true;
    } catch (SQLException sqle) {
        if (!SQL.NONEXISTENT_TABLE.contains(sqle.getSQLState())) {
            log.severe(this + " could not drop table " + table + ":" + sqle.getSQLState() + ":"
                    + sqle.getErrorCode());
            sqle.printStackTrace();
        }
    } finally {
        this.cleanup(conn);
        this.cleanup(stmt);
    }
    return dropped;
}

From source file:org.getobjects.eoaccess.EOAdaptorChannel.java

/**
 * A primary fetch method.
 * <p>
 * Creates a PreparedStatement from the statement and the bindings of the
 * EOSQLExpression.
 * <p>
 * @param _sqlexpr - the EOSQLExpression to execute
 * @return the fetch results as a List of Maps
 */
public List<Map<String, Object>> evaluateQueryExpression(final EOSQLExpression _sqlexpr,
        final EOAttribute[] _optAttrs) {
    this.lastException = null;

    // System.err.println("\nEXEC: " + _s.statement());

    if (_sqlexpr == null) {
        log.error("evaluateQueryExpression() caller gave us no SQL ...");
        return null;
    }

    final List<Map<String, Object>> binds = _sqlexpr.bindVariableDictionaries();

    if (binds == null || binds.size() == 0)
        /* expression has no binds, perform a plain SQL query */
        return this.performSQL(_sqlexpr.statement(), _optAttrs);

    /* otherwise, create a PreparedStatement */

    final PreparedStatement stmt = this._prepareStatementWithBinds(_sqlexpr.statement(), binds);
    if (stmt == null) {
        log.error("could not create prepared statement for expr: " + _sqlexpr);
        return null;
    }

    /* perform query */

    this.lastException = null;
    List<Map<String, Object>> records = null;
    ResultSet rs = null;
    try {
        if (sqllog.isInfoEnabled())
            sqllog.info(_sqlexpr.statement());

        rs = stmt.executeQuery();

        SQLWarning warning = rs.getWarnings();
        if (warning != null) {
            // TBD: find out when this happens
            log.warn("detected SQL warning: " + warning);
        }

        /* Collect meta data, calling meta inside fetches is rather expensive,
         * even though the PG JDBC adaptor also has some cache.
         */
        final ResultSetMetaData meta = rs.getMetaData();
        final int columnCount = meta.getColumnCount();
        final String[] colNames = new String[columnCount];
        final int[] colHashes = new int[columnCount];
        final int[] colTypes = new int[columnCount];
        for (int i = 1; i <= columnCount; i++) {
            if (_optAttrs != null)
                colNames[i - 1] = _optAttrs[i - 1].columnName();
            else
                colNames[i - 1] = meta.getColumnName(i);

            colHashes[i - 1] = colNames[i - 1].hashCode();
            colTypes[i - 1] = meta.getColumnType(i);
        }

        /* loop over results and convert them to records */
        records = new ArrayList<Map<String, Object>>(128);
        while (rs.next()) {
            final EORecordMap record = new EORecordMap(colNames, colHashes);

            boolean ok = this.fillRecordMapFromResultSet(record, rs, colNames, colTypes);
            if (ok)
                records.add(record);
        }
    } catch (SQLException e) {
        /*
         * getSQLState()
         *   08S01 MySQL network-connect issues during the processing of a query
         *   42601 PG    syntax error
         *   42703 PG    column "number" does not exist
         *   22023 PG    No value specified for parameter 3 (eg multiple %andQual)
         */
        this.lastException = e;

        if (records != null && records.size() == 0) {
            records = null;
            if (log.isInfoEnabled()) {
                log.info("could not execute SQL expression " + e.getSQLState() + ":\n  " + _sqlexpr.statement(),
                        e);
            }

            // System.err.println("STATE: " + e.getSQLState());
        } else {
            log.warn("could not execute SQL expression " + e.getSQLState() + ":\n  " + _sqlexpr.statement(), e);
        }
    } finally {
        // TODO: we might also want to close our channel if the tear down was not
        //       clean
        this._releaseResources(stmt, rs);
    }

    return records;
}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private boolean addToNodeConfig(NodeIDType node, InetSocketAddress sockAddr, int version,
        boolean isReconfigurator) {
    String cmd = "insert into " + getNodeConfigTable() + " (" + Columns.RC_NODE_ID.toString() + ", "
            + Columns.INET_ADDRESS.toString() + ", " + Columns.PORT.toString() + ", "
            + Columns.NODE_CONFIG_VERSION.toString() + ", " + Columns.IS_RECONFIGURATOR.toString()
            + " ) values (?,?,?,?,?)";

    PreparedStatement insertCP = null;
    Connection conn = null;
    boolean added = false;
    try {
        conn = this.getDefaultConn();
        insertCP = conn.prepareStatement(cmd);
        insertCP.setString(1, node.toString());
        insertCP.setString(2, sockAddr.toString());
        insertCP.setInt(3, sockAddr.getPort());
        insertCP.setInt(4, version);
        insertCP.setInt(5, isReconfigurator ? 1 : 0); // 1 means true
        insertCP.executeUpdate();
        // conn.commit();
        added = true;
    } catch (SQLException sqle) {
        if (!SQL.DUPLICATE_KEY.contains(sqle.getSQLState())) {
            log.severe("SQLException while inserting RC record using " + cmd);
            sqle.printStackTrace();
        }
    } finally {
        cleanup(insertCP);
        cleanup(conn);
    }
    return added;
}

From source file:com.evolveum.midpoint.repo.sql.helpers.BaseHelper.java

private boolean isExceptionRelatedToSerializationInternal(Throwable ex) {

    if (ex instanceof PessimisticLockException || ex instanceof LockAcquisitionException
            || ex instanceof HibernateOptimisticLockingFailureException
            || ex instanceof StaleObjectStateException) { // todo the last one is questionable
        return true;
    }
    if (ExceptionUtil.findCause(ex, SerializationRelatedException.class) != null) {
        return true;
    }

    // It's not a locking exception (optimistic, pessimistic lock, or simple lock acquisition) understood by Hibernate.
    // However, it could still be such an exception wrapped in e.g. a TransactionException,
    // so we have a look inside and try to find an SQLException there.

    SQLException sqlException = findSqlException(ex);
    if (sqlException == null) {
        return false;
    }

    // these error codes / SQL states we consider related to locking:
    //  code 50200 [table timeout lock in H2, 50200 is LOCK_TIMEOUT_1 error code]
    //  code 40001 [DEADLOCK_1 in H2]
    //  state 40001 [serialization failure in PostgreSQL - http://www.postgresql.org/docs/9.1/static/transaction-iso.html - and probably also in other systems]
    //  state 40P01 [deadlock in PostgreSQL]
    //  code ORA-08177: can't serialize access for this transaction in Oracle
    //  code ORA-01466 ["unable to read data - table definition has changed"] in Oracle
    //  code ORA-01555: snapshot too old: rollback segment number  with name "" too small
    //  code ORA-22924: snapshot too old
    //
    // sql states should be somewhat standardized; sql error codes are vendor-specific
    // todo: it is probably not very safe to test for codes without also checking the specific database (H2, Oracle),
    // but the risk of a problem is quite low here, so let it be...

    // strange exception occurring in MySQL when doing multithreaded org closure maintenance
    // alternatively we might check for error code = 1030, sql state = HY000
    // but that would cover all cases of "Got error XYZ from storage engine"
    if (getConfiguration().isUsingMySqlCompatible() && sqlException.getMessage() != null
            && sqlException.getMessage().contains("Got error -1 from storage engine")) {
        return true;
    }

    // this is some recent H2 weirdness (MID-3969)
    if (getConfiguration().isUsingH2() && sqlException.getMessage() != null
            && sqlException.getMessage().contains(
                    "Referential integrity constraint violation: \"FK_AUDIT_ITEM: PUBLIC.M_AUDIT_ITEM FOREIGN KEY(RECORD_ID) REFERENCES PUBLIC.M_AUDIT_EVENT(ID)")) {
        return true;
    }

    return sqlException.getErrorCode() == 50200 || sqlException.getErrorCode() == 40001
            || "40001".equals(sqlException.getSQLState()) || "40P01".equals(sqlException.getSQLState())
            || sqlException.getErrorCode() == 8177 || sqlException.getErrorCode() == 1466
            || sqlException.getErrorCode() == 1555 || sqlException.getErrorCode() == 22924
            || sqlException.getErrorCode() == 3960; // Snapshot isolation transaction aborted due to update conflict.
}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

private boolean connectDB() {
    boolean connected = false;
    int connAttempts = 0, maxAttempts = 1;
    long interAttemptDelay = 2000; // ms
    Properties props = new Properties(); // connection properties
    // providing a user name and password is optional in embedded Derby
    props.put("user", SQL.getUser() + (isEmbeddedDB() ? this.getMyIDSanitized() : ""));
    props.put("password", SQL.getPassword());
    String dbCreation = SQL.getProtocolOrURL(SQL_TYPE) + (isEmbeddedDB() ? this.logDirectory + getMyDBName()
            + (!SQLPaxosLogger.existsDB(SQL_TYPE, this.logDirectory, getMyDBName()) ? ";create=true" : "")
            : getMyDBName() + "?createDatabaseIfNotExist=true");

    try {
        dataSource = (ComboPooledDataSource) setupDataSourceC3P0(dbCreation, props);
    } catch (SQLException e) {
        log.severe("Could not create pooled data source to DB " + dbCreation);
        e.printStackTrace();
        return false;
    }

    while (!connected && connAttempts < maxAttempts) {
        Connection conn = null;
        try {
            connAttempts++;
            log.info("Attempting getDefaultConn() to DB " + dbCreation);
            getDefaultConn(); // open first connection
            log.info("Connected to and created database " + getMyDBName());
            connected = true;
            if (isEmbeddedDB())
                fixURI(); // remove create flag
        } catch (SQLException sqle) {
            log.severe("Could not connect to the derby DB: " + sqle.getErrorCode() + ":" + sqle.getSQLState());
            sqle.printStackTrace();
            try {
                Thread.sleep(interAttemptDelay);
            } catch (InterruptedException ie) {
                ie.printStackTrace();
            }
        } finally {
            cleanup(conn); // close the test connection
        }
    }
    return connected;
}

From source file:org.apache.hive.jdbc.TestJdbcDriver2.java

private void doTestErrorCase(String sql, String expectedMessage, String expectedSQLState, int expectedErrorCode)
        throws SQLException {
    Statement stmt = con.createStatement();
    boolean exceptionFound = false;
    try {
        stmt.execute(sql);
    } catch (SQLException e) {
        assertTrue("Adequate error messaging not found for '" + sql + "': " + e.getMessage(),
                e.getMessage().contains(expectedMessage));
        assertEquals("Expected SQLState not found for '" + sql + "'", expectedSQLState, e.getSQLState());
        assertEquals("Expected error code not found for '" + sql + "'", expectedErrorCode, e.getErrorCode());
        exceptionFound = true;
    }

    assertTrue("Exception should have been thrown for query: " + sql, exceptionFound);
}

From source file:org.getobjects.eoaccess.EOAdaptorChannel.java

/**
 * Executes the SQL string and returns a Map containing the results of the
 * SQL.
 * <p>
 * If the SQL string is empty, an error is set and null is returned.
 * 
 * @return null on error (check lastException), or the fetch results
 */
public List<Map<String, Object>> performSQL(final String _sql, final EOAttribute[] _optAttrs) {
    if (_sql == null || _sql.length() == 0) {
        log.error("performSQL caller gave us no SQL ...");
        this.lastException = new Exception("got no SQL to perform!");
        return null;
    }
    this.lastException = null;

    /* acquire DB resources */

    final Statement stmt = this._createStatement();
    if (stmt == null)
        return null;

    /* perform query */

    ArrayList<Map<String, Object>> records = null;
    ResultSet rs = null;
    try {
        if (sqllog.isInfoEnabled())
            sqllog.info(_sql);

        rs = stmt.executeQuery(_sql);

        SQLWarning warning = rs.getWarnings();
        if (warning != null) {
            // TBD: find out when this happens
            log.warn("detected SQL warning: " + warning);
        }

        /* Collect meta data, calling meta inside fetches is rather expensive,
         * even though the PG JDBC adaptor also has some cache.
         */
        final ResultSetMetaData meta = rs.getMetaData();
        final int columnCount = meta.getColumnCount();
        final String[] colNames = new String[columnCount];
        final int[] colHashes = new int[columnCount];
        final int[] colTypes = new int[columnCount];
        for (int i = 1; i <= columnCount; i++) {
            if (_optAttrs != null)
                colNames[i - 1] = _optAttrs[i - 1].columnName();
            else
                colNames[i - 1] = meta.getColumnName(i);

            colHashes[i - 1] = colNames[i - 1].hashCode();
            colTypes[i - 1] = meta.getColumnType(i);
        }

        /* loop over results and convert them to records */
        records = new ArrayList<Map<String, Object>>(128);
        while (rs.next()) {
            EORecordMap record = new EORecordMap(colNames, colHashes);

            boolean ok = this.fillRecordMapFromResultSet(record, rs, colNames, colTypes);
            if (ok)
                records.add(record);
        }
    } catch (SQLException e) {
        /*
         * SQLState:
         * 42601 - PostgreSQL for invalid SQL, like "SELECT *" or "IN ()"
         * 42804 - PostgreSQL for
         *           IN types character varying and integer cannot be matched
         * 42P01 - PostgreSQL: relation 'notes' does not exist
         * 42703 - PostgreSQL: column "lastname" does not exist
         */
        this.lastException = e;

        /* Note: if we already fetched records, we actually return them ... */
        if (records != null && records.size() == 0) {
            records = null;
            if (log.isInfoEnabled()) {
                log.info("could not execute SQL statement (state=" + e.getSQLState() + "): " + _sql, e);
            }

            // System.err.println("STATE: " + e.getSQLState());
        } else {
            log.warn("could not execute SQL statement (state=" + e.getSQLState() + "): " + _sql, e);
        }
    } finally {
        // TODO: we might also want to close our channel if the tear down was not
        //       clean
        this._releaseResources(stmt, rs);
    }

    if (sqllog.isDebugEnabled())
        sqllog.debug("  GOT RESULTS: " + records);

    /* compact array */
    if (records != null)
        records.trimToSize();

    return records;
}

From source file:com.runwaysdk.dataaccess.database.general.PostgreSQL.java

/**
 * Translates the given SQLException into the appropriate runtime exception,
 * based on its SQLState and message.
 *
 * @param ex the SQLException to translate
 * @param debugMsg additional debug context
 */
public void throwDatabaseException(SQLException ex, String debugMsg) {
    String errorCode = ex.getSQLState();
    String errorMessage = ex.getMessage();

    // In PostgreSQL, no more queries to the database can occur during the
    // session until the transaction block has been closed. This is done with a rollback.
    try {
        try {
            // If there is a savepoint, then don't roll back. The calling code
            // that has set the savepoint should release it.
            Database.peekCurrentSavepoint();
        } catch (EmptyStackException e) {
            Connection conn = Database.getConnection();
            conn.rollback();
        }
    } catch (SQLException sqlEx) {
        throw new DatabaseException(sqlEx);
    }

    if (errorCode == null) {
        throw new DatabaseException(errorMessage, debugMsg);
    }

    errorCode = errorCode.trim();

    if (errorCode.equals("23505")) {
        int startIndex = errorMessage.indexOf("\"") + 1;
        int endIndex = errorMessage.indexOf("\"", startIndex);
        String indexName = errorMessage.substring(startIndex, endIndex);

        if (indexName.substring(0, 4).equalsIgnoreCase(MdRelationshipDAOIF.INDEX_PREFIX)) {
            String error = "Constraint [" + indexName + "] on relationship violated";

            throw new DuplicateGraphPathException(error);
        } else {
            String error = "Constraint [" + indexName + "] on object violated";

            int pkeyStartIndex = indexName.indexOf(PRIMARY_KEY_SUFFIX);

            if (indexName.contains(PRIMARY_KEY_SUFFIX)
                    && indexName.substring(pkeyStartIndex, indexName.length()).equals(PRIMARY_KEY_SUFFIX)) {
                String tableName = indexName.substring(0, pkeyStartIndex).trim();

                throw new DuplicateDataDatabaseException(error, ex, indexName, tableName);
            } else {
                throw new DuplicateDataDatabaseException(error, ex, indexName);
            }
        }

    }

    if (errorCode.equals("21000")) {
        String errMsg = "Subquery returns more than 1 row";
        throw new SubSelectReturnedMultipleRowsException(errMsg);
    }

    if (errorCode.equals("22003")) {
        String errMsg = "Numeric input overflowed the bounds of its column";
        throw new NumericFieldOverflowException(errMsg);
    }

    if (DatabaseProperties.isSeriousError(errorCode)) {
        throw new ProgrammingErrorException(debugMsg, ex);
    } else {
        throw new DatabaseException(errorMessage, debugMsg);
    }
}

From source file:org.apache.marmotta.kiwi.persistence.KiWiConnection.java

/**
 * Store a triple in the database. This method assumes that all nodes used by the triple are already persisted.
 *
 * @param triple     the triple to store
 * @throws SQLException
 * @throws NullPointerException in case the subject, predicate, object or context have not been persisted
 */
public synchronized void storeTriple(final KiWiTriple triple) throws SQLException {
    // mutual exclusion: prevent parallel adding and removing of the same triple
    synchronized (triple) {

        requireJDBCConnection();

        if (triple.getId() < 0) {
            triple.setId(getNextSequence());
        }

        if (deletedStatementsLog.mightContain(triple.getId())) {
            // this is a hack for a concurrency problem that may occur in case the triple is removed in the
            // transaction and then added again; in these cases the createStatement method might return
            // an expired state of the triple because it uses its own database connection

            //deletedStatementsLog.remove(triple.getId());
            undeleteTriple(triple);

        } else {

            if (batchCommit) {
                commitLock.lock();
                try {
                    cacheTriple(triple);
                    tripleBatch.add(triple);
                    if (tripleBatch.size() >= batchSize) {
                        flushBatch();
                    }
                } finally {
                    commitLock.unlock();
                }
            } else {
                Preconditions.checkNotNull(triple.getSubject().getId());
                Preconditions.checkNotNull(triple.getPredicate().getId());
                Preconditions.checkNotNull(triple.getObject().getId());

                try {
                    RetryExecution<Boolean> execution = new RetryExecution<>("STORE");
                    execution.setUseSavepoint(true);
                    execution.execute(connection, new RetryCommand<Boolean>() {
                        @Override
                        public Boolean run() throws SQLException {
                            PreparedStatement insertTriple = getPreparedStatement("store.triple");
                            insertTriple.setLong(1, triple.getId());
                            insertTriple.setLong(2, triple.getSubject().getId());
                            insertTriple.setLong(3, triple.getPredicate().getId());
                            insertTriple.setLong(4, triple.getObject().getId());
                            if (triple.getContext() != null) {
                                insertTriple.setLong(5, triple.getContext().getId());
                            } else {
                                insertTriple.setNull(5, Types.BIGINT);
                            }
                            insertTriple.setBoolean(6, triple.isInferred());
                            insertTriple.setTimestamp(7, new Timestamp(triple.getCreated().getTime()));
                            int count = insertTriple.executeUpdate();

                            cacheTriple(triple);

                            return count > 0;
                        }
                    });

                } catch (SQLException ex) {
                    if ("HYT00".equals(ex.getSQLState())) { // H2 table locking timeout
                        throw new ConcurrentModificationException(
                                "the same triple was modified in concurrent transactions (triple=" + triple
                                        + ")");
                    } else {
                        throw ex;
                    }
                }
            }
        }
    }
}