Example usage for java.sql Connection TRANSACTION_SERIALIZABLE

List of usage examples for java.sql Connection TRANSACTION_SERIALIZABLE

Introduction

On this page you can find example usage of java.sql Connection TRANSACTION_SERIALIZABLE.

Prototype

int TRANSACTION_SERIALIZABLE

To view the source code for java.sql Connection TRANSACTION_SERIALIZABLE, click the Source link.

Document

A constant indicating that dirty reads, non-repeatable reads and phantom reads are prevented.
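
As a quick, hedged illustration (not taken from any of the sources below; the in-memory H2 URL and credentials are placeholders), this is how the constant is typically applied to a plain JDBC connection:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class SerializableIsolationSketch {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL and credentials; substitute your own database.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "")) {
            // Not every driver supports every level, so check first.
            if (conn.getMetaData().supportsTransactionIsolationLevel(Connection.TRANSACTION_SERIALIZABLE)) {
                conn.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);
            }
            conn.setAutoCommit(false);
            // ... work inside the transaction: dirty, non-repeatable and phantom reads are prevented ...
            conn.commit();
        }
    }
}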

Usage

From source file: org.apache.cayenne.conf.CustomDBCPDataSourceBuilder.java

private ObjectPool createConnectionPool() {

    ConnectionFactory factory = createConnectionFactory();
    GenericObjectPool.Config poolConfig = createConnectionPoolConfig();
    KeyedObjectPoolFactory statementPool = createPreparedStatementPool();

    String validationQuery = config.getString(VALIDATION_QUERY);
    boolean defaultReadOnly = config.getBoolean(READ_ONLY, false);
    boolean defaultAutoCommit = config.getBoolean(AUTO_COMMIT, false);
    int defaultTransactionIsolation = config.getTransactionIsolation(TRANSACTION_ISOLATION,
            Connection.TRANSACTION_SERIALIZABLE);
    String defaultCatalog = config.getString(CATALOG);

    String initSql = config.getString(INIT_SQL);
    List<String> initSqls = null;
    if (initSql != null && !initSql.isEmpty()) {
        initSqls = new ArrayList<String>();
        initSqls.add(initSql);
    }

    ObjectPool connectionPool = new GenericObjectPool(null, poolConfig);

    // a side effect of PoolableConnectionFactory constructor call is that newly
    // created factory object is assigned to "connectionPool", which is definitely a
    // very confusing part of DBCP - new object is not visibly assigned to anything,
    // still it is preserved...
    new PoolableConnectionFactory(factory, connectionPool, statementPool, validationQuery, initSqls,
            defaultReadOnly ? Boolean.TRUE : Boolean.FALSE, defaultAutoCommit, defaultTransactionIsolation,
            defaultCatalog, null);

    return connectionPool;
}
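
A brief follow-up sketch, not part of the Cayenne source and assuming commons-dbcp 1.x and commons-pool 1.x on the classpath: the pool returned above is typically wrapped in a PoolingDataSource, and the self-registration side effect described in the comment is what makes pooled connections available through it.

import java.sql.Connection;
import javax.sql.DataSource;
import org.apache.commons.dbcp.PoolingDataSource;
import org.apache.commons.pool.ObjectPool;

public class PoolUsageSketch {

    // Hypothetical consumer of the pool built by createConnectionPool().
    static void useOnce(ObjectPool connectionPool) throws Exception {
        DataSource dataSource = new PoolingDataSource(connectionPool);
        try (Connection connection = dataSource.getConnection()) {
            // The connection already carries the configured defaults:
            // read-only flag, auto-commit flag and TRANSACTION_SERIALIZABLE isolation.
        }
    }
}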

From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * Sets the user to run as.  This is for the case
 * where the request was generated by the user and so the worker must set this value later.
 * @param cq_id id of this entry in the queue
 * @param user user to run the jobs as
 */
public void setRunAs(long cq_id, String user) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "update COMPACTION_QUEUE set cq_run_as = '" + user + "' where cq_id = " + cq_id;
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) != 1) {
                LOG.error("Unable to update compaction record");
                LOG.debug("Going to rollback");
                dbConn.rollback();
            }
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to update compaction queue, " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "setRunAs(cq_id:" + cq_id + ",user:" + user + ")");
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        setRunAs(cq_id, user);
    }
}
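
As an aside, and not part of the Hive source, the same update could be issued through a PreparedStatement so that the user value is bound as a parameter instead of being concatenated into the SQL string. The sketch below assumes the same dbConn, user and cq_id variables as the method above, plus an import of java.sql.PreparedStatement:

String sql = "update COMPACTION_QUEUE set cq_run_as = ? where cq_id = ?";
try (PreparedStatement ps = dbConn.prepareStatement(sql)) {
    ps.setString(1, user);
    ps.setLong(2, cq_id);
    if (ps.executeUpdate() != 1) {
        LOG.error("Unable to update compaction record");
        dbConn.rollback();
    } else {
        dbConn.commit();
    }
}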

From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * This will grab the next compaction request off of
 * the queue, and assign it to the worker.
 * @param workerId id of the worker calling this, will be recorded in the db
 * @return an info element for this compaction request, or null if there is no work to do now.
 */
public CompactionInfo findNextToCompact(String workerId) throws MetaException {
    try {
        Connection dbConn = null;
        CompactionInfo info = new CompactionInfo();

        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "select cq_id, cq_database, cq_table, cq_partition, "
                    + "cq_type from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'";
            LOG.debug("Going to execute query <" + s + ">");
            ResultSet rs = stmt.executeQuery(s);
            if (!rs.next()) {
                LOG.debug("No compactions found ready to compact");
                dbConn.rollback();
                return null;
            }
            info.id = rs.getLong(1);
            info.dbname = rs.getString(2);
            info.tableName = rs.getString(3);
            info.partName = rs.getString(4);
            switch (rs.getString(5).charAt(0)) {
            case MAJOR_TYPE:
                info.type = CompactionType.MAJOR;
                break;
            case MINOR_TYPE:
                info.type = CompactionType.MINOR;
                break;
            default:
                throw new MetaException("Unexpected compaction type " + rs.getString(5));
            }

            // Now, update this record as being worked on by this worker.
            long now = getDbTime(dbConn);
            s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " + "cq_start = " + now
                    + ", cq_state = '" + WORKING_STATE + "' where cq_id = " + info.id;
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) != 1) {
                LOG.error("Unable to update compaction record");
                LOG.debug("Going to rollback");
                dbConn.rollback();
            }
            LOG.debug("Going to commit");
            dbConn.commit();
            return info;
        } catch (SQLException e) {
            LOG.error("Unable to select next element for compaction, " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "findNextToCompact(workerId:" + workerId + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        return findNextToCompact(workerId);
    }
}

From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * This will mark an entry in the queue as compacted
 * and put it in the ready to clean state.
 * @param info info on the compaction entry to mark as compacted.
 */
public void markCompacted(CompactionInfo info) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "update COMPACTION_QUEUE set cq_state = '" + READY_FOR_CLEANING + "', "
                    + "cq_worker_id = null where cq_id = " + info.id;
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) != 1) {
                LOG.error("Unable to update compaction record");
                LOG.debug("Going to rollback");
                dbConn.rollback();
            }
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to update compaction queue " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "markCompacted(" + info + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        markCompacted(info);
    }
}

From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * This will remove an entry from the queue after
 * it has been compacted.
 * @param info info on the compaction entry to remove
 */
public void markCleaned(CompactionInfo info) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "delete from COMPACTION_QUEUE where cq_id = " + info.id;
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) != 1) {
                LOG.error("Unable to delete compaction record");
                LOG.debug("Going to rollback");
                dbConn.rollback();
            }

            // Remove entries from completed_txn_components as well, so we don't start looking there
            // again.
            s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = '" + info.dbname + "' and "
                    + "ctc_table = '" + info.tableName + "'";
            if (info.partName != null) {
                s += " and ctc_partition = '" + info.partName + "'";
            }
            LOG.debug("Going to execute update <" + s + ">");
            if (stmt.executeUpdate(s) < 1) {
                LOG.error("Expected to remove at least one row from completed_txn_components when "
                        + "marking compaction entry as clean!");
            }

            s = "select txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '"
                    + TXN_ABORTED + "' and tc_database = '" + info.dbname + "' and tc_table = '"
                    + info.tableName + "'";
            if (info.partName != null)
                s += " and tc_partition = '" + info.partName + "'";
            LOG.debug("Going to execute update <" + s + ">");
            ResultSet rs = stmt.executeQuery(s);
            Set<Long> txnids = new HashSet<Long>();
            while (rs.next())
                txnids.add(rs.getLong(1));
            if (txnids.size() > 0) {

                // Remove entries from txn_components, as there may be aborted txn components
                StringBuilder buf = new StringBuilder();
                buf.append("delete from TXN_COMPONENTS where tc_txnid in (");
                boolean first = true;
                for (long id : txnids) {
                    if (first)
                        first = false;
                    else
                        buf.append(", ");
                    buf.append(id);
                }

                buf.append(") and tc_database = '");
                buf.append(info.dbname);
                buf.append("' and tc_table = '");
                buf.append(info.tableName);
                buf.append("'");
                if (info.partName != null) {
                    buf.append(" and tc_partition = '");
                    buf.append(info.partName);
                    buf.append("'");
                }
                LOG.debug("Going to execute update <" + buf.toString() + ">");
                int rc = stmt.executeUpdate(buf.toString());
                LOG.debug("Removed " + rc + " records from txn_components");

                // Don't bother cleaning from the txns table.  A separate call will do that.  We don't
                // know here which txns still have components from other tables or partitions in the
                // table, so we don't know which ones we can and cannot clean.
            }

            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to delete from compaction queue " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "markCleaned(" + info + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        markCleaned(info);
    }
}

From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * Clean up aborted transactions from txns that have no components in txn_components.
 */
public void cleanEmptyAbortedTxns() throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "select txn_id from TXNS where "
                    + "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " + "txn_state = '" + TXN_ABORTED
                    + "'";
            LOG.debug("Going to execute query <" + s + ">");
            ResultSet rs = stmt.executeQuery(s);
            Set<Long> txnids = new HashSet<Long>();
            while (rs.next())
                txnids.add(rs.getLong(1));
            if (txnids.size() > 0) {
                StringBuilder buf = new StringBuilder("delete from TXNS where txn_id in (");
                boolean first = true;
                for (long tid : txnids) {
                    if (first)
                        first = false;
                    else
                        buf.append(", ");
                    buf.append(tid);
                }
                buf.append(")");
                String bufStr = buf.toString();
                LOG.debug("Going to execute update <" + bufStr + ">");
                int rc = stmt.executeUpdate(bufStr);
                LOG.debug("Removed " + rc + " records from txns");
                LOG.debug("Going to commit");
                dbConn.commit();
            }
        } catch (SQLException e) {
            LOG.error("Unable to delete from txns table " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "cleanEmptyAbortedTxns");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        cleanEmptyAbortedTxns();
    }
}

From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * This will take all entries assigned to workers
 * on a host return them to INITIATED state.  The initiator should use this at start up to
 * clean entries from any workers that were in the middle of compacting when the metastore
 * shutdown.  It does not reset entries from worker threads on other hosts as those may still
 * be working.
 * @param hostname Name of this host.  It is assumed this prefixes the thread's worker id,
 *                 so that like hostname% will match the worker id.
 */
public void revokeFromLocalWorkers(String hostname) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            stmt = dbConn.createStatement();
            String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
                    + INITIATED_STATE + "' where cq_state = '" + WORKING_STATE + "' and cq_worker_id like '"
                    + hostname + "%'";
            LOG.debug("Going to execute update <" + s + ">");
            // It isn't an error if the following returns no rows, as the local workers could have died
            // with  nothing assigned to them.
            stmt.executeUpdate(s);
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to change dead worker's records back to initiated state " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "revokeFromLocalWorkers(hostname:" + hostname + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        revokeFromLocalWorkers(hostname);
    }
}

From source file: org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * This call will return all compaction queue
 * entries assigned to a worker but over the timeout back to the initiated state.
 * This should be called by the initiator on start up and occasionally when running to clean up
 * after dead threads.  At start up {@link #revokeFromLocalWorkers(String)} should be called
 * first.
 * @param timeout number of milliseconds since start time that should elapse before a worker is
 *                declared dead.
 */
public void revokeTimedoutWorkers(long timeout) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_SERIALIZABLE);
            long latestValidStart = getDbTime(dbConn) - timeout;
            stmt = dbConn.createStatement();
            String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
                    + INITIATED_STATE + "' where cq_state = '" + WORKING_STATE + "' and cq_start < "
                    + latestValidStart;
            LOG.debug("Going to execute update <" + s + ">");
            // It isn't an error if the following returns no rows, as the local workers could have died
            // with  nothing assigned to them.
            stmt.executeUpdate(s);
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.error("Unable to change dead worker's records back to initiated state " + e.getMessage());
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "revokeTimedoutWorkers(timeout:" + timeout + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            closeStmt(stmt);
        }
    } catch (RetryException e) {
        revokeTimedoutWorkers(timeout);
    }
}

From source file: org.apache.hadoop.hive.metastore.txn.TestTxnHandler.java

@Test
@Ignore
public void deadlockDetected() throws Exception {
    LOG.debug("Starting deadlock test");
    Connection conn = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
    Statement stmt = conn.createStatement();
    long now = txnHandler.getDbTime(conn);
    stmt.executeUpdate("insert into TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, "
            + "txn_user, txn_host) values (1, 'o', " + now + ", " + now + ", 'shagy', " + "'scooby.com')");
    stmt.executeUpdate("insert into HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, "
            + "hl_db, hl_table, hl_partition, hl_lock_state, hl_lock_type, hl_last_heartbeat, "
            + "hl_user, hl_host) values (1, 1, 1, 'mydb', 'mytable', 'mypartition', '" + txnHandler.LOCK_WAITING
            + "', '" + txnHandler.LOCK_EXCLUSIVE + "', " + now + ", 'fred', " + "'scooby.com')");
    conn.commit();
    txnHandler.closeDbConn(conn);

    final AtomicBoolean sawDeadlock = new AtomicBoolean();

    final Connection conn1 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
    final Connection conn2 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE);
    try {

        for (int i = 0; i < 5; i++) {
            Thread t1 = new Thread() {
                @Override
                public void run() {
                    try {
                        try {
                            updateTxns(conn1);
                            updateLocks(conn1);
                            Thread.sleep(1000);
                            conn1.commit();
                            LOG.debug("no exception, no deadlock");
                        } catch (SQLException e) {
                            try {
                                txnHandler.checkRetryable(conn1, e, "thread t1");
                                LOG.debug("Got an exception, but not a deadlock, SQLState is " + e.getSQLState()
                                        + " class of exception is " + e.getClass().getName() + " msg is <"
                                        + e.getMessage() + ">");
                            } catch (TxnHandler.RetryException de) {
                                LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of "
                                        + "exception is " + e.getClass().getName() + " msg is <"
                                        + e.getMessage() + ">");
                                sawDeadlock.set(true);
                            }
                        }
                        conn1.rollback();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            };

            Thread t2 = new Thread() {
                @Override
                public void run() {
                    try {
                        try {
                            updateLocks(conn2);
                            updateTxns(conn2);
                            Thread.sleep(1000);
                            conn2.commit();
                            LOG.debug("no exception, no deadlock");
                        } catch (SQLException e) {
                            try {
                                txnHandler.checkRetryable(conn2, e, "thread t2");
                                LOG.debug("Got an exception, but not a deadlock, SQLState is " + e.getSQLState()
                                        + " class of exception is " + e.getClass().getName() + " msg is <"
                                        + e.getMessage() + ">");
                            } catch (TxnHandler.RetryException de) {
                                LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of "
                                        + "exception is " + e.getClass().getName() + " msg is <"
                                        + e.getMessage() + ">");
                                sawDeadlock.set(true);
                            }
                        }
                        conn2.rollback();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            };

            t1.start();
            t2.start();
            t1.join();
            t2.join();
            if (sawDeadlock.get())
                break;
        }
        assertTrue(sawDeadlock.get());
    } finally {
        conn1.rollback();
        txnHandler.closeDbConn(conn1);
        conn2.rollback();
        txnHandler.closeDbConn(conn2);
    }
}

From source file: org.apache.hadoop.mapreduce.lib.db.DBInputFormat.java

public Connection createConnection() {
    try {
        Connection newConnection = dbConf.getConnection();
        newConnection.setAutoCommit(false);
        newConnection.setTransactionIsolation(Connection.TRANSACTION_SERIALIZABLE);

        return newConnection;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
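
For context (not shown in the Hadoop source above), createConnection() is invoked when a job has been pointed at a database through DBInputFormat. A hedged sketch of that setup follows; the driver, URL, credentials, table name and the MyRecord DBWritable class are placeholder assumptions, so this will not compile until MyRecord is supplied:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBInputFormat;

public class DbInputJobSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder driver, URL and credentials.
        DBConfiguration.configureDB(conf, "org.h2.Driver", "jdbc:h2:mem:demo", "sa", "");
        Job job = Job.getInstance(conf, "db-input-example");
        job.setInputFormatClass(DBInputFormat.class);
        // MyRecord is a hypothetical DBWritable implementation describing one row of MY_TABLE.
        DBInputFormat.setInput(job, MyRecord.class, "MY_TABLE", null, "ID", "ID", "NAME");
        // Each record reader then obtains its JDBC connection through createConnection(),
        // which, as shown above, disables auto-commit and sets TRANSACTION_SERIALIZABLE.
    }
}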