Example usage for java.sql Connection TRANSACTION_READ_COMMITTED

List of usage examples for java.sql Connection TRANSACTION_READ_COMMITTED

Introduction

In this page you can find the example usage for java.sql Connection TRANSACTION_READ_COMMITTED.

Prototype

int TRANSACTION_READ_COMMITTED

To view the source code for java.sql Connection TRANSACTION_READ_COMMITTED, use the Source Link below.

Click Source Link

Document

A constant indicating that dirty reads are prevented; non-repeatable reads and phantom reads can occur.

Usage

From source file:com.cloudera.sqoop.manager.SqlManager.java

/**
 * @return the transaction isolation level to use for metadata queries
 * (queries executed by the ConnManager itself). Defaults to
 * READ_COMMITTED; protected so subclasses can override it for databases
 * that need a different level.
 */
protected int getMetadataIsolationLevel() {
    return Connection.TRANSACTION_READ_COMMITTED;
}

From source file:org.wso2.carbon.user.core.util.DatabaseUtil.java

/**
 * Obtains a connection from the given data source, prepared for transactional
 * use: auto-commit is switched off and the isolation level is changed to
 * READ_COMMITTED when it is not already at that level.
 *
 * @param dataSource the source to borrow the connection from
 * @return a connection with auto-commit disabled and READ_COMMITTED isolation
 * @throws SQLException if the connection cannot be obtained or configured
 */
public static Connection getDBConnection(DataSource dataSource) throws SQLException {
    Connection connection = dataSource.getConnection();
    connection.setAutoCommit(false);
    boolean alreadyReadCommitted =
            connection.getTransactionIsolation() == Connection.TRANSACTION_READ_COMMITTED;
    if (!alreadyReadCommitted) {
        connection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    }
    return connection;
}

From source file:gemlite.core.internal.db.DBSynchronizer.java

/**
 * (Re)creates this synchronizer's JDBC connection using the already-resolved
 * {@code driver} directly; falls back to {@link #initConnection()} when no
 * driver has been resolved yet. On success the connection is left with
 * auto-commit disabled and, where the database supports it, an isolation
 * level of at least READ_COMMITTED.
 */
protected synchronized void instantiateConnection() {
    if (this.driver == null) {
        // no Driver resolved yet — take the full initialization path instead
        initConnection();
        return;
    }
    String maskedPasswordDbUrl = null;
    try {
        // use Driver directly for connect instead of looping through all
        // drivers as DriverManager.getConnection() would do, to avoid
        // hitting any broken drivers in the process (vertica driver is
        // known to fail in acceptsURL with this set of properties)
        final Properties props = new Properties();
        // the user/password property names are standard ones also used by
        // DriverManager.getConnection(String, String, String) itself, so
        // will work for all drivers
        if (this.userName != null) {
            props.put("user", this.userName);
        }
        if (this.passwd != null) {
            props.put("password", this.passwd);
        }

        this.conn = this.driver.connect(this.dbUrl, props);
        // clear the credentials so the password becomes collectable as soon
        // as possible
        props.clear();
        try {
            // try to set the default isolation to at least READ_COMMITTED
            // need it for proper HA handling
            if (this.conn.getTransactionIsolation() < Connection.TRANSACTION_READ_COMMITTED && this.conn
                    .getMetaData().supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)) {
                this.conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
                if (this.dbUrl != null) {
                    maskedPasswordDbUrl = maskPassword(this.dbUrl);
                }
                logger.info("explicitly set the transaction isolation level to " + "READ_COMMITTED for URL: "
                        + maskedPasswordDbUrl);
            }
        } catch (SQLException sqle) {
            // best-effort: isolation tuning is optional, so failures here are
            // deliberately ignored
        }
        this.conn.setAutoCommit(false);
        this.shutDown = false;
    } catch (Exception e) {
        if (this.dbUrl != null) {
            maskedPasswordDbUrl = maskPassword(this.dbUrl);
        }
        // throttle retries for connection failures; re-interrupt if the
        // pause itself is interrupted
        try {
            Thread.sleep(200);
        } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
        }
        throw helper.newRuntimeException(
                String.format(DB_SYNCHRONIZER__6, this.driverClass, maskedPasswordDbUrl), e);
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler.java

/**
 * Queries the metastore DB directly to find columns in the table which have
 * statistics information. If {@code ci} includes partition info then per
 * partition stats info (PART_COL_STATS) is examined, otherwise table level
 * stats (TAB_COL_STATS) are examined.
 *
 * @param ci identifies the table (and optionally the partition) to inspect
 * @return names of the columns that have statistics entries
 * @throws MetaException if the metastore database cannot be queried
 */
public List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException {
    Connection dbConn = null;
    // PreparedStatement so the db/table/partition names are bound as
    // parameters instead of concatenated into the SQL text (avoids SQL
    // injection and breakage on names containing quotes).
    java.sql.PreparedStatement stmt = null;
    ResultSet rs = null;
    try {
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            String quote = getIdentifierQuoteString(dbConn);
            // Identifiers still need the DB-specific quote string; values are
            // bound via '?' placeholders below.
            StringBuilder bldr = new StringBuilder();
            bldr.append("SELECT ").append(quote).append("COLUMN_NAME").append(quote).append(" FROM ")
                    .append(quote).append((ci.partName == null ? "TAB_COL_STATS" : "PART_COL_STATS"))
                    .append(quote).append(" WHERE ").append(quote).append("DB_NAME").append(quote)
                    .append(" = ? AND ").append(quote).append("TABLE_NAME").append(quote)
                    .append(" = ?");
            if (ci.partName != null) {
                bldr.append(" AND ").append(quote).append("PARTITION_NAME").append(quote).append(" = ?");
            }
            String s = bldr.toString();
            LOG.debug("Going to execute <" + s + ">");
            stmt = dbConn.prepareStatement(s);
            stmt.setString(1, ci.dbname);
            stmt.setString(2, ci.tableName);
            if (ci.partName != null) {
                stmt.setString(3, ci.partName);
            }
            rs = stmt.executeQuery();
            List<String> columns = new ArrayList<String>();
            while (rs.next()) {
                columns.add(rs.getString(1));
            }
            LOG.debug("Found columns to update stats: " + columns + " on " + ci.tableName
                    + (ci.partName == null ? "" : "/" + ci.partName));
            dbConn.commit();
            return columns;
        } catch (SQLException e) {
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "findColumnsWithStats(" + ci.tableName
                    + (ci.partName == null ? "" : "/" + ci.partName) + ")");
            throw new MetaException(
                    "Unable to connect to transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(rs, stmt, dbConn);
        }
    } catch (RetryException ex) {
        // checkRetryable signalled a transient failure — retry from the top
        return findColumnsWithStats(ci);
    }
}

From source file:org.wso2.carbon.registry.core.jdbc.dao.JDBCResourceDAO.java

/**
 * Returns the number of children of the given collection. When a transaction
 * is already in progress the managed transaction connection is reused;
 * otherwise a fresh connection is obtained from the data access manager and
 * committed / rolled back / closed here.
 *
 * @param collection        the collection whose children are counted
 * @param dataAccessManager must be a {@code JDBCDataAccessManager}
 * @return the child count
 * @throws RegistryException if the data access manager is not JDBC-based or
 *                           the count query fails
 */
public int getChildCount(CollectionImpl collection, DataAccessManager dataAccessManager)
        throws RegistryException {
    int childCount = -1;
    if (Transaction.isStarted()) {
        childCount = getChildCount(collection, JDBCDatabaseTransaction.getConnection());
    } else {
        Connection conn = null;
        boolean transactionSucceeded = false;
        try {
            if (!(dataAccessManager instanceof JDBCDataAccessManager)) {
                String msg = "Failed to get child count. Invalid data access manager.";
                log.error(msg);
                throw new RegistryException(msg);
            }
            conn = ((JDBCDataAccessManager) dataAccessManager).getDataSource().getConnection();

            // If a managed connection already exists, use that instead of a new
            // connection.
            JDBCDatabaseTransaction.ManagedRegistryConnection temp = JDBCDatabaseTransaction
                    .getManagedRegistryConnection(conn);
            if (temp != null) {
                conn.close();
                conn = temp;
            }
            if (conn.getTransactionIsolation() != Connection.TRANSACTION_READ_COMMITTED) {
                conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            }
            conn.setAutoCommit(false);

            childCount = getChildCount(collection, conn);
            transactionSucceeded = true;

        } catch (SQLException e) {

            String msg = "Failed to get the child count of resource " + collection.getPath() + ". "
                    + e.getMessage();
            log.error(msg, e);
            throw new RegistryException(msg, e);

        } finally {
            // Pass the caught exception to the logger so cleanup failures keep
            // their stack traces (previously only the message was logged).
            if (transactionSucceeded) {
                try {
                    conn.commit();
                } catch (SQLException e) {
                    log.error("Failed to commit the database connection used in "
                            + "getting child count of the collection " + collection.getPath(), e);
                }
            } else if (conn != null) {
                try {
                    conn.rollback();
                } catch (SQLException e) {
                    log.error("Failed to rollback the database connection used in "
                            + "getting child count of the collection " + collection.getPath(), e);
                }
            }
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException e) {
                    log.error("Failed to close the database connection used in "
                            + "getting child count of collection " + collection.getPath(), e);
                }
            }
        }
    }
    return childCount;
}

From source file:org.wso2.carbon.repository.core.jdbc.dao.JDBCResourceDAO.java

/**
 * Returns the number of children of the given collection. When a transaction
 * is already in progress the managed transaction connection is reused;
 * otherwise a fresh connection is obtained from the data access manager and
 * committed / rolled back / closed here.
 *
 * @param collection        the collection whose children are counted
 * @param dataAccessManager must be a {@code JDBCDataAccessManager}
 * @return the child count
 * @throws RepositoryException if the data access manager is not JDBC-based or
 *                             the count query fails
 */
public int getChildCount(CollectionImpl collection, DataAccessManager dataAccessManager)
        throws RepositoryException {
    int childCount = -1;
    if (Transaction.isStarted()) {
        childCount = getChildCount(collection, JDBCDatabaseTransaction.getConnection());
    } else {
        Connection conn = null;
        boolean transactionSucceeded = false;
        try {
            if (!(dataAccessManager instanceof JDBCDataAccessManager)) {
                String msg = "Failed to get child count. Invalid data access manager.";
                log.error(msg);
                throw new RepositoryException(msg);
            }
            conn = ((JDBCDataAccessManager) dataAccessManager).getDataSource().getConnection();

            // If a managed connection already exists, use that instead of a new
            // connection.
            JDBCDatabaseTransaction.ManagedRegistryConnection temp = JDBCDatabaseTransaction
                    .getManagedRegistryConnection(conn);
            if (temp != null) {
                conn.close();
                conn = temp;
            }
            if (conn.getTransactionIsolation() != Connection.TRANSACTION_READ_COMMITTED) {
                conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            }
            conn.setAutoCommit(false);

            childCount = getChildCount(collection, conn);
            transactionSucceeded = true;
        } catch (SQLException e) {
            String msg = "Failed to get the child count of resource " + collection.getPath() + ". "
                    + e.getMessage();
            log.error(msg, e);
            throw new RepositoryException(msg, e);
        } finally {
            // Pass the caught exception to the logger so cleanup failures keep
            // their stack traces (previously only the message was logged).
            if (transactionSucceeded) {
                try {
                    conn.commit();
                } catch (SQLException e) {
                    log.error("Failed to commit the database connection used in "
                            + "getting child count of the collection " + collection.getPath(), e);
                }
            } else if (conn != null) {
                try {
                    conn.rollback();
                } catch (SQLException e) {
                    log.error("Failed to rollback the database connection used in "
                            + "getting child count of the collection " + collection.getPath(), e);
                }
            }
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException e) {
                    log.error("Failed to close the database connection used in "
                            + "getting child count of collection " + collection.getPath(), e);
                }
            }
        }
    }
    return childCount;
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * Retry-by-caller note:
 * Worst case, it will leave an open txn which will timeout.
 */
@Override
@RetrySemantics.Idempotent
public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException {
    // Lazily start the AcidOpenTxnsCounterService once, double-checking
    // openTxnsCounter under the class-level lock.
    if (openTxnsCounter == null) {
        synchronized (TxnHandler.class) {
            try {
                if (openTxnsCounter == null) {
                    startHouseKeeperService(conf,
                            Class.forName("org.apache.hadoop.hive.ql.txn.AcidOpenTxnsCounterService"));
                }
            } catch (ClassNotFoundException e) {
                throw new MetaException(e.getMessage());
            }
        }
    }

    // Once the cap is reached, keep rejecting until the open-txn count drops
    // below 90% of the cap — hysteresis to avoid flapping right at the limit.
    if (!tooManyOpenTxns && numOpenTxns >= maxOpenTxns) {
        tooManyOpenTxns = true;
    }
    if (tooManyOpenTxns) {
        if (numOpenTxns < maxOpenTxns * 0.9) {
            tooManyOpenTxns = false;
        } else {
            LOG.warn("Maximum allowed number of open transactions (" + maxOpenTxns + ") has been "
                    + "reached. Current number of open transactions: " + numOpenTxns);
            throw new MetaException("Maximum allowed number of open transactions has been reached. "
                    + "See hive.max.open.txns.");
        }
    }

    int numTxns = rqst.getNum_txns();
    try {
        Connection dbConn = null;
        Statement stmt = null;
        ResultSet rs = null;
        try {
            lockInternal();
            /**
             * To make {@link #getOpenTxns()}/{@link #getOpenTxnsInfo()} work correctly, this operation must ensure
             * that advancing the counter in NEXT_TXN_ID and adding appropriate entries to TXNS is atomic.
             * Also, advancing the counter must work when multiple metastores are running.
             * SELECT ... FOR UPDATE is used to prevent
             * concurrent DB transactions being rolled back due to Write-Write conflict on NEXT_TXN_ID.
             *
             * In the current design, there can be several metastore instances running in a given Warehouse.
             * This makes ideas like reserving a range of IDs to save trips to DB impossible.  For example,
             * a client may go to MS1 and start a transaction with ID 500 to update a particular row.
             * Now the same client will start another transaction, except it ends up on MS2 and may get
             * transaction ID 400 and update the same row.  Now the merge that happens to materialize the snapshot
             * on read will think the version of the row from transaction ID 500 is the latest one.
             *
             * Longer term we can consider running Active-Passive MS (at least wrt to ACID operations).  This
             * set could support a write-through cache for added performance.
             */
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            // Make sure the user has not requested an insane amount of txns.
            int maxTxns = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH);
            if (numTxns > maxTxns)
                numTxns = maxTxns;

            stmt = dbConn.createStatement();
            // Reserve ids [first, first + numTxns) by bumping NEXT_TXN_ID under
            // SELECT ... FOR UPDATE.
            String s = sqlGenerator.addForUpdateClause("select ntxn_next from NEXT_TXN_ID");
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            if (!rs.next()) {
                throw new MetaException(
                        "Transaction database not properly " + "configured, can't find next transaction id.");
            }
            long first = rs.getLong(1);
            s = "update NEXT_TXN_ID set ntxn_next = " + (first + numTxns);
            LOG.debug("Going to execute update <" + s + ">");
            stmt.executeUpdate(s);

            long now = getDbTime(dbConn);
            List<Long> txnIds = new ArrayList<Long>(numTxns);

            // One TXNS row per reserved id, created in the OPEN state.
            List<String> rows = new ArrayList<>();
            for (long i = first; i < first + numTxns; i++) {
                txnIds.add(i);
                rows.add(i + "," + quoteChar(TXN_OPEN) + "," + now + "," + now + ","
                        + quoteString(rqst.getUser()) + "," + quoteString(rqst.getHostname()));
            }
            List<String> queries = sqlGenerator.createInsertValuesStmt(
                    "TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, txn_user, txn_host)", rows);
            for (String q : queries) {
                LOG.debug("Going to execute update <" + q + ">");
                stmt.execute(q);
            }
            LOG.debug("Going to commit");
            dbConn.commit();
            return new OpenTxnsResponse(txnIds);
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "openTxns(" + rqst + ")");
            throw new MetaException(
                    "Unable to select from transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(rs, stmt, dbConn);
            unlockInternal();
        }
    } catch (RetryException e) {
        // checkRetryable decided the failure is transient — retry from the top
        return openTxns(rqst);
    }
}

From source file:com.evolveum.midpoint.repo.sql.SqlRepositoryServiceImpl.java

/**
 * Produces a human-readable description of the transaction isolation level in
 * effect. The value reported by the live {@code connection} wins; when it
 * cannot be read, the level from the repository {@code config} (if set) is
 * used instead, or {@code null} when neither is available.
 *
 * @param connection live JDBC connection to interrogate
 * @param config     repository configuration used as the fallback source
 * @return a description of the isolation level, or {@code null}
 */
private String getTransactionIsolation(Connection connection, SqlRepositoryConfiguration config) {
    String value = null;
    if (config.getTransactionIsolation() != null) {
        value = config.getTransactionIsolation().name() + "(read from repo configuration)";
    }

    try {
        final int level = connection.getTransactionIsolation();
        if (level == Connection.TRANSACTION_NONE) {
            value = "TRANSACTION_NONE (read from connection)";
        } else if (level == Connection.TRANSACTION_READ_COMMITTED) {
            value = "TRANSACTION_READ_COMMITTED (read from connection)";
        } else if (level == Connection.TRANSACTION_READ_UNCOMMITTED) {
            value = "TRANSACTION_READ_UNCOMMITTED (read from connection)";
        } else if (level == Connection.TRANSACTION_REPEATABLE_READ) {
            value = "TRANSACTION_REPEATABLE_READ (read from connection)";
        } else if (level == Connection.TRANSACTION_SERIALIZABLE) {
            value = "TRANSACTION_SERIALIZABLE (read from connection)";
        } else {
            value = "Unknown value in connection.";
        }
    } catch (Exception ex) {
        // nowhere to report the failure to (no operation result available);
        // keep the configuration-derived value (or null) computed above
    }

    return value;
}

From source file:org.executequery.gui.browser.ConnectionPanel.java

/**
 * Maps a selection index from the isolation-level combo box to the
 * corresponding {@code java.sql.Connection} transaction isolation constant.
 *
 * @param index the selected index; values 1-5 are mapped
 * @return the matching {@code Connection.TRANSACTION_*} constant, or -1 when
 *         the index does not correspond to an isolation level
 */
private int isolationLevelFromSelection(int index) {
    final int[] levels = {
            Connection.TRANSACTION_NONE,
            Connection.TRANSACTION_READ_UNCOMMITTED,
            Connection.TRANSACTION_READ_COMMITTED,
            Connection.TRANSACTION_REPEATABLE_READ,
            Connection.TRANSACTION_SERIALIZABLE
    };
    return (index >= 1 && index <= levels.length) ? levels[index - 1] : -1;
}

From source file:lasige.steeldb.jdbc.BFTRowSet.java

/**
 * Sets the properties for this <code>CachedRowSetImpl</code> object to
 * their default values. This method is called internally by the
 * default constructor./* w  ww  . j a  v  a  2  s  . co  m*/
 */

private void initProperties() throws SQLException {

    if (resBundle == null) {
        try {
            resBundle = JdbcRowSetResourceBundle.getJdbcRowSetResourceBundle();
        } catch (IOException ioe) {
            throw new RuntimeException(ioe);
        }
    }
    setShowDeleted(false);
    setQueryTimeout(0);
    setMaxRows(0);
    setMaxFieldSize(0);
    setType(ResultSet.TYPE_SCROLL_INSENSITIVE);
    setConcurrency(ResultSet.CONCUR_UPDATABLE);
    if (rvh.size() > 0 && isReadOnly() == false)
        setReadOnly(false);
    else
        setReadOnly(true);
    setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    setEscapeProcessing(true);
    setTypeMap(null);
    checkTransactionalWriter();

    //Instantiating the vector for MatchColumns

    iMatchColumns = new Vector<Integer>(10);
    for (int i = 0; i < 10; i++) {
        iMatchColumns.add(i, new Integer(-1));
    }

    strMatchColumns = new Vector<String>(10);
    for (int j = 0; j < 10; j++) {
        strMatchColumns.add(j, null);
    }
}