Example usage for java.sql Connection TRANSACTION_READ_COMMITTED

List of usage examples for java.sql Connection TRANSACTION_READ_COMMITTED

Introduction

On this page you can find example usage of java.sql Connection TRANSACTION_READ_COMMITTED.

Prototype

int TRANSACTION_READ_COMMITTED

Document

A constant indicating that dirty reads are prevented; non-repeatable reads and phantom reads can occur.
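
This constant is typically passed to Connection.setTransactionIsolation before the statements of a transaction are executed, which is the pattern all of the examples below follow. The snippet here is a minimal sketch of that pattern, not taken from any of the source files on this page; the connection is assumed to be supplied by the caller, and the queried table name is a placeholder.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class ReadCommittedExample {

    // Counts rows inside a READ COMMITTED transaction on the supplied connection.
    // "some_table" is a placeholder; substitute a table that exists in your schema.
    public static int countRows(Connection con) throws SQLException {
        con.setAutoCommit(false);
        // Dirty reads are prevented; non-repeatable and phantom reads can still occur.
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        try (Statement stmt = con.createStatement();
                ResultSet rs = stmt.executeQuery("select count(*) from some_table")) {
            int count = rs.next() ? rs.getInt(1) : 0;
            con.commit();
            return count;
        } catch (SQLException e) {
            con.rollback();
            throw e;
        }
    }
}

Not every driver supports every isolation level; DatabaseMetaData.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED) can be checked before calling setTransactionIsolation.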

Usage

From source file:org.apache.hadoop.hive.metastore.MyXid.java

@Override
public Partition getPartition(String dbName, String tableName, int level) throws MetaException {
    boolean success = false;

    Connection con = null;
    Statement ps = null;
    Partition part = null;

    dbName = dbName.toLowerCase();
    tableName = tableName.toLowerCase();

    Map<String, List<String>> partNameMap = new LinkedHashMap<String, List<String>>();

    try {
        con = getSegmentConnection(dbName);
    } catch (MetaStoreConnectException e1) {
        LOG.error("get partition error, db=" + dbName + ", tbl=" + tableName + ", level=" + level + ", msg="
                + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("get partition error, db=" + dbName + ", tbl=" + tableName + ", level=" + level + ", msg="
                + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setAutoCommit(false);
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();

        long tblID = 0;
        boolean isTblFind = false;
        String priPartType = null;
        String subPartType = null;
        boolean hasPriPart = false;
        boolean hasSubPart = false;
        String priPartKey = null;
        String subPartKey = null;
        String partKey = null;

        String sql = "SELECT tbl_id,  pri_part_type, pri_part_key, sub_part_type, sub_part_key from TBLS where db_name='"
                + dbName + "' and tbl_name='" + tableName + "'";

        ResultSet tblSet = ps.executeQuery(sql);
        while (tblSet.next()) {
            isTblFind = true;
            tblID = tblSet.getLong(1);
            priPartType = tblSet.getString(2);
            priPartKey = tblSet.getString(3);
            subPartType = tblSet.getString(4);
            subPartKey = tblSet.getString(5);

            if (priPartType != null && !priPartType.isEmpty()) {
                hasPriPart = true;
            }
            if (subPartType != null && !subPartType.isEmpty()) {
                hasSubPart = true;
            }

            if (hasPriPart && level == 0) {
                part = new Partition();
                part.setParType(priPartType);
                partKey = priPartKey;
                break;
            }

            if (hasSubPart && level == 1) {
                part = new Partition();
                part.setParType(subPartType);
                partKey = subPartKey;
                break;
            }

            con.commit();
            return null;
        }

        tblSet.close();

        if (!isTblFind) {
            throw new MetaException("can not find table " + dbName + ":" + tableName);
        }

        FieldSchema field = null;
        sql = "select type_name, comment from columns where tbl_id=" + tblID + " and column_name='" + partKey
                + "'";
        ResultSet colSet = ps.executeQuery(sql);
        while (colSet.next()) {
            field = new FieldSchema();
            field.setType(colSet.getString(1));
            field.setComment(colSet.getString(2));
            field.setName(partKey);

            break;
        }

        colSet.close();

        sql = "select part_name, part_values from partitions where tbl_id=" + tblID + " and level=" + level;
        ResultSet partSet = ps.executeQuery(sql);

        while (partSet.next()) {
            String partName = partSet.getString(1);
            List<String> valueList = new ArrayList<String>();
            Array spaceArray = partSet.getArray(2);

            ResultSet priValueSet = spaceArray.getResultSet();

            if (priValueSet != null) {
                while (priValueSet.next()) {
                    valueList.add(priValueSet.getString(2));
                }
            }

            partNameMap.put(partName, valueList);
        }
        partSet.close();

        part.setParSpaces(partNameMap);
        part.setDbName(dbName);
        part.setTableName(tableName);
        part.setLevel(level);
        part.setParKey(field);

        con.commit();
        success = true;
    } catch (SQLException sqlex) {
        sqlex.printStackTrace();
        LOG.error("get partition error, db=" + dbName + ", tbl=" + tableName + ", level=" + level + ", msg="
                + sqlex.getMessage());
        throw new MetaException(sqlex.getMessage());
    } finally {
        if (!success) {
            try {
                con.rollback();
            } catch (SQLException e) {
            }
        }

        closeStatement(ps);
        closeConnection(con);
    }

    if (success)
        return part;
    else
        return null;
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * Isolation Level Notes
 * Plain: RC is OK
 * This will find transactions that have timed out and abort them.
 * Will also delete locks which are not associated with a transaction and have timed out
 * Tries to keep transactions (against metastore db) small to reduce lock contention.
 */
@RetrySemantics.Idempotent
public void performTimeOuts() {
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
        //We currently commit after selecting the TXNS to abort.  So whether SERIALIZABLE or
        //READ_COMMITTED, the effect is the same.  We could use FOR UPDATE on Select from TXNS
        //and do the whole performTimeOuts() in a single huge transaction, but the only benefit
        //would be to make sure someone cannot heartbeat one of these txns at the same time.
        //The attempt to heartbeat would block and fail immediately after it's unblocked.
        //With current (RC + multiple txns) implementation it is possible for someone to send
        //heartbeat at the very end of the expire interval, and just after the Select from TXNS
        //is made, in which case heartbeat will succeed but txn will still be Aborted.
        //Solving this corner case is not worth the perf penalty.  The client should heartbeat in a
        //timely way.
        long now = getDbTime(dbConn);
        timeOutLocks(dbConn, now);
        while (true) {
            stmt = dbConn.createStatement();
            String s = " txn_id from TXNS where txn_state = '" + TXN_OPEN + "' and txn_last_heartbeat <  "
                    + (now - timeout);
            //safety valve for extreme cases
            s = sqlGenerator.addLimitClause(10 * TIMED_OUT_TXN_ABORT_BATCH_SIZE, s);
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            if (!rs.next()) {
                return;//no more timedout txns
            }
            List<List<Long>> timedOutTxns = new ArrayList<>();
            List<Long> currentBatch = new ArrayList<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE);
            timedOutTxns.add(currentBatch);
            do {
                if (currentBatch.size() == TIMED_OUT_TXN_ABORT_BATCH_SIZE) {
                    currentBatch = new ArrayList<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE);
                    timedOutTxns.add(currentBatch);
                }
                currentBatch.add(rs.getLong(1));
            } while (rs.next());
            dbConn.commit();
            close(rs, stmt, null);
            int numTxnsAborted = 0;
            for (List<Long> batchToAbort : timedOutTxns) {
                if (abortTxns(dbConn, batchToAbort, now - timeout, true) == batchToAbort.size()) {
                    dbConn.commit();
                    numTxnsAborted += batchToAbort.size();
                    //todo: add TXNS.COMMENT field and set it to 'aborted by system due to timeout'
                    Collections.sort(batchToAbort);//easier to read logs
                    LOG.info("Aborted the following transactions due to timeout: " + batchToAbort.toString());
                } else {
                    //could not abort all txns in this batch - this may happen because in parallel with this
                    //operation there was activity on one of the txns in this batch (commit/abort/heartbeat)
                    //This is not likely but may happen if client experiences long pause between heartbeats or
                    //unusually long/extreme pauses between heartbeat() calls and other logic in checkLock(),
                    //lock(), etc.
                    dbConn.rollback();
                }
            }
            LOG.info("Aborted " + numTxnsAborted + " transactions due to timeout");
        }
    } catch (SQLException ex) {
        LOG.warn("Aborting timedout transactions failed due to " + getMessage(ex), ex);
    } catch (MetaException e) {
        LOG.warn("Aborting timedout transactions failed due to " + e.getMessage(), e);
    } finally {
        close(rs, stmt, dbConn);
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

@Override
@RetrySemantics.ReadOnly
public void countOpenTxns() throws MetaException {
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            String s = "select count(*) from TXNS where txn_state = '" + TXN_OPEN + "'";
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            if (!rs.next()) {
                LOG.error("Transaction database not properly configured, " + "can't find txn_state from TXNS.");
            } else {
                numOpenTxns = rs.getLong(1);
            }
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            LOG.info("Failed to update number of open transactions");
            checkRetryable(dbConn, e, "countOpenTxns()");
        } finally {
            close(rs, stmt, dbConn);
        }
    } catch (RetryException e) {
        countOpenTxns();
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

@Override
public LockHandle acquireLock(String key) throws MetaException {
    /**
     * The implementation here is a bit kludgey but done so that code exercised by unit tests
     * (which run against Derby which has no support for select for update) is as similar to
     * production code as possible.
     * In particular, with Derby we always run in a single process with a single metastore and
     * the absence of For Update is handled via a Semaphore.  The latter would strictly speaking
     * make the SQL statements below unnecessary (for Derby), but then they would not be tested.
     */
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
        try {
            String sqlStmt = sqlGenerator.addForUpdateClause(
                    "select MT_COMMENT from AUX_TABLE where MT_KEY1=" + quoteString(key) + " and MT_KEY2=0");
            lockInternal();
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED, connPoolMutex);
            stmt = dbConn.createStatement();
            if (LOG.isDebugEnabled()) {
                LOG.debug("About to execute SQL: " + sqlStmt);
            }
            rs = stmt.executeQuery(sqlStmt);
            if (!rs.next()) {
                close(rs);
                try {
                    stmt.executeUpdate(
                            "insert into AUX_TABLE(MT_KEY1,MT_KEY2) values(" + quoteString(key) + ", 0)");
                    dbConn.commit();
                } catch (SQLException ex) {
                    if (!isDuplicateKeyError(ex)) {
                        throw new RuntimeException(
                                "Unable to lock " + quoteString(key) + " due to: " + getMessage(ex), ex);
                    }
                    //if here, it means a concurrent acquireLock() inserted the 'key'

                    //rollback is done for the benefit of Postgres which throws (SQLState=25P02, ErrorCode=0) if
                    //you attempt any stmt in a txn which had an error.
                    dbConn.rollback();
                }
                rs = stmt.executeQuery(sqlStmt);
                if (!rs.next()) {
                    throw new IllegalStateException(
                            "Unable to lock " + quoteString(key) + ".  Expected row in AUX_TABLE is missing.");
                }
            }
            Semaphore derbySemaphore = null;
            if (dbProduct == DatabaseProduct.DERBY) {
                derbyKey2Lock.putIfAbsent(key, new Semaphore(1));
                derbySemaphore = derbyKey2Lock.get(key);
                derbySemaphore.acquire();
            }
            LOG.debug(quoteString(key) + " locked by " + quoteString(TxnHandler.hostname));
            //OK, so now we have a lock
            return new LockHandleImpl(dbConn, stmt, rs, key, derbySemaphore);
        } catch (SQLException ex) {
            rollbackDBConn(dbConn);
            close(rs, stmt, dbConn);
            checkRetryable(dbConn, ex, "acquireLock(" + key + ")");
            throw new MetaException("Unable to lock " + quoteString(key) + " due to: " + getMessage(ex) + "; "
                    + StringUtils.stringifyException(ex));
        } catch (InterruptedException ex) {
            rollbackDBConn(dbConn);
            close(rs, stmt, dbConn);
            throw new MetaException("Unable to lock " + quoteString(key) + " due to: " + ex.getMessage()
                    + StringUtils.stringifyException(ex));
        } finally {
            unlockInternal();
        }
    } catch (RetryException ex) {
        return acquireLock(key);
    }
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

@Override
public List<String> getTables(String dbName, String pattern) throws MetaException {

    Connection con;
    Statement ps = null;
    List<String> tableList = new ArrayList<String>();

    dbName = dbName.toLowerCase();
    if (pattern != null) {
        pattern = pattern.toLowerCase();
    }

    try {
        con = getSegmentConnection(dbName);
    } catch (MetaStoreConnectException e1) {
        LOG.error("get table error, db=" + dbName + ", pattern=" + pattern + ", msg=" + e1.getMessage());

        return tableList;
    } catch (SQLException e1) {
        LOG.error("get table error, db=" + dbName + ", pattern=" + pattern + ", msg=" + e1.getMessage());

        return tableList;
    }

    try {
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();
        String sql = null;

        if (pattern == null || pattern.isEmpty() || pattern.equals(".*") || pattern.equals("*")) {
            sql = "select tbl_name from tbls where db_name='" + dbName.toLowerCase() + "'";
        } else {
            pattern = pattern.replace('*', '%');

            sql = "select tbl_name from tbls where db_name='" + dbName.toLowerCase() + "'"
                    + " and tbl_name like '" + pattern + "'";
        }

        ResultSet tblSet = ps.executeQuery(sql);

        while (tblSet.next()) {
            String item = tblSet.getString(1);
            tableList.add(item);
        }
    } catch (SQLException sqlex) {
        LOG.error("get table error, db=" + dbName + ", pattern=" + pattern + ", msg=" + sqlex.getMessage());
        sqlex.printStackTrace();
        throw new MetaException(sqlex.getMessage());
    } finally {
        closeStatement(ps);
        closeConnection(con);
    }

    return tableList;
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

@Override
public MGroup findGroup(String gname) throws MetaException {
    Connection con = null;
    Statement ps = null;
    boolean success = false;
    MGroup group = null;

    gname = gname.toLowerCase();

    try {
        con = getGlobalConnection();
    } catch (MetaStoreConnectException e1) {
        LOG.error("create user error, user=" + user + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("create user error, user=" + user + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setAutoCommit(false);
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();
        String sql = "select creator from usergroup where group_name='" + gname + "'";

        ResultSet groupSet = ps.executeQuery(sql);
        while (groupSet.next()) {
            group = new MGroup();
            group.setCreator(groupSet.getString(1));
            group.setGroupName(gname);
        }

        groupSet.close();

        if (group != null) {
            sql = "select string_agg(tdwuser.user_name, ',') namelist from tdwuser where group_name='" + gname
                    + "'";
            ResultSet userSet = ps.executeQuery(sql);
            while (userSet.next()) {
                group.setUSER_LIST(userSet.getString(1));
            }
            userSet.close();
        }
        con.commit();
        success = true;
    } catch (SQLException sqlex) {
        LOG.error("create user error, user=" + user + ", msg=" + sqlex.getMessage());
        sqlex.printStackTrace();
        throw new MetaException(sqlex.getMessage());
    } finally {
        if (!success) {
            try {
                con.rollback();
            } catch (SQLException e) {
            }
        }

        closeStatement(ps);
        closeConnection(con);
    }

    return group;
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

@Override
public User getUser(String userName) throws NoSuchObjectException, MetaException {
    Connection con = null;
    Statement ps = null;

    User user = null;

    userName = userName.toLowerCase();

    try {
        con = getGlobalConnection();
    } catch (MetaStoreConnectException e1) {
        LOG.error("get user error, user=" + user + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("get user error, user=" + user + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();

        String sql = "select alter_priv, create_priv, createview_priv, dba_priv, "
                + "delete_priv, drop_priv, index_priv, insert_priv, select_priv, showview_priv, "
                + "update_priv, user_name, group_name from tdwuser where tdwuser.user_name='" + userName + "'";

        ResultSet userSet = ps.executeQuery(sql);
        boolean isUserFind = false;

        while (userSet.next()) {
            isUserFind = true;
            user = new User();
            user.setAlterPriv(userSet.getBoolean(1));
            user.setCreatePriv(userSet.getBoolean(2));
            user.setCreateviewPriv(userSet.getBoolean(3));
            user.setDbaPriv(userSet.getBoolean(4));
            user.setDeletePriv(userSet.getBoolean(5));
            user.setDropPriv(userSet.getBoolean(6));
            user.setIndexPriv(userSet.getBoolean(7));
            user.setInsertPriv(userSet.getBoolean(8));
            user.setSelectPriv(userSet.getBoolean(9));
            user.setShowviewPriv(userSet.getBoolean(10));
            user.setUpdatePriv(userSet.getBoolean(11));
            user.setUserName(userSet.getString(12));
            user.setGroupName(userSet.getString(13));
        }

        userSet.close();

        if (!isUserFind) {
            throw new NoSuchObjectException("can not find user:" + userName);
        }

        sql = "select role_name from tdwuserrole where user_name='" + userName + "'";
        ResultSet roleSet = ps.executeQuery(sql);

        List<String> roleList = new ArrayList<String>();
        while (roleSet.next()) {
            roleList.add(roleSet.getString(1));
        }

        roleSet.close();

        user.setPlayRoles(roleList);
    } catch (SQLException sqlex) {
        LOG.error("get user error, user=" + user + ", msg=" + sqlex.getMessage());
        sqlex.printStackTrace();
        throw new MetaException(sqlex.getMessage());
    } finally {
        closeStatement(ps);
        closeConnection(con);
    }

    return user;
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

@Override
public List<String> getUsersAll() throws MetaException {
    Connection con = null;
    Statement ps = null;
    boolean success = false;

    List<String> users = new ArrayList<String>();

    try {
        con = getGlobalConnection();
    } catch (MetaStoreConnectException e1) {
        LOG.error("get user all error" + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("get user all error" + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();

        String sql = "select user_name, group_name from tdwuser";
        ResultSet userSet = ps.executeQuery(sql);

        while (userSet.next()) {
            users.add(userSet.getString(1) + "   " + userSet.getString(2));
        }
        userSet.close();
        success = true;
    } catch (SQLException sqlex) {
        LOG.error("get user all error" + ", msg=" + sqlex.getMessage());
        sqlex.printStackTrace();
        throw new MetaException(sqlex.getMessage());
    } finally {
        closeStatement(ps);
        closeConnection(con);
    }

    if (success)
        return users;
    else
        return null;
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

public boolean isAUser(String userName) throws MetaException {
    Connection con = null;
    Statement ps = null;
    boolean success = false;
    userName = userName.toLowerCase();

    try {
        con = getGlobalConnection();
    } catch (MetaStoreConnectException e1) {
        LOG.error("check user exist error, user=" + userName + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("check user exist error, user=" + userName + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();

        String sql = "select user_name from tdwuser where user_name='" + userName + "'";

        boolean isUserFind = false;

        ResultSet userSet = ps.executeQuery(sql);

        while (userSet.next()) {
            isUserFind = true;
            break;
        }
        userSet.close();

        if (!isUserFind) {
            throw new NoSuchObjectException("can not find user:" + userName);
        }

        success = true;
    } catch (Exception ex) {
        LOG.error("check user exist error, user=" + userName + ", msg=" + ex.getMessage());
        ex.printStackTrace();
    } finally {
        closeStatement(ps);
        closeConnection(con);
    }

    return success;
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

@Override
public boolean isAUser(String userName, String passwd) throws MetaException {
    Connection con = null;
    Statement ps = null;
    boolean success = false;

    userName = userName.toLowerCase();
    passwd = passwd.toLowerCase();

    try {
        con = getGlobalConnection();
    } catch (MetaStoreConnectException e1) {
        LOG.error("audit error, user=" + userName + ", passwd=" + passwd + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("audit error, user=" + userName + ", passwd=" + passwd + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();

        String sql = "select user_name, passwd from tdwuser where user_name='" + userName + "'";

        String actualPass = null;
        boolean isUserFind = false;

        ResultSet userSet = ps.executeQuery(sql);

        while (userSet.next()) {
            isUserFind = true;
            actualPass = userSet.getString(2);
            break;
        }

        userSet.close();

        if (!isUserFind) {
            throw new NoSuchObjectException("can not find user:" + userName);
        }

        if (actualPass == null || !actualPass.equals(passwd)) {
            throw new MetaException("audit failed, password error!");
        }

        success = true;
    } catch (Exception ex) {
        LOG.error("audit error, user=" + userName + ", passwd=" + passwd + ", msg=" + ex.getMessage());
        ex.printStackTrace();
    } finally {
        closeStatement(ps);
        closeConnection(con);
    }

    return success;
}