Example usage for java.sql Connection TRANSACTION_READ_COMMITTED

List of usage examples for java.sql Connection TRANSACTION_READ_COMMITTED

Introduction

On this page you can find usage examples for java.sql Connection TRANSACTION_READ_COMMITTED, drawn from open-source projects.

Prototype

int TRANSACTION_READ_COMMITTED = 2

Document

A constant indicating that dirty reads are prevented; non-repeatable reads and phantom reads can occur.
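
For orientation, here is a minimal, self-contained sketch of how the constant is typically applied to a plain JDBC connection; the driver URL, user, and password are placeholders (assumptions), not taken from the examples below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class ReadCommittedDemo {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL and credentials; substitute your own data source.
        try (Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/demo", "user", "secret")) {
            // Not every driver supports every level, so check first.
            if (conn.getMetaData()
                    .supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)) {
                conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            }
            // The constant's value is 2, per the java.sql.Connection interface.
            System.out.println(
                    conn.getTransactionIsolation() == Connection.TRANSACTION_READ_COMMITTED);
        }
    }
}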

Usage

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

@RetrySemantics.ReadOnly
public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException {
    try {
        Connection dbConn = null;
        ShowLocksResponse rsp = new ShowLocksResponse();
        List<ShowLocksResponseElement> elems = new ArrayList<ShowLocksResponseElement>();
        List<LockInfoExt> sortedList = new ArrayList<LockInfoExt>();
        Statement stmt = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();

            String s = "select hl_lock_ext_id, hl_txnid, hl_db, hl_table, hl_partition, hl_lock_state, "
                    + "hl_lock_type, hl_last_heartbeat, hl_acquired_at, hl_user, hl_host, hl_lock_int_id,"
                    + "hl_blockedby_ext_id, hl_blockedby_int_id, hl_agent_info from HIVE_LOCKS";

            // Some filters may have been specified in the SHOW LOCKS statement. Add them to the query.
            String dbName = rqst.getDbname();
            String tableName = rqst.getTablename();
            String partName = rqst.getPartname();

            StringBuilder filter = new StringBuilder();
            if (dbName != null && !dbName.isEmpty()) {
                filter.append("hl_db=").append(quoteString(dbName));
            }
            if (tableName != null && !tableName.isEmpty()) {
                if (filter.length() > 0) {
                    filter.append(" and ");
                }
                filter.append("hl_table=").append(quoteString(tableName));
            }
            if (partName != null && !partName.isEmpty()) {
                if (filter.length() > 0) {
                    filter.append(" and ");
                }
                filter.append("hl_partition=").append(quoteString(partName));
            }
            String whereClause = filter.toString();

            if (!whereClause.isEmpty()) {
                s = s + " where " + whereClause;
            }

            LOG.debug("Doing to execute query <" + s + ">");
            ResultSet rs = stmt.executeQuery(s);
            while (rs.next()) {
                ShowLocksResponseElement e = new ShowLocksResponseElement();
                e.setLockid(rs.getLong(1));
                long txnid = rs.getLong(2);
                if (!rs.wasNull())
                    e.setTxnid(txnid);
                e.setDbname(rs.getString(3));
                e.setTablename(rs.getString(4));
                String partition = rs.getString(5);
                if (partition != null)
                    e.setPartname(partition);
                switch (rs.getString(6).charAt(0)) {
                case LOCK_ACQUIRED:
                    e.setState(LockState.ACQUIRED);
                    break;
                case LOCK_WAITING:
                    e.setState(LockState.WAITING);
                    break;
                default:
                    throw new MetaException("Unknown lock state " + rs.getString(6).charAt(0));
                }
                switch (rs.getString(7).charAt(0)) {
                case LOCK_SEMI_SHARED:
                    e.setType(LockType.SHARED_WRITE);
                    break;
                case LOCK_EXCLUSIVE:
                    e.setType(LockType.EXCLUSIVE);
                    break;
                case LOCK_SHARED:
                    e.setType(LockType.SHARED_READ);
                    break;
                default:
                    throw new MetaException("Unknown lock type " + rs.getString(6).charAt(0));
                }
                e.setLastheartbeat(rs.getLong(8));
                long acquiredAt = rs.getLong(9);
                if (!rs.wasNull())
                    e.setAcquiredat(acquiredAt);
                e.setUser(rs.getString(10));
                e.setHostname(rs.getString(11));
                e.setLockIdInternal(rs.getLong(12));
                long id = rs.getLong(13);
                if (!rs.wasNull()) {
                    e.setBlockedByExtId(id);
                }
                id = rs.getLong(14);
                if (!rs.wasNull()) {
                    e.setBlockedByIntId(id);
                }
                e.setAgentInfo(rs.getString(15));
                sortedList.add(new LockInfoExt(e));
            }
            LOG.debug("Going to rollback");
            dbConn.rollback();
        } catch (SQLException e) {
            checkRetryable(dbConn, e, "showLocks(" + rqst + ")");
            throw new MetaException(
                    "Unable to select from transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeStmt(stmt);
            closeDbConn(dbConn);
        }
        //this ensures that "SHOW LOCKS" prints the locks in the same order as they are examined
        //by checkLock() - makes diagnostics easier.
        Collections.sort(sortedList, new LockInfoComparator());
        for (LockInfoExt lockInfoExt : sortedList) {
            elems.add(lockInfoExt.e);
        }
        rsp.setLocks(elems);
        return rsp;
    } catch (RetryException e) {
        return showLocks(rqst);
    }
}

From source file:org.kawanfw.sql.jdbc.ConnectionHttp.java

/**
 * Attempts to change the transaction isolation level for this
 * <code>Connection</code> object to the one given. The constants defined in
 * the interface <code>Connection</code> are the possible transaction
 * isolation levels.
 * <P>
 * <B>Note:</B> If this method is called during a transaction, the result is
 * implementation-defined.
 * 
 * @param level
 *            one of the following <code>Connection</code> constants:
 *            <code>Connection.TRANSACTION_READ_UNCOMMITTED</code>,
 *            <code>Connection.TRANSACTION_READ_COMMITTED</code>,
 *            <code>Connection.TRANSACTION_REPEATABLE_READ</code>, or
 *            <code>Connection.TRANSACTION_SERIALIZABLE</code>. (Note that
 *            <code>Connection.TRANSACTION_NONE</code> cannot be used
 *            because it specifies that transactions are not supported.)
 * @exception SQLException
 *                if a database access error occurs, this method is called
 *                on a closed connection or the given parameter is not one
 *                of the <code>Connection</code> constants
 * @see DatabaseMetaData#supportsTransactionIsolationLevel
 * @see #getTransactionIsolation
 */
@Override
public void setTransactionIsolation(int level) throws SQLException {
    testIfClosed();

    if (level != Connection.TRANSACTION_READ_UNCOMMITTED && level != Connection.TRANSACTION_READ_COMMITTED
            && level != Connection.TRANSACTION_REPEATABLE_READ
            && level != Connection.TRANSACTION_SERIALIZABLE) {
        throw new SQLException("Illegal transaction isolation level: " + level);
    }

    if (!statelessMode) {
        JdbcHttpTransactionTransfer jdbcHttpTransactionTransfer = new JdbcHttpTransactionTransfer(this,
                authenticationToken);
        jdbcHttpTransactionTransfer.setTransactionIsolation(level);
    }

    this.transactionIsolation = level;
}
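
For illustration, a caller of this wrapper would typically select the isolation level right after opening the connection; this sketch assumes an already-opened ConnectionHttp instance named conn.

// Assumption: conn is an open ConnectionHttp obtained elsewhere.
conn.setAutoCommit(false);
conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
// ... execute statements; dirty reads are now prevented ...
conn.commit();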

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * {@code ids} should only have txnid or lockid but not both, ideally.
 * Currently DBTxnManager.heartbeat() enforces this.
 */
@Override
@RetrySemantics.SafeToRetry
public void heartbeat(HeartbeatRequest ids)
        throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException {
    try {
        Connection dbConn = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            heartbeatLock(dbConn, ids.getLockid());
            heartbeatTxn(dbConn, ids.getTxnid());
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "heartbeat(" + ids + ")");
            throw new MetaException(
                    "Unable to select from transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
        }
    } catch (RetryException e) {
        heartbeat(ids);
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

@Override
@RetrySemantics.SafeToRetry
public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        HeartbeatTxnRangeResponse rsp = new HeartbeatTxnRangeResponse();
        Set<Long> nosuch = new HashSet<Long>();
        Set<Long> aborted = new HashSet<Long>();
        rsp.setNosuch(nosuch);
        rsp.setAborted(aborted);
        try {
            /**
             * READ_COMMITTED is sufficient since {@link #heartbeatTxn(java.sql.Connection, long)}
             * only has 1 update statement in it and
             * we only update existing txns, i.e. nothing can add additional txns that this operation
             * would care about (which would have required SERIALIZABLE)
             */
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            /*do fast path first (in 1 statement) if doesn't work, rollback and do the long version*/
            stmt = dbConn.createStatement();
            List<String> queries = new ArrayList<>();
            int numTxnsToHeartbeat = (int) (rqst.getMax() - rqst.getMin() + 1);
            List<Long> txnIds = new ArrayList<>(numTxnsToHeartbeat);
            for (long txn = rqst.getMin(); txn <= rqst.getMax(); txn++) {
                txnIds.add(txn);
            }
            TxnUtils.buildQueryWithINClause(conf, queries,
                    new StringBuilder("update TXNS set txn_last_heartbeat = " + getDbTime(dbConn)
                            + " where txn_state = " + quoteChar(TXN_OPEN) + " and "),
                    new StringBuilder(""), txnIds, "txn_id", true, false);
            int updateCnt = 0;
            for (String query : queries) {
                LOG.debug("Going to execute update <" + query + ">");
                updateCnt += stmt.executeUpdate(query);
            }
            if (updateCnt == numTxnsToHeartbeat) {
                //fast pass worked, i.e. all txns we were asked to heartbeat were Open as expected
                dbConn.commit();
                return rsp;
            }
            //if here, do the slow path so that we can return info txns which were not in expected state
            dbConn.rollback();
            for (long txn = rqst.getMin(); txn <= rqst.getMax(); txn++) {
                try {
                    heartbeatTxn(dbConn, txn);
                } catch (NoSuchTxnException e) {
                    nosuch.add(txn);
                } catch (TxnAbortedException e) {
                    aborted.add(txn);
                }
            }
            return rsp;
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "heartbeatTxnRange(" + rqst + ")");
            throw new MetaException(
                    "Unable to select from transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(null, stmt, dbConn);
        }
    } catch (RetryException e) {
        return heartbeatTxnRange(rqst);
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

@Override
@RetrySemantics.Idempotent
public CompactionResponse compact(CompactionRequest rqst) throws MetaException {
    // Put a compaction request in the queue.
    try {
        Connection dbConn = null;
        Statement stmt = null;
        TxnStore.MutexAPI.LockHandle handle = null;
        try {
            lockInternal();
            /**
             * MUTEX_KEY.CompactionScheduler lock ensures that there is only 1 entry in
             * Initiated/Working state for any resource.  This ensures that we don't run concurrent
             * compactions for any resource.
             */
            handle = getMutexAPI().acquireLock(MUTEX_KEY.CompactionScheduler.name());
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();

            long id = generateCompactionQueueId(stmt);

            StringBuilder sb = new StringBuilder("select cq_id, cq_state from COMPACTION_QUEUE where")
                    .append(" cq_state IN(").append(quoteChar(INITIATED_STATE)).append(",")
                    .append(quoteChar(WORKING_STATE)).append(") AND cq_database=")
                    .append(quoteString(rqst.getDbname())).append(" AND cq_table=")
                    .append(quoteString(rqst.getTablename())).append(" AND ");
            if (rqst.getPartitionname() == null) {
                sb.append("cq_partition is null");
            } else {
                sb.append("cq_partition=").append(quoteString(rqst.getPartitionname()));
            }

            LOG.debug("Going to execute query <" + sb.toString() + ">");
            ResultSet rs = stmt.executeQuery(sb.toString());
            if (rs.next()) {
                long enqueuedId = rs.getLong(1);
                String state = compactorStateToResponse(rs.getString(2).charAt(0));
                LOG.info("Ignoring request to compact " + rqst.getDbname() + "/" + rqst.getTablename() + "/"
                        + rqst.getPartitionname() + " since it is already " + quoteString(state) + " with id="
                        + enqueuedId);
                return new CompactionResponse(enqueuedId, state, false);
            }
            close(rs);
            StringBuilder buf = new StringBuilder(
                    "insert into COMPACTION_QUEUE (cq_id, cq_database, " + "cq_table, ");
            String partName = rqst.getPartitionname();
            if (partName != null)
                buf.append("cq_partition, ");
            buf.append("cq_state, cq_type");
            if (rqst.getProperties() != null) {
                buf.append(", cq_tblproperties");
            }
            if (rqst.getRunas() != null)
                buf.append(", cq_run_as");
            buf.append(") values (");
            buf.append(id);
            buf.append(", '");
            buf.append(rqst.getDbname());
            buf.append("', '");
            buf.append(rqst.getTablename());
            buf.append("', '");
            if (partName != null) {
                buf.append(partName);
                buf.append("', '");
            }
            buf.append(INITIATED_STATE);
            buf.append("', '");
            switch (rqst.getType()) {
            case MAJOR:
                buf.append(MAJOR_TYPE);
                break;

            case MINOR:
                buf.append(MINOR_TYPE);
                break;

            default:
                LOG.debug("Going to rollback");
                dbConn.rollback();
                throw new MetaException("Unexpected compaction type " + rqst.getType().toString());
            }
            if (rqst.getProperties() != null) {
                buf.append("', '");
                buf.append(new StringableMap(rqst.getProperties()).toString());
            }
            if (rqst.getRunas() != null) {
                buf.append("', '");
                buf.append(rqst.getRunas());
            }
            buf.append("')");
            String s = buf.toString();
            LOG.debug("Going to execute update <" + s + ">");
            stmt.executeUpdate(s);
            LOG.debug("Going to commit");
            dbConn.commit();
            return new CompactionResponse(id, INITIATED_RESPONSE, true);
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "compact(" + rqst + ")");
            throw new MetaException(
                    "Unable to select from transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeStmt(stmt);
            closeDbConn(dbConn);
            if (handle != null) {
                handle.releaseLocks();
            }
            unlockInternal();
        }
    } catch (RetryException e) {
        return compact(rqst);
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

@RetrySemantics.ReadOnly
public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException {
    ShowCompactResponse response = new ShowCompactResponse(new ArrayList<ShowCompactResponseElement>());
    Connection dbConn = null;
    Statement stmt = null;
    try {
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            String s = "select cq_database, cq_table, cq_partition, cq_state, cq_type, cq_worker_id, " +
            //-1 because 'null' literal doesn't work for all DBs...
                    "cq_start, -1 cc_end, cq_run_as, cq_hadoop_job_id, cq_id from COMPACTION_QUEUE union all "
                    + "select cc_database, cc_table, cc_partition, cc_state, cc_type, cc_worker_id, "
                    + "cc_start, cc_end, cc_run_as, cc_hadoop_job_id, cc_id from COMPLETED_COMPACTIONS";
            //what I want is order by cc_end desc, cc_start asc (but derby has a bug https://issues.apache.org/jira/browse/DERBY-6013)
            //to sort so that currently running jobs are at the end of the list (bottom of screen)
            //and currently running ones are in sorted by start time
            //w/o order by likely currently running compactions will be first (LHS of Union)
            LOG.debug("Going to execute query <" + s + ">");
            ResultSet rs = stmt.executeQuery(s);
            while (rs.next()) {
                ShowCompactResponseElement e = new ShowCompactResponseElement();
                e.setDbname(rs.getString(1));
                e.setTablename(rs.getString(2));
                e.setPartitionname(rs.getString(3));
                e.setState(compactorStateToResponse(rs.getString(4).charAt(0)));
                switch (rs.getString(5).charAt(0)) {
                case MAJOR_TYPE:
                    e.setType(CompactionType.MAJOR);
                    break;
                case MINOR_TYPE:
                    e.setType(CompactionType.MINOR);
                    break;
                default:
                    //do nothing to handle RU/D if we add another status
                }
                e.setWorkerid(rs.getString(6));
                long start = rs.getLong(7);
                if (!rs.wasNull()) {
                    e.setStart(start);
                }
                long endTime = rs.getLong(8);
                if (endTime != -1) {
                    e.setEndTime(endTime);
                }
                e.setRunAs(rs.getString(9));
                e.setHadoopJobId(rs.getString(10));
                e.setId(rs.getLong(11));
                response.addToCompacts(e);
            }
            LOG.debug("Going to rollback");
            dbConn.rollback();
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "showCompact(" + rqst + ")");
            throw new MetaException(
                    "Unable to select from transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeStmt(stmt);
            closeDbConn(dbConn);
        }
        return response;
    } catch (RetryException e) {
        return showCompact(rqst);
    }
}

From source file:org.apache.hadoop.hive.metastore.MyXid.java

@Override
public Table getTable(String dbName, String tableName) throws MetaException, NoSuchObjectException {
    boolean success = false;

    Connection con;
    Statement ps = null;
    Table tbl = new Table();

    dbName = dbName.toLowerCase();
    tableName = tableName.toLowerCase();

    try {
        con = getSegmentConnectionForRead(dbName);
    } catch (MetaStoreConnectException e1) {
        LOG.error("get table error, db=" + dbName + ", tbl=" + tableName + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("get table error, db=" + dbName + ", tbl=" + tableName + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setAutoCommit(false);
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();

        String sql = "SELECT tbl_id, create_time"
                + ", is_compressed, retention, tbl_type, db_name, tbl_name, tbl_owner "
                + ", tbl_format, pri_part_type, sub_part_type, pri_part_key, sub_part_key "
                + ", input_format, output_format, serde_name, serde_lib, tbl_location, tbl_comment "
                + " from TBLS where db_name='" + dbName + "' and tbl_name='" + tableName + "'";

        ResultSet tblSet = ps.executeQuery(sql);
        boolean isTblFind = false;
        StorageDescriptor sd = null;
        SerDeInfo sdInfo = null;
        String priPartKey = null;
        String subPartKey = null;
        Partition priPart = null;
        Partition subPart = null;
        long tblID = 0;

        String comment = null;
        String format = null;
        Timestamp createTime = null;
        String tblType = null;

        boolean hasPriPart = false;
        boolean hasSubPart = false;

        while (tblSet.next()) {
            isTblFind = true;
            tblID = tblSet.getLong(1);

            createTime = tblSet.getTimestamp(2);

            if (createTime != null) {
                tbl.setCreateTime((int) (createTime.getTime() / 1000));
            }

            sd = new StorageDescriptor();
            sdInfo = new SerDeInfo();
            sd.setCompressed(tblSet.getBoolean(3));

            tbl.setRetention((int) tblSet.getLong(4));
            tblType = tblSet.getString(5);
            tbl.setTableType(tblType);
            tbl.setDbName(tblSet.getString(6));
            tbl.setTableName(tblSet.getString(7));
            tbl.setOwner(tblSet.getString(8));

            format = tblSet.getString(9);

            priPartKey = tblSet.getString(12);
            subPartKey = tblSet.getString(13);

            if (priPartKey != null && !priPartKey.isEmpty()) {
                hasPriPart = true;
                priPart = new Partition();
                priPart.setLevel(0);
                priPart.setDbName(tblSet.getString(6));
                priPart.setTableName(tblSet.getString(7));
                priPart.setParType(tblSet.getString(10));
            }

            if (subPartKey != null && !subPartKey.isEmpty()) {
                hasSubPart = true;
                subPart = new Partition();
                subPart.setLevel(1);
                subPart.setDbName(tblSet.getString(6));
                subPart.setTableName(tblSet.getString(7));
                subPart.setParType(tblSet.getString(11));
            }

            sd.setInputFormat(tblSet.getString(14));
            sd.setOutputFormat(tblSet.getString(15));
            sdInfo.setName(tblSet.getString(16));
            sdInfo.setSerializationLib(tblSet.getString(17));
            sd.setLocation(tblSet.getString(18));
            comment = tblSet.getString(19);

            break;
        }

        tblSet.close();

        if (!isTblFind) {
            LOG.error(dbName + "." + tableName + " table not found");
            throw new NoSuchObjectException(dbName + "." + tableName + " table not found");
        }

        List<FieldSchema> fieldList = new ArrayList<FieldSchema>();
        Map<String, FieldSchema> fieldMap = new LinkedHashMap<String, FieldSchema>();

        sql = "SELECT column_name, type_name, comment from columns where tbl_id=" + tblID
                + " order by column_index asc";
        ResultSet colSet = ps.executeQuery(sql);
        while (colSet.next()) {
            FieldSchema field = new FieldSchema();
            field.setName(colSet.getString(1));
            field.setType(colSet.getString(2));
            field.setComment(colSet.getString(3));

            fieldList.add(field);
            fieldMap.put(colSet.getString(1), field);
        }
        colSet.close();

        sd.setCols(fieldList);

        sql = "SELECT param_type, param_key, param_value  from table_params where tbl_id=" + tblID;
        ResultSet paramSet = ps.executeQuery(sql);
        Map<String, String> tblParamMap = new HashMap<String, String>();
        Map<String, String> sdParamMap = new HashMap<String, String>();
        Map<String, String> serdeParam = new HashMap<String, String>();

        while (paramSet.next()) {
            String type = paramSet.getString(1);
            if (type == null)
                continue;

            if (type.equalsIgnoreCase("sd")) {
                sdParamMap.put(paramSet.getString(2), paramSet.getString(3));
            } else if (type.equalsIgnoreCase("serde")) {
                serdeParam.put(paramSet.getString(2), paramSet.getString(3));
            } else if (type.equalsIgnoreCase("tbl")) {
                tblParamMap.put(paramSet.getString(2), paramSet.getString(3));
            } else {
                tblParamMap.put(paramSet.getString(2), paramSet.getString(3));
            }
        }
        paramSet.close();

        if (comment != null && !comment.isEmpty()) {
            tblParamMap.put("comment", comment);
        }

        if (format != null && !format.isEmpty()) {
            tblParamMap.put("type", format);
        }

        tbl.setParameters(tblParamMap);
        sd.setParameters(sdParamMap);
        sdInfo.setParameters(serdeParam);

        List<String> bucketCols = new ArrayList<String>();
        sql = "select bucket_col_name from bucket_cols where tbl_id=" + tblID + " order by col_index asc";
        ResultSet bucketSet = ps.executeQuery(sql);
        while (bucketSet.next()) {
            bucketCols.add(bucketSet.getString(1));
        }

        bucketSet.close();
        if (bucketCols.size() > 0) {
            sd.setBucketCols(bucketCols);
            String numBucketStr = sd.getParameters().get("NUM_BUCKETS");
            if (numBucketStr == null) {
                sd.setNumBuckets(-1);
            } else {
                sd.setNumBuckets(Integer.valueOf(numBucketStr));
            }
        } else {
            sd.setBucketCols(bucketCols);
            sd.setNumBuckets(-1);
        }

        sd.getParameters().remove("NUM_BUCKETS");

        List<Order> sortCols = new ArrayList<Order>();
        sql = "select sort_column_name, sort_order from sort_cols where tbl_id=" + tblID
                + " order by col_index asc";
        ResultSet sortSet = ps.executeQuery(sql);
        while (sortSet.next()) {
            Order o = new Order();
            o.setCol(sortSet.getString(1));
            o.setOrder(sortSet.getInt(2));
            sortCols.add(o);
        }

        sortSet.close();
        sd.setSortCols(sortCols);

        sd.setSerdeInfo(sdInfo);
        tbl.setSd(sd);

        if (hasPriPart) {
            sql = "SELECT level, part_name, part_values from  PARTITIONS where tbl_id=" + tblID;
            ResultSet partSet = ps.executeQuery(sql);
            Map<String, List<String>> priPartSpace = new LinkedHashMap<String, List<String>>();
            Map<String, List<String>> subPartSpace = new LinkedHashMap<String, List<String>>();

            while (partSet.next()) {
                int level = partSet.getInt(1);
                switch (level) {
                case 0:
                    String priName = partSet.getString(2);
                    List<String> priValueList = new ArrayList<String>();
                    Array priSpaceArray = partSet.getArray(3);

                    if (priSpaceArray != null) {
                        ResultSet priValueSet = priSpaceArray.getResultSet();

                        while (priValueSet.next()) {
                            priValueList.add(priValueSet.getString(2));
                        }
                    }

                    priPartSpace.put(priName, priValueList);
                    break;

                case 1:
                    String subName = partSet.getString(2);
                    List<String> subValueList = new ArrayList<String>();
                    Array subSpaceArray = partSet.getArray(3);

                    if (subSpaceArray != null) {
                        ResultSet subValueSet = subSpaceArray.getResultSet();
                        while (subValueSet.next()) {
                            subValueList.add(subValueSet.getString(2));
                        }
                    }

                    subPartSpace.put(subName, subValueList);
                    break;

                default:
                    break;
                }
            }

            partSet.close();

            priPart.setParSpaces(priPartSpace);

            priPart.setParKey(fieldMap.get(priPartKey.toLowerCase()));

            if (hasSubPart) {
                subPart.setParSpaces(subPartSpace);
                subPart.setParKey(fieldMap.get(subPartKey.toLowerCase()));
            }
        }

        tbl.setPriPartition(priPart);
        tbl.setSubPartition(subPart);

        if (tblType.equalsIgnoreCase("VIRTUAL_VIEW")) {
            sql = "select view_original_text, view_expanded_text, vtables from " + " tdwview where tbl_id="
                    + tblID;

            ResultSet viewSet = ps.executeQuery(sql);
            while (viewSet.next()) {
                tbl.setViewOriginalText(viewSet.getString(1));
                tbl.setViewExpandedText(viewSet.getString(2));
                tbl.setVtables(viewSet.getString(3));
                break;
            }
        }

        con.commit();
        success = true;
    } catch (SQLException sqlex) {
        sqlex.printStackTrace();
        LOG.error("get table error, db=" + dbName + ", tbl=" + tableName + ", msg=" + sqlex.getMessage());
        throw new MetaException(sqlex.getMessage());
    } finally {
        if (!success) {
            try {
                con.rollback();
            } catch (SQLException e) {
            }
        }

        closeStatement(ps);
        closeConnection(con);
    }

    if (success)
        return tbl;
    else
        return null;
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * Retry-by-caller note:
 * This may be retried after dbConn.commit.  At worst, it will create duplicate entries in
 * TXN_COMPONENTS which won't affect anything.  See more comments in {@link #commitTxn(CommitTxnRequest)}
 */
@Override
@RetrySemantics.SafeToRetry
public void addDynamicPartitions(AddDynamicPartitions rqst)
        throws NoSuchTxnException, TxnAbortedException, MetaException {
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet lockHandle = null;
    ResultSet rs = null;
    try {
        try {
            lockInternal();
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            lockHandle = lockTransactionRecord(stmt, rqst.getTxnid(), TXN_OPEN);
            if (lockHandle == null) {
                //ensures txn is still there and in expected state
                ensureValidTxn(dbConn, rqst.getTxnid(), stmt);
                shouldNeverHappen(rqst.getTxnid());
            }
            //for RU this may be null so we should default it to 'u' which is most restrictive
            OpertaionType ot = OpertaionType.UPDATE;
            if (rqst.isSetOperationType()) {
                ot = OpertaionType.fromDataOperationType(rqst.getOperationType());
            }
            List<String> rows = new ArrayList<>();
            for (String partName : rqst.getPartitionnames()) {
                rows.add(rqst.getTxnid() + "," + quoteString(rqst.getDbname()) + ","
                        + quoteString(rqst.getTablename()) + "," + quoteString(partName) + ","
                        + quoteChar(ot.sqlConst));
            }
            int modCount = 0;
            //record partitions that were written to
            List<String> queries = sqlGenerator.createInsertValuesStmt(
                    "TXN_COMPONENTS (tc_txnid, tc_database, tc_table, tc_partition, tc_operation_type)", rows);
            for (String query : queries) {
                LOG.debug("Going to execute update <" + query + ">");
                modCount = stmt.executeUpdate(query);
            }
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "addDynamicPartitions(" + rqst + ")");
            throw new MetaException(
                    "Unable to insert into from transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(lockHandle, stmt, dbConn);
            unlockInternal();
        }
    } catch (RetryException e) {
        addDynamicPartitions(rqst);
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * Clean up corresponding records in metastore tables when corresponding object is dropped,
 * specifically: TXN_COMPONENTS, COMPLETED_TXN_COMPONENTS, COMPACTION_QUEUE, COMPLETED_COMPACTIONS
 * Retry-by-caller note: this is only idempotent assuming it's only called by dropTable/Db/etc
 * operations.
 */
@Override
@RetrySemantics.Idempotent
public void cleanupRecords(HiveObjectType type, Database db, Table table, Iterator<Partition> partitionIterator)
        throws MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;

        try {
            String dbName;
            String tblName;
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            List<String> queries = new ArrayList<String>();
            StringBuilder buff = new StringBuilder();

            switch (type) {
            case DATABASE:
                dbName = db.getName();

                buff.append("delete from TXN_COMPONENTS where tc_database='");
                buff.append(dbName);
                buff.append("'");
                queries.add(buff.toString());

                buff.setLength(0);
                buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='");
                buff.append(dbName);
                buff.append("'");
                queries.add(buff.toString());

                buff.setLength(0);
                buff.append("delete from COMPACTION_QUEUE where cq_database='");
                buff.append(dbName);
                buff.append("'");
                queries.add(buff.toString());

                buff.setLength(0);
                buff.append("delete from COMPLETED_COMPACTIONS where cc_database='");
                buff.append(dbName);
                buff.append("'");
                queries.add(buff.toString());

                break;
            case TABLE:
                dbName = table.getDbName();
                tblName = table.getTableName();

                buff.append("delete from TXN_COMPONENTS where tc_database='");
                buff.append(dbName);
                buff.append("' and tc_table='");
                buff.append(tblName);
                buff.append("'");
                queries.add(buff.toString());

                buff.setLength(0);
                buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='");
                buff.append(dbName);
                buff.append("' and ctc_table='");
                buff.append(tblName);
                buff.append("'");
                queries.add(buff.toString());

                buff.setLength(0);
                buff.append("delete from COMPACTION_QUEUE where cq_database='");
                buff.append(dbName);
                buff.append("' and cq_table='");
                buff.append(tblName);
                buff.append("'");
                queries.add(buff.toString());

                buff.setLength(0);
                buff.append("delete from COMPLETED_COMPACTIONS where cc_database='");
                buff.append(dbName);
                buff.append("' and cc_table='");
                buff.append(tblName);
                buff.append("'");
                queries.add(buff.toString());

                break;
            case PARTITION:
                dbName = table.getDbName();
                tblName = table.getTableName();
                List<FieldSchema> partCols = table.getPartitionKeys(); // partition columns
                List<String> partVals; // partition values
                String partName;

                while (partitionIterator.hasNext()) {
                    Partition p = partitionIterator.next();
                    partVals = p.getValues();
                    partName = Warehouse.makePartName(partCols, partVals);

                    buff.append("delete from TXN_COMPONENTS where tc_database='");
                    buff.append(dbName);
                    buff.append("' and tc_table='");
                    buff.append(tblName);
                    buff.append("' and tc_partition='");
                    buff.append(partName);
                    buff.append("'");
                    queries.add(buff.toString());

                    buff.setLength(0);
                    buff.append("delete from COMPLETED_TXN_COMPONENTS where ctc_database='");
                    buff.append(dbName);
                    buff.append("' and ctc_table='");
                    buff.append(tblName);
                    buff.append("' and ctc_partition='");
                    buff.append(partName);
                    buff.append("'");
                    queries.add(buff.toString());

                    buff.setLength(0);
                    buff.append("delete from COMPACTION_QUEUE where cq_database='");
                    buff.append(dbName);
                    buff.append("' and cq_table='");
                    buff.append(tblName);
                    buff.append("' and cq_partition='");
                    buff.append(partName);
                    buff.append("'");
                    queries.add(buff.toString());

                    buff.setLength(0);
                    buff.append("delete from COMPLETED_COMPACTIONS where cc_database='");
                    buff.append(dbName);
                    buff.append("' and cc_table='");
                    buff.append(tblName);
                    buff.append("' and cc_partition='");
                    buff.append(partName);
                    buff.append("'");
                    queries.add(buff.toString());
                }

                break;
            default:
                throw new MetaException("Invalid object type for cleanup: " + type);
            }

            for (String query : queries) {
                LOG.debug("Going to execute update <" + query + ">");
                stmt.executeUpdate(query);
            }

            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "cleanupRecords");
            if (e.getMessage().contains("does not exist")) {
                LOG.warn("Cannot perform cleanup since metastore table does not exist");
            } else {
                throw new MetaException("Unable to clean up " + StringUtils.stringifyException(e));
            }
        } finally {
            closeStmt(stmt);
            closeDbConn(dbConn);
        }
    } catch (RetryException e) {
        cleanupRecords(type, db, table, partitionIterator);
    }
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * For testing only, do not use.
 */
@VisibleForTesting
public int numLocksInLockTable() throws SQLException, MetaException {
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
        dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
        stmt = dbConn.createStatement();
        String s = "select count(*) from HIVE_LOCKS";
        LOG.debug("Going to execute query <" + s + ">");
        rs = stmt.executeQuery(s);
        rs.next();
        int rc = rs.getInt(1);
        // Necessary to clean up the transaction in the db.
        dbConn.rollback();
        return rc;
    } finally {
        close(rs, stmt, dbConn);
    }
}