List of usage examples for java.sql Connection TRANSACTION_READ_COMMITTED
int TRANSACTION_READ_COMMITTED = 2 (dirty reads are prevented; non-repeatable reads and phantom reads can still occur)
To view the full source code for each java.sql Connection TRANSACTION_READ_COMMITTED example, follow the link to the source file named above it.
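Before the per-file examples, here is a minimal, self-contained sketch (not taken from any of the sources below; the JDBC URL and credentials are placeholders) showing the typical pattern: check driver support, then set the isolation level before running transactional statements.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class ReadCommittedSketch {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL/credentials; substitute your own driver settings.
        String url = "jdbc:h2:mem:demo";
        try (Connection con = DriverManager.getConnection(url, "sa", "")) {
            // Group statements into explicit transactions.
            con.setAutoCommit(false);
            // Only set the level if the driver actually supports it.
            if (con.getMetaData().supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)) {
                con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            }
            // ... execute statements here ...
            con.commit();
        }
    }
}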
From source file: je3.rmi.MudClient.java
/**
 * This main() method is the standalone program that figures out what
 * database to connect to with what driver, connects to the database,
 * creates a PersistentBankServer object, and registers it with the registry,
 * making it available for client use.
 **/
public static void main(String[] args) {
    try {
        // Create a new Properties object.  Attempt to initialize it from
        // the BankDB.props file or the file optionally specified on the
        // command line, ignoring errors.
        Properties p = new Properties();
        try {
            p.load(new FileInputStream(args[0]));
        } catch (Exception e) {
            try {
                p.load(new FileInputStream("BankDB.props"));
            } catch (Exception e2) {
            }
        }

        // The BankDB.props file (or file specified on the command line)
        // must contain properties "driver" and "database", and may
        // optionally contain properties "user" and "password".
        String driver = p.getProperty("driver");
        String database = p.getProperty("database");
        String user = p.getProperty("user", "");
        String password = p.getProperty("password", "");

        // Load the database driver class
        Class.forName(driver);

        // Connect to the database that stores our accounts
        Connection db = DriverManager.getConnection(database, user, password);

        // Configure the database to allow multiple queries and updates
        // to be grouped into atomic transactions
        db.setAutoCommit(false);
        db.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);

        // Create a server object that uses our database connection
        PersistentBankServer bank = new PersistentBankServer(db);

        // Read a system property to figure out how to name this server.
        // Use "SecondRemote" as the default.
        String name = System.getProperty("bankname", "SecondRemote");

        // Register the server with the name
        Naming.rebind(name, bank);

        // And tell everyone that we're up and running.
        System.out.println(name + " is open and ready for customers.");
    } catch (Exception e) {
        System.err.println(e);
        if (e instanceof SQLException)
            System.err.println("SQL State: " + ((SQLException) e).getSQLState());
        System.err.println("Usage: java [-Dbankname=<name>] " + "je3.rmi.PersistentBankServer " + "[<dbpropsfile>]");
        System.exit(1);
    }
}
From source file: org.apache.hadoop.hive.metastore.txn.TxnHandler.java
/**
 * This enters locks into the queue in {@link #LOCK_WAITING} mode.
 *
 * Isolation Level Notes:
 * 1. We use S4U (with read_committed) to generate the next (ext) lock id.  This serializes
 * any 2 {@code enqueueLockWithRetry()} calls.
 * 2. We use S4U on the relevant TXNS row to block any concurrent abort/commit/etc operations
 * @see #checkLockWithRetry(Connection, long, long)
 */
private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst)
        throws NoSuchTxnException, TxnAbortedException, MetaException {
    boolean success = false;
    Connection dbConn = null;
    try {
        Statement stmt = null;
        ResultSet rs = null;
        ResultSet lockHandle = null;
        try {
            lockInternal();
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            long txnid = rqst.getTxnid();
            stmt = dbConn.createStatement();
            if (isValidTxn(txnid)) {
                //this also ensures that txn is still there in expected state
                lockHandle = lockTransactionRecord(stmt, txnid, TXN_OPEN);
                if (lockHandle == null) {
                    ensureValidTxn(dbConn, txnid, stmt);
                    shouldNeverHappen(txnid);
                }
            }
            /** Get the next lock id.
             * This has to be atomic with adding entries to HIVE_LOCK entries (1st add in W state) to prevent a race.
             * Suppose ID gen is a separate txn and 2 concurrent lock() methods are running.  1st one generates nl_next=7,
             * 2nd nl_next=8.  Then 8 goes first to insert into HIVE_LOCKS and acquires the locks.  Then 7 unblocks,
             * and adds its W locks, but it won't see locks from 8 since, to be 'fair', {@link #checkLock(java.sql.Connection, long)}
             * doesn't block on locks acquired later than the one it's checking. */
            String s = sqlGenerator.addForUpdateClause("select nl_next from NEXT_LOCK_ID");
            LOG.debug("Going to execute query <" + s + ">");
            rs = stmt.executeQuery(s);
            if (!rs.next()) {
                LOG.debug("Going to rollback");
                dbConn.rollback();
                throw new MetaException(
                        "Transaction tables not properly " + "initialized, no record found in next_lock_id");
            }
            long extLockId = rs.getLong(1);
            s = "update NEXT_LOCK_ID set nl_next = " + (extLockId + 1);
            LOG.debug("Going to execute update <" + s + ">");
            stmt.executeUpdate(s);

            if (txnid > 0) {
                List<String> rows = new ArrayList<>();
                // For each component in this lock request,
                // add an entry to the txn_components table
                for (LockComponent lc : rqst.getComponent()) {
                    if (lc.isSetIsAcid() && !lc.isIsAcid()) {
                        //we don't prevent using non-acid resources in a txn but we do lock them
                        continue;
                    }
                    boolean updateTxnComponents;
                    if (!lc.isSetOperationType()) {
                        //request came from old version of the client
                        updateTxnComponents = true;//this matches old behavior
                    } else {
                        switch (lc.getOperationType()) {
                        case INSERT:
                        case UPDATE:
                        case DELETE:
                            if (!lc.isSetIsDynamicPartitionWrite()) {
                                //must be old client talking, i.e. we don't know if it's DP so be conservative
                                updateTxnComponents = true;
                            } else {
                                /**
                                 * we know this is part of a DP operation and so we'll get an
                                 * {@link #addDynamicPartitions(AddDynamicPartitions)} call with the list
                                 * of partitions actually changed.
                                 */
                                updateTxnComponents = !lc.isIsDynamicPartitionWrite();
                            }
                            break;
                        case SELECT:
                            updateTxnComponents = false;
                            break;
                        case NO_TXN:
                            /*this constant is a bit of a misnomer since we now always have a txn context.  It
                              just means the operation is such that we don't care what tables/partitions it
                              affected as it doesn't trigger a compaction or conflict detection.  A better name
                              would be NON_TRANSACTIONAL.*/
                            updateTxnComponents = false;
                            break;
                        default:
                            //since we have an open transaction, only the 4 values above are expected
                            throw new IllegalStateException(
                                    "Unexpected DataOperationType: " + lc.getOperationType() + " agentInfo="
                                            + rqst.getAgentInfo() + " " + JavaUtils.txnIdToString(txnid));
                        }
                    }
                    if (!updateTxnComponents) {
                        continue;
                    }
                    String dbName = lc.getDbname();
                    String tblName = lc.getTablename();
                    String partName = lc.getPartitionname();
                    rows.add(txnid + ", '" + dbName + "', "
                            + (tblName == null ? "null" : "'" + tblName + "'") + ", "
                            + (partName == null ? "null" : "'" + partName + "'") + ","
                            + quoteString(OpertaionType.fromDataOperationType(lc.getOperationType()).toString()));
                }
                List<String> queries = sqlGenerator.createInsertValuesStmt(
                        "TXN_COMPONENTS (tc_txnid, tc_database, tc_table, tc_partition, tc_operation_type)", rows);
                for (String query : queries) {
                    LOG.debug("Going to execute update <" + query + ">");
                    int modCount = stmt.executeUpdate(query);
                }
            }

            List<String> rows = new ArrayList<>();
            long intLockId = 0;
            for (LockComponent lc : rqst.getComponent()) {
                if (lc.isSetOperationType() && lc.getOperationType() == DataOperationType.UNSET
                        && (conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST)
                                || conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEZ_TEST))) {
                    //old versions of the thrift client should have (lc.isSetOperationType() == false) but they do not.
                    //If you add a default value to a variable, isSet() for that variable is true regardless of where the
                    //message was created (for object variables; it works correctly for boolean vars, e.g. LockComponent.isAcid).
                    //In test mode, upgrades are not tested, so client and server thrift versions always match;
                    //if we see UNSET here it means something didn't set the appropriate value.
                    throw new IllegalStateException("Bug: operationType=" + lc.getOperationType()
                            + " for component " + lc + " agentInfo=" + rqst.getAgentInfo());
                }
                intLockId++;
                String dbName = lc.getDbname();
                String tblName = lc.getTablename();
                String partName = lc.getPartitionname();
                LockType lockType = lc.getType();
                char lockChar = 'z';
                switch (lockType) {
                case EXCLUSIVE:
                    lockChar = LOCK_EXCLUSIVE;
                    break;
                case SHARED_READ:
                    lockChar = LOCK_SHARED;
                    break;
                case SHARED_WRITE:
                    lockChar = LOCK_SEMI_SHARED;
                    break;
                }
                long now = getDbTime(dbConn);
                rows.add(extLockId + ", " + intLockId + "," + txnid + ", " + quoteString(dbName) + ", "
                        + valueOrNullLiteral(tblName) + ", " + valueOrNullLiteral(partName) + ", "
                        + quoteChar(LOCK_WAITING) + ", " + quoteChar(lockChar) + ", "
                        //for locks associated with a txn, we always heartbeat the txn and timeout based on that
                        + (isValidTxn(txnid) ? 0 : now) + ", " + valueOrNullLiteral(rqst.getUser()) + ", "
                        + valueOrNullLiteral(rqst.getHostname()) + ", " + valueOrNullLiteral(rqst.getAgentInfo()));
            }
            List<String> queries = sqlGenerator.createInsertValuesStmt(
                    "HIVE_LOCKS (hl_lock_ext_id, hl_lock_int_id, hl_txnid, hl_db, "
                            + "hl_table, hl_partition,hl_lock_state, hl_lock_type, "
                            + "hl_last_heartbeat, hl_user, hl_host, hl_agent_info)", rows);
            for (String query : queries) {
                LOG.debug("Going to execute update <" + query + ">");
                int modCount = stmt.executeUpdate(query);
            }
            dbConn.commit();
            success = true;
            return new ConnectionLockIdPair(dbConn, extLockId);
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "enqueueLockWithRetry(" + rqst + ")");
            throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(lockHandle);
            close(rs, stmt, null);
            if (!success) {
                /* This needs to return a "live" connection to be used by the operation that follows it.
                   Thus it only closes the Connection on failure/retry. */
                closeDbConn(dbConn);
            }
            unlockInternal();
        }
    } catch (RetryException e) {
        return enqueueLockWithRetry(rqst);
    }
}
From source file: org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.java
@Override
public boolean supportsResultSetConcurrency(int type, int concurrency) throws SQLException {
    // TODO: review
    return type == ResultSet.TYPE_FORWARD_ONLY && concurrency == Connection.TRANSACTION_READ_COMMITTED;
}
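As a companion to the Phoenix override above, a caller would normally probe such capabilities through DatabaseMetaData before configuring a connection. This is a minimal sketch using only standard JDBC methods; the connection argument is assumed, not part of the Phoenix snippet.

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class IsolationCapabilityCheck {
    static void configure(Connection connection) throws SQLException {
        DatabaseMetaData md = connection.getMetaData();
        // Probe result-set and isolation capabilities before relying on them.
        if (md.supportsResultSetConcurrency(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
                && md.supportsTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED)) {
            connection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        }
    }
}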
From source file: org.openconcerto.sql.model.SQLDataSource.java
public final void setInitialTransactionIsolation(int level) {
    if (level != Connection.TRANSACTION_READ_UNCOMMITTED && level != Connection.TRANSACTION_READ_COMMITTED
            && level != Connection.TRANSACTION_REPEATABLE_READ && level != Connection.TRANSACTION_SERIALIZABLE)
        throw new IllegalArgumentException("Invalid value :" + level);
    synchronized (this) {
        if (this.txIsolation != level) {
            this.txIsolation = level;
            // perhaps do like setInitialSchema() : i.e. call setTransactionIsolation() on
            // existing connections
            this.invalidateAllConnections(false);
        }
    }
}
From source file: org.apache.hadoop.hive.metastore.MyXid.java
@Override
public List<String> getDatabases() throws MetaException {
    Connection con;
    Statement stmt = null;
    List<String> dbNameList = new ArrayList<String>();

    try {
        con = getGlobalConnection();
    } catch (MetaStoreConnectException e1) {
        LOG.error("get databases error msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("get databases error msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        stmt = con.createStatement();
        String sql = "select db_name, owner from router";
        ResultSet ret = stmt.executeQuery(sql);

        dbNameList = new ArrayList<String>();
        while (ret.next()) {
            dbNameList.add(ret.getString(1));
        }
        ret.close();
    } catch (SQLException x) {
        x.printStackTrace();
        try {
            con.rollback();
        } catch (SQLException e) {
            e.printStackTrace();
        }
        LOG.error("get databases error msg=" + x.getMessage());
        throw new MetaException(x.getMessage());
    } finally {
        closeStatement(stmt);
        closeConnection(con);
    }

    return dbNameList;
}
From source file: org.apache.hadoop.hive.metastore.txn.TxnHandler.java
private LockResponse checkLockWithRetry(Connection dbConn, long extLockId, long txnId)
        throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, MetaException {
    try {
        try {
            lockInternal();
            if (dbConn.isClosed()) {
                //should only get here if retrying this op
                dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            }
            return checkLock(dbConn, extLockId);
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "checkLockWithRetry(" + extLockId + "," + txnId + ")");
            throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e));
        } finally {
            unlockInternal();
            closeDbConn(dbConn);
        }
    } catch (RetryException e) {
        return checkLockWithRetry(dbConn, extLockId, txnId);
    }
}
From source file: org.apache.hadoop.hive.metastore.txn.TxnHandler.java
/**
 * Why doesn't this get a txnid as parameter?  The caller should either know the txnid or know there isn't one.
 * Either way getTxnIdFromLockId() will not be needed.  This would be a Thrift change.
 *
 * Also, when lock acquisition returns WAITING, it's retried every 15 seconds (best case, see DbLockManager.backoff(),
 * in practice more often), which means this is heartbeating way more often than hive.txn.timeout and creating extra load on the DB.
 *
 * Clients that operate in blocking mode can't heartbeat a lock until the lock is acquired.
 * We should make CheckLockRequest include a timestamp or last request to skip unnecessary heartbeats.  Thrift change.
 *
 * {@link #checkLock(java.sql.Connection, long)} must run at SERIALIZABLE (make sure some lock we are checking
 * against doesn't move from W to A in another txn) but this method can heartbeat in a
 * separate txn at READ_COMMITTED.
 *
 * Retry-by-caller note:
 * Retryable because {@link #checkLock(Connection, long)} is
 */
@Override
@RetrySemantics.SafeToRetry
public LockResponse checkLock(CheckLockRequest rqst)
        throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException {
    try {
        Connection dbConn = null;
        long extLockId = rqst.getLockid();
        try {
            lockInternal();
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            // Heartbeat on the lockid first, to assure that our lock is still valid.
            // Then look up the lock info (hopefully in the cache).  If these locks
            // are associated with a transaction then heartbeat on that as well.
            LockInfo info = getTxnIdFromLockId(dbConn, extLockId);
            if (info == null) {
                throw new NoSuchLockException("No such lock " + JavaUtils.lockIdToString(extLockId));
            }
            if (info.txnId > 0) {
                heartbeatTxn(dbConn, info.txnId);
            } else {
                heartbeatLock(dbConn, extLockId);
            }
            //todo: strictly speaking there is a bug here.  heartbeat*() commits, but both heartbeat and
            //checkLock() are in the same retry block, so if checkLock() throws, the heartbeat is also retried.
            //An extra heartbeat is logically harmless, but ...
            return checkLock(dbConn, extLockId);
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "checkLock(" + rqst + " )");
            throw new MetaException("Unable to update transaction database "
                    + JavaUtils.lockIdToString(extLockId) + " " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
            unlockInternal();
        }
    } catch (RetryException e) {
        return checkLock(rqst);
    }
}
From source file: org.apache.hadoop.hive.metastore.txn.TxnHandler.java
/**
 * This would have been made simpler if all locks were associated with a txn.  Then only the txn needs to
 * be heartbeated, committed, etc.; no need for the client to track individual locks.
 * When removing locks not associated with a txn, this potentially conflicts with
 * heartbeat/performTimeout, which are update/delete of HIVE_LOCKS and thus will be locked as needed by the db.
 * Since this only removes from HIVE_LOCKS, at worst some lock acquire is delayed.
 */
@RetrySemantics.Idempotent
public void unlock(UnlockRequest rqst) throws NoSuchLockException, TxnOpenException, MetaException {
    try {
        Connection dbConn = null;
        Statement stmt = null;
        long extLockId = rqst.getLockid();
        try {
            /**
             * This method is logically like commit for read-only auto commit queries.
             * READ_COMMITTED since this only has 1 delete statement and no new entries with the
             * same hl_lock_ext_id can be added, i.e. all rows with a given hl_lock_ext_id are
             * created in a single atomic operation.
             * Theoretically, this competes with {@link #lock(org.apache.hadoop.hive.metastore.api.LockRequest)}
             * but hl_lock_ext_id is not known until that method returns.
             * Also competes with {@link #checkLock(org.apache.hadoop.hive.metastore.api.CheckLockRequest)}
             * but using SERIALIZABLE doesn't materially change the interaction.
             * If the "delete" stmt misses, additional logic is best effort to produce a meaningful error msg.
             */
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            stmt = dbConn.createStatement();
            //hl_txnid <> 0 means it's associated with a transaction
            String s = "delete from HIVE_LOCKS where hl_lock_ext_id = " + extLockId + " AND (hl_txnid = 0 OR"
                    + " (hl_txnid <> 0 AND hl_lock_state = '" + LOCK_WAITING + "'))";
            //(hl_txnid <> 0 AND hl_lock_state = '" + LOCK_WAITING + "') is for multi-statement txns where
            //some query attempted to lock (thus LOCK_WAITING state) but is giving up due to timeout, for example
            LOG.debug("Going to execute update <" + s + ">");
            int rc = stmt.executeUpdate(s);
            if (rc < 1) {
                LOG.debug("Going to rollback");
                dbConn.rollback();
                LockInfo info = getTxnIdFromLockId(dbConn, extLockId);
                if (info == null) {
                    //didn't find any lock with extLockId, but at ReadCommitted there is a possibility that
                    //it existed when the above delete ran but didn't have the expected state.
                    LOG.info("No lock in " + LOCK_WAITING + " mode found for unlock("
                            + JavaUtils.lockIdToString(rqst.getLockid()) + ")");
                    //bail here to make the operation idempotent
                    return;
                }
                if (info.txnId != 0) {
                    String msg = "Unlocking locks associated with transaction not permitted.  " + info;
                    //if a lock is associated with a txn we can only "unlock" it if it's in WAITING state,
                    //which really means that the caller wants to give up waiting for the lock
                    LOG.error(msg);
                    throw new TxnOpenException(msg);
                }
                if (info.txnId == 0) {
                    //we didn't see this lock when running the DELETE stmt above but now it showed up,
                    //so "should never happen" happened...
                    String msg = "Found lock in unexpected state " + info;
                    LOG.error(msg);
                    throw new MetaException(msg);
                }
            }
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "unlock(" + rqst + ")");
            throw new MetaException("Unable to update transaction database "
                    + JavaUtils.lockIdToString(extLockId) + " " + StringUtils.stringifyException(e));
        } finally {
            closeStmt(stmt);
            closeDbConn(dbConn);
        }
    } catch (RetryException e) {
        unlock(rqst);
    }
}
From source file: de.innovationgate.webgate.api.jdbc.WGDatabaseImpl.java
/**
 * @see de.innovationgate.webgate.api.WGDatabaseCore#open(WGDatabase,
 *      String, String, String, boolean)
 */
public WGUserAccess open(WGDatabase db, String path, String user, String pwd, boolean prepareOnly)
        throws WGAPIException {
    try {
        this._db = db;
        this._path = path;
        this._aclImpl = new ACLImpl(this);

        String jdbcDriver = (String) db.getCreationOptions().get("Driver");
        if (jdbcDriver == null) {
            jdbcDriver = (String) db.getCreationOptions().get("hibernate.connection.driver_class");
        }

        // Determine ddl version
        _csVersion = determineCSVersion(db, jdbcDriver, path, user, pwd);
        _ddlVersion = _csVersion.getVersion();
        _fileHandling = createFileHandling();

        boolean useSharedPool = WGUtils.getBooleanMapValue(db.getCreationOptions(), WGDatabase.COPTION_SHAREDPOOL, true);
        if (useSharedPool && db.getCreationOptions().containsKey(Database.OPTION_PATH)
                && db.getServer() instanceof SharedPoolJDBCDatabaseServer) {
            SharedPoolJDBCDatabaseServer poolServer = (SharedPoolJDBCDatabaseServer) db.getServer();
            if (poolServer.isPoolAvailable(_csVersion)) {
                try {
                    _connProvider = poolServer.createPoolConnectionProvider(
                            (String) db.getCreationOptions().get(Database.OPTION_PATH));
                    WGFactory.getLogger().info("Database '" + db.getDbReference()
                            + "' uses the shared connection pool of database server '"
                            + db.getServer().getTitle(Locale.getDefault()) + "'");
                } catch (WGInvalidDatabaseException e) {
                    throw e;
                } catch (Exception e) {
                    throw new WGInvalidDatabaseException("Exception connecting to shared database server pool", e);
                }
            }
        }

        // Create regular connection provider if no shared one available/allowed
        if (_connProvider == null) {
            Properties props = new Properties();
            if (path.startsWith("jdbc:")) {
                putDefaultConPoolProps(db, props);
            }
            if (user != null || pwd != null) {
                props.put("hibernate.connection.username", WGUtils.getValueOrDefault(user, ""));
                props.put("hibernate.connection.password", WGUtils.getValueOrDefault(pwd, ""));
            }
            String driverClass = (String) db.getCreationOptions().get("Driver");
            props.put(Environment.ISOLATION, String.valueOf(Connection.TRANSACTION_READ_COMMITTED));
            props.putAll(db.getCreationOptions());
            try {
                _connProvider = new JDBCConnectionProvider(path, driverClass, props, true);
            } catch (JDBCConnectionException e) {
                throw new WGInvalidDatabaseException("Exception creating connection pool", e);
            }
        }

        // Build session factory and builder
        buildSessionFactory(db, path, user, pwd, _csVersion, _connProvider);

        if ("true".equals(System.getProperty("de.innovationgate.wga.hibernate.enable_jmx"))) {
            _sessionFactory.getStatistics().setStatisticsEnabled(true);
            try {
                Object statisticsMBean = Proxy.newProxyInstance(getClass().getClassLoader(),
                        new Class<?>[] { StatisticsMXBean.class }, new InvocationHandler() {
                            @Override
                            public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
                                return method.invoke(_sessionFactory.getStatistics(), args);
                            }
                        });
                _jmxManager = new JmxManager(statisticsMBean,
                        new ObjectName("de.innovationgate.WGAMonitor:name=Hibernate-Statistics,db="
                                + JmxManager.normalizeJmxKey(db.getDbReference())));
            } catch (Exception e2) {
                WGFactory.getLogger().error("Exception enabling JMX for Hibernate statistics", e2);
            }
        }

        _sessionBuilder = _sessionFactory.withOptions();

        // Determine save isolation
        _saveIsolationActive = _ddlVersion >= WGDatabase.CSVERSION_WGA5;
        if (db.getCreationOptions().containsKey("SaveIsolation")) {
            _saveIsolationActive = Boolean.parseBoolean((String) db.getCreationOptions().get("SaveIsolation"));
        }

        // parse masterPersistenceTimeout
        if (db.getCreationOptions().containsKey(COPTION_MASTERPERSISTENCE_TIMEOUT)) {
            _masterPersistenceTimeout = Long
                    .parseLong((String) db.getCreationOptions().get(COPTION_MASTERPERSISTENCE_TIMEOUT));
        }

        // parse HQL query default type
        String hqlType = (String) db.getCreationOptions().get(COPTION_HQL_FETCH_TYPE);
        if (hqlType != null) {
            _hqlLazyByDefault = hqlType.equals(HQL_FETCHTYPE_LAZY);
        }

        String hqlLazyParentCheck = (String) db.getCreationOptions().get(COPTION_HQL_LAZY_PARENTCHECK);
        if (hqlLazyParentCheck != null) {
            _hqlLazyParentCheck = Boolean.parseBoolean(hqlLazyParentCheck);
        }

        // open session
        WGUserAccess accessLevel;
        try {
            accessLevel = openSession(MasterLoginAuthSession.getInstance(), pwd, true);
        } catch (WGUnavailableException e) {
            throw new WGInvalidDatabaseException("Error opening initial session", e);
        } catch (WGBackendException e) {
            throw new WGInvalidDatabaseException("Error opening initial session", e);
        }
        if (accessLevel.getAccessLevel() <= WGDatabase.ACCESSLEVEL_NOACCESS) {
            try {
                close();
            } catch (WGBackendException e1) {
                WGFactory.getLogger().error(e1);
            }
        }
        return accessLevel;
    } catch (WGInvalidDatabaseException e) {
        if (_connProvider != null) {
            if (_connProvider instanceof Stoppable) {
                ((Stoppable) _connProvider).stop();
            }
            _connProvider = null;
        }
        throw e;
    }
}
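The WGDatabaseImpl example above hands the isolation level to Hibernate via the Environment.ISOLATION property. A minimal sketch of the same idea with plain connection properties (the property key is the standard Hibernate name behind Environment.ISOLATION; the surrounding Hibernate bootstrap is assumed and not shown):

import java.sql.Connection;
import java.util.Properties;

public class HibernateIsolationSketch {
    public static Properties isolationProps() {
        Properties props = new Properties();
        // "hibernate.connection.isolation" expects the numeric value of the
        // JDBC isolation constant, here TRANSACTION_READ_COMMITTED (2).
        props.put("hibernate.connection.isolation", String.valueOf(Connection.TRANSACTION_READ_COMMITTED));
        return props;
    }
}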
From source file: org.apache.hadoop.hive.metastore.MyXid.java
@Override
public void createTable(Table tbl) throws InvalidObjectException, MetaException, AlreadyExistsException {
    if (tbl == null) {
        throw new InvalidObjectException("unvalid parameters, tbl is null");
    }
    if (tbl.getTableType() == null) {
        tbl.setTableType("MANAGED_TABLE");
    }

    if (tbl.getTableType().equalsIgnoreCase("VIRTUAL_VIEW")) {
        jdbcCreateView(tbl);
        return;
    }

    tbl.setDbName(tbl.getDbName().toLowerCase());
    tbl.setTableName(tbl.getTableName().toLowerCase());

    LOG.debug("first, check the name is valid or not");
    if (!MetaStoreUtils.validateName(tbl.getTableName())
            || !MetaStoreUtils.validateColNames(tbl.getSd().getCols())
            || (tbl.getPriPartition() != null
                    && !MetaStoreUtils.validateName(tbl.getPriPartition().getParKey().getName()))
            || (tbl.getSubPartition() != null
                    && !MetaStoreUtils.validateName(tbl.getSubPartition().getParKey().getName()))) {
        throw new InvalidObjectException(tbl.getTableName() + " is not a valid object name");
    }

    long tblID = genTblID(tbl.getDbName(), tbl.getTableName());

    boolean success = false;
    Connection con;
    PreparedStatement ps = null;
    Statement stmt = null;
    Path tblPath = null;
    Warehouse wh = new Warehouse(hiveConf);
    boolean madeDir = false;

    LOG.debug("2, generate table path ");

    if (tbl.getSd().getLocation() == null || tbl.getSd().getLocation().isEmpty()) {
        tblPath = wh.getDefaultTablePath(tbl.getDbName(), tbl.getTableName());
    } else {
        if (tbl.getTableType().equalsIgnoreCase("EXTERNAL_TABLE")) {
            LOG.warn("Location: " + tbl.getSd().getLocation() + "specified for non-external table:"
                    + tbl.getTableName());
        }
        tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
    }
    tbl.getSd().setLocation(tblPath.toString());

    try {
        con = getSegmentConnection(tbl.getDbName());
    } catch (MetaStoreConnectException e1) {
        LOG.error("create table error, db=" + tbl.getDbName() + ", table=" + tbl.getTableName() + ", msg="
                + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("create table error, db=" + tbl.getDbName() + ", table=" + tbl.getTableName() + ", msg="
                + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }

    try {
        con.setAutoCommit(false);
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        stmt = con.createStatement();

        LOG.debug("1 check the table is exist or not");
        String sql = "select tbl_id from tbls where db_name='" + tbl.getDbName().toLowerCase()
                + "' and tbl_name='" + tbl.getTableName().toLowerCase() + "'";

        boolean isTblFind = false;
        ResultSet checkTblSet = stmt.executeQuery(sql);
        while (checkTblSet.next()) {
            isTblFind = true;
            break;
        }
        checkTblSet.close();

        if (isTblFind) {
            throw new AlreadyExistsException("table " + tbl.getDbName() + ":" + tbl.getTableName() + " has exist");
        }

        LOG.debug("2 insert into tbls");
        ps = con.prepareStatement("INSERT INTO TBLS(tbl_id, is_compressed, retention, tbl_type, db_name, "
                + "tbl_name, tbl_owner, tbl_format"
                + ", pri_part_type, sub_part_type, pri_part_key, sub_part_key, input_format, output_format"
                + ", serde_name, serde_lib, tbl_location, tbl_comment)"
                + " values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)");

        StorageDescriptor sd = tbl.getSd();
        if (sd == null || sd.getSerdeInfo() == null) {
            throw new MetaException("storage descriptor of table " + tbl.getTableName() + " is null");
        }
        SerDeInfo sdInfo = sd.getSerdeInfo();

        ps.setLong(1, tblID);
        ps.setBoolean(2, sd.isCompressed());
        ps.setLong(3, tbl.getRetention());

        if (tbl.getParameters() != null && tbl.getParameters().get("EXTERNAL") != null
                && tbl.getParameters().get("EXTERNAL").equalsIgnoreCase("TRUE")) {
            ps.setString(4, "EXTERNAL_TABLE");
        } else {
            ps.setString(4, tbl.getTableType());
        }

        ps.setString(5, tbl.getDbName());
        ps.setString(6, tbl.getTableName());
        ps.setString(7, tbl.getOwner());

        if (tbl.getParameters() == null) {
            ps.setString(8, null);
        } else {
            ps.setString(8, tbl.getParameters().get("type"));
        }

        Partition priPart = tbl.getPriPartition();
        Partition subPart = tbl.getSubPartition();
        if (priPart != null) {
            ps.setString(11, priPart.getParKey().getName());
            ps.setString(9, priPart.getParType());
        } else {
            ps.setString(11, null);
            ps.setString(9, null);
        }

        if (subPart != null) {
            ps.setString(12, subPart.getParKey().getName());
            ps.setString(10, subPart.getParType());
        } else {
            ps.setString(12, null);
            ps.setString(10, null);
        }

        ps.setString(13, sd.getInputFormat());
        ps.setString(14, sd.getOutputFormat());
        ps.setString(15, sdInfo.getName());
        ps.setString(16, sdInfo.getSerializationLib());
        ps.setString(17, sd.getLocation());

        if (tbl.getParameters() == null) {
            ps.setString(18, null);
        } else {
            ps.setString(18, tbl.getParameters().get("comment"));
        }

        ps.executeUpdate();
        ps.close();

        LOG.debug("3 insert into partitions");
        if (priPart != null) {
            ps = con.prepareStatement(
                    "INSERT INTO PARTITIONS(level, tbl_id," + "part_name, part_values) values(?,?,?,?)");
            Map<String, List<String>> partSpaceMap = priPart.getParSpaces();
            for (Map.Entry<String, List<String>> entry : partSpaceMap.entrySet()) {
                ps.setInt(1, 0);
                ps.setLong(2, tblID);
                ps.setString(3, entry.getKey());
                if (entry.getValue() != null) {
                    Array spaceArray = con.createArrayOf("varchar", entry.getValue().toArray());
                    ps.setArray(4, spaceArray);
                } else {
                    ps.setArray(4, null);
                }
                ps.addBatch();
            }
            ps.executeBatch();
            ps.close();
        }

        if (subPart != null) {
            ps = con.prepareStatement(
                    "INSERT INTO PARTITIONS(level, tbl_id," + "part_name, part_values) values(?,?,?,?)");
            Map<String, List<String>> partSpaceMap = subPart.getParSpaces();
            for (Map.Entry<String, List<String>> entry : partSpaceMap.entrySet()) {
                ps.setInt(1, 1);
                ps.setLong(2, tblID);
                ps.setString(3, entry.getKey());
                if (entry.getValue() != null) {
                    Array spaceArray = con.createArrayOf("varchar", entry.getValue().toArray());
                    ps.setArray(4, spaceArray);
                } else {
                    ps.setArray(4, null);
                }
                ps.addBatch();
            }
            ps.executeBatch();
            ps.close();
        }

        LOG.debug("4 insert into columns");
        ps = con.prepareStatement("INSERT INTO COLUMNS(column_index, tbl_id, column_name, type_name, comment) "
                + " values(?,?,?,?,?)");
        List<FieldSchema> fieldList = sd.getCols();
        int fieldSize = fieldList.size();

        for (int i = 0; i < fieldSize; i++) {
            FieldSchema field = fieldList.get(i);
            ps.setInt(1, i);
            ps.setLong(2, tblID);
            ps.setString(3, field.getName().toLowerCase());
            ps.setString(4, field.getType());
            ps.setString(5, field.getComment());
            ps.addBatch();
        }
        ps.executeBatch();
        ps.close();

        LOG.debug("5 insert into parameters");
        boolean createExtDirIfNotExist = true;
        if (tbl.getParametersSize() > 0) {
            String createExtDirIfNotExistStr = tbl.getParameters().get("hive.exttable.createdir.ifnotexist");
            LOG.info("XXcreateExtDirIfNotExistStr=" + createExtDirIfNotExistStr);
            if (createExtDirIfNotExistStr != null && createExtDirIfNotExistStr.equalsIgnoreCase("false")) {
                createExtDirIfNotExist = false;
            }
            tbl.getParameters().remove("hive.exttable.createdir.ifnotexist");
        }

        if (tbl.getParametersSize() > 0 || sd.getParametersSize() > 0
                || sd.getSerdeInfo().getParametersSize() > 0 || sd.getNumBuckets() > -1) {
            ps = con.prepareStatement("insert into table_params(tbl_id, param_type, param_key, param_value) "
                    + " values(?,?,?,?)");
            if (tbl.getParametersSize() > 0) {
                for (Map.Entry<String, String> entry : tbl.getParameters().entrySet()) {
                    if (entry.getKey().equalsIgnoreCase("type") || entry.getKey().equalsIgnoreCase("comment"))
                        continue;
                    ps.setLong(1, tblID);
                    ps.setString(2, "TBL");
                    ps.setString(3, entry.getKey());
                    ps.setString(4, entry.getValue());
                    ps.addBatch();
                }
            }
            if (sd.getParametersSize() > 0) {
                for (Map.Entry<String, String> entry : sd.getParameters().entrySet()) {
                    ps.setLong(1, tblID);
                    ps.setString(2, "SD");
                    ps.setString(3, entry.getKey());
                    ps.setString(4, entry.getValue());
                    ps.addBatch();
                }
            }
            if (sd.getSerdeInfo().getParametersSize() > 0) {
                for (Map.Entry<String, String> entry : sd.getSerdeInfo().getParameters().entrySet()) {
                    ps.setLong(1, tblID);
                    ps.setString(2, "SERDE");
                    ps.setString(3, entry.getKey());
                    ps.setString(4, entry.getValue());
                    ps.addBatch();
                }
            }
            if (sd.getNumBuckets() > -1) {
                ps.setLong(1, tblID);
                ps.setString(2, "SD");
                ps.setString(3, "NUM_BUCKETS");
                ps.setString(4, String.valueOf(sd.getNumBuckets()));
                ps.addBatch();
            }
            ps.executeBatch();
            ps.close();
        }

        if (tbl.getSd().getBucketCols() != null && !tbl.getSd().getBucketCols().isEmpty()) {
            ps = con.prepareStatement(
                    "insert into bucket_cols(tbl_id, bucket_col_name, col_index) values(?,?,?)");
            int index = 0;
            for (String col : tbl.getSd().getBucketCols()) {
                ps.setLong(1, tblID);
                ps.setString(2, col.toLowerCase());
                ps.setInt(3, index);
                index++;
                ps.addBatch();
            }
            ps.executeBatch();
            ps.close();
        }

        if (tbl.getSd().getSortCols() != null && !tbl.getSd().getSortCols().isEmpty()) {
            ps = con.prepareStatement(
                    "insert into sort_cols(tbl_id, sort_column_name, sort_order, col_index) values(?,?,?,?)");
            int index = 0;
            for (Order o : tbl.getSd().getSortCols()) {
                ps.setLong(1, tblID);
                ps.setString(2, o.getCol());
                ps.setInt(3, o.getOrder());
                ps.setInt(4, index);
                index++;
                ps.addBatch();
            }
            ps.executeBatch();
            ps.close();
        }

        LOG.debug("make hdfs directory for table");

        if (createExtDirIfNotExist && tblPath != null) {
            if (!wh.isDir(tblPath)) {
                if (!wh.mkdirs(tblPath)) {
                    throw new MetaException(tblPath + " is not a directory or unable to create one");
                }
                madeDir = true;
            }

            if (tbl.getPriPartition() != null) {
                Set<String> priPartNames = tbl.getPriPartition().getParSpaces().keySet();
                Set<String> subPartNames = null;
                if (tbl.getSubPartition() != null) {
                    subPartNames = tbl.getSubPartition().getParSpaces().keySet();
                }
                List<Path> partPaths = Warehouse.getPartitionPaths(tblPath, priPartNames, subPartNames);
                for (Path partPath : partPaths) {
                    if (!wh.mkdirs(partPath)) {
                        throw new MetaException(
                                "Partition path " + partPath + " is not a directory or unable to create one.");
                    }
                }
            }
        }

        con.commit();
        success = true;
    } catch (SQLException sqlex) {
        LOG.error("create table error db=" + tbl.getDbName() + ", table=" + tbl.getTableName() + ",msg="
                + sqlex.getMessage());
        sqlex.printStackTrace();
        throw new MetaException(sqlex.getMessage());
    } finally {
        if (!success) {
            try {
                con.rollback();
            } catch (SQLException e) {
            }
            if (madeDir) {
                wh.deleteDir(tblPath, true);
            }
        }
        closeStatement(stmt);
        closeStatement(ps);
        closeConnection(con);
    }

    return;
}