List of usage examples for java.sql.Connection.setSavepoint
Savepoint setSavepoint() throws SQLException;
Creates an unnamed savepoint in the current transaction and returns the new Savepoint object that represents it.
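Before the source-file examples, a minimal sketch of the core pattern: mark a savepoint inside an open transaction and roll back to it on failure, leaving earlier statements intact. The table and column names here are hypothetical.

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Savepoint;
import java.sql.Statement;

public class SavepointSketch {
  static void insertWithPartialRollback(Connection conn) throws SQLException {
    conn.setAutoCommit(false); // savepoints only make sense inside a transaction
    try (Statement stmt = conn.createStatement()) {
      stmt.executeUpdate("insert into audit_log(msg) values ('start')");
      Savepoint sp = conn.setSavepoint(); // unnamed savepoint
      try {
        stmt.executeUpdate("insert into detail(v) values ('risky row')");
      } catch (SQLException e) {
        conn.rollback(sp); // undo only the risky insert; the audit_log row survives
      }
      conn.commit();
    } catch (SQLException e) {
      conn.rollback(); // unexpected failure: undo the whole transaction
      throw e;
    }
  }
}

From source file: mom.trd.opentheso.bdd.helper.RelationsHelper.java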
/**
 * Adds a Group or Domain type relation to a concept.
 * (Translated from French: "Cette fonction permet de rajouter une relation
 * type Groupe ou domaine à un concept")
 *
 * @param conn
 * @param idConcept
 * @param idGroup
 * @param idThesaurus
 * @param idUser
 * @return boolean
 */
public boolean addRelationMT(Connection conn, String idConcept, String idThesaurus, String idGroup, int idUser) {
  Statement stmt;
  boolean status = false;
  String query;
  Savepoint savepoint = null;
  try {
    // Mark a savepoint so a duplicate-key failure can be rolled back
    // without losing the rest of the enclosing transaction
    savepoint = conn.setSavepoint();
    try {
      stmt = conn.createStatement();
      try {
        /* if (!new RelationsHelper().addRelationHistorique(conn, idConcept, idThesaurus,
                idConcept, "MT", idUser, "ADD")) {
            return false;
        } */
        query = "Insert into concept" + "(id_concept, id_thesaurus, id_ark, top_concept, id_group)"
            + " values (" + "'" + idConcept + "'" + ",'" + idThesaurus + "'" + ",'',"
            + "false" + ",'" + idGroup + "')";
        stmt.executeUpdate(query);
        status = true;
      } finally {
        stmt.close();
      }
    } finally {
      // conn.close();
    }
  } catch (SQLException sqle) {
    // SQLState 23505 = unique constraint violation: roll back to the
    // savepoint and treat the duplicate as success
    if (sqle.getSQLState().equalsIgnoreCase("23505")) {
      try {
        if (savepoint != null) {
          conn.rollback(savepoint);
          status = true;
        }
      } catch (SQLException ex) {
        Logger.getLogger(RelationsHelper.class.getName()).log(Level.SEVERE, null, ex);
      }
    } else {
      log.error("Error while adding relation MT of Concept : " + idConcept, sqle);
    }
  }
  return status;
}
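The string-concatenated SQL above is injection-prone. A sketch of the same savepoint-recovery idea using a PreparedStatement — the method name `addRelationMTSafe` is ours and this rewrite is illustrative, not the project's code:

// Illustrative variant with a PreparedStatement; not the project's actual code.
public boolean addRelationMTSafe(Connection conn, String idConcept, String idThesaurus, String idGroup) {
  String sql = "insert into concept (id_concept, id_thesaurus, id_ark, top_concept, id_group)"
      + " values (?, ?, '', false, ?)";
  Savepoint savepoint = null;
  try {
    savepoint = conn.setSavepoint();
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
      ps.setString(1, idConcept);
      ps.setString(2, idThesaurus);
      ps.setString(3, idGroup);
      ps.executeUpdate();
      return true;
    }
  } catch (SQLException sqle) {
    if ("23505".equalsIgnoreCase(sqle.getSQLState()) && savepoint != null) { // duplicate key
      try {
        conn.rollback(savepoint); // undo only the failed insert
        return true;              // relation already exists: treat as success
      } catch (SQLException ex) {
        // fall through and report failure
      }
    }
    return false;
  }
}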
From source file:com.novartis.opensource.yada.YADAQuery.java
/**
 * Set a transactional or non-transactional connection for the source
 * @since 4.0.0
 * @param app the app name stored in the query
 * @param transactions set to {@code true} to execute multiple queries as a single transaction.
 * @throws YADAConnectionException when the connection can't be opened
 */
public void setConnection(String app, boolean transactions) throws YADAConnectionException {
  if (this.getProtocol().equals(Parser.SOAP)) {
    this.setSOAPConnection(ConnectionFactory.getConnectionFactory().getSOAPConnection());
  } else {
    this.setConnection(ConnectionFactory.getConnectionFactory().getConnection(app));
    if (transactions) {
      try {
        Connection c = (Connection) this.getConnection();
        c.setAutoCommit(false);
        try {
          // Not every driver supports savepoints; degrade gracefully
          this.setSavepoint(c.setSavepoint());
        } catch (SQLException e) {
          String msg = "This JDBC driver does not support savepoints.";
          l.warn(msg);
        }
      } catch (SQLException e) {
        String msg = "Unable to configure connection for transaction.";
        throw new YADAConnectionException(msg, e);
      }
    }
  }
}
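An alternative to probing for savepoint support by catching the SQLException, as this example does, is to ask the driver up front via DatabaseMetaData. A small sketch — the helper name is ours:

// Ask the driver whether it supports savepoints before relying on them.
static Savepoint savepointIfSupported(Connection c) throws SQLException {
  if (c.getMetaData().supportsSavepoints()) {
    return c.setSavepoint();
  }
  return null; // caller must fall back to whole-transaction rollback
}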
From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java
/**
 * Lock acquisition is meant to be fair, so every lock can only block on some lock with smaller
 * hl_lock_ext_id by only checking earlier locks.
 *
 * For any given SQL statement all locks required by it are grouped under a single extLockId and are
 * granted all at once or all locks wait.
 *
 * This is expected to run at READ_COMMITTED.
 *
 * Note: this calls acquire() for (extLockId,intLockId) but extLockId is the same and we either take
 * all locks for a given extLockId or none. Would be more efficient to update state on all locks
 * at once. Semantics are the same since this is all part of the same txn.
 *
 * If there is a concurrent commitTxn/rollbackTxn, those can only remove rows from HIVE_LOCKS.
 * If they happen to be for the same txnid, there will be a WW conflict (in MS DB); if different txnid,
 * checkLock() will in the worst case keep locks in Waiting state a little longer.
 */
@RetrySemantics.SafeToRetry("See @SafeToRetry")
private LockResponse checkLock(Connection dbConn, long extLockId)
    throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, MetaException, SQLException {
  TxnStore.MutexAPI.LockHandle handle = null;
  Statement stmt = null;
  ResultSet rs = null;
  LockResponse response = new LockResponse();
  /**
   * todo: Longer term we should pass this from the client somehow - this would be an optimization; once
   * that is in place make sure to build and test "writeSet" below using OperationType not LockType.
   * With Static Partitions we assume that the query modifies exactly the partitions it locked. (Not entirely
   * realistic since Update/Delete may have some predicate that filters all records out of
   * some partition(s), but plausible.) For DP, we acquire locks very wide (all known partitions),
   * but for most queries only a fraction will actually be updated. #addDynamicPartitions() tells
   * us exactly which ones were written to. Thus using this trick to kill a query early for
   * DP queries may be too restrictive.
   */
  boolean isPartOfDynamicPartitionInsert = true;
  try {
    /**
     * checkLock() must be mutexed against any other checkLock to make sure 2 conflicting locks
     * are not granted by parallel checkLock() calls.
     */
    handle = getMutexAPI().acquireLock(MUTEX_KEY.CheckLock.name());
    List<LockInfo> locksBeingChecked = getLockInfoFromLockId(dbConn, extLockId); //being acquired now
    response.setLockid(extLockId);

    LOG.debug("checkLock(): Setting savepoint. extLockId=" + JavaUtils.lockIdToString(extLockId));
    Savepoint save = dbConn.setSavepoint();
    StringBuilder query = new StringBuilder(
        "select hl_lock_ext_id, " + "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, "
            + "hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in (");

    Set<String> strings = new HashSet<String>(locksBeingChecked.size());

    //This is the set of entities that the statement represented by extLockId wants to update
    List<LockInfo> writeSet = new ArrayList<>();

    for (LockInfo info : locksBeingChecked) {
      strings.add(info.db);
      if (!isPartOfDynamicPartitionInsert && info.type == LockType.SHARED_WRITE) {
        writeSet.add(info);
      }
    }
    if (!writeSet.isEmpty()) {
      if (writeSet.get(0).txnId == 0) {
        //Write operations always start a txn
        throw new IllegalStateException(
            "Found Write lock for " + JavaUtils.lockIdToString(extLockId) + " but no txnid");
      }
      stmt = dbConn.createStatement();
      StringBuilder sb = new StringBuilder(" ws_database, ws_table, ws_partition, "
          + "ws_txnid, ws_commit_id " + "from WRITE_SET where ws_commit_id >= "
          + writeSet.get(0).txnId + " and ("); //see commitTxn() for more info on this inequality
      for (LockInfo info : writeSet) {
        sb.append("(ws_database = ").append(quoteString(info.db)).append(" and ws_table = ")
            .append(quoteString(info.table)).append(" and ws_partition ")
            .append(info.partition == null ? "is null" : "= " + quoteString(info.partition))
            .append(") or ");
      }
      sb.setLength(sb.length() - 4); //nuke trailing " or "
      sb.append(")");
      //1 row is sufficient to know we have to kill the query
      rs = stmt.executeQuery(sqlGenerator.addLimitClause(1, sb.toString()));
      if (rs.next()) {
        /**
         * If here, it means we found an already committed txn which overlaps with the current one and
         * it updated the same resource the current txn wants to update. By the first-committer-wins
         * rule, the current txn will not be allowed to commit, so we may as well kill it now; this is
         * just an optimization to prevent wasting cluster resources to run a query which is known to be DOA.
         * {@link #commitTxn(CommitTxnRequest)} has the primary responsibility to ensure this.
         * checkLock() runs at READ_COMMITTED so you could have another (Hive) txn running commitTxn()
         * in parallel and thus writing to WRITE_SET. commitTxn() logic is properly mutexed to ensure
         * that we don't "miss" any WW conflicts. We could've mutexed checkLock() and commitTxn()
         * as well but this reduces concurrency for very little gain.
         * Note that update/delete (which runs as dynamic partition insert) acquires a lock on the table,
         * but WRITE_SET has entries for actual partitions updated. Thus this optimization will "miss"
         * the WW conflict but it will be caught in commitTxn() where actual partitions written are known.
         * This is OK since we want 2 concurrent updates that update different sets of partitions to both commit.
         */
        String resourceName = rs.getString(1) + '/' + rs.getString(2);
        String partName = rs.getString(3);
        if (partName != null) {
          resourceName += '/' + partName;
        }
        String msg = "Aborting " + JavaUtils.txnIdToString(writeSet.get(0).txnId)
            + " since a concurrent committed transaction [" + JavaUtils.txnIdToString(rs.getLong(4))
            + "," + rs.getLong(5) + "] has already updated resource '" + resourceName + "'";
        LOG.info(msg);
        if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), true) != 1) {
          throw new IllegalStateException(msg + " FAILED!");
        }
        dbConn.commit();
        throw new TxnAbortedException(msg);
      }
      close(rs, stmt, null);
    }
    boolean first = true;
    for (String s : strings) {
      if (first)
        first = false;
      else
        query.append(", ");
      query.append('\'');
      query.append(s);
      query.append('\'');
    }
    query.append(")");

    // If any of the table requests are null, then I need to pull all the
    // table locks for this db.
    boolean sawNull = false;
    strings.clear();
    for (LockInfo info : locksBeingChecked) {
      if (info.table == null) {
        sawNull = true;
        break;
      } else {
        strings.add(info.table);
      }
    }
    if (!sawNull) {
      query.append(" and (hl_table is null or hl_table in(");
      first = true;
      for (String s : strings) {
        if (first)
          first = false;
        else
          query.append(", ");
        query.append('\'');
        query.append(s);
        query.append('\'');
      }
      query.append("))");

      // If any of the partition requests are null, then I need to pull all
      // partition locks for this table.
      sawNull = false;
      strings.clear();
      for (LockInfo info : locksBeingChecked) {
        if (info.partition == null) {
          sawNull = true;
          break;
        } else {
          strings.add(info.partition);
        }
      }
      if (!sawNull) {
        query.append(" and (hl_partition is null or hl_partition in(");
        first = true;
        for (String s : strings) {
          if (first)
            first = false;
          else
            query.append(", ");
          query.append('\'');
          query.append(s);
          query.append('\'');
        }
        query.append("))");
      }
    }
    query.append(" and hl_lock_ext_id <= ").append(extLockId);

    LOG.debug("Going to execute query <" + query.toString() + ">");
    stmt = dbConn.createStatement();
    rs = stmt.executeQuery(query.toString());
    SortedSet<LockInfo> lockSet = new TreeSet<LockInfo>(new LockInfoComparator());
    while (rs.next()) {
      lockSet.add(new LockInfo(rs));
    }
    // Turn the tree set into an array so we can move back and forth easily
    // in it.
    LockInfo[] locks = lockSet.toArray(new LockInfo[lockSet.size()]);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Locks to check(full): ");
      for (LockInfo info : locks) {
        LOG.trace("  " + info);
      }
    }

    for (LockInfo info : locksBeingChecked) {
      // Find the lock record we're checking
      int index = -1;
      for (int i = 0; i < locks.length; i++) {
        if (locks[i].equals(info)) {
          index = i;
          break;
        }
      }

      // If we didn't find the lock, then it must not be in the table
      if (index == -1) {
        LOG.debug("Going to rollback");
        dbConn.rollback();
        throw new MetaException(
            "How did we get here, we heartbeated our lock before we started! (" + info + ")");
      }

      // If we've found it and it's already been marked acquired,
      // then just look at the other locks.
      if (locks[index].state == LockState.ACQUIRED) {
        /** this is what makes this method @SafeToRetry */
        continue;
      }

      // Look at everything in front of this lock to see if it should block
      // it or not.
      boolean acquired = false;
      for (int i = index - 1; i >= 0; i--) {
        // Check if we're operating on the same database, if not, move on
        if (!locks[index].db.equals(locks[i].db)) {
          continue;
        }

        // If table is null on either of these, then they are claiming to
        // lock the whole database and we need to check it. Otherwise,
        // check if they are operating on the same table, if not, move on.
        if (locks[index].table != null && locks[i].table != null
            && !locks[index].table.equals(locks[i].table)) {
          continue;
        }

        // If partition is null on either of these, then they are claiming to
        // lock the whole table and we need to check it. Otherwise,
        // check if they are operating on the same partition, if not, move on.
        if (locks[index].partition != null && locks[i].partition != null
            && !locks[index].partition.equals(locks[i].partition)) {
          continue;
        }

        // We've found something that matches what we're trying to lock,
        // so figure out if we can lock it too.
        LockAction lockAction = jumpTable.get(locks[index].type).get(locks[i].type).get(locks[i].state);
        LOG.debug("desired Lock: " + info + " checked Lock: " + locks[i] + " action: " + lockAction);
        switch (lockAction) {
        case WAIT:
          if (!ignoreConflict(info, locks[i])) {
            /* We acquire all locks for a given query atomically; if 1 blocks, all go into (remain in)
             * Waiting state. wait() will undo any 'acquire()' which may have happened as part of
             * this (metastore db) transaction and then we record which lock blocked the lock
             * we were testing ('info'). */
            wait(dbConn, save);
            String sqlText = "update HIVE_LOCKS" + " set HL_BLOCKEDBY_EXT_ID=" + locks[i].extLockId
                + ", HL_BLOCKEDBY_INT_ID=" + locks[i].intLockId + " where HL_LOCK_EXT_ID="
                + info.extLockId + " and HL_LOCK_INT_ID=" + info.intLockId;
            LOG.debug("Executing sql: " + sqlText);
            int updCnt = stmt.executeUpdate(sqlText);
            if (updCnt != 1) {
              shouldNeverHappen(info.txnId, info.extLockId, info.intLockId);
            }
            LOG.debug("Going to commit");
            dbConn.commit();
            response.setState(LockState.WAITING);
            LOG.debug("Lock(" + info + ") waiting for Lock(" + locks[i] + ")");
            return response;
          }
          //fall through to ACQUIRE
        case ACQUIRE:
          acquire(dbConn, stmt, extLockId, info);
          acquired = true;
          break;
        case KEEP_LOOKING:
          continue;
        }
        if (acquired)
          break; // We've acquired this lock component, so get out of the loop
                 // and look at the next component.
      }

      // If we've arrived here and we have not already acquired, it means there's nothing in the
      // way of the lock, so acquire the lock.
      if (!acquired)
        acquire(dbConn, stmt, extLockId, info);
    }

    // We acquired all of the locks, so commit and return acquired.
    LOG.debug("Going to commit");
    dbConn.commit();
    response.setState(LockState.ACQUIRED);
  } finally {
    close(rs, stmt, null);
    if (handle != null) {
      handle.releaseLocks();
    }
  }
  return response;
}
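checkLock() above keeps its savepoint for the life of the call so that wait() can undo tentative acquire() updates. When a savepoint will no longer be rolled back to, JDBC also lets you name it and free it with releaseSavepoint. A brief sketch — the method is ours, and the HIVE_LOCKS update is borrowed from the example purely for flavor:

static void tentativeUpdate(Connection conn) throws SQLException {
  conn.setAutoCommit(false);
  Savepoint beforeLocks = conn.setSavepoint("beforeLocks"); // savepoints can be named
  try (Statement stmt = conn.createStatement()) {
    stmt.executeUpdate("update HIVE_LOCKS set hl_lock_state = 'a' where hl_lock_ext_id = 42");
    conn.releaseSavepoint(beforeLocks); // success path: free the savepoint early
  } catch (SQLException e) {
    conn.rollback(beforeLocks);         // failure path: undo the tentative update; the txn stays open
  }
  conn.commit();
}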
From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java
/**
 * Concurrency/isolation notes:
 * This is mutexed with {@link #openTxns(OpenTxnRequest)} and other {@link #commitTxn(CommitTxnRequest)}
 * operations using select4update on NEXT_TXN_ID. It also mutexes on the TXNS table for a specific txnid:X;
 * see more notes below.
 * In order to prevent lost updates, we need to determine if any 2 transactions overlap. Each txn
 * is viewed as an interval [M,N]. M is the txnid and N is taken from the same NEXT_TXN_ID sequence
 * so that we can compare the commit time of txn T with the start time of txn S. This sequence can be
 * thought of as a logical time counter. If S.commitTime < T.startTime, T and S do NOT overlap.
 *
 * Motivating example:
 * Suppose we have multi-statement transactions T and S, both of which are attempting x = x + 1.
 * In order to prevent the lost update problem, the non-overlapping txns must lock in the snapshot
 * that they read appropriately. In particular, if txns do not overlap, then one follows the other
 * (assuming they write the same entity), and thus the 2nd must see the changes of the 1st. We ensure
 * this by locking in the snapshot after the
 * {@link #openTxns(OpenTxnRequest)} call is made (see {@link org.apache.hadoop.hive.ql.Driver#acquireLocksAndOpenTxn()})
 * and mutexing openTxn() with commit(). In other words, once an S.commit() starts we must ensure
 * that txn T, which will be considered a later txn, locks in a snapshot that includes the result
 * of S's commit (assuming no other txns).
 * As a counter example, suppose we have S[3,3] and T[4,4] (commitId=txnid means no other transactions
 * were running in parallel). If T and S both locked in the same snapshot (for example the commit of
 * txnid:2, which is possible if commitTxn() and openTxns() are not mutexed),
 * 'x' would be updated to the same value by both, i.e. a lost update.
 */
@Override
@RetrySemantics.Idempotent("No-op if already committed")
public void commitTxn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException {
  long txnid = rqst.getTxnid();
  try {
    Connection dbConn = null;
    Statement stmt = null;
    ResultSet lockHandle = null;
    ResultSet commitIdRs = null, rs;
    try {
      lockInternal();
      dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
      stmt = dbConn.createStatement();
      /**
       * Runs at READ_COMMITTED with S4U on the TXNS row for "txnid". S4U ensures that no other
       * operation can change this txn (such as acquiring locks). While lock() and commitTxn()
       * should not normally run concurrently (for the same txn), they could due to bugs in the client,
       * which could then corrupt internal transaction manager state. Also competes with abortTxn().
       */
      lockHandle = lockTransactionRecord(stmt, txnid, TXN_OPEN);
      if (lockHandle == null) {
        //if here, txn was not found (in expected state)
        TxnStatus actualTxnStatus = findTxnState(txnid, stmt);
        if (actualTxnStatus == TxnStatus.COMMITTED) {
          /**
           * This makes the operation idempotent
           * (assume that this is most likely due to retry logic)
           */
          LOG.info("Nth commitTxn(" + JavaUtils.txnIdToString(txnid) + ") msg");
          return;
        }
        raiseTxnUnexpectedState(actualTxnStatus, txnid);
        shouldNeverHappen(txnid);
        //dbConn is rolled back in finally{}
      }
      String conflictSQLSuffix = "from TXN_COMPONENTS where tc_txnid=" + txnid
          + " and tc_operation_type IN(" + quoteChar(OpertaionType.UPDATE.sqlConst) + ","
          + quoteChar(OpertaionType.DELETE.sqlConst) + ")";
      rs = stmt.executeQuery(sqlGenerator.addLimitClause(1, "tc_operation_type " + conflictSQLSuffix));
      if (rs.next()) {
        close(rs);
        //if here, the currently committing txn performed an update/delete and we should check for a WW conflict
        /**
         * This S4U will mutex with other commitTxn() and openTxns().
         * -1 below makes txn intervals look like [3,3] [4,4] if all txns are serial.
         * Note: it's possible for several txns to have the same commit id. Suppose 3 txns start
         * at the same time and no new txns start until all 3 commit.
         * We could've incremented the sequence for commitId as well but it doesn't add anything functionally.
         */
        commitIdRs = stmt
            .executeQuery(sqlGenerator.addForUpdateClause("select ntxn_next - 1 from NEXT_TXN_ID"));
        if (!commitIdRs.next()) {
          throw new IllegalStateException("No rows found in NEXT_TXN_ID");
        }
        long commitId = commitIdRs.getLong(1);
        Savepoint undoWriteSetForCurrentTxn = dbConn.setSavepoint();
        /**
         * "select distinct" is used below because
         * 1. once we get to multi-statement txns, we only care to record that something was updated once
         * 2. if {@link #addDynamicPartitions(AddDynamicPartitions)} is retried by the caller it may create
         *    duplicate entries in TXN_COMPONENTS
         * but we want to add a PK on WRITE_SET which won't have unique rows w/o this distinct
         * even if it includes all of its columns
         */
        int numCompsWritten = stmt.executeUpdate(
            "insert into WRITE_SET (ws_database, ws_table, ws_partition, ws_txnid, ws_commit_id, ws_operation_type)"
                + " select distinct tc_database, tc_table, tc_partition, tc_txnid, " + commitId
                + ", tc_operation_type " + conflictSQLSuffix);
        /**
         * See if any overlapping txns wrote the same element, i.e. have a conflict.
         * Since the entire commit operation is mutexed wrt other start/commit ops,
         * committed.ws_commit_id <= current.ws_commit_id for all txns;
         * thus if committed.ws_commit_id < current.ws_txnid, the transactions do NOT overlap.
         * For example, [17,20] is committed, [6,80] is being committed right now - these overlap;
         * [17,20] committed and [21,21] committing now - these do not overlap;
         * [17,18] committed and [18,19] committing now - these overlap (here 18 started while 17 was still running).
         */
        rs = stmt.executeQuery(sqlGenerator.addLimitClause(1,
            "committed.ws_txnid, committed.ws_commit_id, committed.ws_database,"
                + "committed.ws_table, committed.ws_partition, cur.ws_commit_id cur_ws_commit_id, "
                + "cur.ws_operation_type cur_op, committed.ws_operation_type committed_op "
                + "from WRITE_SET committed INNER JOIN WRITE_SET cur "
                + "ON committed.ws_database=cur.ws_database and committed.ws_table=cur.ws_table "
                //For a partitioned table we always track writes at partition level (never at table level)
                //and for a non-partitioned one always at table level; thus the same table should never
                //have entries both with and without a partition key
                + "and (committed.ws_partition=cur.ws_partition or (committed.ws_partition is null and cur.ws_partition is null)) "
                + "where cur.ws_txnid <= committed.ws_commit_id" //txns overlap; could replace ws_txnid
                // with txnid, though any decent DB should infer this
                + " and cur.ws_txnid=" + txnid //make sure RHS of join only has rows we just inserted as
                // part of this commitTxn() op
                + " and committed.ws_txnid <> " + txnid //and LHS only has committed txns
                //U+U and U+D is a conflict but D+D is not, and we don't currently track I in WRITE_SET at all
                + " and (committed.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst)
                + " OR cur.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst) + ")"));
        if (rs.next()) {
          //found a conflict
          String committedTxn = "[" + JavaUtils.txnIdToString(rs.getLong(1)) + "," + rs.getLong(2) + "]";
          StringBuilder resource = new StringBuilder(rs.getString(3)).append("/").append(rs.getString(4));
          String partitionName = rs.getString(5);
          if (partitionName != null) {
            resource.append('/').append(partitionName);
          }
          String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + rs.getLong(6) + "]"
              + " due to a write conflict on " + resource + " committed by " + committedTxn + " "
              + rs.getString(7) + "/" + rs.getString(8);
          close(rs);
          //remove WRITE_SET info for the current txn since it's about to abort
          dbConn.rollback(undoWriteSetForCurrentTxn);
          LOG.info(msg);
          //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this
          if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
            throw new IllegalStateException(msg + " FAILED!");
          }
          dbConn.commit();
          close(null, stmt, dbConn);
          throw new TxnAbortedException(msg);
        } else {
          //no conflicting operations, proceed with the rest of the commit sequence
        }
      } else {
        /**
         * The current txn didn't update/delete anything (it may have inserted), so just proceed with commit.
         *
         * We only care about the commit id for write txns, so for RO (when supported) txns we don't
         * have to mutex on NEXT_TXN_ID.
         * Consider: if an RO txn is after a W txn, then RO's openTxns() will be mutexed with W's
         * commitTxn() because both do S4U on NEXT_TXN_ID and thus RO will see the result of the W txn.
         * If RO < W, then there is no reads-from relationship.
         */
      }
      // Move the record from txn_components into completed_txn_components so that the compactor
      // knows where to look to compact.
      String s = "insert into COMPLETED_TXN_COMPONENTS select tc_txnid, tc_database, tc_table, "
          + "tc_partition from TXN_COMPONENTS where tc_txnid = " + txnid;
      LOG.debug("Going to execute insert <" + s + ">");
      int modCount = 0;
      if ((modCount = stmt.executeUpdate(s)) < 1) {
        //this can be reasonable for an empty txn START/COMMIT, a read-only txn,
        //or an IUD with DP that didn't match any rows.
        LOG.info("Expected to move at least one record from txn_components to "
            + "completed_txn_components when committing txn! " + JavaUtils.txnIdToString(txnid));
      }
      s = "delete from TXN_COMPONENTS where tc_txnid = " + txnid;
      LOG.debug("Going to execute update <" + s + ">");
      modCount = stmt.executeUpdate(s);
      s = "delete from HIVE_LOCKS where hl_txnid = " + txnid;
      LOG.debug("Going to execute update <" + s + ">");
      modCount = stmt.executeUpdate(s);
      s = "delete from TXNS where txn_id = " + txnid;
      LOG.debug("Going to execute update <" + s + ">");
      modCount = stmt.executeUpdate(s);
      LOG.debug("Going to commit");
      dbConn.commit();
    } catch (SQLException e) {
      LOG.debug("Going to rollback");
      rollbackDBConn(dbConn);
      checkRetryable(dbConn, e, "commitTxn(" + rqst + ")");
      throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e));
    } finally {
      close(commitIdRs);
      close(lockHandle, stmt, dbConn);
      unlockInternal();
    }
  } catch (RetryException e) {
    commitTxn(rqst);
  }
}