List of usage examples for java.sql Connection TRANSACTION_READ_COMMITTED
int TRANSACTION_READ_COMMITTED

A constant indicating that dirty reads are prevented; non-repeatable reads and phantom reads can occur.
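Before the per-project examples below, a minimal sketch of the typical call pattern: the constant is passed to Connection.setTransactionIsolation(). The H2 in-memory URL and credentials here are placeholders chosen for illustration; any JDBC driver on the classpath is configured the same way.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class ReadCommittedDemo {
    public static void main(String[] args) throws SQLException {
        // Placeholder URL/credentials: assumes the H2 driver is on the classpath.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "")) {
            // Request READ COMMITTED isolation: statements see only committed
            // changes from other transactions (no dirty reads).
            conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            System.out.println(conn.getTransactionIsolation() == Connection.TRANSACTION_READ_COMMITTED);
        }
    }
}

Whether a driver rejects or silently upgrades an unsupported level is driver-specific, so callers often check DatabaseMetaData.supportsTransactionIsolationLevel(int) first.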
From source file:org.executequery.gui.browser.ConnectionPanel.java
/**
 * Sets the values for the tx level on the tx combo
 * based on the tx level in the connection object.
 */
private void setTransactionIsolationLevel() {
    int index = 0;
    int isolationLevel = databaseConnection.getTransactionIsolation();
    switch (isolationLevel) {
        case Connection.TRANSACTION_NONE:
            index = 1;
            break;
        case Connection.TRANSACTION_READ_UNCOMMITTED:
            index = 2;
            break;
        case Connection.TRANSACTION_READ_COMMITTED:
            index = 3;
            break;
        case Connection.TRANSACTION_REPEATABLE_READ:
            index = 4;
            break;
        case Connection.TRANSACTION_SERIALIZABLE:
            index = 5;
            break;
    }
    txCombo.setSelectedIndex(index);
}
From source file:ca.gnewton.lusql.core.LuSqlMain.java
static void setupOptions() {
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Subquery in the form \"field|A:A:A|sql\" or \"field|A:A:A A:A:A...|sql\" or \"field|sql\" (See -i for A:A:A values). Note that you can have multiple -Qs. Also note that putting a '*' before the field indicates you want the results cached (useful only if there is a possible for subsequent cache hits. Use only if you know what you are doing.")
            .create("Q"));
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "For DocSinks (Indexes) that support multiple real indexes, either to eventual merging or as-is")
            .create("L"));
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Set static Document field and value. This is a field that has the same value for all saved documents. Format: \"field=value\" or \"A:A:A:field=value\" (See -i for A:A:A values)")
            .create("g"));
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Full name class implementing Lucene Analyzer; Default: " + LuSql.DefaultAnalyzerClassName)
            .create("a"));
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Offset # documents to ignore before indexing. Default:" + LuSqlFields.OffsetDefault)
            .create("O"));
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Full name class implementing DocSink (the index class). Default: " + LuSql.DefaultDocSinkClassName)
            .create(CLIDocSinkClassName));
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Full name class implementing DocSource (the index class). Default: " + LuSql.DefaultDocSourceClassName)
            .create("so"));
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Primary key field name fron DocSource to be used in DocSink. Only for DocSinks that need it. Lucene does not. BDB does. For JDBCDocSource, if not set, uses first field in SQL query.")
            .create("P"));
    options.addOption("A", false, "Append to existing Lucene index.");
    options.addOption(OptionBuilder.hasArg()
            .withDescription("Queue size for multithreading. Default: numThreads * 50").create("S"));
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Tries to limit activity to keep load average below this (float) value. Can reduce performance. Default: " + LuSql.loadAverageLimit)
            .create("V"));
    options.addOption("J", false, "For multiple indexes (see -L) do not merge. Default: false");
    options.addOption("X", false, "Print out command line arguments");
    options.addOption("Y", false, "Silent output");
    options.addOption("o", false, "If supported, have DocSink write to stdout");
    //////////////////////////
    options.addOption(OptionBuilder.hasArg()
            //.isRequired()
            .withDescription("JDBC connection URL: REQUIRED _OR_ Source location (Source dependent: file, url, etc)")
            .create("c"));
    //////////////////////////
    options.addOption(OptionBuilder.hasArg()
            .withDescription("Verbose output chunk size. Default:" + LuSqlFields.DefaultChunkSize).create("C"));
    //////////////////////////
    options.addOption(OptionBuilder.hasArg().withDescription(
            "Amount of documents to be processed per thread. Default:" + LuSqlFields.DefaultWorkPerThread
                    + ". Increasing tends to improve throughput; Decreasing tends to reduce memory problems and can alleviate an \"Out of memory\" exception. Should be 5-100 for medium/small documents. Should be 1 for very large documents.")
            .create("w"));
    //////////////////////////
    options.addOption(OptionBuilder.hasArg()
            .withDescription("Full name of DB driver class (should be in CLASSPATH); Default: " + LuSql.DefaultJDBCDriverClassName)
            .create("d"));
    //////////////////////////
    options.addOption(OptionBuilder.hasArgs()
            .withDescription("Full name class implementing DocumentFilter; Default: " + LuSql.DefaultDocFilterClassName
                    + " (does nothing). This is applied before each Lucene Document is added to the Index. If it returns null, nothing is added. Note that multiple filters are allowed. They are applied in the same order as they appear in the command line.")
            .create(CLIDocFiltersClassName));
    //////////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Only include these fields from DocSource. Example: -F author -F id. Is absolute (i.e. even if you have additional fields - like in your SQL query - they will be filtered out.")
            .create("F"));
    //////////////////////////
    options.addOption("I", true,
            "Global field index parameters. This sets all the fields parameters to this one set. Format: A:A:A. See -i for A:A:A values. Note that -i has precedence over -I.");
    //////////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Size of internal arrays of documents. One of these arrays is put on the queue. So the number of objects waiting to be processed is K*S (array size * queue size). For small objects have more (k=100). For large objects have fewer (k=5). Default: " + LuSqlFields.DefaultDocPacketSize)
            .create("K"));
    //////////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Full name plugin class; Get description and properties options needed by specific plugin (filter, source, or sink.")
            .create("e"));

    StringBuilder sb = new StringBuilder();
    sb.append("One set per field in SQL, and in same order as in SQL. ");
    sb.append("Used only if you want to overide the defaults (below). ");
    sb.append("See for more information Field.Index, Field.Store, Field.TermVector in");
    sb.append("org.apache.lucene.document.Field http://lucene.apache.org/java/3_0_2/api/core/org/apache/lucene/document/Field.html");
    //http://lucene.apache.org/java/2_2_0/api/org/apache/lucene/document/Field.html");
    sb.append("\nDefault: A:A:A= "
            //+ Util.getIndex(LuSql.IndexDefault, IndexParameterValues)
            //+ Util.getIndex(LuSql.StoreDefault, StoreParameterValues)
            //+ Util.getIndex(LuSql.TermVectorDefault, TermVectorParameterValues)
            + LuceneFieldParameters.rindex.get(LuSql.defaultLuceneFieldParameters.getIndex()) + ":"
            + LuceneFieldParameters.rstorex.get(LuSql.defaultLuceneFieldParameters.getStore()) + ":"
            + LuceneFieldParameters.rtermx.get(LuSql.defaultLuceneFieldParameters.getTermVector()));
    sb.append("\nField Index Parameter values:");
    sb.append("\nIndex: Default: "
            + LuceneFieldParameters.rindex.get(LuSql.defaultLuceneFieldParameters.getIndex()));
    sb.append("\n");
    Set<String> names = LuceneFieldParameters.indexx.keySet();
    for (String name : names) {
        sb.append("\n- " + name);
    }
    sb.append("\nStore: Default: "
            + LuceneFieldParameters.rstorex.get(LuSql.defaultLuceneFieldParameters.getStore()));
    sb.append("\n");
    names = LuceneFieldParameters.storex.keySet();
    for (String name : names) {
        sb.append("\n- " + name);
    }
    sb.append("\n Term vector: Default: "
            + LuceneFieldParameters.rtermx.get(LuSql.defaultLuceneFieldParameters.getTermVector()));
    sb.append("\n");
    names = LuceneFieldParameters.termx.keySet();
    for (String name : names) {
        sb.append("\n- " + name);
    }
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Field index parameters. \nFormat: \"fieldName=A:A:A\". Note that -i can have a slightly different interpretation depending on the DocSource. For DocSource implementation where the syntax of the query allows for separate definition of the query and the fields of interest (like SQL), all of the fields defined in the query are stored/indexed. For other DocSource's where only the query can be defined and the fields of interest cannot (like the Lucene syntax of the LucenDocSource), the \"-i\" syntax is the only way to set the fields to be used. " + sb)
            .create("i"));
    //////////////////////
    options.addOption(OptionBuilder.hasArg()
            //.isRequired()
            .withDescription("Sink destination (i.e. Lucene index to create/write to). Default: " + LuSql.DefaultSinkLocationName)
            .create("l"));
    //////////////////////
    options.addOption("N", true,
            "Number of thread for multithreading. Defaults: Runtime.getRuntime().availableProcessors()) *"
                    + LuSql.ThreadFactor + ". For this machine this is: "
                    + (Runtime.getRuntime().availableProcessors() * LuSql.ThreadFactor));
    //////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Properties to be passed to the DocSource driver. Can be is multiple. Example: -pso foo=bar -pso \"start=test 4\"")
            .create("pso"));
    //////////////////////
    options.addOption(OptionBuilder.hasArgs()
            .withDescription("Properties to be passed to the DocSink driver. Can be multiple. See 'pso' for examples")
            .create("psi"));
    //////////////////////
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Properties to be passed to a filter. Can be multiple. Identify filter using integer (zero is the first filter). Example: -pf 0:size=10 -pf 0:name=fred -pf 1:reduce=true")
            .create("pf"));
    //////////////////////
    options.addOption(OptionBuilder
            .withDescription("Read from source using source driver's internal compression, if it supports it")
            .create("zso"));
    options.addOption(OptionBuilder
            .withDescription("Have sink driver use internal compression (opaque), if it supports it")
            .create("zsi"));
    //////////////////////
    options.addOption("n", true, "Number of documents to add. If unset all records from query are used.");
    //////////////////////
    options.addOption("M", true,
            "Changes the meta replacement string for the -Q command line parameters. Default: " + SubQuery.getKeyMeta());
    //////////////////////
    options.addOption("m", false,
            "Turns off need get around MySql driver-caused OutOfMemory problem in large queries. Sets Statement.setFetchSize(Integer.MIN_VALUE)"
                    + "\n See http://benjchristensen.wordpress.com/2008/05/27/mysql-jdbc-memory-usage-on-large-resultset");
    //////////////////////
    options.addOption(OptionBuilder.hasArg().withDescription("Set JDBC Transaction level. Default: "
            + DefaultTransactionIsolation + ". Values:\n"
            + Connection.TRANSACTION_NONE + " TRANSACTION_NONE\n"
            + Connection.TRANSACTION_READ_UNCOMMITTED + " TRANSACTION_READ_UNCOMMITTED\n"
            + Connection.TRANSACTION_READ_COMMITTED + " TRANSACTION_READ_COMMITTED\n"
            + Connection.TRANSACTION_REPEATABLE_READ + " TRANSACTION_REPEATABLE_READ\n"
            + Connection.TRANSACTION_SERIALIZABLE + " TRANSACTION_SERIALIZABLE\n "
            + "(See http://java.sun.com/j2se/1.5.0/docs/api/constant-values.html#java.sql.Connection.TRANSACTION_NONE)")
            .create("E"));
    //////////////////////
    options.addOption("p", true, "Properties file");
    //////////////////////
    //////////////////////
    options.addOption(OptionBuilder.hasArg()
            //.isRequired()
            .withDescription("Primary SQL query (in double quotes). Only used by JDBC driver").create("q"));
    //////////////////////
    options.addOption("r", true,
            "LuceneRAMBufferSizeInMBs: IndexWriter.setRAMBufferSizeMB(). Only used by Lucene sinks. Default: "
                    + Double.toString(LuSql.DefaultRAMBufferSizeMB));
    //////////////////////
    options.addOption("s", true, "Name of stop word file to use (relative or full path). If supported by DocSource");
    //////////////////////
    options.addOption("T", false,
            "Turn off multithreading. Note that multithreading does not guarantee the ordering of documents. If you want the order of Lucene documents to match the ordering of DB records generated by the SQL query, turn-off multithreading");
    //////////////////////
    options.addOption("t", false, "Test mode. Does not open up Lucene index. Prints (-n) records from SQL query");
    //////////////////////
    options.addOption("v", false, "Verbose mode");
    //////////////////////
    options.addOption("onlyMap", false, "Only use the fields from the DocSource that are mapped using -map");
    options.addOption(OptionBuilder.hasArgs().withDescription(
            "Field map. Transforms field names in DocSource to new fieldnames: Example -map \"AU=author\", where \"AU\" is the original (source) field name and \"author\" is the new (sink) field")
            .create("map"));
}
From source file:org.apache.qpid.server.store.derby.DerbyMessageStore.java
/**
 * Convenience method to create a new Connection configured for TRANSACTION_READ_COMMITTED
 * isolation and with auto-commit transactions disabled.
 */
private Connection newConnection() throws SQLException {
    final Connection connection = DriverManager.getConnection(_connectionURL);
    try {
        connection.setAutoCommit(false);
        connection.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
    } catch (SQLException sqlEx) {
        try {
            connection.close();
        } finally {
            throw sqlEx;
        }
    }
    return connection;
}
From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java
@Override
@RetrySemantics.Idempotent
public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException {
    long txnid = rqst.getTxnid();
    try {
        Connection dbConn = null;
        Statement stmt = null;
        try {
            lockInternal();
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
                stmt = dbConn.createStatement();
                TxnStatus status = findTxnState(txnid, stmt);
                if (status == TxnStatus.ABORTED) {
                    LOG.info("abortTxn(" + JavaUtils.txnIdToString(txnid)
                            + ") requested by it is already " + TxnStatus.ABORTED);
                    return;
                }
                raiseTxnUnexpectedState(status, txnid);
            }
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "abortTxn(" + rqst + ")");
            throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e));
        } finally {
            close(null, stmt, dbConn);
            unlockInternal();
        }
    } catch (RetryException e) {
        abortTxn(rqst);
    }
}
From source file:org.apache.metamodel.jdbc.JdbcDataContextTest.java
public void testReleaseConnectionsInCompiledQuery() throws Exception {
    final int connectionPoolSize = 2;
    final int threadCount = 4;
    final int noOfCallsPerThreads = 30;

    final BasicDataSource ds = new BasicDataSource();
    ds.setDriverClassName("org.hsqldb.jdbcDriver");
    ds.setUrl("jdbc:hsqldb:res:metamodel");
    ds.setInitialSize(connectionPoolSize);
    ds.setMaxActive(connectionPoolSize);
    ds.setMaxWait(10000);
    ds.setMinEvictableIdleTimeMillis(1800000);
    ds.setMinIdle(0);
    ds.setMaxIdle(connectionPoolSize);
    ds.setNumTestsPerEvictionRun(3);
    ds.setTimeBetweenEvictionRunsMillis(-1);
    ds.setDefaultTransactionIsolation(java.sql.Connection.TRANSACTION_READ_COMMITTED);

    final JdbcDataContext dataContext = new JdbcDataContext(ds,
            new TableType[] { TableType.TABLE, TableType.VIEW }, null);

    final JdbcCompiledQuery compiledQuery = (JdbcCompiledQuery) dataContext.query().from("CUSTOMERS")
            .select("CUSTOMERNAME").where("CUSTOMERNUMBER").eq(new QueryParameter()).compile();

    assertEquals(0, compiledQuery.getActiveLeases());
    assertEquals(0, compiledQuery.getIdleLeases());

    final String compliedQueryString = compiledQuery.toSql();
    assertEquals("SELECT _CUSTOMERS_._CUSTOMERNAME_ FROM PUBLIC._CUSTOMERS_ WHERE _CUSTOMERS_._CUSTOMERNUMBER_ = ?",
            compliedQueryString.replace('\"', '_'));

    assertEquals(0, compiledQuery.getActiveLeases());
    assertEquals(0, compiledQuery.getIdleLeases());

    ExecutorService executorService = Executors.newFixedThreadPool(threadCount);
    final CountDownLatch latch = new CountDownLatch(threadCount);
    final List<Throwable> errors = new ArrayList<Throwable>();
    final Runnable runnable = new Runnable() {
        @Override
        public void run() {
            try {
                for (int i = 0; i < noOfCallsPerThreads; i++) {
                    final DataSet dataSet = dataContext.executeQuery(compiledQuery, new Object[] { 103 });
                    try {
                        assertTrue(dataSet.next());
                        Row row = dataSet.getRow();
                        assertNotNull(row);
                        assertEquals("Atelier graphique", row.getValue(0).toString());
                        assertFalse(dataSet.next());
                    } finally {
                        dataSet.close();
                    }
                }
            } catch (Throwable e) {
                errors.add(e);
            } finally {
                latch.countDown();
            }
        }
    };

    for (int i = 0; i < threadCount; i++) {
        executorService.execute(runnable);
    }

    try {
        latch.await(60000, TimeUnit.MILLISECONDS);
        if (errors.size() > 0) {
            throw new IllegalStateException(errors.get(0));
        }
        assertTrue(true);
    } finally {
        executorService.shutdownNow();
    }

    assertEquals(0, compiledQuery.getActiveLeases());
    compiledQuery.close();
    assertEquals(0, compiledQuery.getActiveLeases());
    assertEquals(0, compiledQuery.getIdleLeases());
}
From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java
@Override
@RetrySemantics.Idempotent
public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException {
    List<Long> txnids = rqst.getTxn_ids();
    try {
        Connection dbConn = null;
        try {
            dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
            int numAborted = abortTxns(dbConn, txnids, false);
            if (numAborted != txnids.size()) {
                LOG.warn("Abort Transactions command only aborted " + numAborted + " out of " + txnids.size()
                        + " transactions. It's possible that the other " + (txnids.size() - numAborted)
                        + " transactions have been aborted or committed, or the transaction ids are invalid.");
            }
            LOG.debug("Going to commit");
            dbConn.commit();
        } catch (SQLException e) {
            LOG.debug("Going to rollback");
            rollbackDBConn(dbConn);
            checkRetryable(dbConn, e, "abortTxns(" + rqst + ")");
            throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e));
        } finally {
            closeDbConn(dbConn);
        }
    } catch (RetryException e) {
        abortTxns(rqst);
    }
}
From source file:org.apache.hadoop.hive.metastore.MyXid.java
public static DBRouterInfo getDBRouter(String db) throws MetaException {
    Connection con = null;
    Statement ps = null;
    DBRouterInfo route = null;
    db = db.toLowerCase();
    try {
        con = getGlobalConnection();
    } catch (MetaStoreConnectException e1) {
        LOG.error("get db router error, user=" + user + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    } catch (SQLException e1) {
        LOG.error("get db router error, user=" + user + ", msg=" + e1.getMessage());
        throw new MetaException(e1.getMessage());
    }
    try {
        con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
        ps = con.createStatement();
        String sql = "select db_name, seg_addr, secondary_seg_addr, is_db_split, "
                + "describe from router where db_name='" + db + "'";
        ResultSet rs = ps.executeQuery(sql);
        while (rs.next()) {
            route = new DBRouterInfo();
            route.setDBName(rs.getString(1));
            route.setSegmentDBUrl(rs.getString(2));
            route.setSecondarySegmentDBUrl(rs.getString(3));
            route.setHasTableRouter(rs.getBoolean(4));
            route.setDetail(rs.getString(5));
            LOG.debug("db name is " + route.getDBName() + "\n"
                    + " segment addr is " + route.getSegmentDBUrl() + "\n"
                    + " second segment addr is " + route.getSecondarySlaveDBUrl() + "\n"
                    + " is has table route is " + route.getHasTableRouter() + "\n"
                    + " detail is " + route.getDetail());
        }
        rs.close();
    } catch (SQLException sqlex) {
        LOG.error("get user error, user=" + user + ", msg=" + sqlex.getMessage());
        sqlex.printStackTrace();
        throw new MetaException(sqlex.getMessage());
    } finally {
        closeStatement(ps);
        closeConnection(con);
    }
    return route;
}
From source file:de.hybris.platform.test.TransactionTest.java
@Test
public void testLocking() throws Exception {
    if (Config.isHSQLDBUsed()) {
        LOG.warn("HDSQLDB doesnt seem to support SELECT FOR UPDATE properly so we don't test it any more");
        return;
    }
    final ProductManager productManager = ProductManager.getInstance();
    final Currency curr = C2LManager.getInstance().createCurrency("TestCurr");

    /** Verify that we can begin a transaction, lock an entity, then commit without an exception occurring. */
    {
        final Transaction transaction = Transaction.current();
        try {
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            transaction.begin();
            final Product productForTest1 = productManager.createProduct("transactionLockingTest1");
            transaction.commit();
            transaction.begin();
            transaction.lock(productForTest1);
            transaction.commit();
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }
    {
        /** Verify that an IllegalStateException is thrown if we attempt to lock outside of a transaction. */
        final Transaction transaction = Transaction.current();
        try {
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            final Product productForTest2 = productManager.createProduct("transactionLockingTest2");
            transaction.lock(productForTest2);
            fail("Expected IllegalStateException to occur when attempting to lock an item outside of a transaction.");
        }
        // An IllegalStateException is expected for this test to pass.
        catch (final IllegalStateException e) {
            //
        }
    }
    /**
     * Verify that if we attempt to acquire a lock on the same entity multiple times from the same transaction, that
     * no errors occur.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            final Product productForTest3 = productManager.createProduct("transactionLockingTest3");
            transaction.begin();
            for (int i = 0; i < 10; i++) {
                transaction.lock(productForTest3);
            }
            transaction.commit();
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }
    /**
     * Verify that if we begin a transaction, lock an entity, then commit multiple times that a lock can be acquired
     * each time.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest4 = productManager.createProduct("transactionLockingTest4");
            for (int i = 0; i < 10; i++) {
                assertNotNull("Transaction object is null", transaction);
                assertFalse("A previous transaction is already running.", transaction.isRunning());
                transaction.begin();
                transaction.lock(productForTest4);
                transaction.commit();
            }
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }
    /**
     * Verify that if we begin a transaction, lock an entity, then rollback multiple times that a lock can be acquired
     * each time.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest5 = productManager.createProduct("transactionLockingTest5");
            for (int i = 0; i < 10; i++) {
                assertNotNull("Transaction object is null", transaction);
                assertFalse("A previous transaction is already running.", transaction.isRunning());
                transaction.begin();
                transaction.lock(productForTest5);
                transaction.rollback();
            }
        } catch (final Exception e) {
            transaction.rollback();
            throw e;
        }
    }
    /**
     * Verify that we can not lock after a transaction has been committed.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest6 = productManager.createProduct("transactionLockingTest6");
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            transaction.begin();
            transaction.commit();
            transaction.lock(productForTest6);
            fail("A lock was acquired after the transaction has been committed.");
        }
        // An IllegalStateException is expected for the test to pass
        catch (final IllegalStateException e) {
            //
        }
    }
    /**
     * Verify that we can not lock after a transaction has been rolled back.
     */
    {
        final Transaction transaction = Transaction.current();
        try {
            final Product productForTest7 = productManager.createProduct("transactionLockingTest7");
            assertNotNull("Transaction object is null", transaction);
            assertFalse("A previous transaction is already running.", transaction.isRunning());
            transaction.begin();
            transaction.rollback();
            transaction.lock(productForTest7);
            fail("A lock was acquired after the transaction has been rolled back.");
        }
        // An IllegalStateException is expected for the test to pass
        catch (final IllegalStateException e) {
            //
        }
    }
    /**
     * Verify multiple threads attempting to lock the same object and the behavior that occurs.
     */
    try {
        final Order lockedOrder = OrderManager.getInstance().createOrder(//
                "lockedOrder", //
                JaloSession.getCurrentSession().getUser(), //
                curr, //
                Calendar.getInstance().getTime(), //
                true);
        lockedOrder.setTotal(0.0d);
        final ComposedType composedType = lockedOrder.getComposedType();
        final String checkQuery = "SELECT "
                + composedType.getAttributeDescriptorIncludingPrivate(Order.TOTAL).getDatabaseColumn()
                + " FROM " + composedType.getTable() + " WHERE PK = ?";
        final int THREADS = 16;
        // Create an executor service that uses 16 threads to test
        // the transaction locking
        final ExecutorService executor = Executors.newFixedThreadPool(//
                THREADS, //
                new ThreadFactory() {
                    final Tenant threadFactoryTenant = Registry.getCurrentTenant();

                    @Override
                    public Thread newThread(final Runnable runnable) {
                        return new Thread() {
                            protected void prepareThread() {
                                Registry.setCurrentTenant(threadFactoryTenant);
                            }

                            protected void unprepareThread() {
                                JaloSession.deactivate();
                                Registry.unsetCurrentTenant();
                            }

                            @Override
                            public void run() {
                                try {
                                    prepareThread();
                                    runnable.run();
                                } finally {
                                    unprepareThread();
                                }
                            }
                        };
                    }
                });
        // Create 8 callables that will concurrently
        // attempt to lock the same object.
        final AtomicInteger stackCounter = new AtomicInteger();
        final List<Callable<Object>> callables = new ArrayList<Callable<Object>>();
        for (int j = 0; j < THREADS; j++) {
            callables.add(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    final PK pk = lockedOrder.getPK();
                    if (pk == null) {
                        throw new IllegalStateException();
                    }
                    for (int k = 0; k < 100; k++) {
                        final Transaction transaction = Transaction.current();
                        assertNotNull("Transaction object is null", transaction);
                        PreparedStatement statement = null;
                        ResultSet resultSet = null;
                        try {
                            transaction.begin();
                            transaction.setTransactionIsolationLevel(Connection.TRANSACTION_READ_COMMITTED);
                            transaction.lock(lockedOrder);
                            final int stack = stackCounter.incrementAndGet();
                            if (stack > 1) {
                                stackCounter.decrementAndGet();
                                throw new IllegalStateException("Got " + stack + " threads in protected area!");
                            }
                            statement = transaction.getTXBoundConnection().prepareStatement(checkQuery);
                            statement.setLong(1, lockedOrder.getPK().getLongValue());
                            resultSet = statement.executeQuery();
                            if (!resultSet.next()) {
                                throw new IllegalStateException("Expected result set");
                            }
                            final double dbValue = resultSet.getDouble(1);
                            final double jaloValue = lockedOrder.getTotal();
                            if (Math.abs(dbValue - jaloValue) >= 1d) {
                                throw new IllegalStateException(
                                        "Jalo value differs from db value : " + jaloValue + "<>" + dbValue);
                            }
                            lockedOrder.setTotal(jaloValue + 1.0d);
                            stackCounter.decrementAndGet();
                            transaction.commit();
                        } catch (final Exception e) {
                            e.printStackTrace();
                            transaction.rollback();
                            throw e;
                        } finally {
                            Utilities.tryToCloseJDBC(null, statement, resultSet, true);
                        }
                    }
                    return null;
                }
            });
        }
        // Get the value of each future to determine if an exception was thrown.
        for (final Future<Object> future : executor.invokeAll(callables)) {
            future.get();
        }
        final double expected = THREADS * 100;
        assertEquals(//
                "Total value of order after all transaction differs", //
                expected, //
                ((Order) JaloSession.getCurrentSession().getItem(lockedOrder.getPK())).getTotal(), 0.000001);
    } catch (final IllegalStateException e) {
        e.printStackTrace();
        throw e;
    }
    /**
     * Verify changes to a value on a lock
     */
    // TODO:
    /**
     * Tests related to caching
     */
    // TODO:
}
From source file:org.wso2.carbon.registry.core.jdbc.dao.JDBCResourceDAO.java
public void fillChildren(CollectionImpl collection, DataAccessManager dataAccessManager)
        throws RegistryException {
    if (Transaction.isStarted()) {
        fillChildren(collection, 0, -1, JDBCDatabaseTransaction.getConnection());
    } else {
        Connection conn = null;
        boolean transactionSucceeded = false;
        try {
            if (!(dataAccessManager instanceof JDBCDataAccessManager)) {
                String msg = "Failed to fill children. Invalid data access manager.";
                log.error(msg);
                throw new RegistryException(msg);
            }
            conn = ((JDBCDataAccessManager) dataAccessManager).getDataSource().getConnection();
            // If a managed connection already exists, use that instead of a new
            // connection.
            JDBCDatabaseTransaction.ManagedRegistryConnection temp = JDBCDatabaseTransaction
                    .getManagedRegistryConnection(conn);
            if (temp != null) {
                conn.close();
                conn = temp;
            }
            if (conn.getTransactionIsolation() != Connection.TRANSACTION_READ_COMMITTED) {
                conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            }
            conn.setAutoCommit(false);
            fillChildren(collection, 0, -1, conn);
            transactionSucceeded = true;
        } catch (SQLException e) {
            String msg = "Failed to get child paths of " + collection.getPath() + ". " + e.getMessage();
            log.error(msg, e);
            throw new RegistryException(msg, e);
        } finally {
            if (transactionSucceeded) {
                try {
                    conn.commit();
                } catch (SQLException e) {
                    log.error("Failed to commit the database connection used in "
                            + "getting child paths of the collection " + collection.getPath());
                }
            } else if (conn != null) {
                try {
                    conn.rollback();
                } catch (SQLException e) {
                    log.error("Failed to rollback the database connection used in "
                            + "getting child paths of the collection " + collection.getPath());
                }
            }
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException e) {
                    log.error("Failed to close the database connection opened in "
                            + "getting the child paths of " + collection.getPath(), e);
                }
            }
        }
    }
}
From source file:org.wso2.carbon.repository.core.jdbc.dao.JDBCResourceDAO.java
public void fillChildren(CollectionImpl collection, DataAccessManager dataAccessManager)
        throws RepositoryException {
    if (Transaction.isStarted()) {
        fillChildren(collection, 0, -1, JDBCDatabaseTransaction.getConnection());
    } else {
        Connection conn = null;
        boolean transactionSucceeded = false;
        try {
            if (!(dataAccessManager instanceof JDBCDataAccessManager)) {
                String msg = "Failed to fill children. Invalid data access manager.";
                log.error(msg);
                throw new RepositoryException(msg);
            }
            conn = ((JDBCDataAccessManager) dataAccessManager).getDataSource().getConnection();
            // If a managed connection already exists, use that instead of a new
            // connection.
            JDBCDatabaseTransaction.ManagedRegistryConnection temp = JDBCDatabaseTransaction
                    .getManagedRegistryConnection(conn);
            if (temp != null) {
                conn.close();
                conn = temp;
            }
            if (conn.getTransactionIsolation() != Connection.TRANSACTION_READ_COMMITTED) {
                conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            }
            conn.setAutoCommit(false);
            fillChildren(collection, 0, -1, conn);
            transactionSucceeded = true;
        } catch (SQLException e) {
            String msg = "Failed to get child paths of " + collection.getPath() + ". " + e.getMessage();
            log.error(msg, e);
            throw new RepositoryException(msg, e);
        } finally {
            if (transactionSucceeded) {
                try {
                    conn.commit();
                } catch (SQLException e) {
                    log.error("Failed to commit the database connection used in "
                            + "getting child paths of the collection " + collection.getPath());
                }
            } else if (conn != null) {
                try {
                    conn.rollback();
                } catch (SQLException e) {
                    log.error("Failed to rollback the database connection used in "
                            + "getting child paths of the collection " + collection.getPath());
                }
            }
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException e) {
                    log.error("Failed to close the database connection opened in "
                            + "getting the child paths of " + collection.getPath(), e);
                }
            }
        }
    }
}