List of usage examples for java.util.concurrent ExecutorService awaitTermination
boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;
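awaitTermination blocks until the executor reaches the terminated state, the timeout elapses, or the calling thread is interrupted; it returns true only in the first case and does not itself initiate a shutdown, so it is normally paired with shutdown() or shutdownNow(). Below is a minimal, self-contained sketch of that common shutdown-then-wait pattern (the class name and tasks are illustrative only, not taken from the project examples that follow):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // Submit a few illustrative tasks.
        for (int i = 0; i < 4; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId + " done"));
        }

        // Stop accepting new tasks; already-submitted tasks keep running.
        pool.shutdown();

        // Wait up to 10 seconds for the queued tasks to finish.
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            // Timed out: interrupt the remaining tasks and give them a moment to stop.
            pool.shutdownNow();
            if (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
                System.err.println("pool did not terminate");
            }
        }
    }
}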
From source file:org.mule.modules.jmsbatchmessaging.JmsBatchMessagingConnector.java
/**
 * Consume messages in batches from a JMS destination.
 * <p/>
 * {@sample.xml ../../../doc/jms-batch-messaging-connector.xml.sample
 * jms-batch-messaging:consume}
 *
 * @param destinationName The JMS destination to consume messages from
 * @param amountOfThreads The amount of threads used to consume messages in parallel batches
 * @param batchSize       The size of each batch
 * @param timeout         The timeout, in milliseconds, to wait before releasing a batch that hasn't received its full batch of messages
 * @param isTopic         Whether or not the JMS destination is a topic.
 * @throws Exception
 */
@Source
public void consume(String destinationName, int amountOfThreads, Boolean isTopic, int batchSize, long timeout,
        final SourceCallback callback) throws Exception {
    Lock lock = null;
    try {
        lock = muleContext.getLockFactory().createLock("JMS_BATCH_MESSAGING_CONSUMER_LOCK");
        lock.lock();

        logger.debug(String.format("Starting batch (size=%s) processing with %s threads", batchSize,
                amountOfThreads));

        ExecutorService executorService = Executors.newFixedThreadPool(amountOfThreads);
        for (int i = 0; i < amountOfThreads; i++) {
            executorService.execute(new DestinationMessageConsumer(muleContext, batchSize, timeout, callback,
                    connector, destinationName, isTopic, isTransactional));
        }

        executorService.shutdown();
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            logger.debug("Thread interrupted");
        }
    } finally {
        if (lock != null) {
            lock.unlock();
        }
    }
}
From source file:com.amazonaws.services.kinesis.producer.KinesisProducerTest.java
@Test
public void multipleInstances() throws Exception {
    int N = 8;
    final KinesisProducer[] kps = new KinesisProducer[N];
    ExecutorService exec = Executors.newFixedThreadPool(N);
    for (int i = 0; i < N; i++) {
        final int n = i;
        exec.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    kps[n] = getProducer(null, null);
                } catch (Exception e) {
                    log.error("Error starting KPL", e);
                }
            }
        });
    }
    exec.shutdown();
    exec.awaitTermination(30, TimeUnit.SECONDS);
    Thread.sleep(10000);
    for (int i = 0; i < N; i++) {
        assertNotNull(kps[i]);
        assertNotNull(kps[i].getMetrics());
        kps[i].destroy();
    }
}
From source file:org.apache.activemq.broker.jmx.ConcurrentMoveTest.java
public void testConcurrentMove() throws Exception {
    // Send some messages
    connection = connectionFactory.createConnection();
    connection.start();
    Session session = connection.createSession(transacted, authMode);
    destination = createDestination();
    MessageProducer producer = session.createProducer(destination);
    for (int i = 0; i < messageCount; i++) {
        Message message = session.createTextMessage("Message: " + i);
        producer.send(message);
    }

    long usageBeforMove = broker.getPersistenceAdapter().size();
    LOG.info("Store usage:" + usageBeforMove);

    // Now get the QueueViewMBean and purge
    String objectNameStr = broker.getBrokerObjectName().toString();
    objectNameStr += ",destinationType=Queue,destinationName=" + getDestinationString();
    ObjectName queueViewMBeanName = assertRegisteredObjectName(objectNameStr);
    final QueueViewMBean proxy = (QueueViewMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer,
            queueViewMBeanName, QueueViewMBean.class, true);

    final ActiveMQQueue to = new ActiveMQQueue("TO");
    ((RegionBroker) broker.getRegionBroker()).addDestination(broker.getAdminConnectionContext(), to, false);

    ExecutorService executorService = Executors.newCachedThreadPool();
    for (int i = 0; i < 50; i++) {
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    proxy.moveMatchingMessagesTo(null, to.getPhysicalName());
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        });
    }
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.MINUTES);

    long count = proxy.getQueueSize();
    assertEquals("Queue size", count, 0);
    assertEquals("Browse size", proxy.browseMessages().size(), 0);

    objectNameStr = broker.getBrokerObjectName().toString();
    objectNameStr += ",destinationType=Queue,destinationName=" + to.getQueueName();
    queueViewMBeanName = assertRegisteredObjectName(objectNameStr);
    QueueViewMBean toProxy = (QueueViewMBean) MBeanServerInvocationHandler.newProxyInstance(mbeanServer,
            queueViewMBeanName, QueueViewMBean.class, true);

    count = toProxy.getQueueSize();
    assertEquals("Queue size", count, messageCount);

    long usageAfterMove = broker.getPersistenceAdapter().size();
    LOG.info("Store usage, before: " + usageBeforMove + ", after:" + usageAfterMove);
    LOG.info("Store size increase:" + FileUtils.byteCountToDisplaySize(usageAfterMove - usageBeforMove));

    assertTrue("Usage not more than doubled", usageAfterMove < (usageBeforMove * 3));

    producer.close();
}
From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClientTest.java
@Test
public void testConcurrentCollectionUpdate() throws Exception {

    int cussThreadCount = 2;
    int cussQueueSize = 100;
    int numDocs = 100;
    int numRunnables = 5;
    int expected = numDocs * numRunnables;

    try (ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(
            jetty.getBaseUrl().toString(), cussQueueSize, cussThreadCount)) {

        concurrentClient.setPollQueueTime(0);

        // ensure it doesn't block where there's nothing to do yet
        concurrentClient.blockUntilFinished();

        // Delete all existing documents.
        concurrentClient.deleteByQuery("collection1", "*:*");

        int poolSize = 5;
        ExecutorService threadPool = ExecutorUtil.newMDCAwareFixedThreadPool(poolSize,
                new SolrjNamedThreadFactory("testCUSS"));

        for (int r = 0; r < numRunnables; r++)
            threadPool.execute(new SendDocsRunnable(String.valueOf(r), numDocs, concurrentClient, "collection1"));

        // ensure all docs are sent
        threadPool.awaitTermination(5, TimeUnit.SECONDS);
        threadPool.shutdown();

        concurrentClient.commit("collection1");

        assertEquals(expected,
                concurrentClient.query("collection1", new SolrQuery("*:*")).getResults().getNumFound());

        // wait until all requests are processed by CUSS
        concurrentClient.blockUntilFinished();
        concurrentClient.shutdownNow();
    }

    try (ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(
            jetty.getBaseUrl().toString() + "/collection1", cussQueueSize, cussThreadCount)) {
        assertEquals(expected, concurrentClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }
}
From source file:io.wcm.caravan.pipeline.impl.JsonPipelineMultipleSubscriptionsTest.java
@Test
public void subscribeConcurrentlyToTransformedPipelineOutputs() throws InterruptedException {
    // this test verifies that pipeline actions are only executed once, even if there are multiple concurrent subscribers

    firstStep = newPipelineWithResponseBody("{id:123}");
    secondStep = firstStep.applyAction(action);
    when(action.execute(any(), any())).thenReturn(firstStep.getOutput());

    // create multiple simultaneous threads that subscribe to the same pipeline output
    // and use a CountDownLatch to delay the subscription until all threads have been started
    CountDownLatch countDown = new CountDownLatch(100);
    ExecutorService executorService = Executors.newCachedThreadPool();
    while (countDown.getCount() > 0) {

        executorService.submit(() -> {
            countDown.await();
            secondStep.getOutput().subscribe(Subscribers.empty());

            // this is required for the lambda to be considered a Callable<Void> and therefore be allowed to throw exceptions
            return null;
        });

        countDown.countDown();
    }

    executorService.shutdown();
    executorService.awaitTermination(1, TimeUnit.MINUTES);

    verify(action, times(1)).execute(any(), any());
}
From source file:org.apache.usergrid.persistence.qakka.serialization.queuemessages.impl.MessageCounterSerializationTest.java
@Test
public void testConcurrentOperation() {

    // create multiple threads, each will increment and decrement counter by same number

    Injector injector = getInjector();
    MessageCounterSerialization mcs = injector.getInstance(MessageCounterSerialization.class);

    String queueName = "mtco_queue_" + RandomStringUtils.randomAlphanumeric(10);

    int poolSize = 20;
    int numThreads = 20;
    int numCounts = 3000;

    ExecutorService execService = Executors.newFixedThreadPool(poolSize);

    for (int i = 0; i < numThreads; i++) {
        execService.submit(() -> {
            for (int j = 0; j < numCounts; j++) {
                mcs.incrementCounter(queueName, DatabaseQueueMessage.Type.DEFAULT, 1);
            }
            for (int k = 0; k < numCounts; k++) {
                mcs.decrementCounter(queueName, DatabaseQueueMessage.Type.DEFAULT, 1);
            }
        });
    }

    execService.shutdown();

    try {
        while (!execService.awaitTermination(3, TimeUnit.SECONDS)) {
            System.out.println("Waiting... " + mcs.getCounterValue(queueName, DatabaseQueueMessage.Type.DEFAULT));
        }
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    // at end counter should be zero
    Assert.assertEquals(0, mcs.getCounterValue(queueName, DatabaseQueueMessage.Type.DEFAULT));
}
From source file:org.onosproject.demo.DemoInstaller.java
/**
 * Shutdown a pool cleanly if possible.
 *
 * @param pool an executorService
 */
private void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // Disable new tasks from being submitted
    try {
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // Cancel currently executing tasks
            // Wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
                log.error("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}
From source file:org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClientTest.java
@Test
public void testConcurrentUpdate() throws Exception {
    TestServlet.clear();

    String serverUrl = jetty.getBaseUrl().toString() + "/cuss/foo";

    int cussThreadCount = 2;
    int cussQueueSize = 100;

    // for tracking callbacks from CUSS
    final AtomicInteger successCounter = new AtomicInteger(0);
    final AtomicInteger errorCounter = new AtomicInteger(0);
    final StringBuilder errors = new StringBuilder();

    @SuppressWarnings("serial")
    ConcurrentUpdateSolrClient concurrentClient = new OutcomeCountingConcurrentUpdateSolrClient(serverUrl,
            cussQueueSize, cussThreadCount, successCounter, errorCounter, errors);

    concurrentClient.setPollQueueTime(0);

    // ensure it doesn't block where there's nothing to do yet
    concurrentClient.blockUntilFinished();

    int poolSize = 5;
    ExecutorService threadPool = ExecutorUtil.newMDCAwareFixedThreadPool(poolSize,
            new SolrjNamedThreadFactory("testCUSS"));

    int numDocs = 100;
    int numRunnables = 5;
    for (int r = 0; r < numRunnables; r++)
        threadPool.execute(new SendDocsRunnable(String.valueOf(r), numDocs, concurrentClient));

    // ensure all docs are sent
    threadPool.awaitTermination(5, TimeUnit.SECONDS);
    threadPool.shutdown();

    // wait until all requests are processed by CUSS
    concurrentClient.blockUntilFinished();
    concurrentClient.shutdownNow();

    assertEquals("post", TestServlet.lastMethod);

    // expect all requests to be successful
    int expectedSuccesses = TestServlet.numReqsRcvd.get();
    assertTrue(expectedSuccesses > 0); // at least one request must have been sent

    assertTrue("Expected no errors but got " + errorCounter.get() + ", due to: " + errors.toString(),
            errorCounter.get() == 0);
    assertTrue("Expected " + expectedSuccesses + " successes, but got " + successCounter.get(),
            successCounter.get() == expectedSuccesses);

    int expectedDocs = numDocs * numRunnables;
    assertTrue("Expected CUSS to send " + expectedDocs + " but got " + TestServlet.numDocsRcvd.get(),
            TestServlet.numDocsRcvd.get() == expectedDocs);
}
From source file:com.opengamma.bbg.replay.BloombergTickWriterTest.java
@Test
public void ticksWriting() throws Exception {
    ZonedDateTime startTime = ZonedDateTime.now(Clock.systemUTC());

    // run test for 5secs
    long runTime = 5000;
    ExecutorService writerExecutor = Executors.newSingleThreadExecutor();
    Future<?> writerFuture = writerExecutor.submit(_writer);

    // create ticks generators
    ExecutorService ticksGeneratorExec = Executors.newSingleThreadExecutor();
    Future<?> ticksGenFuture = ticksGeneratorExec.submit(_ticksGenerator);

    s_logger.info("Test running for {}ms to generate ticks", runTime);
    Thread.sleep(runTime);

    // terminate ticks generation after 1mins
    _ticksGenerator.terminate();
    sendTerminateMessage();

    // test should fail if ticksGenerator throws an exception
    ticksGenFuture.get();
    ticksGeneratorExec.shutdown();
    ticksGeneratorExec.awaitTermination(1, TimeUnit.SECONDS);

    // test should fail if writer throws an exception
    writerFuture.get();
    writerExecutor.shutdown();
    writerExecutor.awaitTermination(1, TimeUnit.SECONDS);

    ZonedDateTime endTime = ZonedDateTime.now(Clock.systemUTC());

    // now lets replay generated allTicks.dat
    Set<String> buids = Sets.newHashSet(_ticker2buid.values());
    UnitTestTickReceiver receiver = new UnitTestTickReceiver();
    BloombergTicksReplayer player = new BloombergTicksReplayer(Mode.AS_FAST_AS_POSSIBLE,
            _rootDir.getAbsolutePath(), receiver, startTime, endTime, buids);
    player.start();
    while (player.isRunning()) {
        Thread.sleep(1000);
    }
    assertTrue(receiver.count() > 0);
}