List of usage examples for java.util.concurrent.ScheduledExecutorService.shutdownNow()
List<Runnable> shutdownNow();
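shutdownNow() attempts to stop actively executing tasks (typically via interruption), halts the processing of waiting tasks, and returns the list of tasks that were awaiting execution. Before the real-world examples below, here is a minimal self-contained sketch of the common shutdown-then-shutdownNow pattern; the class name, tasks, and timings are illustrative only and are not taken from the examples that follow.

import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

        // A task that is busy at shutdown time: it only stops when interrupted.
        scheduler.execute(() -> {
            try {
                Thread.sleep(60_000);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // expected when shutdownNow() interrupts us
            }
        });

        // A task scheduled far in the future; it never starts and so is returned by shutdownNow().
        scheduler.schedule(() -> System.out.println("never runs"), 1, TimeUnit.HOURS);

        scheduler.shutdown(); // stop accepting new tasks, let running tasks continue
        if (!scheduler.awaitTermination(1, TimeUnit.SECONDS)) {
            // Interrupt the busy task and collect the tasks that never started.
            List<Runnable> neverStarted = scheduler.shutdownNow();
            System.out.println("Tasks that never started: " + neverStarted.size());
        }
        scheduler.awaitTermination(1, TimeUnit.SECONDS);
    }
}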
From source file:com.graphaware.importer.context.BaseImportContext.java
/**
 * {@inheritDoc}
 */
@Override
public final void shutdown() {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    executor.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            LOG.info("I am still alive!");
        }
    }, 1, 1, TimeUnit.MINUTES);

    preShutdown();
    indexProvider().shutdown();
    inserter().shutdown();
    postShutdown();

    executor.shutdownNow();
}
From source file:com.ah.be.license.AeroLicenseTimer.java
/**
 * Stop your defined license timer.
 *
 * @param arg_Timer  -
 * @param arg_Future -
 * @return String : the error message
 */
public static String stopLicenseTimer(ScheduledExecutorService arg_Timer, ScheduledFuture<?> arg_Future) {
    try {
        if (!arg_Timer.isShutdown()) {
            if (null != arg_Future) {
                arg_Future.cancel(false);
            }

            // Disable new tasks from being submitted.
            arg_Timer.shutdown();
            try {
                // Wait a while for existing tasks to terminate.
                if (!arg_Timer.awaitTermination(5, TimeUnit.SECONDS)) {
                    // Cancel currently executing tasks.
                    arg_Timer.shutdownNow();

                    // Wait a while for tasks to respond to being canceled.
                    if (!arg_Timer.awaitTermination(5, TimeUnit.SECONDS)) {
                        return "The license timer does not terminate.";
                    }
                }
            } catch (InterruptedException ie) {
                // (Re-)Cancel if current thread also interrupted.
                //arg_Timer.shutdownNow();
            }
        }
    } catch (Exception e) {
        return "There is something wrong with timer stop.";
    }

    return null;
}
From source file:com.adaptris.core.jms.FailoverJmsProducerCase.java
public void testEventuallyConnects() throws Exception {
    final EmbeddedActiveMq broker = new EmbeddedActiveMq();
    FailoverJmsConnection connection = new FailoverJmsConnection();
    connection.addConnection(new JmsConnection(new BasicActiveMqImplementation("tcp://localhost:123456")));
    connection.addConnection(broker.getJmsConnection(new BasicActiveMqImplementation(), true));
    connection.setConnectionRetryInterval(new TimeInterval(250L, TimeUnit.MILLISECONDS));
    connection.addExceptionListener(new StandaloneConsumer());
    connection.setRegisterOwner(true);
    ScheduledExecutorService es = Executors.newSingleThreadScheduledExecutor();
    try {
        es.schedule(new Runnable() {
            @Override
            public void run() {
                try {
                    broker.start();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        }, 2L, TimeUnit.SECONDS);
        LifecycleHelper.initAndStart(connection);
    } finally {
        broker.destroy();
        LifecycleHelper.stopAndClose(connection);
        es.shutdownNow();
    }
}
From source file:com.brienwheeler.lib.concurrent.ExecutorsTest.java
@Test
public void testNewSingleThreadScheduledExecutor() {
    NamedThreadFactory threadFactory = new NamedThreadFactory(THREAD_FACTORY_NAME);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor(threadFactory);

    ScheduledFuture<?> future1 = executor.schedule(new NullRunnable(), 10, TimeUnit.MILLISECONDS);
    ScheduledFuture<Integer> future2 = executor.schedule(new IntCallable(1), 10, TimeUnit.MILLISECONDS);
    ScheduledFuture<?> future3 = executor.scheduleAtFixedRate(new NullRunnable(), 10, 10, TimeUnit.MILLISECONDS);
    ScheduledFuture<?> future4 = executor.scheduleWithFixedDelay(new NullRunnable(), 10, 10, TimeUnit.MILLISECONDS);

    List<Runnable> notRun = executor.shutdownNow();
    Assert.assertTrue(executor.isShutdown());
    Assert.assertEquals(4, notRun.size());
    Assert.assertTrue(CollectionUtils.containsInstance(notRun, future1));
    Assert.assertTrue(CollectionUtils.containsInstance(notRun, future2));
    Assert.assertTrue(CollectionUtils.containsInstance(notRun, future3));
    Assert.assertTrue(CollectionUtils.containsInstance(notRun, future4));
}
From source file:com.all.download.manager.ScheduledExecutorServiceSingleton.java
public void destroy() {
    Iterator<ScheduledExecutorService> iterator = scheduledExecutorServiceList.iterator();
    while (iterator.hasNext()) {
        ScheduledExecutorService scheduledExecutorService = iterator.next();
        try {
            // close the thread pool, should only be closed here but no harm if
            // it was closed somewhere else
            scheduledExecutorService.shutdown();
            if (!scheduledExecutorService.awaitTermination(5, TimeUnit.SECONDS)) {
                log.warn("Thread pool did not terminate correctly");
            }
        } catch (InterruptedException ie) {
            scheduledExecutorService.shutdownNow();
        }
        iterator.remove();
    }
}
From source file:org.gss_project.gss.server.ejb.TransactionHelper.java
/**
 * Execute the supplied command until it completes, ignoring transaction
 * rollbacks. Try up to TRANSACTION_RETRIES times before giving up,
 * each time waiting a random amount of time, using an exponential
 * backoff scheme. See http://en.wikipedia.org/wiki/Exponential_backoff
 * for the basic idea.
 *
 * @param command the command to execute
 * @return the value returned by the command
 * @throws Exception any other exception thrown by the command
 */
public T tryExecute(final Callable<T> command) throws Exception {
    T returnValue = null;
    // Schedule a Future task to call the command after delay milliseconds.
    int delay = 0;
    ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
    for (int i = 0; i < TRANSACTION_RETRIES; i++) {
        final int retry = i;
        ScheduledFuture<T> future = executor.schedule(new Callable<T>() {
            @Override
            public T call() throws Exception {
                return command.call();
            }
        }, delay, TimeUnit.MILLISECONDS);

        try {
            returnValue = future.get();
            break;
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (!(cause instanceof EJBTransactionRolledbackException) || retry == TRANSACTION_RETRIES - 1) {
                logger.info("Transaction retry #" + (i + 1) + " failed due to " + cause);
                executor.shutdownNow();
                if (cause instanceof Exception)
                    throw (Exception) cause;
                if (cause instanceof Error)
                    throw (Error) cause;
            }
            delay = MIN_TIMEOUT + (int) (MIN_TIMEOUT * Math.random() * (i + 1));
            String origCause = cause.getCause() == null ? cause.getClass().getName()
                    : cause.getCause().getClass().getName();
            logger.info("Transaction retry #" + (i + 1) + " scheduled in " + delay + " msec due to " + origCause);
        }
    }
    executor.shutdownNow();
    return returnValue;
}
From source file:org.apache.nifi.controller.StandardFlowService.java
@Override
public void stop(final boolean force) {
    writeLock.lock();
    try {
        if (!isRunning()) {
            return;
        }

        running.set(false);

        if (clusterCoordinator != null) {
            final Thread shutdownClusterCoordinator = new Thread(new Runnable() {
                @Override
                public void run() {
                    clusterCoordinator.shutdown();
                }
            });
            shutdownClusterCoordinator.setDaemon(true);
            shutdownClusterCoordinator.setName("Shutdown Cluster Coordinator");
            shutdownClusterCoordinator.start();
        }

        if (!controller.isTerminated()) {
            controller.shutdown(force);
        }

        if (configuredForClustering && senderListener != null) {
            try {
                senderListener.stop();
            } catch (final IOException ioe) {
                logger.warn("Protocol sender/listener did not stop gracefully due to: " + ioe);
            }
        }

        final ScheduledExecutorService executorService = executor.get();
        if (executorService != null) {
            if (force) {
                executorService.shutdownNow();
            } else {
                executorService.shutdown();
            }

            boolean graceful;
            try {
                graceful = executorService.awaitTermination(gracefulShutdownSeconds, TimeUnit.SECONDS);
            } catch (final InterruptedException e) {
                graceful = false;
            }

            if (!graceful) {
                logger.warn("Scheduling service did not gracefully shutdown within configured "
                        + gracefulShutdownSeconds + " second window");
            }
        }
    } finally {
        writeLock.unlock();
    }
}
From source file:org.apache.storm.grouping.LoadAwareShuffleGroupingTest.java
private void runSimpleBenchmark(LoadAwareCustomStreamGrouping grouper, List<Integer> availableTaskIds,
        LoadMapping loadMapping) {
    // Task Id not used, so just pick a static value
    final int inputTaskId = 100;
    WorkerTopologyContext context = mockContext(availableTaskIds);
    grouper.prepare(context, null, availableTaskIds);

    // periodically calls refreshLoad in 1 sec to simulate worker load update timer
    ScheduledExecutorService refreshService = MoreExecutors
            .getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1));
    refreshService.scheduleAtFixedRate(() -> grouper.refreshLoad(loadMapping), 1, 1, TimeUnit.SECONDS);

    long current = System.currentTimeMillis();
    int idx = 0;
    while (true) {
        grouper.chooseTasks(inputTaskId, Lists.newArrayList());

        idx++;
        if (idx % 100000 == 0) {
            // warm up 60 seconds
            if (System.currentTimeMillis() - current >= 60_000) {
                break;
            }
        }
    }

    current = System.currentTimeMillis();
    for (int i = 1; i <= 2_000_000_000; i++) {
        grouper.chooseTasks(inputTaskId, Lists.newArrayList());
    }

    LOG.info("Duration: {} ms", (System.currentTimeMillis() - current));

    refreshService.shutdownNow();
}
From source file:com.amazonaws.services.dynamodbv2.streamsadapter.functionals.CorrectnessTest.java
/**
 * This test spawns a thread to periodically write items to the source table. It shuts down and restarts the KCL
 * worker while writes are happening (to simulate the real-world situation of a worker dying and another taking its
 * place). There are two things being verified here:
 * 1. New KCL worker resumes from the checkpoint
 * 2. All stream records are processed
 *
 * @throws Exception
 */
@Test
public void workerFailureTest() throws Exception {
    LOG.info("Starting single shard KCL worker failure test.");

    KinesisClientLibConfiguration workerConfig = new KinesisClientLibConfiguration(leaseTable, streamId,
            credentials, KCL_WORKER_ID).withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    startKCLWorker(workerConfig);

    // A thread that keeps writing to the table every 2 seconds
    ScheduledExecutorService loadGeneratorService = Executors.newSingleThreadScheduledExecutor();
    loadGeneratorService.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            insertAndUpdateItems(1);
        }
    }, 0 /* initialDelay */, 2 /* period */, TimeUnit.SECONDS);

    while (recordProcessorFactory.getNumRecordsProcessed() < 10) {
        LOG.info("Sleep till first few records are processed");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    shutDownKCLWorker();

    // Calculate number of records processed by first worker and also the number of processed-but-not-checkpointed
    // records, since checkpoint happens after every batch of 10 records
    int numRecordsProcessedByFirstWorker = recordProcessorFactory.getNumRecordsProcessed();
    int numRecordsNotCheckpointed = numRecordsProcessedByFirstWorker % ReplicatingRecordProcessor.CHECKPOINT_BATCH_SIZE;

    // Start a new worker
    startKCLWorker(workerConfig);

    while (recordProcessorFactory.getNumRecordsProcessed() < 0) {
        LOG.info("Sleep till RecordProcessor is initialized");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    loadGeneratorService.shutdown();
    if (!loadGeneratorService.awaitTermination(THREAD_SLEEP_5S, TimeUnit.MILLISECONDS)) {
        loadGeneratorService.shutdownNow();
    }

    int numStreamRecords = 2 * this.numItemsInSrcTable;
    int remainingRecordsToBeProcessed = numStreamRecords - numRecordsProcessedByFirstWorker
            + numRecordsNotCheckpointed;

    /*
     * The second worker must process at least remainingRecordsToBeProcessed
     * records so that we have replicated everything to the destination
     * table. Thus, this should never technically end up as an infinite
     * loop. If it does, something else has gone wrong.
     */
    while (recordProcessorFactory.getNumRecordsProcessed() < remainingRecordsToBeProcessed) {
        LOG.info("Sleep till remaining records are processed");
        Thread.sleep(THREAD_SLEEP_2S);
    }

    shutDownKCLWorker();

    ScanResult srcTableScan = TestUtil.scanTable(dynamoDBClient, srcTable);
    ScanResult destTableScan = TestUtil.scanTable(dynamoDBClient, destTable);
    assertEquals(srcTableScan.getItems(), destTableScan.getItems());
}
From source file:org.apache.storm.grouping.LoadAwareShuffleGroupingTest.java
private void runMultithreadedBenchmark(LoadAwareCustomStreamGrouping grouper, List<Integer> availableTaskIds,
        LoadMapping loadMapping, int numThreads) throws InterruptedException, ExecutionException {
    // Task Id not used, so just pick a static value
    final int inputTaskId = 100;
    final WorkerTopologyContext context = mockContext(availableTaskIds);

    // Call prepare with our available taskIds
    grouper.prepare(context, null, availableTaskIds);

    // periodically calls refreshLoad in 1 sec to simulate worker load update timer
    ScheduledExecutorService refreshService = MoreExecutors
            .getExitingScheduledExecutorService(new ScheduledThreadPoolExecutor(1));
    refreshService.scheduleAtFixedRate(() -> grouper.refreshLoad(loadMapping), 1, 1, TimeUnit.SECONDS);

    long current = System.currentTimeMillis();
    int idx = 0;
    while (true) {
        grouper.chooseTasks(inputTaskId, Lists.newArrayList());

        idx++;
        if (idx % 100000 == 0) {
            // warm up 60 seconds
            if (System.currentTimeMillis() - current >= 60_000) {
                break;
            }
        }
    }

    final int groupingExecutionsPerThread = 2_000_000_000;

    List<Callable<Long>> threadTasks = Lists.newArrayList();
    for (int x = 0; x < numThreads; x++) {
        Callable<Long> threadTask = new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                long current = System.currentTimeMillis();
                for (int i = 1; i <= groupingExecutionsPerThread; i++) {
                    grouper.chooseTasks(inputTaskId, Lists.newArrayList());
                }
                return System.currentTimeMillis() - current;
            }
        };

        // Add to our collection.
        threadTasks.add(threadTask);
    }

    ExecutorService executor = Executors.newFixedThreadPool(threadTasks.size());
    List<Future<Long>> taskResults = executor.invokeAll(threadTasks);

    // Wait for all tasks to complete
    Long maxDurationMillis = 0L;
    for (Future taskResult : taskResults) {
        while (!taskResult.isDone()) {
            Thread.sleep(100);
        }
        Long durationMillis = (Long) taskResult.get();
        if (maxDurationMillis < durationMillis) {
            maxDurationMillis = durationMillis;
        }
    }

    LOG.info("Max duration among threads is : {} ms", maxDurationMillis);

    refreshService.shutdownNow();
}