List of usage examples for java.util.concurrent ExecutorService shutdownNow
List<Runnable> shutdownNow();
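
Before the project-specific examples, here is a minimal self-contained sketch of the usual pattern around shutdownNow(): attempt an orderly shutdown() first, wait briefly, then force termination. The class name, variable names, and the 5-second timeout are illustrative only and are not taken from any of the examples below.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        pool.submit(() -> System.out.println("task running"));

        pool.shutdown(); // stop accepting new tasks, let queued ones run
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            // shutdownNow() interrupts running tasks and returns the tasks
            // that were queued but never started
            List<Runnable> neverStarted = pool.shutdownNow();
            System.out.println("Tasks never started: " + neverStarted.size());
        }
    }
}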
From source file: org.springframework.amqp.rabbit.test.RepeatProcessor.java

public Statement apply(final Statement base, FrameworkMethod method, final Object target) {
    Repeat repeat = AnnotationUtils.findAnnotation(method.getMethod(), Repeat.class);
    if (repeat == null) {
        return base;
    }
    final int repeats = repeat.value();
    if (repeats <= 1) {
        return base;
    }
    initializeIfNecessary(target);
    if (concurrency <= 0) {
        return new Statement() {
            @Override
            public void evaluate() throws Throwable {
                try {
                    for (int i = 0; i < repeats; i++) {
                        try {
                            base.evaluate();
                        } catch (Throwable t) {
                            throw new IllegalStateException(
                                    "Failed on iteration: " + i + " of " + repeats + " (started at 0)", t);
                        }
                    }
                } finally {
                    finalizeIfNecessary(target);
                }
            }
        };
    }
    return new Statement() {
        @Override
        public void evaluate() throws Throwable {
            List<Future<Boolean>> results = new ArrayList<Future<Boolean>>();
            ExecutorService executor = Executors.newFixedThreadPool(concurrency);
            CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executor);
            try {
                for (int i = 0; i < repeats; i++) {
                    final int count = i;
                    results.add(completionService.submit(new Callable<Boolean>() {
                        public Boolean call() {
                            try {
                                base.evaluate();
                            } catch (Throwable t) {
                                throw new IllegalStateException("Failed on iteration: " + count, t);
                            }
                            return true;
                        }
                    }));
                }
                for (int i = 0; i < repeats; i++) {
                    Future<Boolean> future = completionService.take();
                    assertTrue("Null result from completer", future.get());
                }
            } finally {
                executor.shutdownNow();
                finalizeIfNecessary(target);
            }
        }
    };
}
From source file: org.apache.helix.messaging.handling.HelixTaskExecutor.java

private void shutdownAndAwaitTermination(ExecutorService pool) {
    LOG.info("Shutting down pool: " + pool);
    pool.shutdown(); // Disable new tasks from being submitted
    try {
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(200, TimeUnit.MILLISECONDS)) {
            List<Runnable> waitingTasks = pool.shutdownNow(); // Cancel currently executing tasks
            LOG.info("Tasks that never commenced execution: " + waitingTasks);
            // Wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(200, TimeUnit.MILLISECONDS)) {
                LOG.error("Pool did not fully terminate in 200ms. pool: " + pool);
            }
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        LOG.error("Interruped when waiting for shutdown pool: " + pool, ie);
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}
From source file: edu.duke.cabig.c3pr.webservice.integration.C3PREmbeddedTomcatTestBase.java

private void stopContainer() {
    try {
        if (container != null) {
            logger.info("Stopping Tomcat...");
            // stopping Tomcat may block, so we need to do it in another
            // thread and join.
            final ExecutorService executor = Executors.newSingleThreadExecutor();
            try {
                Future future = executor.submit(new Runnable() {
                    public void run() {
                        try {
                            container.stop();
                            container = null;
                            logger.info("Tomcat has been stopped.");
                        } catch (LifecycleException e) {
                            logger.severe(ExceptionUtils.getFullStackTrace(e));
                        }
                    }
                });
                future.get(TOMCAT_SHUTDOWN_TIMEOUT, TimeUnit.SECONDS);
            } finally {
                executor.shutdownNow();
            }
        }
    } catch (Exception e) {
        logger.severe(ExceptionUtils.getFullStackTrace(e));
    }
}
From source file: com.emc.ecs.sync.storage.CasStorageTest.java

private List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_THREADS);

    System.out.print("Creating clips");

    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }

    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    // force-terminate any clip writers still running after the wait
    service.shutdownNow();

    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }

    System.out.println();

    return clipIds;
}
From source file: com.github.kubernetes.java.client.live.KubernetesApiClientLiveTest.java

@Test
public void testCreatePod() throws Exception {
    log.info("Testing Pods ....");
    if (log.isDebugEnabled()) {
        log.debug("Creating a Pod " + pod);
    }
    Pod createPod = getClient().createPod(pod);
    assertEquals(pod.getId(), createPod.getId());
    assertNotNull(getClient().getPod(pod.getId()));
    assertEquals("Waiting", createPod.getCurrentState().getStatus());

    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<Pod> future = executor.submit(new Callable<Pod>() {
        public Pod call() throws Exception {
            Pod newPod;
            do {
                log.info("Waiting for Pod to be ready: " + pod.getId());
                Thread.sleep(1000);
                newPod = getClient().getPod(pod.getId());
                StateInfo info = newPod.getCurrentState().getInfo("master");
                if (info.getState("waiting") != null) {
                    throw new RuntimeException("Pod is waiting due to " + info.getState("waiting"));
                }
            } while (!"Running".equals(newPod.getCurrentState().getStatus()));
            return newPod;
        }
    });
    try {
        createPod = future.get(90, TimeUnit.SECONDS);
    } finally {
        executor.shutdownNow();
    }
    assertNotNull(createPod.getCurrentState().getInfo("master").getState("running"));
    assertNotNull(createPod.getCurrentState().getNetInfo().getState("running"));

    // test recreation from same id
    try {
        getClient().createPod(pod);
        fail("Should have thrown exception");
    } catch (Exception e) {
        // ignore
    }
    assertNotNull(getClient().getPod(pod.getId()));
}
From source file: org.openspaces.rest.space.SpaceTaskAPIController.java

private ModelAndView execute(String spaceName, String locators, final SpaceTaskRequest request) {
    GigaSpace space = ControllerUtils.xapCache.get(spaceName, locators);
    final int instanceCount = ControllerUtils.xapCache.getInstances(spaceName);
    ExecutorService svc = Executors.newFixedThreadPool(instanceCount);
    int instances = 0;
    log.fine("request.target=" + request.target);
    if (request.target != null && !request.target.equals("all")) {
        instances = 1;
    } else {
        instances = instanceCount;
    }
    System.out.println("instances=" + instances);
    List<Callable<Object>> tasks = new ArrayList<Callable<Object>>(instances);
    for (int i = 0; i < instances; i++) {
        Object routing = 0;
        if (request.target != null && request.target.equals("all")) {
            routing = i;
        } else {
            routing = request.target;
        }
        tasks.add(new ScriptCallable(space, request, routing));
    }
    ModelAndView mv = new ModelAndView("jsonView");
    List<Object> model = new ArrayList<Object>(instances);
    try {
        List<Future<Object>> results = svc.invokeAll(tasks);
        for (Future<Object> fut : results) {
            if (fut.get() != null)
                model.add(fut.get());
        }
        mv.addObject("results", model);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        svc.shutdownNow();
    }
    return mv;
}
From source file: io.druid.data.input.impl.prefetch.PrefetchableTextFilesFirehoseFactory.java

@Override
public Firehose connect(StringInputRowParser firehoseParser, File temporaryDirectory) throws IOException {
    if (!cacheManager.isEnabled() && maxFetchCapacityBytes == 0) {
        return super.connect(firehoseParser, temporaryDirectory);
    }
    if (objects == null) {
        objects = ImmutableList.copyOf(Preconditions.checkNotNull(initObjects(), "objects"));
    }
    Preconditions.checkState(temporaryDirectory.exists(), "temporaryDirectory[%s] does not exist",
            temporaryDirectory);
    Preconditions.checkState(temporaryDirectory.isDirectory(), "temporaryDirectory[%s] is not a directory",
            temporaryDirectory);

    LOG.info("Create a new firehose for [%d] objects", objects.size());

    // fetchExecutor is responsible for background data fetching
    final ExecutorService fetchExecutor = Execs.singleThreaded("firehose_fetch_%d");
    final Fetcher<T> fetcher = new Fetcher<>(cacheManager, objects, fetchExecutor, temporaryDirectory,
            maxFetchCapacityBytes, prefetchTriggerBytes, fetchTimeout, maxFetchRetry, this::openObjectStream);

    return new FileIteratingFirehose(new Iterator<LineIterator>() {
        @Override
        public boolean hasNext() {
            return fetcher.hasNext();
        }

        @Override
        public LineIterator next() {
            if (!hasNext()) {
                throw new NoSuchElementException();
            }
            final OpenedObject<T> openedObject = fetcher.next();
            final InputStream stream;
            try {
                stream = wrapObjectStream(openedObject.getObject(), openedObject.getObjectStream());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            return new ResourceCloseableLineIterator(new InputStreamReader(stream, StandardCharsets.UTF_8),
                    openedObject.getResourceCloser());
        }
    }, firehoseParser, () -> {
        fetchExecutor.shutdownNow();
        try {
            Preconditions.checkState(fetchExecutor.awaitTermination(fetchTimeout, TimeUnit.MILLISECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new ISE("Failed to shutdown fetch executor during close");
        }
    });
}
From source file: com.legstar.host.server.PoolingTest.java

/**
 * Test work dispatched in more than one pool.
 * @throws Exception if test fails
 */
public void testScheduleMultiplePools() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(CLIENT_THREADS);
    WorkManager wm = new WorkManagerImpl(executor);
    EngineHandler engHandler = new EngineHandler(getPoolingEngineConfig());
    engHandler.init();

    LegStarAddress address1 = new LegStarAddress("TheMainframe");
    address1.setHostUserID(HOST_USERID);
    address1.setHostPassword(HOST_PASSWORD);

    LegStarAddress address2 = new LegStarAddress("TheMainframe");
    address2.setHostUserID("IBMUSER");
    address2.setHostPassword(HOST_PASSWORD);

    Client[] clients = new Client[3];
    for (int i = 0; i < clients.length; i++) {
        LegStarAddress address = ((i % 2) == 0) ? address1 : address2;
        LegStarRequest request = new LegStarRequest("Request01", address, getLsfileaeRequestMessage());
        clients[i] = new Client(engHandler.getEngine(), "Client" + new Integer(i).toString(), request);
        wm.schedule(clients[i], new ClientListener());
    }

    /* Time is needed to process these requests */
    Thread.sleep(10000L);

    engHandler.stop();
    executor.shutdownNow();

    for (int i = 0; i < clients.length; i++) {
        assertEquals(LsfileaeCases.getHostBytesHexReply100(), HostData
                .toHexString(clients[i].getRequest().getResponseMessage().getDataParts().get(0).getContent()));
    }
}
From source file: de.unisb.cs.st.javalanche.mutation.runtime.testDriver.MutationTestDriver.java

/**
 * Runs given test in a new thread with specified timeout
 * (DEFAULT_TIMEOUT_IN_SECONDS) and stores the results in given testResult.
 *
 * @param r
 *            the test to be run
 * @return the time needed for executing the test
 */
protected long runWithTimeoutOld(MutationTestRunnable r) {
    // ArrayList<Thread> threadsPre = ThreadUtil.getThreads();
    ExecutorService service = Executors.newSingleThreadExecutor();
    Future<?> future = service.submit(r);
    StopWatch stopWatch = new StopWatch();
    stopWatch.start();
    service.shutdown();
    String exceptionMessage = null;
    Throwable capturedThrowable = null;
    try {
        logger.debug("Start test: ");
        boolean terminated = service.awaitTermination(timeout, TimeUnit.SECONDS);
        logger.debug("First timeout");
        long time1 = stopWatch.getTime();
        if (!terminated) {
            service.shutdownNow();
        }
        future.get(1, TimeUnit.SECONDS);
        logger.debug("Second timeout");
        long time2 = stopWatch.getTime();
        if (time2 - time1 > 1000) {
            logger.info("Process got some extra time: " + (time2 - time1) + " " + time2);
        }
        future.cancel(true);
    } catch (InterruptedException e) {
        capturedThrowable = e;
    } catch (ExecutionException e) {
        capturedThrowable = e;
    } catch (TimeoutException e) {
        exceptionMessage = "Mutation causes test timeout";
        capturedThrowable = e;
    } catch (Throwable t) {
        capturedThrowable = t;
    } finally {
        if (capturedThrowable != null) {
            if (exceptionMessage == null) {
                exceptionMessage = "Exception caught during test execution.";
            }
            r.setFailed(exceptionMessage, capturedThrowable);
        }
    }
    if (!future.isDone()) {
        r.setFailed("Mutated Thread is still running after timeout.", null);
        switchOfMutation(future);
    }
    stopWatch.stop();
    if (!r.hasFinished()) {
        shutDown(r, stopWatch);
    }
    logger.debug("End timed test, it took " + stopWatch.getTime() + " ms");
    return stopWatch.getTime();
}
From source file: org.apache.flume.channel.kafka.TestKafkaChannel.java

/**
 * This method starts a channel, puts events into it. The channel is then
 * stopped and restarted. Then we check to make sure if all events we put
 * come out. Optionally, 10 events are rolled back, and optionally we
 * restart the agent immediately after and we try to pull it out.
 *
 * @param rollback
 * @param retryAfterRollback
 * @throws Exception
 */
private void doTestStopAndStart(boolean rollback, boolean retryAfterRollback) throws Exception {
    final KafkaChannel channel = startChannel(true);
    ExecutorService underlying = Executors.newCachedThreadPool();
    ExecutorCompletionService<Void> submitterSvc = new ExecutorCompletionService<Void>(underlying);
    final List<List<Event>> events = createBaseList();
    putEvents(channel, events, submitterSvc);
    int completed = 0;
    wait(submitterSvc, 5);
    channel.stop();
    final KafkaChannel channel2 = startChannel(true);
    int total = 50;
    if (rollback && !retryAfterRollback) {
        total = 40;
    }
    final List<Event> eventsPulled = pullEvents(channel2, submitterSvc, total, rollback, retryAfterRollback);
    wait(submitterSvc, 5);
    channel2.stop();
    if (!retryAfterRollback && rollback) {
        final KafkaChannel channel3 = startChannel(true);
        int expectedRemaining = 50 - eventsPulled.size();
        final List<Event> eventsPulled2 = pullEvents(channel3, submitterSvc, expectedRemaining, false, false);
        wait(submitterSvc, 5);
        Assert.assertEquals(expectedRemaining, eventsPulled2.size());
        eventsPulled.addAll(eventsPulled2);
        channel3.stop();
    }
    underlying.shutdownNow();
    verify(eventsPulled);
}