List of usage examples for java.util.concurrent.ExecutorService.shutdown()
void shutdown();
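shutdown() initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks are accepted; invoking it again has no additional effect. It returns immediately and does not wait for running tasks to finish, so it is typically paired with awaitTermination(). Below is a minimal sketch of that pairing; the pool size, task, and timeout are arbitrary values chosen for illustration.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        executor.submit(() -> System.out.println("task running"));
        executor.shutdown(); // stop accepting new tasks; already-queued tasks still run
        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            executor.shutdownNow(); // tasks did not finish in time; cancel them
        }
    }
}

The real-world examples below show the same pattern in test setup code, sink teardown, and graceful-shutdown helpers.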
From source file:voldemort.store.readonly.swapper.StoreSwapperTest.java
@Test
public void testAdminStoreSwapperWithoutRollback() throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    try {
        // Use the admin store swapper
        StoreSwapper swapper = new AdminStoreSwapper(cluster, executor, adminClient, 1000000, false, false);
        testFetchSwapWithoutRollback(swapper);
    } finally {
        executor.shutdown();
    }
}
From source file:com.linkedin.pinot.integration.tests.OfflineClusterIntegrationTest.java
@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start the cluster
    startCluster();

    // Unpack the Avro files
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);

    createTable();

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    buildSegmentsFromAvro(avroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch
    // once 12 segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all segments to be online
    latch.await();

    TOTAL_DOCS = 115545;
    long timeInTwoMinutes = System.currentTimeMillis() + 2 * 60 * 1000L;
    long numDocs;
    while ((numDocs = getCurrentServingNumDocs()) < TOTAL_DOCS) {
        System.out.println("Current number of documents: " + numDocs);
        if (System.currentTimeMillis() < timeInTwoMinutes) {
            Thread.sleep(1000);
        } else {
            Assert.fail("Segments were not completely loaded within two minutes");
        }
    }
}
From source file:org.apache.flume.sink.hdfs.HDFSEventSink.java
@Override
public void stop() {
    // do not constrain close() calls with a timeout
    synchronized (sfWritersLock) {
        for (Entry<String, BucketWriter> entry : sfWriters.entrySet()) {
            LOG.info("Closing {}", entry.getKey());
            try {
                entry.getValue().close();
            } catch (Exception ex) {
                LOG.warn("Exception while closing " + entry.getKey() + ". " + "Exception follows.", ex);
                if (ex instanceof InterruptedException) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }

    // shut down all our thread pools
    ExecutorService[] toShutdown = { callTimeoutPool, timedRollerPool };
    for (ExecutorService execService : toShutdown) {
        execService.shutdown();
        try {
            while (!execService.isTerminated()) {
                execService.awaitTermination(Math.max(defaultCallTimeout, callTimeout), TimeUnit.MILLISECONDS);
            }
        } catch (InterruptedException ex) {
            LOG.warn("shutdown interrupted on " + execService, ex);
        }
    }

    callTimeoutPool = null;
    timedRollerPool = null;

    synchronized (sfWritersLock) {
        sfWriters.clear();
        sfWriters = null;
    }
    sinkCounter.stop();
    super.stop();
}
From source file:com.espertech.esper.multithread.TestMTContextListenerDispatch.java
private void tryPerformanceDispatch(int numThreads, int numRepeats) throws Exception {
    MyListener listener = new MyListener();
    engine.getEPAdministrator().getStatement("select").addListener(listener);

    List<Object>[] events = new ArrayList[numThreads];
    int eventId = 0;
    for (int threadNum = 0; threadNum < numThreads; threadNum++) {
        events[threadNum] = new ArrayList<Object>();
        for (int eventNum = 0; eventNum < numRepeats; eventNum++) {
            // range: 0 to 49
            int partition = (int) (Math.random() * 50);
            eventId++;
            events[threadNum].add(new SupportBean(Integer.toString(partition), eventId));
        }
    }

    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future[] futures = new Future[numThreads];
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < numThreads; i++) {
        Callable callable = new SendEventCallable(i, engine, events[i].iterator());
        futures[i] = threadPool.submit(callable);
    }
    for (Future future : futures) {
        assertEquals(true, future.get());
    }
    long delta = System.currentTimeMillis() - startTime;

    threadPool.shutdown();
    threadPool.awaitTermination(10, TimeUnit.SECONDS);

    // print those events not received
    for (List<Object> eventList : events) {
        for (Object event : eventList) {
            if (!listener.getBeans().contains(event)) {
                log.info("Expected event was not received, event " + event);
            }
        }
    }

    assertEquals(numRepeats * numThreads, listener.getBeans().size());
    assertTrue("delta=" + delta, delta < 500);
}
From source file:edu.cmu.cs.lti.ark.fn.Semafor.java
/**
 * Reads conll sentences, parses them, and writes the json-serialized results.
 *
 * @param inputSupplier where to read conll sentences from
 * @param outputSupplier where to write the results to
 * @param numThreads the number of threads to use
 * @throws IOException
 * @throws InterruptedException
 */
public void runParser(final InputSupplier<? extends Readable> inputSupplier,
        final OutputSupplier<? extends Writer> outputSupplier, final int numThreads)
        throws IOException, InterruptedException {
    // use the producer-worker-consumer pattern to parse all sentences in multiple threads, while keeping
    // output in order.
    final BlockingQueue<Future<Optional<SemaforParseResult>>> results =
            Queues.newLinkedBlockingDeque(5 * numThreads);
    final ExecutorService workerThreadPool = newFixedThreadPool(numThreads);
    // try to shutdown gracefully. don't worry too much if it doesn't work
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                workerThreadPool.shutdown();
                workerThreadPool.awaitTermination(5, TimeUnit.SECONDS);
            } catch (InterruptedException ignored) {
            }
        }
    }));
    final PrintWriter output = new PrintWriter(outputSupplier.getOutput());
    try {
        // Start thread to fetch computed results and write to file
        final Thread consumer = new Thread(new Runnable() {
            @Override
            public void run() {
                while (!Thread.currentThread().isInterrupted()) {
                    try {
                        final Optional<SemaforParseResult> oResult = results.take().get();
                        if (!oResult.isPresent())
                            break; // got poison pill. we're done
                        output.println(oResult.get().toJson());
                        output.flush();
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }
        });
        consumer.start();
        // in main thread, put placeholders on results queue (so results stay in order), then
        // tell a worker thread to fill up the placeholder
        final SentenceCodec.SentenceIterator sentences = ConllCodec.readInput(inputSupplier.getInput());
        try {
            int i = 0;
            while (sentences.hasNext()) {
                final Sentence sentence = sentences.next();
                final int sentenceId = i;
                results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                    @Override
                    public Optional<SemaforParseResult> call() throws Exception {
                        final long start = System.currentTimeMillis();
                        try {
                            final SemaforParseResult result = parseSentence(sentence);
                            final long end = System.currentTimeMillis();
                            System.err.printf("parsed sentence %d in %d millis.%n", sentenceId, end - start);
                            return Optional.of(result);
                        } catch (Exception e) {
                            e.printStackTrace();
                            throw e;
                        }
                    }
                }));
                i++;
            }
            // put a poison pill on the queue to signal that we're done
            results.put(workerThreadPool.submit(new Callable<Optional<SemaforParseResult>>() {
                @Override
                public Optional<SemaforParseResult> call() throws Exception {
                    return Optional.absent();
                }
            }));
            workerThreadPool.shutdown();
        } finally {
            closeQuietly(sentences);
        }
        // wait for consumer to finish
        consumer.join();
    } finally {
        closeQuietly(output);
    }
    System.err.println("Done.");
}
From source file:com.linkedin.pinot.integration.tests.DefaultColumnsClusterIntegrationTest.java
protected void setUp(boolean sendSchema) throws Exception {
    // Set up directories.
    FileUtils.deleteQuietly(TMP_DIR);
    Assert.assertTrue(TMP_DIR.mkdirs());
    Assert.assertTrue(SEGMENT_DIR.mkdir());
    Assert.assertTrue(TAR_DIR.mkdir());

    // Start the cluster.
    startZk();
    startController();
    startBroker();
    startServer();

    // Create the table.
    addOfflineTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", -1, "", null, null);

    // Add the schema.
    if (sendSchema) {
        sendSchema();
    }

    // Unpack the Avro files.
    List<File> avroFiles = unpackAvroData(TMP_DIR, SEGMENT_COUNT);

    // Load data into H2.
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data.
    buildSegmentsFromAvro(avroFiles, executor, 0, SEGMENT_DIR, TAR_DIR, "mytable", false, null);

    // Initialize query generator.
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12
    // segments are online.
    CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);

    // Upload the segments.
    for (String segmentName : TAR_DIR.list()) {
        File file = new File(TAR_DIR, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all segments to be ONLINE.
    latch.await();
    waitForSegmentsOnline();
}
From source file:com.espertech.esper.multithread.dispatchmodel.TestMTDispatch.java
private void trySend(int numThreads, int numCount, int ratioDoubleAdd,
        UpdateDispatchViewModel updateDispatchView, DispatchService dispatchService) throws Exception {
    // execute
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future[] future = new Future[numThreads];
    DispatchCallable[] callables = new DispatchCallable[numThreads];
    DispatchProducer producer = new DispatchProducer(updateDispatchView);

    for (int i = 0; i < numThreads; i++) {
        callables[i] = new DispatchCallable(producer, i, numCount, ratioDoubleAdd, updateDispatchView,
                dispatchService);
        future[i] = threadPool.submit(callables[i]);
    }

    threadPool.shutdown();
    threadPool.awaitTermination(10, TimeUnit.SECONDS);

    for (int i = 0; i < numThreads; i++) {
        assertTrue((Boolean) future[i].get());
    }
}
From source file:com.ebay.jetstream.event.processor.esper.ESPTest.java
public void testProcessor() {
    EsperProcessor processor = getProcessor("ESPTestProcessor");
    ESPTestSink sink = new ESPTestSink();
    List<EventSink> sinks = new ArrayList<EventSink>();
    sinks.add(sink);
    processor.setEventSinks(sinks);
    // TODO: start not exposed - processor.start(); // it was stopped while running previous test

    ExecutorService threadPool = Executors.newCachedThreadPool(new ESPTestThreadFactory());
    Runnable[] runnables = new ESPTestRunnable[THREADS_NUM];
    try {
        for (int i = 0; i < THREADS_NUM; i++) {
            runnables[i] = new ESPTestRunnable(processor, i);
            threadPool.submit(runnables[i]);
        }
        threadPool.shutdown();
        threadPool.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        fail("InterruptedException: " + e.getMessage());
    }
    assertTrue("ExecutorService failed to shut down properly", threadPool.isShutdown());
    // processor.stop();

    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    assertEquals(THREADS_NUM, sink.getCount());
    testLogger.info("sink first, last = [" + sink.getIds().first() + "," + sink.getIds().last() + "]");
}
From source file:com.npstrandberg.simplemq.MessageQueueImp.java
private void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // Disable new tasks from being submitted
    try {
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // Cancel currently executing tasks
            // Wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}
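Note that this last helper is essentially the two-phase shutdown pattern recommended in the ExecutorService javadoc: shutdown() stops new submissions, and shutdownNow() cancels whatever is still running if the pool fails to terminate within the timeout.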