Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService awaitTermination.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first. Returns true if this executor terminated, and false if the timeout elapsed before termination.
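
Before the project examples, here is a minimal, self-contained sketch of the shutdown sequence most of them follow: submit work, call shutdown() to stop accepting new tasks, then block in awaitTermination until the pool drains or the timeout elapses. The pool size, task count, and 30-second timeout below are illustrative choices, not values taken from any of the projects.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(4); // pool size is an arbitrary choice
        for (int i = 0; i < 8; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId + " ran on " + Thread.currentThread().getName()));
        }
        pool.shutdown(); // no new tasks accepted; already-submitted tasks keep running
        try {
            // awaitTermination returns false if the timeout elapses before the pool terminates
            if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // timed out: interrupt the still-running tasks
            }
        } catch (InterruptedException e) {
            pool.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the caller's interrupt status
        }
    }
}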

Usage

From source file:org.stem.ProtocolTest.java

@Test
@Ignore // TODO: ignored because it runs endlessly
public void testMultiSourcesWritePerformance() throws Exception {
    StorageNodeClient client = new StorageNodeClient(host, port);
    client.start();

    byte[] blob = TestUtils.generateRandomBlob(65536);
    byte[] key = DigestUtils.md5(blob);
    Set<UUID> disks = Layout.getInstance().getMountPoints().keySet();

    List<WriteBlobMessage> messages = new ArrayList<WriteBlobMessage>(disks.size());
    for (UUID disk : disks) {
        WriteBlobMessage op = new WriteBlobMessage();
        op.disk = disk;
        op.key = key;
        op.blob = blob;
        messages.add(op);
    }

    int threadsNum = messages.size();
    ExecutorService service = Executors.newFixedThreadPool(threadsNum);
    for (int j = 0; j < threadsNum; ++j) {
        ClientThread clientThread = new ClientThread(messages.get(j), j);
        service.submit(clientThread);
    }

    service.shutdown();
    service.awaitTermination(10, TimeUnit.MINUTES);
}

From source file:com.ebay.jetstream.event.processor.esper.raw.EsperTest.java

@Test
public void aggregationTest() {
    Configuration configuration = new Configuration();
    configuration.configure(
            new File("src/test/java/com/ebay/jetstream/event/processor/esper/raw/EsperTestConfig.xml"));
    EPServiceProvider epService = EPServiceProviderManager.getProvider("EsperTest", configuration);
    EsperTestAggregationStatement esperStmt = new EsperTestAggregationStatement(epService.getEPAdministrator());
    EsperTestAggregationListener listener = new EsperTestAggregationListener();
    esperStmt.addListener(listener);

    ExecutorService threadPool = Executors.newCachedThreadPool(new EsperTestThreadFactory());
    EsperTestAggregationRunnable runnables[] = new EsperTestAggregationRunnable[THREADS_NUM_AGGRTEST];
    try {
        for (int i = 0; i < THREADS_NUM_AGGRTEST; i++) {
            runnables[i] = new EsperTestAggregationRunnable(epService, i);
            threadPool.submit(runnables[i]);
        }
        threadPool.shutdown();
        threadPool.awaitTermination(200, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        fail("InterruptedException: " + e.getMessage());
    }
    assertTrue("ExecutorService failed to shut down properly", threadPool.isShutdown());
    assertEquals(THREADS_NUM_AGGRTEST * 2, listener.getCount());
    assertEquals(THREADS_NUM_AGGRTEST, m_aggregationResults.size()); // only one result per original event
    for (int i = 0; i < THREADS_NUM_AGGRTEST; i++) {
        assertEquals(11.0 + 4. * i, m_aggregationResults.get(i), 1.e-06);
    }
    assertEquals(THREADS_NUM_AGGRTEST, m_aggregationAvgResults.size()); // only one result per original event
    for (int i = 0; i < THREADS_NUM_AGGRTEST; i++) {
        assertEquals((11.0 + 4. * i) / 4., m_aggregationAvgResults.get(i), 1.e-06);
    }
}

From source file:com.linkedin.pinot.integration.tests.MetadataAndDictionaryAggregationPlanClusterIntegrationTest.java

private void loadDataIntoH2(List<File> avroFiles) throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    setUpH2Connection(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
}

From source file:com.shmsoft.dmass.ec2.EC2Agent.java

private void setInitializedState(Cluster cluster) {
    ExecutorService es = Executors.newCachedThreadPool();
    for (Server server : cluster) {
        LoginChecker checker = new LoginChecker();
        checker.setServer(server);
        server.setCheckerThread(checker);
        es.execute(checker);
    }
    es.shutdown();
    boolean finished = false;
    try {
        finished = es.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        e.printStackTrace(System.out);
    }
    // TODO: what to do if 'finished' is false
}

From source file:org.apache.hadoop.hbase.util.TestIdLock.java

@Test
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        for (int i = 0; i < NUM_THREADS; ++i)
            ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        idLock.assertMapEmpty();
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}

From source file:io.anserini.IndexerCW09B.java

public int indexWithThreads(int numThreads) throws IOException, InterruptedException {

    System.out.println(
            "Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);

    final IndexWriterConfig iwc = new IndexWriterConfig(analyzer());

    iwc.setSimilarity(new BM25Similarity());
    iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(256.0);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);

    final ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    for (Path f : discoverWarcFiles(docDir))
        executor.execute(new IndexerThread(writer, f));

    // add some delay so the scheduler has time to spawn its threads
    Thread.sleep(30000);
    executor.shutdown(); // Disable new tasks from being submitted

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(5, TimeUnit.MINUTES)) {
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
    } finally {
        writer.close();
    }

    return numIndexed;
}

From source file:org.aludratest.jenkins.aludratest.AludratestProjectStatisticsReport.java

private synchronized void cacheStatistics(String fromBuildNumber, String toBuildNumber) {
    cachedStatistics = new ProjectStatistics();

    // check if range (or at least start) is given
    int startBuildNo = -1;
    int endBuildNo = -1;
    if (fromBuildNumber != null && !"".equals(fromBuildNumber)) {
        try {
            startBuildNo = Integer.parseInt(fromBuildNumber);
            if (toBuildNumber != null && !"".equals(toBuildNumber)) {
                endBuildNo = Integer.parseInt(toBuildNumber);
            }

            if (startBuildNo < 0) {
                // relative mode: Find last N builds
                Run<?, ?> build = project.getLastBuild();
                int buildCount = 0;
                int targetBuildCount = startBuildNo * -1;
                while (build != null && buildCount < targetBuildCount) {
                    if (new File(build.getRootDir(), AludratestStatisticsPublisher.STATISTICS_FILE_NAME)
                            .isFile()) {
                        buildCount++;
                    }
                    startBuildNo = build.getNumber();
                    build = build.getPreviousBuild();
                }

                // no toBuild supported then
                endBuildNo = -1;
            }

        } catch (NumberFormatException e) {
            startBuildNo = endBuildNo = -1;
        }
    }

    // iterate over all builds having a stats file
    Run<?, ?> build = startBuildNo == -1 ? project.getFirstBuild() : project.getBuildByNumber(startBuildNo);
    if (build == null) {
        // no fallback here, no caching - empty results
        return;
    }

    // optimized, lengthy code to parallelize String -> JSON parsing
    // useful for MANY builds with HUGE amount of test cases
    List<Callable<Void>> runnables = new ArrayList<>();
    final Map<Integer, JSONObject> parsedObjects = new ConcurrentHashMap<>();

    while (build != null && (endBuildNo == -1 || build.getNumber() <= endBuildNo)) {
        final File statsFile = new File(build.getRootDir(), AludratestStatisticsPublisher.STATISTICS_FILE_NAME);
        if (statsFile.isFile()) {
            final int buildNumber = build.getNumber();
            runnables.add(new Callable<Void>() {
                @Override
                public Void call() {
                    try {
                        JSONObject o = (JSONObject) JSONSerializer
                                .toJSON(FileUtils.readFileToString(statsFile, "UTF-8"));
                        parsedObjects.put(Integer.valueOf(buildNumber), o);
                    } catch (IOException e) {
                        // TODO log
                    }
                    return null;
                }
            });
        }

        build = build.getNextBuild();
    }

    if (!runnables.isEmpty()) {
        ExecutorService svc = Executors
                .newFixedThreadPool(Math.max(2, Runtime.getRuntime().availableProcessors() - 1));
        try {
            svc.invokeAll(runnables);
            svc.shutdown();
            if (!svc.awaitTermination(5, TimeUnit.MINUTES)) {
                // took too long...
                // TODO handle somehow
            }
        } catch (InterruptedException e) {
            return;
        }
    }

    List<Integer> keys = new ArrayList<>(parsedObjects.keySet());
    Collections.sort(keys);

    for (Integer buildNumber : keys) {
        // check if there is a display name for a build; otherwise, use # + number
        Run<?, ?> b = project.getBuildByNumber(buildNumber.intValue());
        String dn = (b != null ? b.getDisplayName() : null);
        if (dn == null) {
            dn = "#" + buildNumber;
        }
        cachedStatistics.addBuildData(buildNumber.intValue(), dn, parsedObjects.get(buildNumber));
    }
}

From source file:com.sastix.cms.common.services.htmltopdf.PdfTest.java

@Test
public void performanceTest() throws InterruptedException {
    int NTHREDS = 30; // the fewer the threads, the longer the completion time; at least 15 threads performed well on my laptop
    ExecutorService executor = Executors.newFixedThreadPool(NTHREDS);
    long start = DateTime.now().getMillis();
    for (int i = 0; i < numberOfTasks; i++) {
        Runnable worker = new PdfRunnable(i,
                "<html><head><meta charset=\"utf-8\"></head><h1>Mller</h1></html>");
        executor.execute(worker);
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status instead of swallowing it
    }

    executor.shutdown();
    executor.awaitTermination(5, TimeUnit.SECONDS);
    assertEquals(cmap.size(), numberOfTasks);
    long passed = DateTime.now().getMillis() - start;
    LOG.info("Millis passed: " + passed);
    LOG.info("Seconds passed: " + (double) passed / 1000);
}

From source file:com.linkedin.pinot.integration.tests.StarTreeClusterIntegrationTest.java

/**
 * Generate the reference and star tree indexes and upload to corresponding tables.
 * @param avroFiles
 * @param tableName
 * @param starTree
 * @throws IOException
 * @throws ArchiveException
 * @throws InterruptedException
 */
private void generateAndUploadSegments(List<File> avroFiles, String tableName, boolean starTree)
        throws IOException, ArchiveException, InterruptedException {
    BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_segmentsDir);
    BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_tarredSegmentsDir);

    ExecutorService executor = Executors.newCachedThreadPool();
    BaseClusterIntegrationTest.buildSegmentsFromAvro(avroFiles, executor, 0, _segmentsDir, _tarredSegmentsDir,
            tableName, starTree, getSingleValueColumnsSchema());

    executor.shutdown();
    executor.awaitTermination(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS);

    for (String segmentName : _tarredSegmentsDir.list()) {
        LOGGER.info("Uploading segment {}", segmentName);
        File file = new File(_tarredSegmentsDir, segmentName);
        FileUploadUtils.sendSegmentFile(ControllerTestUtils.DEFAULT_CONTROLLER_HOST,
                ControllerTestUtils.DEFAULT_CONTROLLER_API_PORT, segmentName, new FileInputStream(file),
                file.length());
    }
}

From source file:com.linkedin.pinot.integration.tests.OfflineClusterIntegrationTest.java

@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start the cluster
    startCluster();

    // Unpack the Avro files
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);

    createTable();

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    buildSegmentsFromAvro(avroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count uploaded segments and unlock the latch once SEGMENT_COUNT segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all segments to be online
    latch.await();
    TOTAL_DOCS = 115545;
    long timeInTwoMinutes = System.currentTimeMillis() + 2 * 60 * 1000L;
    long numDocs;
    while ((numDocs = getCurrentServingNumDocs()) < TOTAL_DOCS) {
        System.out.println("Current number of documents: " + numDocs);
        if (System.currentTimeMillis() < timeInTwoMinutes) {
            Thread.sleep(1000);
        } else {
            Assert.fail("Segments were not completely loaded within two minutes");
        }
    }
}