Example usage for java.util.concurrent ExecutorService awaitTermination

Introduction

This page collects example usages of the java.util.concurrent ExecutorService awaitTermination method, drawn from open source projects.

Prototype

boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;

Document

Blocks until all tasks have completed execution after a shutdown request, or the timeout occurs, or the current thread is interrupted, whichever happens first. Returns true if this executor terminated, and false if the timeout elapsed before termination.
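
awaitTermination only waits; it never initiates a shutdown itself, so it is normally paired with shutdown() and, when the timeout elapses or the wait is interrupted, shutdownNow(). As a minimal sketch before the project examples below, here is the two-phase shutdown pattern recommended in the ExecutorService Javadoc (the pool parameter and the 60-second timeouts are placeholders):

void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // stop accepting new tasks
    try {
        // wait a while for running tasks to finish
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // cancel tasks that are still executing
            // wait a while for tasks to respond to cancellation
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        pool.shutdownNow(); // (re-)cancel if the current thread was interrupted
        Thread.currentThread().interrupt(); // preserve the interrupt status
    }
}

The boolean result is what distinguishes a completed shutdown (true) from a timed-out wait (false); several of the examples below ignore it, which silently hides tasks that are still running.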

Usage

From source file:org.apache.cayenne.datasource.ManagedPoolingDataSourceIT.java

@Test
public void testGetConnection_OnBackendShutdown() throws SQLException, InterruptedException {

    // note that this assertion can only work reliably when the pool is inactive...
    assertEquals(poolSize, managedPool.poolSize() + managedPool.canExpandSize());

    Collection<PoolTask> tasks = createTasks(4);
    ExecutorService executor = Executors.newFixedThreadPool(4);

    for (int j = 0; j < 10; j++) {
        for (PoolTask task : tasks) {
            executor.submit(task);
        }
    }

    dataSourceManager.off();
    Thread.sleep(500);

    for (int j = 0; j < 10; j++) {
        for (PoolTask task : tasks) {
            executor.submit(task);
        }
    }

    Thread.sleep(100);

    dataSourceManager.on();

    for (int j = 0; j < 10; j++) {
        for (PoolTask task : tasks) {
            executor.submit(task);
        }
    }

    executor.shutdown();
    executor.awaitTermination(2, TimeUnit.SECONDS);

    // note that this assertion can only work reliably when the pool is inactive...
    assertEquals(poolSize, managedPool.poolSize() + managedPool.canExpandSize());
}

From source file:com.amazonaws.services.cloudtrail.processinglibrary.AWSCloudTrailProcessingExecutor.java

/**
 * Helper function to gracefully stop an {@link ExecutorService}.
 *
 * @param threadPool the thread pool to stop.
 */
private void stopThreadPool(ExecutorService threadPool) {
    LibraryUtils.checkCondition(threadPool == null, "Thread pool is null when calling stop");

    if (threadPool.isShutdown()) {
        logger.debug(threadPool.toString() + " is already stopped.");

    } else {

        logger.debug(threadPool.toString() + " is about to shutdown.");
        threadPool.shutdown(); // Shutdown thread pool

        try { // Wait for shutdown
            threadPool.awaitTermination(this.config.getThreadTerminationDelaySeconds(), TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            logger.debug("Waiting for thread pool termination was interrupted.");
        }

        if (!threadPool.isTerminated()) { // force shutdown if tasks are still running after the wait
            logger.debug(threadPool.toString() + " is forced to shut down now.");
            threadPool.shutdownNow();
        }

        logger.debug(threadPool.toString() + " is stopped.");
    }
}
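
One caveat in the helper above: the catch block only logs the InterruptedException, neither restoring the thread's interrupt status nor reacting to a wait that timed out. A variant of the wait step that handles both, as a minimal sketch (threadPool and delaySeconds stand in for the field and config value used above):

try {
    if (!threadPool.awaitTermination(delaySeconds, TimeUnit.SECONDS)) {
        threadPool.shutdownNow(); // tasks were still running when the timeout elapsed
    }
} catch (InterruptedException e) {
    threadPool.shutdownNow();
    Thread.currentThread().interrupt(); // let callers observe the interrupt
}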

From source file:io.anserini.index.IndexClueWeb09b.java

public int indexWithThreads(int numThreads) throws IOException, InterruptedException {

    System.out.println(
            "Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);

    final IndexWriterConfig iwc = new IndexWriterConfig(analyzer());

    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(256.0);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);

    final ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    List<Path> warcFiles = discoverWarcFiles(docDir);
    if (doclimit > 0 && doclimit < warcFiles.size())
        warcFiles = warcFiles.subList(0, doclimit);

    for (Path f : warcFiles)
        executor.execute(new IndexerThread(writer, f));

    // add some delay to let the scheduler spawn its threads
    Thread.sleep(30000);
    executor.shutdown(); // Disable new tasks from being submitted

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(5, TimeUnit.MINUTES)) {
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }

    return numIndexed;
}

From source file:com.linkedin.pinot.integration.tests.HybridClusterIntegrationTest.java

@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start Zk, Kafka and Pinot
    startHybridCluster();

    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class
            .getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))),
            _tmpDir);

    _tmpDir.mkdirs();

    final List<File> avroFiles = getAllAvroFiles();

    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();

    // Create Pinot table
    addHybridTable("mytable", "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC,
            schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn,
            invertedIndexColumns, null);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = "
            + invertedIndexColumns);

    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance",
            InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {
        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList,
                NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains("mytable")) {

                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;

                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }

                        if (onlinePartitionCount == offlineSegmentCount) {
                            System.out.println("Got " + offlineSegmentCount
                                    + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file),
                file.length());
    }

    // Wait for all offline segments to be online
    latch.await();

    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);

    // Wait until the Pinot event count matches with the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;

    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from mytable");
    ResultSet rs = statement.getResultSet();
    rs.first();
    h2RecordCount = rs.getInt(1);
    rs.close();

    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}

From source file:com.mobilecashout.osprey.remote.RemoteClient.java

private synchronized void executeInParallel(RemoteRunnable runnable, DeploymentContext context,
        String[] roles) {
    if (null == roles) {
        roles = new String[] { "all" };
    }
    while (!semaphore.tryAcquire()) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            context.setFailed();
            throw new RuntimeException(e);
        }
    }

    final ExecutorService pool = Executors.newFixedThreadPool(sessions.size());
    final ArrayList<Future<Boolean>> futures = new ArrayList<>();

    for (RemoteTarget remoteTarget : sessions) {
        if (!remoteTarget.getTarget().hasAnyRole(roles)) {
            continue;
        }
        Future<Boolean> executor = pool.submit(() -> {
            runnable.run(remoteTarget, context);
            return true;
        });
        futures.add(executor);
    }

    pool.shutdown();

    try {
        pool.awaitTermination(Integer.MAX_VALUE, TimeUnit.MINUTES);
        for (Future future : futures) {
            future.get();
        }
        Thread.sleep(100);
    } catch (InterruptedException | ExecutionException e) {
        context.setFailed();
        logger.fatal(e.getMessage(), e);
    } finally {
        semaphore.release();
    }
}

From source file:org.apache.hadoop.hbase.util.TestIdReadWriteLock.java

@Test(timeout = 60000)
public void testMultipleClients() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(NUM_THREADS);
    try {
        ExecutorCompletionService<Boolean> ecs = new ExecutorCompletionService<Boolean>(exec);
        for (int i = 0; i < NUM_THREADS; ++i)
            ecs.submit(new IdLockTestThread("client_" + i));
        for (int i = 0; i < NUM_THREADS; ++i) {
            Future<Boolean> result = ecs.take();
            assertTrue(result.get());
        }
        // make sure the entry pool will be cleared after GC and purge call
        int entryPoolSize = idLock.purgeAndGetEntryPoolSize();
        LOG.debug("Size of entry pool after gc and purge: " + entryPoolSize);
        assertEquals(0, entryPoolSize);
    } finally {
        exec.shutdown();
        exec.awaitTermination(5000, TimeUnit.MILLISECONDS);
    }
}

From source file:ca.zadrox.dota2esportticker.service.UpdateMatchService.java

private void updateResults() {
    long currentTime = TimeUtils.getUTCTime();
    long lastUpdateTime = PrefUtils.lastResultsUpdate(this);

    // no point in consuming so much data - skip update if last checked time is recent.
    if (currentTime - lastUpdateTime < 60000 * 10) {
        LogUtils.LOGD(TAG, "Too soon, not bothering to check.");
        return;
    }

    if (!checkForConnectivity()) {
        LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_NO_CONNECTIVITY));
        return;
    }

    LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_STARTED));

    Uri resultsUri = MatchContract.SeriesEntry.buildLiveSeriesUri(TimeUtils.getUTCTime());

    Cursor updateCursor = this.getContentResolver().query(resultsUri,
            new String[] { MatchContract.SeriesEntry.COLUMN_GG_MATCH_PAGE }, null, null, null);
    // LOGD(TAG, "Not executed");

    ExecutorService executorService = Executors.newFixedThreadPool(10);
    ArrayList<Future<BundledMatchItem>> matchItemFutures = new ArrayList<Future<BundledMatchItem>>();

    int i = 0;
    while (updateCursor.moveToNext()) {
        matchItemFutures.add(executorService.submit(new MatchGetter(updateCursor.getString(0), true)));
        i++;
    }

    updateCursor.close();

    executorService.shutdown();
    try {
        executorService.awaitTermination(20L, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    LogUtils.LOGD(TAG, "Stopping Retrieval, elements submitted for fetching: " + i);

    ArrayList<ContentValues> sE = new ArrayList<ContentValues>();
    ArrayList<ContentValues> rE = new ArrayList<ContentValues>();

    for (Future<BundledMatchItem> matchItemFuture : matchItemFutures) {
        try {
            BundledMatchItem matchItem = matchItemFuture.get();
            if (matchItem != null) {
                sE.add(matchItem.mMatch);
                if (matchItem.hasResult) {
                    rE.add(matchItem.mResult);
                }
            }
        } catch (InterruptedException e) {
            Log.e(TAG, "Should never get here");
        } catch (ExecutionException e) {
            Log.e(TAG, "Oops;");
        }
    }
    if (!sE.isEmpty()) {
        ContentValues[] seriesEntries = new ContentValues[sE.size()];
        sE.toArray(seriesEntries);

        this.getContentResolver().bulkInsert(MatchContract.SeriesEntry.CONTENT_URI, seriesEntries);
    }

    if (!rE.isEmpty()) {
        ContentValues[] resultEntries = new ContentValues[rE.size()];
        resultEntries = rE.toArray(resultEntries);

        this.getContentResolver().bulkInsert(MatchContract.ResultEntry.CONTENT_URI, resultEntries);
    }

    LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(UPDATE_COMPLETE));

    PrefUtils.setLastResultsUpdateTime(this, currentTime);
}

From source file:org.wso2.das.integration.tests.esb.ESBAnalyticsStatisticsTestCase.java

/**
 * Publish sample data for tenants
 * 
 * @param tenants
 * @throws Exception
 */
private void publishSampleData(int noOfProxies, int requestsPerProxy, int noOfMediators, int NoOfFaults,
        boolean enablePayloads, boolean enableProperties, int[] tenants) throws Exception {
    ExecutorService executorService = Executors.newFixedThreadPool(noOfProxies * tenants.length);
    for (int tenantId : TestConstants.TENANT_IDS) {
        for (int i = 0; i < noOfProxies; i++) {
            DataPublisherClient dataPublisherClient = new DataPublisherClient();
            executorService.execute(new ConcurrentEventsPublisher(dataPublisherClient, tenantId,
                    requestsPerProxy, "AccuracyTestProxy_" + i, noOfMediators, NoOfFaults, enablePayloads,
                    enableProperties, SLEEP_BETWEEN_REQUESTS));
        }
    }
    executorService.shutdown();
    executorService.awaitTermination(WAIT_FOR_PUBLISHING_IN_MINUTES, TimeUnit.MINUTES);
}

From source file:com.opentransport.rdfmapper.nmbs.ScrapeTrip.java

private void requestJsons(Map trainDelays) {
    String trainName;
    Iterator iterator = trainDelays.entrySet().iterator();

    ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_CONNECTIONS_TO_IRAIL_API);
    while (iterator.hasNext()) {
        Map.Entry mapEntry = (Map.Entry) iterator.next();
        trainName = returnCorrectTrainFormat((String) mapEntry.getKey());
        url = "https://api.irail.be/vehicle/?id=BE.NMBS." + trainName + "&format=json";
        System.out.println("HTTP GET - " + url);
        countConnections++;
        pool.submit(new DownloadDelayedTrains(trainName, url));
    }
    pool.shutdown();

    try {
        pool.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        // all tasks have now finished (unless an exception was thrown above)
    } catch (InterruptedException ex) {
        Logger.getLogger(ScrapeTrip.class.getName()).log(Level.SEVERE, null, ex);
        errorWriter.writeError(ex.toString());
    }
}

From source file:com.blacklocus.jres.request.index.JresUpdateDocumentScriptTest.java

@Test(expected = ExecutionException.class)
public void testRetryOnConflictExpectError() throws InterruptedException, ExecutionException {
    final String index = "JresUpdateDocumentScriptTest.testRetryOnConflictExpectError".toLowerCase();
    final String type = "test";
    final String id = "warzone";

    final AtomicReference<String> error = new AtomicReference<String>();
    final int numThreads = 16, numIterations = 100;

    ExecutorService x = Executors.newFixedThreadPool(numThreads);
    List<Future<?>> futures = new ArrayList<Future<?>>(numThreads);
    for (int i = 0; i < numThreads; i++) {
        futures.add(x.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                for (int j = 0; j < numIterations; j++) {
                    jres.quest(new JresUpdateDocumentScript(index, type, id, "ctx._source.value += 1", null,
                            ImmutableMap.of("value", 0), null));
                }
                return null;
            }
        }));
    }
    x.shutdown();
    x.awaitTermination(1, TimeUnit.MINUTES);

    for (Future<?> future : futures) {
        // expecting a conflict exception from ElasticSearch
        future.get();
    }
}