Example usage for java.util.concurrent ExecutorService shutdown

Introduction

On this page you can find example usage of java.util.concurrent.ExecutorService.shutdown().

Prototype

void shutdown();

Document

Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted.
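
The call is commonly paired with awaitTermination, because shutdown() only stops new submissions and returns immediately without waiting for running tasks. Below is a minimal sketch of that shutdown-then-await pattern; the class name, pool size, and timeout are illustrative and not taken from the examples on this page.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        executor.submit(() -> System.out.println("task running"));

        executor.shutdown(); // stop accepting new tasks; previously submitted tasks keep running
        try {
            // wait for in-flight tasks to finish; the 30 second timeout is arbitrary
            if (!executor.awaitTermination(30, TimeUnit.SECONDS)) {
                executor.shutdownNow(); // cancel tasks that are still running
            }
        } catch (InterruptedException e) {
            executor.shutdownNow();
            Thread.currentThread().interrupt(); // preserve the caller's interrupt status
        }
    }
}

Most of the examples below follow some variant of this pattern: submit work, call shutdown(), then block on the returned Futures or on awaitTermination.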

Usage

From source file:com.brienwheeler.lib.concurrent.ExecutorsTest.java

@Test
public void testNewSingleThreadExecutorInvokeAny()
        throws InterruptedException, ExecutionException, TimeoutException {
    NamedThreadFactory threadFactory = new NamedThreadFactory(THREAD_FACTORY_NAME);
    ExecutorService executor = Executors.newSingleThreadExecutor(threadFactory);

    IntCallable one = new IntCallable(1);
    IntCallable two = new IntCallable(2);
    ArrayList<Callable<Integer>> tasks = new ArrayList<Callable<Integer>>();
    tasks.add(one);
    tasks.add(two);

    Integer result = executor.invokeAny(tasks);
    Assert.assertTrue(result == 1 || result == 2);

    result = executor.invokeAny(tasks, 10, TimeUnit.MILLISECONDS);
    Assert.assertTrue(result == 1 || result == 2);

    executor.shutdown();
}

From source file:com.stb.async.ParallelExecutionProcess.java

/**
 *
 * @param processList the list of processes to decode in parallel
 */
public void initiateDecode(List processList) {

    Date startTime = new java.util.Date();
    System.out.println("Start Work" + startTime);
    ExecutorService es = Executors.newFixedThreadPool(10);
    Collections.sort(processList, new ProcessCompare());

    List<Future> futures = new ArrayList<>();

    for (Iterator it = processList.iterator(); it.hasNext();) {
        Process e = (Process) it.next();
        // capture the index in an effectively-final local so each submitted task decodes
        // its own process (reading a shared field inside the lambda would race with the loop)
        final int workerId = processList.indexOf(e);
        System.out.println("* Start Decode process " + workerId);
        futures.add(es.submit(() -> {
            new DecodedSTBProcesses((Process) processList.get(workerId)).doWork();
            return null;
        }));
    }

    es.shutdown();

    System.out.println(
            "... The processes are executing on the available CPU cores, wait while the work is being done ...");
    int ctr = 0;
    for (Future future : futures) {
        try {
            future.get(); // blocking call, explicitly waiting for the response from a specific task, not necessarily the first task that is completed
            System.out.println("** Response of process " + ++ctr + " is in.");
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status
        } catch (ExecutionException e) {
            System.err.println("Decode task failed: " + e.getCause());
        }
    }

    Date endTime = new java.util.Date();
    System.out.println("End work at " + endTime);
    System.out.println("Total decoding took " + new Double(0.001 * (endTime.getTime() - startTime.getTime()))
            + " seconds");
    System.exit(0);
}

From source file:com.opengamma.bbg.replay.BloombergTickWriterTest.java

@Test
public void ticksWriting() throws Exception {
    ZonedDateTime startTime = ZonedDateTime.now(Clock.systemUTC());

    //run test for 5secs
    long runTime = 5000;
    ExecutorService writerExecutor = Executors.newSingleThreadExecutor();
    Future<?> writerFuture = writerExecutor.submit(_writer);

    //create ticks generators
    ExecutorService ticksGeneratorExec = Executors.newSingleThreadExecutor();
    Future<?> ticksGenFuture = ticksGeneratorExec.submit(_ticksGenerator);

    s_logger.info("Test running for {}ms to generate ticks", runTime);
    Thread.sleep(runTime);

    //terminate ticks generation once the run time has elapsed
    _ticksGenerator.terminate();
    sendTerminateMessage();

    //test should fail if ticksGenerator throws an exception
    ticksGenFuture.get();
    ticksGeneratorExec.shutdown();
    ticksGeneratorExec.awaitTermination(1, TimeUnit.SECONDS);

    //test should fail if writer throws an exception
    writerFuture.get();
    writerExecutor.shutdown();
    writerExecutor.awaitTermination(1, TimeUnit.SECONDS);

    ZonedDateTime endTime = ZonedDateTime.now(Clock.systemUTC());

    //now let's replay the generated allTicks.dat
    Set<String> buids = Sets.newHashSet(_ticker2buid.values());
    UnitTestTickReceiver receiver = new UnitTestTickReceiver();
    BloombergTicksReplayer player = new BloombergTicksReplayer(Mode.AS_FAST_AS_POSSIBLE,
            _rootDir.getAbsolutePath(), receiver, startTime, endTime, buids);
    player.start();
    while (player.isRunning()) {
        Thread.sleep(1000);
    }
    assertTrue(receiver.count() > 0);
}

From source file:io.ecarf.core.cloud.task.processor.dictionary.AssembleDictionaryTask.java

@Override
public void run() throws IOException {

    log.info("Assembling dictionary, memory usage: " + Utils.getMemoryUsageInGB() + "GB");

    Stopwatch stopwatch = Stopwatch.createStarted();

    List<StorageObject> objects = this.cloudService.listCloudStorageObjects(bucket);

    //Set<String> files = new HashSet<>();

    List<Item> items = new ArrayList<>();

    for (StorageObject object : objects) {

        String filename = object.getName();

        if (filename.endsWith(FilenameUtils.KRYO_SERIALIZED_EXT)) {
            //files.add(filename);
            items.add(new Item(filename, object.getSize().longValue()));
        }
    }

    log.info("Found " + items.size() + ", serialized files");

    int processors = Runtime.getRuntime().availableProcessors();

    BinPackingPartition function = new BinPackingPartition(items);
    function.setMaxBinItems((long) processors);
    List<Partition> partitions = function.partition();

    TermDictionary dictionary = TermDictionary.populateRDFOWLData(new TermDictionaryConcurrent());

    List<Callable<Void>> tasks = getSubTasks(partitions, dictionary);

    try {

        // check if we only have one file to process
        if (tasks.size() == 1) {

            tasks.get(0).call();

        } else if (processors == 1) {
            // only one processor available, so process the tasks synchronously

            for (Callable<Void> task : tasks) {
                task.call();
            }

        } else {

            // multiple cores
            ExecutorService executor = Utils.createFixedThreadPool(processors);

            try {

                executor.invokeAll(tasks);

            } finally {
                executor.shutdown();
            }
        }

        tasks = null;

    } catch (Exception e) {
        log.error("Failed to process multiple files", e);
        throw new IOException(e);

    }

    int dicSize = dictionary.size();

    log.info("Successfully assembled dictionary with size: " + dicSize + ", max resourceId: "
            + dictionary.getLargestResourceId() + ", memory usage: " + Utils.getMemoryUsageInGB() + "GB"
            + ", timer: " + stopwatch);

    // extract the terms and encode the schema if needed
    if (StringUtils.isNotBlank(this.schemaFile) && StringUtils.isNotBlank(this.schemaBucket)) {
        this.encodeSchema(dictionary);
    }

    // encode the term stats file if needed
    if (StringUtils.isNotBlank(this.termStatsFile) && StringUtils.isNotBlank(this.encodedTermStatsFile)) {
        this.encodeTermsStats(dictionary);
    }

    // if no name provided for the dictionary file then create a default
    if (StringUtils.isBlank(this.dictionaryFile)) {
        this.dictionaryFile = this.cloudService.getInstanceId() + '_'
                + FilenameUtils.getSerializedGZipedDictionaryFilename();
    }

    this.dictionaryFile = FilenameUtils.getLocalFilePath(this.dictionaryFile);

    dictionary = ((ConcurrentDictionary) dictionary).getNonConcurrentDictionary();

    log.info("Successfully created non concurrent dictionary for serialization, memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

    dictionary.toFile(dictionaryFile, true);

    dictionary = null;

    log.info("Successfully serialized dictionary with size: " + dicSize + ", memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

    if (StringUtils.isBlank(this.targetBucket)) {
        this.targetBucket = bucket;
    }

    this.cloudService.uploadFileToCloudStorage(dictionaryFile, this.targetBucket);

    log.info("Successfully assembled, serialized and uploaded dictionary, memory usage: "
            + Utils.getMemoryUsageInGB() + "GB" + ", timer: " + stopwatch);

}

From source file:io.anserini.index.IndexClueWeb09b.java

public int indexWithThreads(int numThreads) throws IOException, InterruptedException {

    System.out.println(
            "Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);

    final IndexWriterConfig iwc = new IndexWriterConfig(analyzer());

    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(256.0);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);

    final ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    List<Path> warcFiles = discoverWarcFiles(docDir);
    if (doclimit > 0 && warcFiles.size() > doclimit)
        warcFiles = warcFiles.subList(0, doclimit);

    for (Path f : warcFiles)
        executor.execute(new IndexerThread(writer, f));

    //add some delay to let the scheduler spawn threads
    Thread.sleep(30000);
    executor.shutdown(); // Disable new tasks from being submitted

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(5, TimeUnit.MINUTES)) {
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }

    return numIndexed;
}

From source file:com.quixey.hadoop.fs.oss.MultiPartUploader.java

@SuppressWarnings("unchecked")
private List<PartETag> uploadParts(final String key, final File file, final String uploadId, int parts)
        throws IOException {

    // construct thread pool
    ExecutorService pool = newExecutorService(file, parts);

    final Future<PartETag>[] futures = new Future[parts];
    for (int i = 0; i < parts; i++) {
        final int partNum = i;
        futures[i] = pool.submit(new Callable<PartETag>() {
            @Override
            public PartETag call() throws Exception {
                return uploadPart(key, file, uploadId, partNum);
            }
        });
    }
    pool.shutdown();

    // wait for uploads to complete
    awaitTermination(pool);

    // retrieve etags and verify uploads
    PartETag[] eTags = new PartETag[parts];
    int i = 0;
    for (Future<PartETag> future : futures) {
        try {
            eTags[i++] = future.get();
        } catch (InterruptedException | ExecutionException e) {
            throw new IOException("Unable to upload part " + i, e);
        }
    }

    return Arrays.asList(eTags);
}

From source file:com.esri.cordova.geolocation.AdvancedGeolocation.java

/**
 * Shutdown cordova thread pool. This assumes we are in control of all tasks running
 * in the thread pool.
 * Additional info: http://developer.android.com/reference/java/util/concurrent/ExecutorService.html
 * @param pool Cordova application's thread pool
 */
private void shutdownAndAwaitTermination(ExecutorService pool) {
    Log.d(TAG, "Attempting to shutdown cordova threadpool");
    if (!pool.isShutdown()) {
        try {
            // Disable new tasks from being submitted
            pool.shutdown();
            // Wait a while for existing tasks to terminate
            if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
                pool.shutdownNow(); // Cancel currently executing tasks
                // Wait a while for tasks to respond to being cancelled
                if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
                    System.err.println("Cordova thread pool did not terminate.");
                }
            }
        } catch (InterruptedException ie) {
            // Preserve interrupt status
            Thread.currentThread().interrupt();
        }
    }
}

From source file:com.example.AzureADAuthenticationFilter.java

private AuthenticationResult getAccessTokenFromClientCredentials() throws Throwable {
    AuthenticationContext context = null;
    AuthenticationResult result = null;
    ExecutorService service = null;
    try {
        service = Executors.newFixedThreadPool(1);
        context = new AuthenticationContext(authority + tenant + "/", true, service);
        Future<AuthenticationResult> future = context.acquireToken("https://graph.windows.net",
                new ClientCredential(clientId, clientSecret), null);
        result = future.get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        service.shutdown();
    }

    if (result == null) {
        throw new ServiceUnavailableException("authentication result was null");
    }
    return result;
}

From source file:oz.hadoop.yarn.api.core.LocalApplicationLaunchTests.java

@Test(timeout = 2000)
public void validateJavaContainerLaunchImmediateTermination() throws Exception {
    final YarnApplication<Void> yarnApplication = YarnAssembly
            .forApplicationContainer(SimpleRandomDelayContainer.class, ByteBuffer.wrap("Hello".getBytes()))
            .containerCount(2).memory(512).withApplicationMaster().maxAttempts(2).priority(2)
            .build("sample-yarn-application");
    assertFalse(yarnApplication.isRunning());
    ExecutorService executor = Executors.newCachedThreadPool();
    executor.execute(new Runnable() {
        @Override
        public void run() {
            yarnApplication.launch();
        }
    });
    assertFalse(yarnApplication.isRunning());
    yarnApplication.terminate();
    assertEquals(0, yarnApplication.liveContainers());
    assertFalse(yarnApplication.isRunning());
    executor.shutdown();
}