Example usage for java.util.concurrent ExecutorService isTerminated

Introduction

On this page you can find example usages of java.util.concurrent.ExecutorService.isTerminated().

Prototype

boolean isTerminated();

Document

Returns true if all tasks have completed following shut down. Note that isTerminated is never true unless either shutdown or shutdownNow was called first.
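
The following minimal, self-contained sketch illustrates that contract; it is not taken from any of the source files below, and the class name is made up. isTerminated() only reports true after shutdown() has been requested and every submitted task has finished, so blocking with awaitTermination is usually preferable to spinning on isTerminated():

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class IsTerminatedDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> System.out.println("working..."));

        // false: the executor has not been shut down yet
        System.out.println("terminated? " + executor.isTerminated());

        executor.shutdown(); // no new tasks accepted; queued tasks still run

        // Block until the task completes (or the timeout elapses)
        if (executor.awaitTermination(5, TimeUnit.SECONDS)) {
            // true: shutdown was requested and all tasks have completed
            System.out.println("terminated? " + executor.isTerminated());
        }
    }
}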

Usage

From source file:com.brienwheeler.lib.concurrent.ExecutorsTest.java

@Test
public void testNewSingleThreadExecutorShutdownNow() throws InterruptedException {
    NamedThreadFactory threadFactory = new NamedThreadFactory(THREAD_FACTORY_NAME);
    ExecutorService executor = Executors.newSingleThreadExecutor(threadFactory);

    executor.submit(new SleepRunnable(10L));
    Future<?> notExecutedRunnable = executor.submit(new NullRunnable());
    Future<?> notExecutedCallable = executor.submit(new NullCallable());
    Future<Integer> notExecutedRunnable2 = executor.submit(new NullRunnable(), 1);

    List<Runnable> notExecuted = executor.shutdownNow();
    Assert.assertTrue(executor.isShutdown());
    Assert.assertEquals(3, notExecuted.size());
    // submit() wraps each task in a FutureTask; shutdownNow() drains those
    // same FutureTask instances from the queue, so identity checks pass.
    Assert.assertTrue(CollectionUtils.containsInstance(notExecuted, notExecutedRunnable));
    Assert.assertTrue(CollectionUtils.containsInstance(notExecuted, notExecutedCallable));
    Assert.assertTrue(CollectionUtils.containsInstance(notExecuted, notExecutedRunnable2));

    executor.awaitTermination(10, TimeUnit.MILLISECONDS);
    Assert.assertTrue(executor.isTerminated());
}

From source file:it.wami.map.mongodeploy.OsmSaxHandler.java

@Override
public void endDocument() throws SAXException {
    super.endDocument();
    if (nodesQueue.size() > 0) {
        System.out.println("remaining nodes: " + nodesQueue.size());
        saveEntry(nodesQueue, COLL_NODES);
    }
    if (waysQueue.size() > 0) {
        System.out.println("remaining ways: " + waysQueue.size());
        saveEntry(waysQueue, COLL_WAYS);
    }
    if (!relationRunnables.isEmpty()) {
        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);

        for (Runnable currentRunnable : relationRunnables) {
            executorService.execute(currentRunnable);
        }
        executorService.shutdown();
        // Busy-wait: isTerminated() becomes true only after shutdown()
        // and completion of every queued task
        while (!executorService.isTerminated()) {
        }
        relationRunnables.clear();
        System.out.println("remaining relations: " + relationsQueue.size());
        saveEntry(relationsQueue, COLL_RELATIONS);
    }
    if (!relationsQueue.isEmpty()) {
        System.out.println("remaining relations: " + relationsQueue.size());
        saveEntry(relationsQueue, COLL_RELATIONS);
    }
    if (tagsQueue.size() > 0) {
        System.out.println("remaining tags: " + tagsQueue.size());
        saveEntry(tagsQueue, COLL_TAGS);
    }
    end = System.currentTimeMillis();
    long time = end - start;
    System.out.println(
            "End of document; time - " + (time / (60 * 60 * 1000)) % 60 + "h, " + (time / (60 * 1000)) % 60
                    + "m, " + (time / 1000) % 60 + "s, " + time % 1000 + "ms (" + (end - start) + ")");
}

From source file:it.wami.map.mongodeploy.OsmSaxHandler.java

/**
 * @param way the Way
 */
private void populateWayGeo(Way way) {
    Runnable r = new WayRunnable(db, way, waysQueue);

    waysRunnables.add(r);
    int current = (int) (readWays % WAYS_CHUNK);

    if (current == WAYS_CHUNK - 1) {
        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);
        for (Runnable currentRunnable : waysRunnables) {
            executorService.execute(currentRunnable);
        }
        waysRunnables = Collections.synchronizedList(new ArrayList<Runnable>());
        executorService.shutdown();
        // Busy-wait until every way-processing task has completed
        while (!executorService.isTerminated()) {
        }

        saveEntry(waysQueue, COLL_WAYS);
    }
}

From source file:it.wami.map.mongodeploy.OsmSaxHandler.java

private void populateRelation(Relation relation) {
    Runnable r = new RelationRunnable(db, relation, relationsQueue);

    relationRunnables.add(r);
    int current = (int) (readRelations % RELATIONS_CHUNK);

    if (current == RELATIONS_CHUNK - 1) {
        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);
        for (Runnable currentRunnable : relationRunnables) {
            executorService.execute(currentRunnable);
        }
        relationRunnables = Collections.synchronizedList(new ArrayList<Runnable>());
        executorService.shutdown();
        // Busy-wait until every relation-processing task has completed
        while (!executorService.isTerminated()) {
        }

        saveEntry(relationsQueue, COLL_RELATIONS);
    }
}

From source file:stroom.index.server.BenchmarkIndex.java

@Override
public void run() {
    init();

    final long batchStartTime = System.currentTimeMillis();

    final IndexShardWriterImpl[] writers = new IndexShardWriterImpl[indexShards.length];
    for (int i = 0; i < writers.length; i++) {
        final IndexShard indexShard = indexShards[i];
        writers[i] = new IndexShardWriterImpl(indexShardService, indexFields, indexShard.getIndex(),
                indexShard);
        writers[i].setRamBufferSizeMB(ramBufferMbSize);
        writers[i].open(true);
    }
    final AtomicLong atomicLong = new AtomicLong();

    final long indexStartTime = System.currentTimeMillis();

    final ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(jobSize);
    for (int i = 0; i < jobSize; i++) {
        final Runnable r = () -> {
            long myId;
            while ((myId = atomicLong.incrementAndGet()) < docCount) {
                try {
                    final int idx = (int) (myId % writers.length);
                    writers[idx].addDocument(getDocument(myId));
                } catch (final Exception e) {
                    e.printStackTrace();
                }
            }
        };
        threadPoolExecutor.execute(r);
    }

    threadPoolExecutor.shutdown();

    // Wait for termination.
    while (!threadPoolExecutor.isTerminated()) {
        // Wait 1 second.
        ThreadUtil.sleep(1000);

        final long docsSoFar = atomicLong.get();
        final long secondsSoFar = (System.currentTimeMillis() - batchStartTime) / 1000;

        for (int i = 0; i < writers.length; i++) {
            final IndexShardWriterImpl impl = writers[i];
            final IndexShard indexShard = indexShards[i];

            if (secondsSoFar > 0) {
                final long docsPerSecond = docsSoFar / secondsSoFar;
                impl.sync();
                LOGGER.info("run() - " + StringUtils.rightPad(ModelStringUtil.formatCsv(docsSoFar), 10)
                        + " doc ps " + ModelStringUtil.formatCsv(docsPerSecond) + " ("
                        + indexShard.getFileSizeString() + ")");
            }
            if (nextCommit != null && docsSoFar > nextCommit) {
                impl.flush();
                nextCommit = ((docsSoFar / commitCount) * commitCount) + commitCount;
                LOGGER.info("run() - commit " + docsSoFar + " next commit is " + nextCommit);
            }
        }
    }
    final long indexEndTime = System.currentTimeMillis();
    final long secondsSoFar = (System.currentTimeMillis() - batchStartTime) / 1000;
    final long docsPerSecond = atomicLong.get() / secondsSoFar;

    for (final IndexShardWriter writer : writers) {
        writer.close();
    }

    final long batchEndTime = System.currentTimeMillis();

    LOGGER.info("runWrite() - Complete");
    LOGGER.info("=====================");
    LOGGER.info("");
    LOGGER.info("Using Args");
    LOGGER.info("==========");
    LoggerPrintStream traceStream = LoggerPrintStream.create(LOGGER, false);
    traceArguments(traceStream);
    traceStream.close();
    LOGGER.info("");
    LOGGER.info("Stats");
    LOGGER.info("=====");

    LOGGER.info("Open Time  " + toMsNiceString(indexStartTime - batchStartTime));
    LOGGER.info("Index Time " + toMsNiceString(indexEndTime - indexStartTime));
    LOGGER.info("Close Time " + toMsNiceString(batchEndTime - indexEndTime));
    LOGGER.info("Total Time " + toMsNiceString(batchEndTime - batchStartTime));
    LOGGER.info("");
    LOGGER.info("Final Docs PS " + ModelStringUtil.formatCsv(docsPerSecond));

    traceStream = LoggerPrintStream.create(LOGGER, false);
    for (int i = 0; i < writers.length; i++) {
        LOGGER.info("");
        final IndexShardWriterImpl impl = writers[i];
        LOGGER.info("Writer " + StringUtils.leftPad(String.valueOf(i), 2));
        LOGGER.info("=========");
        impl.trace(traceStream);
    }
    traceStream.close();

    LOGGER.info("");
    LOGGER.info("Search");
    LOGGER.info("=====");

    try {
        final IndexShardSearcherImpl[] reader = new IndexShardSearcherImpl[indexShards.length];
        final IndexReader[] readers = new IndexReader[indexShards.length];
        for (int i = 0; i < reader.length; i++) {
            reader[i] = new IndexShardSearcherImpl(indexShards[i]);
            reader[i].open();
            readers[i] = reader[i].getReader();
        }

        for (final String arg : docArgs) {
            doSearchOnField(readers, arg);
        }

        doSearchOnField(readers, "multifield");
        doSearchOnField(readers, "dupfield");

        LOGGER.info("=====");

        for (int i = 0; i < reader.length; i++) {
            reader[i].close();
        }

    } catch (final Exception ex) {
        ex.printStackTrace();
    }

}

From source file:org.codice.ddf.commands.catalog.ReplicationCommand.java

@Override
protected Object executeWithSubject() throws Exception {
    final CatalogFacade catalog = getCatalog();

    final CatalogFacade framework = new Framework(getService(CatalogFramework.class));
    Set<String> sourceIds = framework.getSourceIds();

    while (true) {
        if (StringUtils.isBlank(sourceId) || !sourceIds.contains(sourceId)) {
            console.println("Please enter the Source ID you would like to replicate:");
            for (String id : sourceIds) {
                console.println("\t" + id);
            }
        } else {
            break;
        }
        sourceId = getInput("ID:  ");
    }

    if (batchSize > MAX_BATCH_SIZE || batchSize < 1) {
        console.println("Batch Size must be between 1 and 1000.");
        return null;
    }

    start = System.currentTimeMillis();

    final Filter filter = (cqlFilter != null) ? CQL.toFilter(cqlFilter)
            : getFilter(getFilterStartTime(start), start, Metacard.EFFECTIVE);

    QueryImpl query = new QueryImpl(filter);
    query.setRequestsTotalResultsCount(true);
    query.setPageSize(batchSize);
    query.setSortBy(new SortByImpl(Metacard.EFFECTIVE, SortOrder.DESCENDING));
    QueryRequest queryRequest = new QueryRequestImpl(query, Arrays.asList(sourceId));
    SourceResponse response;
    try {
        response = framework.query(queryRequest);
    } catch (Exception e) {
        printErrorMessage("Error occurred while querying the Federated Source.\n" + e.getMessage());
        return null;
    }

    final long totalHits = response.getHits();
    final long totalPossible;
    if (totalHits == 0) {
        console.println("No records were found to replicate.");
        return null;
    }

    // If the maxMetacards is set, restrict the totalPossible to the number of maxMetacards
    if (maxMetacards > 0 && maxMetacards <= totalHits) {
        totalPossible = maxMetacards;
    } else {
        totalPossible = totalHits;
    }

    console.println("Starting replication for " + totalPossible + " Records");

    if (multithreaded > 1 && totalPossible > batchSize) {
        BlockingQueue<Runnable> blockingQueue = new ArrayBlockingQueue<>(multithreaded);
        RejectedExecutionHandler rejectedExecutionHandler = new ThreadPoolExecutor.CallerRunsPolicy();
        final ExecutorService executorService = new ThreadPoolExecutor(multithreaded, multithreaded, 0L,
                TimeUnit.MILLISECONDS, blockingQueue, rejectedExecutionHandler);
        console.printf("Running %d threads during replication.%n", multithreaded);

        do {
            LOGGER.debug("In loop at iteration {}", queryIndex.get());
            final int startIndex = queryIndex.get();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    int count = queryAndIngest(framework, catalog, startIndex, filter);
                    printProgressAndFlush(start, totalPossible, ingestCount.addAndGet(count));
                }
            });
        } while (queryIndex.addAndGet(batchSize) <= totalPossible);
        executorService.shutdown();

        while (!executorService.isTerminated()) {
            try {
                TimeUnit.SECONDS.sleep(1);
            } catch (InterruptedException e) {
                // ignore
            }
        }
    } else {
        do {
            int count = queryAndIngest(framework, catalog, queryIndex.get(), filter);
            printProgressAndFlush(start, totalPossible, ingestCount.addAndGet(count));
        } while (queryIndex.addAndGet(batchSize) <= totalPossible);
    }

    console.println();
    long end = System.currentTimeMillis();
    String completed = String.format(
            " %d record(s) replicated; %d record(s) failed; completed in %3.3f seconds.", ingestCount.get(),
            failedCount.get(), (end - start) / MS_PER_SECOND);
    LOGGER.info("Replication Complete: {}", completed);
    console.println(completed);

    if (StringUtils.isNotBlank(failedDir)) {
        writeFailedMetacards(failedMetacards);
    }

    return null;
}

From source file:org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse sample TBMM Journal w/o bulk parse")
public void parseTbmmJournal_b0241h_noBulkParse() throws Exception {
    final File tokenizedFile = new File("core/src/test/resources/tokenizer/tbmm_b0241h_tokenized.txt");
    final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
    final LinkedList<String> words = new LinkedList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();
    for (String line : lines) {
        final ArrayList<String> strings = Lists
                .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
        words.addAll(strings);
        uniqueWords.addAll(strings);
    }

    final int initialL1CacheSize = uniqueWords.size();
    final int maxL1CacheSize = initialL1CacheSize;

    final MorphologicParserCache l1Cache = new LRUMorphologicParserCache(NUMBER_OF_THREADS, initialL1CacheSize,
            maxL1CacheSize);

    final ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(new TwoLevelMorphologicParserCache(BULK_SIZE, l1Cache),
                contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i++) {
        final MorphologicParser parser = parsers[i % NUMBER_OF_THREADS];
        final String word = words.get(i);
        final int wordIndex = i;
        pool.execute(new SingleParseCommand(parser, word, wordIndex, false));
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(500, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}
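
Note the design difference from the busy-wait loops elsewhere on this page: pool.awaitTermination(500, TimeUnit.MILLISECONDS) blocks the calling thread between isTerminated() checks instead of spinning on an empty loop.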

From source file:me.carpela.network.pt.cracker.tools.ttorrent.Torrent.java

private static String hashFiles(List<File> files, int pieceLength)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLength);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    // CallableChunkHasher is assumed to copy the buffer's
                    // contents in its constructor (as in upstream ttorrent),
                    // before the next read() overwrites them.
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLength));
    return hashes.toString();
}

From source file:com.dumontierlab.pdb2rdf.Pdb2Rdf.java

private static void load(CommandLine cmd, final Map<String, Double> stats) {
    String username = "dba";
    String password = "dba";
    String host = "localhost";
    int port = 1111;
    DetailLevel detailLevel = null;
    if (cmd.hasOption("detailLevel")) {
        try {
            detailLevel = Enum.valueOf(DetailLevel.class, cmd.getOptionValue("detailLevel"));
        } catch (IllegalArgumentException e) {
            LOG.fatal("Invalid argument value for detailLevel option", e);
            System.exit(1);
        }
    }
    final DetailLevel f_detailLevel = detailLevel;

    if (cmd.hasOption("username")) {
        username = cmd.getOptionValue("username");
    }
    if (cmd.hasOption("password")) {
        password = cmd.getOptionValue("password");
    }
    if (cmd.hasOption("host")) {
        host = cmd.getOptionValue("host");
    }
    if (cmd.hasOption("port")) {
        try {
            port = Integer.parseInt(cmd.getOptionValue("port"));
        } catch (NumberFormatException e) {
            LOG.fatal("Invalid port number: " + cmd.getOptionValue("port"));
            System.exit(1);
        }
    }

    final VirtuosoDaoFactory factory = new VirtuosoDaoFactory(host, port, username, password);
    ExecutorService pool = getThreadPool(cmd);

    final ProgressMonitor monitor = getProgressMonitor();
    final Pdb2RdfInputIterator i = processInput(cmd);
    final int inputSize = i.size();
    final AtomicInteger progressCount = new AtomicInteger();

    if (monitor != null) {
        monitor.setProgress(0, inputSize);
    }

    while (i.hasNext()) {
        final InputSource input = i.next();
        pool.execute(new Runnable() {
            public void run() {
                PdbXmlParser parser = new PdbXmlParser();
                UriBuilder uriBuilder = new UriBuilder();
                PdbRdfModel model = null;
                try {
                    model = new VirtPdbRdfModel(factory, Bio2RdfPdbUriPattern.PDB_GRAPH, uriBuilder,
                            factory.getTripleStoreDao());
                    if (f_detailLevel != null) {
                        parser.parse(input, model, f_detailLevel);
                    } else {
                        parser.parse(input, model);
                    }
                    if (stats != null) {
                        updateStats(stats, model);
                    }
                    if (monitor != null) {
                        monitor.setProgress(progressCount.incrementAndGet(), inputSize);
                    }

                } catch (Exception e) {
                    LOG.error("Uanble to parse input for pdb=" + (model != null ? model.getPdbId() : "null"),
                            e);
                }
            }
        });
    }
    pool.shutdown();
    while (!pool.isTerminated()) {
        try {
            pool.awaitTermination(1, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            break;
        }
    }
}

From source file:br.prof.salesfilho.oci.view.console.Main.java

public void extractFeatures() {

    if (this.propertySource.containsProperty("inputDir") && this.propertySource.containsProperty("outputDir")) {

        // Create a fixed thread pool so the two feature extractors run in parallel
        ExecutorService executor = Executors.newFixedThreadPool(2);

        BodyWomanFeatureExtractorExecutor e1 = new BodyWomanFeatureExtractorExecutor(true);
        e1.setInputDir(this.propertySource.getProperty("inputDir").toString());
        e1.setOutputDir(this.propertySource.getProperty("outputDir").toString());
        e1.setKernelSize(Double.valueOf(this.propertySource.getProperty("kernelsize").toString()));
        e1.setDatabaseName(this.propertySource.getProperty("databaseName").toString());

        executor.execute(e1);

        BodyWomanFeatureExtractorExecutor e2 = new BodyWomanFeatureExtractorExecutor(false);

        e2.setInputDir(this.propertySource.getProperty("inputDir").toString());
        e2.setOutputDir(this.propertySource.getProperty("outputDir").toString());
        e2.setKernelSize(Double.valueOf(this.propertySource.getProperty("kernelsize").toString()));
        e2.setDatabaseName(this.propertySource.getProperty("databaseName").toString());

        executor.execute(e2);

        // Busy-wait for both extractors to finish
        executor.shutdown();
        while (!executor.isTerminated()) {
        }
        File databaseFile = new File(e1.getDatabaseName());
        bodyWomanDescriptorService.openDatabase(databaseFile);
        bodyWomanDescriptorService.add(e1.getBodyWomanDescriptor());
        bodyWomanDescriptorService.add(e2.getBodyWomanDescriptor());
        bodyWomanDescriptorService.save(databaseFile);

    } else {
        usage();
    }
}