Example usage for java.util.concurrent ExecutorService isTerminated

Introduction

On this page you can find usage examples for the java.util.concurrent ExecutorService.isTerminated() method.

Prototype

boolean isTerminated();

Document

Returns true if all tasks have completed following shut down.
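
The snippet below is a minimal, self-contained sketch (not taken from any of the projects listed further down) of the shutdown/await pattern that recurs in the examples that follow: submit tasks, call shutdown(), then poll isTerminated() while blocking in awaitTermination() rather than spinning in an empty loop. The class name, pool size, task count, and wait interval are illustrative choices.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class IsTerminatedExample {
    public static void main(String[] args) throws InterruptedException {
        // Illustrative pool size; any fixed-size pool behaves the same way here.
        ExecutorService pool = Executors.newFixedThreadPool(4);

        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("Running task " + taskId));
        }

        // isTerminated() only returns true after shutdown() or shutdownNow()
        // has been called and all submitted tasks have finished.
        pool.shutdown();
        while (!pool.isTerminated()) {
            // Block for up to one second at a time rather than busy-waiting.
            pool.awaitTermination(1, TimeUnit.SECONDS);
        }

        System.out.println("All tasks completed: " + pool.isTerminated());
    }
}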

Usage

From source file:org.trnltk.apps.morphology.contextless.parser.CachingMorphologicParserApp.java

@App("Parse sample TBMM Journal with bulk parse")
public void parseTbmmJournal_b0241h_withBulkParse() throws Exception {
    final File tokenizedFile = new File("core/src/test/resources/tokenizer/tbmm_b0241h_tokenized.txt");
    final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
    final LinkedList<String> words = new LinkedList<String>();
    final HashSet<String> uniqueWords = new HashSet<String>();
    for (String line : lines) {
        final ArrayList<String> strings = Lists
                .newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line));
        words.addAll(strings);
        uniqueWords.addAll(strings);
    }

    final int initialL1CacheSize = uniqueWords.size();
    final int maxL1CacheSize = initialL1CacheSize;

    final MorphologicParserCache l1Cache = new LRUMorphologicParserCache(NUMBER_OF_THREADS, initialL1CacheSize,
            maxL1CacheSize);

    final ExecutorService pool = Executors.newFixedThreadPool(NUMBER_OF_THREADS);

    final MorphologicParser[] parsers = new MorphologicParser[NUMBER_OF_THREADS];
    for (int i = 0; i < parsers.length; i++) {
        parsers[i] = new CachingMorphologicParser(new TwoLevelMorphologicParserCache(BULK_SIZE, l1Cache),
                contextlessMorphologicParser, true);
    }

    final StopWatch stopWatch = new StopWatch();
    stopWatch.start();

    for (int i = 0; i < words.size(); i = i + BULK_SIZE) {
        final MorphologicParser parser = parsers[(i / BULK_SIZE) % NUMBER_OF_THREADS];
        int start = i;
        int end = i + BULK_SIZE < words.size() ? i + BULK_SIZE : words.size();
        final int wordIndex = i;

        final List<String> subWordList = words.subList(start, end);
        pool.execute(new BulkParseCommand(parser, subWordList, wordIndex, false));
    }

    pool.shutdown();
    while (!pool.isTerminated()) {
        System.out.println("Waiting pool to be terminated!");
        pool.awaitTermination(500, TimeUnit.MILLISECONDS);
    }

    stopWatch.stop();

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
}

From source file:com.paniclauncher.workers.InstanceInstaller.java

private void downloadMods(ArrayList<Mod> mods) {
    fireSubProgressUnknown();
    ExecutorService executor = Executors.newFixedThreadPool(8);
    ArrayList<PanicLauncherDownloadable> downloads = getDownloadableMods();
    totalDownloads = downloads.size();

    for (PanicLauncherDownloadable download : downloads) {
        executor.execute(download);
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        // Busy-wait until every queued download task has finished.
    }

    for (Mod mod : mods) {
        if (!downloads.contains(mod) && !isCancelled()) {
            fireTask(App.settings.getLocalizedString("common.downloading") + " " + mod.getFile());
            mod.download(this);
        }
    }
}

From source file:com.paniclauncher.workers.InstanceInstaller.java

private void downloadMojangStuffNew() {
    firePropertyChange("doing", null, App.settings.getLocalizedString("instance.downloadingresources"));
    firePropertyChange("subprogressint", null, null);
    ExecutorService executor = Executors.newFixedThreadPool(8);
    ArrayList<MojangDownloadable> downloads = getNeededResources();
    totalResources = downloads.size();

    for (MojangDownloadable download : downloads) {
        executor.execute(download);
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        // Busy-wait until every resource download has finished.
    }
    if (!isCancelled()) {
        fireTask(App.settings.getLocalizedString("instance.organisinglibraries"));
        fireSubProgress(0);
        if (!isServer) {
            String[] libraries = librariesNeeded.split(",");
            for (String libraryFile : libraries) {
                Utils.copyFile(new File(App.settings.getLibrariesDir(), libraryFile), getBinDirectory());
            }
            String[] natives = nativesNeeded.split(",");
            for (String nativeFile : natives) {
                Utils.unzip(new File(App.settings.getLibrariesDir(), nativeFile), getNativesDirectory());
            }
            Utils.delete(new File(getNativesDirectory(), "META-INF"));
        }
        if (isServer) {
            Utils.copyFile(
                    new File(App.settings.getJarsDir(), "minecraft_server." + this.minecraftVersion + ".jar"),
                    getRootDirectory());
        } else {
            Utils.copyFile(new File(App.settings.getJarsDir(), this.minecraftVersion + ".jar"),
                    new File(getBinDirectory(), "minecraft.jar"), true);
        }
    }
}

From source file:org.pentaho.reporting.engine.classic.core.testsupport.gold.GoldTestBase.java

protected void runAllGoldReportsInParallel(int threads) throws Exception {
    initializeTestEnvironment();

    final List<Throwable> errors = Collections.synchronizedList(new ArrayList<Throwable>());

    final ExecutorService threadPool = new ThreadPoolExecutor(threads, threads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), new TestThreadFactory(), new ThreadPoolExecutor.AbortPolicy());

    List<ExecuteReportRunner> reports = new ArrayList<ExecuteReportRunner>();
    reports.addAll(collectReports("reports", ReportProcessingMode.legacy, errors));
    reports.addAll(collectReports("reports", ReportProcessingMode.migration, errors));
    reports.addAll(collectReports("reports", ReportProcessingMode.current, errors));
    reports.addAll(collectReports("reports-4.0", ReportProcessingMode.migration, errors));
    reports.addAll(collectReports("reports-4.0", ReportProcessingMode.current, errors));

    for (ExecuteReportRunner report : reports) {
        threadPool.submit(report);
    }

    threadPool.shutdown();
    while (threadPool.isTerminated() == false) {
        threadPool.awaitTermination(5, TimeUnit.MINUTES);
    }
    if (errors.isEmpty() == false) {
        Log log = LogFactory.getLog(GoldTestBase.class);
        for (Throwable throwable : errors) {
            log.error("Failed", throwable);
        }
        Assert.fail();
    }
}

From source file:broadwick.montecarlo.MonteCarlo.java

@Override
public void run() {
    log.trace("Starting Monte Carlo results producer thread");
    try {
        final int poolSize = Runtime.getRuntime().availableProcessors();
        final ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("MCScenarioProducer-%d")
                .setDaemon(true).build();
        final ExecutorService es = Executors.newFixedThreadPool(poolSize, threadFactory);
        final RNG generator = new RNG(RNG.Generator.Well44497b);

        final StopWatch sw = new StopWatch();
        sw.start();
        for (int i = 0; i < numSimulations; i++) {
            es.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        log.trace("Monte Carlo producer: creating scenario object");
                        final MonteCarloScenario scenario = simulation.copyOf();
                        final MonteCarloResults results = scenario
                                .run(generator.getInteger(0, Integer.MAX_VALUE - 1));
                        log.trace("Monte Carlo producer: generated results {}", results.getExpectedValue());
                        queue.put(results);
                    } catch (Exception e) {
                        log.error("Error running Monte Carlo simulation {}",
                                Throwables.getStackTraceAsString(e));
                    }
                }
            });
        }
        es.shutdown();
        while (!es.isTerminated()) {
            es.awaitTermination(1, TimeUnit.SECONDS);
        }
        queue.put(new Poison());

        sw.stop();
        log.info("Finished {} simulations in {}.", numSimulations, sw);
    } catch (Exception ex) {
        log.error("Monte Carlo simulation error: {}", Throwables.getStackTraceAsString(ex));
    }
}

From source file:broadwick.Broadwick.java

/**
 * Run the Broadwick framework.
 */
@SuppressWarnings("squid:S1147")
public void run() {
    if (project != null) {
        final StopWatch sw = new StopWatch();
        sw.start();

        // initialise the data, by reading the data files and/or the database.
        log.info("Running broadwick {}", BroadwickVersion.getVersionAndTimeStamp());

        try (DataReader dr = new DataReader(project.getData())) {
            final Map<String, Model> registeredModels = registerModels(project, dr.getLookup());
            log.info("Running broadwick for the following models {}", registeredModels.keySet());

            // Run the models, each on a separate thread.
            // TODO in a single-threaded grid environment we cannot do this - need to think again here....
            final int poolSize = registeredModels.size();
            if (poolSize > 0) {
                final ThreadFactory threadFactory = new ThreadFactoryBuilder()
                        .setNameFormat("BroadwickModels-%d").setDaemon(true).build();
                final ExecutorService es = Executors.newFixedThreadPool(poolSize, threadFactory);

                //final StopWatch sw = new StopWatch();
                for (final Entry<String, Model> entry : registeredModels.entrySet()) {
                    es.submit(new Runnable() {
                        @Override
                        public void run() {
                            final String modelName = entry.getKey();
                            final Model model = entry.getValue();
                            try {
                                log.info("Running {} [{}]", modelName, model.getClass().getCanonicalName());
                                model.init();
                                model.run();
                                model.finalise();
                            } catch (Exception ex) {
                                log.error("Error running model {}. see stack trace from details.", modelName);
                                log.error("{}", Throwables.getStackTraceAsString(ex));
                            }
                        }
                    });
                }
                es.shutdown();
                while (!es.isTerminated()) {
                    es.awaitTermination(10, TimeUnit.SECONDS);
                }
                //sw.stop();
                //log.trace("Finished {} simulations in {}.", maxSimulations, sw);
            }
        } catch (Exception ex) {
            log.error("{}", ex.getLocalizedMessage());
            log.error("{}", Throwables.getStackTraceAsString(ex));
            log.error("Something went wrong. See previous messages for details.");
        }

        log.info("Simulation complete. {}", sw.toString());
        // In rare circumstances, where exceptions are caught and the simulation has completed but
        // there are still tasks being submitted to the executor, we need to force the program to quit.
        Runtime.getRuntime().exit(0);
    }
}

From source file:org.wso2.carbon.registry.extensions.handlers.ZipWSDLMediaTypeHandler.java

protected void uploadFiles(List<UploadTask> tasks, File tempFile, Stack<File> fileList, File tempDir,
        int poolSize, String path, List<String> uriList, RequestContext requestContext)
        throws RegistryException {
    CommonUtil.loadImportedArtifactMap();
    try {
        if (poolSize <= 0) {
            boolean updateLockAvailable = CommonUtil.isUpdateLockAvailable();
            if (!updateLockAvailable) {
                CommonUtil.releaseUpdateLock();
            }
            try {
                for (UploadTask task : tasks) {
                    task.run();
                }
            } finally {
                if (!updateLockAvailable) {
                    CommonUtil.acquireUpdateLock();
                }
            }
        } else {
            ExecutorService executorService = Executors.newFixedThreadPool(poolSize);
            if (!CommonUtil.isArtifactIndexMapExisting()) {
                CommonUtil.createArtifactIndexMap();
            }
            if (!CommonUtil.isSymbolicLinkMapExisting()) {
                CommonUtil.createSymbolicLinkMap();
            }
            for (UploadTask task : tasks) {
                executorService.submit(task);
            }
            executorService.shutdown();
            while (!executorService.isTerminated()) {
                // Busy-wait until every upload task submitted above has completed.
            }
        }
    } finally {
        CommonUtil.clearImportedArtifactMap();
    }
    try {
        if (CommonUtil.isArtifactIndexMapExisting()) {
            Map<String, String> artifactIndexMap = CommonUtil.getAndRemoveArtifactIndexMap();

            if (log.isDebugEnabled()) {
                for (Map.Entry<String, String> entry : artifactIndexMap.entrySet()) {
                    log.debug("Added Artifact Entry: " + entry.getKey());
                }
            }

            //                CommonUtil.addGovernanceArtifactEntriesWithRelativeValues(
            //                        CommonUtil.getUnchrootedSystemRegistry(requestContext), artifactIndexMap);
        }
        Registry registry = requestContext.getRegistry();
        if (!isDisableSymlinkCreation() && CommonUtil.isSymbolicLinkMapExisting()) {
            Map<String, String> symbolicLinkMap = CommonUtil.getAndRemoveSymbolicLinkMap();

            for (Map.Entry<String, String> entry : symbolicLinkMap.entrySet()) {
                if (log.isDebugEnabled()) {
                    log.debug("Added Symbolic Link: " + entry.getKey());
                }
                try {
                    if (registry.resourceExists(entry.getKey())) {
                        registry.removeLink(entry.getKey());
                    }
                } catch (RegistryException ignored) {
                    // we are not bothered about errors in getting rid of symbolic links.
                }
                requestContext.getSystemRegistry().createLink(entry.getKey(), entry.getValue());
            }
        }
    } catch (RegistryException e) {
        log.error("Unable to build artifact index.", e);
    }
    Map<String, String> taskResults = new LinkedHashMap<String, String>();
    for (UploadTask task : tasks) {
        if (task.getFailed()) {
            taskResults.put(task.getUri(), null);
        } else {
            taskResults.put(task.getUri(), task.getResult());
        }
    }
    onPutCompleted(path, taskResults, uriList, requestContext);
    try {
        delete(tempFile);
        while (!fileList.isEmpty()) {
            delete(fileList.pop());
        }
        FileUtils.deleteDirectory(tempDir);
    } catch (IOException e) {
        log.error("Unable to cleanup temporary files", e);
    }
    log.info("Completed uploading files from archive file");
}

From source file:com.idocbox.flame.Helios.java

/**
 * fire them!
 * @param ds       data source.
 * @param dsSpliter data source spliter.
 * @param mapper   mapper.
 * @param reducer  reducer.
 * @return
 */
public Collector<Map<K, V>> fire(JobConfig<K, V, T> jobConfig) {

    long start = System.currentTimeMillis();

    Collector<Map<K, V>> resultCollector = null;

    // data source.
    DataSource<T> dataSource = jobConfig.getDataSource();
    // data source spliter.
    DataSourceSpliter<T> dataSourceSpliter = jobConfig.getDataSourceSpliter();
    // mapper worker. root mapper worker.
    MapperWorker<K, V, T> mapperWorker = jobConfig.getMapperWorker();
    // reducer worker. root reducer worker.
    ReducerWorker<K, V> reducerWorker = jobConfig.getReducerWorker();
    // mapper.
    Mapper<K, V, T> mapper = jobConfig.getMapper();
    // reducer.
    Reducer<K, V> reducer = jobConfig.getReducer();
    // keeper.
    Keeper<Collector<Map<K, V>>> keeper = jobConfig.getKeeper();

    // spliting phase.

    //split data source into several data sources.
    log.info("spliting datasource ...");
    Map<String, DataSource<T>> dsMap = dataSourceSpliter.split(dataSource);

    long m1 = System.currentTimeMillis();
    long cost1 = m1 - start;
    double seconds1 = cost1 / 1000;
    log.info("spliting datasource: cost " + seconds1 + " s");

    // generate worker for mapper.create()
    if (null == dsMap || dsMap.isEmpty()) {
        log.info("Splited data source is empty! exit flame!");
        return null;
    }

    // mapping phase.

    // generate mapper worker.
    log.info("mapping && reducing ...");
    Set<String> dsKeys = dsMap.keySet();
    //mapper thread size.
    int mapperThreadSize = dsKeys.size() > jobConfig.getMaxMapperWorker() ? jobConfig.getMaxMapperWorker()
            : dsKeys.size();
    //create mapper worker thread pool.
    ExecutorService mapperWorkerThreadPool = Executors.newFixedThreadPool(mapperThreadSize);
    int dataSourceSize = 0;
    for (String key : dsKeys) {
        //create mapper worker baby.
        MapperWorker<K, V, T> mapperWorkerBaby = mapperWorker.create(key);

        //assign data source and run the worker.
        DataSource<T> dsUnit = dsMap.get(key);
        if (null != dsUnit) {
            //execute mapper work in thread pool.
            mapperWorkerThreadPool
                    .execute(new MapperWorkerRunable<K, V, T>(mapperWorkerBaby, dsUnit, mapper, keeper));

            dataSourceSize++;
        }
    }
    //shutdown executor service.
    mapperWorkerThreadPool.shutdown();

    // reduce phase.

    //generate reducer worker, assign mapper worker's compute result
    // to reducer worker.

    //mapper thread size.
    //create reducer worker thread pool.
    ExecutorService reducerWorkerThreadPool = Executors.newFixedThreadPool(jobConfig.getMaxReducerWorker());

    //get 2 collector, merge them into one, then passed to reducer.
    Set<ReducerWorker<K, V>> reducerWorkers = new HashSet<ReducerWorker<K, V>>();
    int j = 0;
    int expectedReducTime = dataSourceSize - 1;
    while (true) {//reduce while there is more than one element in set.
        if (mapperWorkerThreadPool.isTerminated()) {
            int count = keeper.count();
            if (count == 0) {//no mapped result.
                log.info("there is no result given by mapper. exit!");
                return null;
            }
        }
        if (j == expectedReducTime) {
            log.info("complete reduce. exit flame.");
            break;
        }

        Set<Collector<Map<K, V>>> collectors = new HashSet<Collector<Map<K, V>>>(2);
        collectors.add(keeper.take());
        collectors.add(keeper.take());

        // get an idle worker.
        ReducerWorker<K, V> reducerWorkerBaby = chooseIdle(reducerWorkers, reducerWorker);

        log.info("reducing, collector size = " + keeper.size());

        reducerWorkerThreadPool
                .execute(new ReducerWorkerRunnable<K, V>(reducerWorkerBaby, collectors, reducer, keeper));

        j++;
    }

    //shutdown reducer worker thread pool.
    reducerWorkerThreadPool.shutdown();

    // collect result phase.
    while (!reducerWorkerThreadPool.isTerminated()) {
        Thread.yield();
    }
    if (null != keeper && keeper.size() == 1) {
        resultCollector = keeper.poll();
    } else {// error occured.
        int size = 0;
        if (null != keeper) {
            size = keeper.size();
        }
        log.info("after reduce, the result collector is not expected! collector size is " + size);
    }

    //return result collector.
    long end = System.currentTimeMillis();
    long cost = end - m1;
    double seconds = cost / 1000;
    log.info("mapping & reducing: cost " + seconds + " s");

    return resultCollector;
}

From source file:com.bigdata.dastor.service.StorageService.java

/** shuts node off to writes, empties memtables and the commit log. */
public synchronized void drain() throws IOException, InterruptedException, ExecutionException {
    ExecutorService mutationStage = StageManager.getStage(StageManager.MUTATION_STAGE);
    if (mutationStage.isTerminated()) {
        logger_.warn("Cannot drain node (did it already happen?)");
        return;
    }
    setMode("Starting drain process", true);
    Gossiper.instance.stop();
    setMode("Draining: shutting down MessageService", false);
    MessagingService.shutdown();
    setMode("Draining: emptying MessageService pools", false);
    MessagingService.waitFor();

    // lets flush.
    setMode("Draining: flushing column families", false);
    for (String tableName : DatabaseDescriptor.getNonSystemTables())
        for (Future f : Table.open(tableName).flush())
            f.get();

    setMode("Draining: replaying commit log", false);
    CommitLog.instance().forceNewSegment();
    // want to make sure that any segments deleted as a result of flushing are gone.
    DeletionService.waitFor();
    CommitLog.recover();

    // commit log recovery just sends work to the mutation stage. (there could have already been work there anyway.  
    // Either way, we need to let this one drain naturally, and then we're finished.
    setMode("Draining: clearing mutation stage", false);
    mutationStage.shutdown();
    while (!mutationStage.isTerminated())
        mutationStage.awaitTermination(5, TimeUnit.SECONDS);

    setMode("Node is drained", true);
}

From source file:com.turn.ttorrent.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght)
        throws InterruptedException, IOException, NoSuchAlgorithmException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}