Example usage for java.util.concurrent ExecutorService isTerminated

Introduction

On this page you can find example usages of java.util.concurrent ExecutorService isTerminated.

Prototype

boolean isTerminated();

Document

Returns true if all tasks have completed following shut down. Note that isTerminated is never true unless either shutdown or shutdownNow was called first.
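
As a quick orientation before the usage examples, here is a minimal sketch of the common pattern (the class name, pool size, and timeout below are illustrative, not taken from any of the examples): shut the pool down first, wait with a bounded awaitTermination, and use isTerminated to confirm the final state.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class IsTerminatedSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2); // illustrative pool size
        pool.submit(() -> System.out.println("task ran"));

        // isTerminated() can only become true after a shutdown request
        pool.shutdown();

        // Prefer a bounded wait over busy-polling isTerminated()
        if (!pool.awaitTermination(5, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // cancel tasks that are still running
        }
        System.out.println("terminated: " + pool.isTerminated());
    }
}

Several of the examples below instead poll isTerminated() in a loop; a bounded awaitTermination avoids busy-waiting, but both approaches rely on shutdown() or shutdownNow() having been called first.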

Usage

From source file:Main.java

public static boolean stop(ExecutorService executorService, int waitBeforeTerminateSecs, Logger logger) {
    int waitMillis = Math.max(1000, 1000 * waitBeforeTerminateSecs);

    executorService.shutdown();

    boolean stopped = false;
    while ((waitMillis > 0) && (!stopped)) {
        long startMillis = System.currentTimeMillis();
        try {
            logger.debug("Waiting for thread pool to stop");
            stopped = executorService.awaitTermination(waitMillis, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            logger.debug("Thread was interrupted while it was waiting for thread pool to stop", e);
            Thread.currentThread().interrupt();
            break;
        }
        waitMillis = (int) (waitMillis - (System.currentTimeMillis() - startMillis));
    }

    if (!executorService.isTerminated()) {
        logger.warn("Thread pool will be forcibly stopped now if it has not already stopped");
        executorService.shutdownNow();
        try {
            stopped = executorService.awaitTermination(waitBeforeTerminateSecs, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag instead of swallowing it
        }

        if (!executorService.isTerminated()) {
            logger.warn("Could not shutdown thread pool in [{}] seconds",
                    Integer.valueOf(waitBeforeTerminateSecs));
        }
    }

    return stopped;
}

From source file:com.quixey.hadoop.fs.oss.MultiPartUploader.java

private void awaitTermination(ExecutorService pool) {
    while (!pool.isTerminated()) {
        try {
            pool.awaitTermination(5, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }
}

From source file:org.geoserver.bkprst.RestoreTask.java

@Override
public void run() {

    // Read the previous backup's info; if it cannot be read, abort the restore
    BackupTask backupInfo = this.readBackupInfo(this.path);
    if (backupInfo == null) {
        LOGGER.severe("Backup data info were not written properly, the restore will not start");
        this.state = BrTaskState.FAILED;
        return;
    }

    // Sets up the filter to exclude some directories according to the previous backup info
    IOFileFilter excludeFilter = this.getExcludeFilter(backupInfo.includeData, backupInfo.includeGwc,
            backupInfo.includeLog);

    // Sets up source and destination
    File srcMount = new File(this.path);
    File trgMount = this.dataRoot.root();

    // Sets transaction
    this.trans = new RestoreTransaction(this, srcMount, trgMount, excludeFilter);

    try {
        // Start the transaction
        this.trans.start();
        if (checkForHalt()) {
            return;
        }

        // Sets up the copy task
        ExecutorService ex = Executors.newFixedThreadPool(2);
        if (ex == null || ex.isTerminated()) {
            throw new IllegalArgumentException(
                    "Unable to run asynchronously using a terminated or null ThreadPoolExecutor");
        }
        ExecutorCompletionService<File> cs = new ExecutorCompletionService<File>(ex);

        this.act = new CopyTree(excludeFilter, cs, srcMount, trgMount);
        this.act.addCopyListener(new DefaultProgress(this.id.toString()) {
            public void onUpdateProgress(float percent) {
                super.onUpdateProgress(percent);
                progress = percent;
            }
        });

        // Starts restore
        int workSize = this.act.copy();
        LOGGER.info("Restore " + this.id + " has started");
        this.startTime = new Date();
        this.state = BrTaskState.RUNNING;

        // Keep track of restore progress
        while (workSize-- > 0) {
            Future<File> future = cs.take();
            try {
                LOGGER.info("copied file: " + future.get());
            } catch (ExecutionException e) {

                LOGGER.log(Level.INFO, e.getLocalizedMessage(), e);
            }
            if (checkForHalt()) {
                ex.shutdown();
                if (!ex.awaitTermination(5, TimeUnit.SECONDS)) {
                    throw new RuntimeException("Unable to stop backup task");
                }
                return;
            }
        }

        // Restore completed
        this.trans.commit();

        // reload the config from disk
        getGeoServer().reload();
    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, e.getLocalizedMessage(), e);

        // In case of errors, rollback
        this.trans.rollback();
    } finally {
        haltSemaphore.release();
    }
}

From source file:org.geoserver.bkprst.BackupTask.java

@Override
public void run() {

    // Sets up the filter to exclude some directories according to the backup settings
    IOFileFilter excludeFilter = this.getExcludeFilter(this.includeData, this.includeGwc, this.includeLog);

    // Sets up source and destination
    File srcMount = this.dataRoot.root();
    File trgMount = new File(this.path);

    // Sets transaction
    this.trans = new BackupTransaction(this, srcMount, trgMount, excludeFilter);

    try {
        // Delete the destination directory if it already exists
        if (trgMount.exists()) {
            Remove.deleteDirectory(trgMount,
                    FileFilterUtils.or(FileFilterUtils.directoryFileFilter(), FileFilterUtils.fileFileFilter()),
                    true, true);
        }

        // Starts the transaction
        this.trans.start();
        if (checkForHalt()) {
            LOGGER.fine("run:Halt requested " + this.id);
            return;
        }

        // Sets up the copy task
        ExecutorService ex = Executors.newFixedThreadPool(2);
        if (ex == null || ex.isTerminated()) {
            throw new IllegalArgumentException(
                    "Unable to run asynchronously using a terminated or null ThreadPoolExecutor");
        }
        ExecutorCompletionService<File> cs = new ExecutorCompletionService<File>(ex);

        this.act = new CopyTree(excludeFilter, cs, srcMount, trgMount);
        this.act.addCopyListener(new DefaultProgress(this.id.toString()) {
            public void onUpdateProgress(float percent) {
                super.onUpdateProgress(percent);
                progress = percent;
            }
        });

        // Starts backup
        int workSize = this.act.copy();

        // Keep track of backup progress
        while (workSize-- > 0) {
            Future<File> future;
            try {
                future = cs.take();
                LOGGER.info("copied file: " + future.get());
            } catch (Exception e) {
                LOGGER.log(Level.INFO, e.getLocalizedMessage(), e);
            }

            if (checkForHalt()) {
                LOGGER.fine("run:Halt requested, shutting down threads " + this.id);
                ex.shutdown();
                if (!ex.awaitTermination(5, TimeUnit.SECONDS)) {
                    throw new RuntimeException("Unable to stop backup task");
                }
                return;
            }
        }

        // Writes info about backup
        if (!this.writeBackupInfo(this.path)) {
            LOGGER.severe(
                    "Backup data info were not written properly, a restore operation will fail on this data");
            this.state = BrTaskState.FAILED;
        }

        if (checkForHalt()) {
            LOGGER.fine("run:Halt requested " + this.id);
            return;
        }
        // Backup completed
        this.trans.commit();

    } catch (Exception e) {
        LOGGER.log(Level.SEVERE, e.getLocalizedMessage(), e);
        // In case of errors, roll back
        this.trans.rollback();
    } finally {
        haltSemaphore.release();
    }
}

From source file:com.brienwheeler.lib.concurrent.ExecutorsTest.java

@Test
public void testNewSingleThreadExecutorShutdownClean() throws InterruptedException {
    NamedThreadFactory threadFactory = new NamedThreadFactory(THREAD_FACTORY_NAME);
    ExecutorService executor = Executors.newSingleThreadExecutor(threadFactory);
    Assert.assertFalse(executor.isShutdown());
    Assert.assertFalse(executor.isTerminated());

    executor.execute(new NullRunnable());

    executor.shutdown();
    Assert.assertTrue(executor.isShutdown());
    executor.awaitTermination(10, TimeUnit.MILLISECONDS);
    Assert.assertTrue(executor.isTerminated());
}

From source file:org.sonar.runner.api.CommandExecutor.java

private void monitorProcess(final ProcessMonitor processMonitor, final ExecutorService executor,
        final Process process) {
    new Thread() {
        @Override
        public void run() {
            while (!executor.isTerminated()) {
                if (processMonitor.stop()) {
                    process.destroy();
                }
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    // ignore
                }
            }
        }
    }.start();
}

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run queries as fast as possible.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue whenever the queue length is low, and start <code>numThreads</code> worker threads to fetch queries from the
 * queue and send them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void multiThreadedQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);

            // Keep 20 queries inside the query queue.
            while (queryQueue.size() == 20) {
                Thread.sleep(1);

                long currentTime = System.currentTimeMillis();
                if (currentTime - reportStartTime >= reportIntervalMs) {
                    long timePassed = currentTime - startTime;
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info(
                            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                                    + "Average Client Time: {}ms.",
                            timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt);
                    reportStartTime = currentTime;
                    numReportIntervals++;

                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, Average Broker Time: {}ms, "
                    + "Average Client Time: {}ms.",
            timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:FullReindexer.java

public void reindex() throws IOException {
    ExecutorService executor = Executors.newFixedThreadPool(totalThreads);

    for (int i = 0; i < totalThreads; i++) {
        Worker worker = new Worker(i, totalThreads, readClient, writeClient);
        executor.execute(worker);
    }

    executor.shutdown();

    while (!executor.isTerminated()) {
        // Wait until done
    }

    readClient.close();
    writeClient.close();
}

From source file:com.linkedin.pinot.tools.perf.QueryRunner.java

/**
 * Use multiple threads to run query at a target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS (target QPS).
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *                                               them, 0 means never.
 * @throws Exception
 */
public static void targetQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    int queryIntervalMs = (int) (MILLIS_PER_SECOND / startQPS);
    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }

        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);

            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                int numQueriesExecutedInt = numQueriesExecuted.get();
                LOGGER.info(
                        "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                        startQPS, timePassed, numQueriesExecutedInt,
                        numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                        totalBrokerTime.get() / (double) numQueriesExecutedInt,
                        totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                reportStartTime = currentTime;
                numReportIntervals++;

                if ((numIntervalsToReportAndClearStatistics != 0)
                        && (numReportIntervals == numIntervalsToReportAndClearStatistics)) {
                    numReportIntervals = 0;
                    startTime = currentTime;
                    reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                            statisticsList);
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queries getting executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info(
            "Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            startQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}

From source file:org.yamj.core.service.ArtworkProcessScheduler.java

@Scheduled(initialDelay = 30000, fixedDelay = 60000)
public void processArtwork() throws Exception {
    int maxThreads = configService.getIntProperty("yamj3.scheduler.artworkprocess.maxThreads", 1);
    if (maxThreads <= 0) {
        if (!messageDisabled) {
            messageDisabled = Boolean.TRUE;
            LOG.info("Artwork processing is disabled");
        }
        return;
    } else {
        messageDisabled = Boolean.FALSE;
    }

    int maxResults = configService.getIntProperty("yamj3.scheduler.artworkprocess.maxResults", 20);
    List<QueueDTO> queueElements = artworkStorageService.getArtworLocatedQueue(maxResults);
    if (CollectionUtils.isEmpty(queueElements)) {
        LOG.debug("No artwork found to process");
        return;
    }

    LOG.info("Found {} artwork objects to process; process with {} threads", queueElements.size(), maxThreads);
    BlockingQueue<QueueDTO> queue = new LinkedBlockingQueue<QueueDTO>(queueElements);

    ExecutorService executor = Executors.newFixedThreadPool(maxThreads);
    for (int i = 0; i < maxThreads; i++) {
        ArtworkProcessRunner worker = new ArtworkProcessRunner(queue, artworkProcessorService);
        executor.execute(worker);
    }
    executor.shutdown();

    // run until all workers have finished
    while (!executor.isTerminated()) {
        try {
            TimeUnit.SECONDS.sleep(5);
        } catch (InterruptedException ignore) {
        }
    }

    LOG.debug("Finished artwork processing");
}