Example usage for java.util.concurrent ExecutorService shutdown

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService.shutdown().

Prototype

void shutdown();

Document

Initiates an orderly shutdown in which previously submitted tasks are executed, but no new tasks will be accepted. Invocation has no additional effect if the executor is already shut down. Note that shutdown() does not wait for previously submitted tasks to complete; use awaitTermination for that.
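
Before the project examples, here is a minimal, self-contained sketch of the usual pattern: shutdown() stops intake of new tasks and returns immediately, while awaitTermination blocks until the already-submitted work drains. The class name and timeout below are illustrative, not taken from any project quoted on this page.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownDemo {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> System.out.println("submitted before shutdown: still runs"));

        pool.shutdown(); // stop accepting new tasks; queued tasks keep running

        // shutdown() does not wait, so block until the queue drains.
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // timed out: interrupt whatever is still running
        }

        // Any pool.submit(...) from here on would throw RejectedExecutionException.
    }
}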

Usage

From source file:com.comcast.cdn.traffic_control.traffic_router.core.dns.ZoneManager.java

protected static void initZoneCache(final TrafficRouter tr) {
    synchronized (ZoneManager.class) {
        final CacheRegister cacheRegister = tr.getCacheRegister();
        final JSONObject config = cacheRegister.getConfig();

        int poolSize = 1;
        final double scale = config.optDouble("zonemanager.threadpool.scale", 0.75);
        final int cores = Runtime.getRuntime().availableProcessors();

        if (cores > 2) {
            final Double s = Math.floor((double) cores * scale);

            if (s.intValue() > 1) {
                poolSize = s.intValue();
            }
        }

        final ExecutorService initExecutor = Executors.newFixedThreadPool(poolSize);

        final ExecutorService ze = Executors.newFixedThreadPool(poolSize);
        final ScheduledExecutorService me = Executors.newScheduledThreadPool(2); // 2 threads: one to refresh static zones, one to refresh dynamic zones
        final int maintenanceInterval = config.optInt("zonemanager.cache.maintenance.interval", 300); // default 5 minutes
        final String dspec = "expireAfterAccess="
                + config.optString("zonemanager.dynamic.response.expiration", "300s"); // default to 5 minutes

        final LoadingCache<ZoneKey, Zone> dzc = createZoneCache(ZoneCacheType.DYNAMIC,
                CacheBuilderSpec.parse(dspec));
        final LoadingCache<ZoneKey, Zone> zc = createZoneCache(ZoneCacheType.STATIC);

        initZoneDirectory();

        try {
            LOGGER.info("Generating zone data");
            generateZones(tr, zc, dzc, initExecutor);
            initExecutor.shutdown();
            // awaitTermination returns false on timeout rather than throwing.
            if (!initExecutor.awaitTermination(5, TimeUnit.MINUTES)) {
                LOGGER.warn("Initialization of zone data exceeded time limit of 5 minutes; continuing");
            }
            LOGGER.info("Zone generation complete");
        } catch (final InterruptedException ex) {
            LOGGER.warn("Interrupted while generating zone data; continuing", ex);
        } catch (IOException ex) {
            LOGGER.fatal("Caught fatal exception while generating zone data!", ex);
        }

        me.scheduleWithFixedDelay(getMaintenanceRunnable(dzc, ZoneCacheType.DYNAMIC, maintenanceInterval), 0,
                maintenanceInterval, TimeUnit.SECONDS);
        me.scheduleWithFixedDelay(getMaintenanceRunnable(zc, ZoneCacheType.STATIC, maintenanceInterval), 0,
                maintenanceInterval, TimeUnit.SECONDS);

        final ExecutorService tze = ZoneManager.zoneExecutor;
        final ScheduledExecutorService tme = ZoneManager.zoneMaintenanceExecutor;
        final LoadingCache<ZoneKey, Zone> tzc = ZoneManager.zoneCache;
        final LoadingCache<ZoneKey, Zone> tdzc = ZoneManager.dynamicZoneCache;

        ZoneManager.zoneExecutor = ze;
        ZoneManager.zoneMaintenanceExecutor = me;
        ZoneManager.dynamicZoneCache = dzc;
        ZoneManager.zoneCache = zc;

        if (tze != null) {
            tze.shutdownNow();
        }

        if (tme != null) {
            tme.shutdownNow();
        }

        if (tzc != null) {
            tzc.invalidateAll();
        }

        if (tdzc != null) {
            tdzc.invalidateAll();
        }
    }
}
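
Note the swap-then-teardown order above: the replacement executors and caches are published first, and only then are the old ones stopped with shutdownNow(). Unlike shutdown(), shutdownNow() also interrupts running tasks and returns whatever was still queued. A small sketch of that difference (oldExecutor and LOGGER are stand-in names, not from the source above):

// shutdownNow() interrupts workers and hands back the never-started tasks;
// shutdown() would instead let all queued tasks run to completion.
final List<Runnable> neverStarted = oldExecutor.shutdownNow();
LOGGER.info("Discarded " + neverStarted.size() + " queued tasks");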

From source file:com.norconex.collector.core.crawler.AbstractCrawler.java

protected void processReferences(final ICrawlDataStore refStore, final JobStatusUpdater statusUpdater,
        final JobSuite suite, final boolean delete) {

    int numThreads = getCrawlerConfig().getNumThreads();
    final CountDownLatch latch = new CountDownLatch(numThreads);
    ExecutorService pool = Executors.newFixedThreadPool(numThreads);

    for (int i = 0; i < numThreads; i++) {
        final int threadIndex = i + 1;
        LOG.debug(getId() + ": Crawler thread #" + threadIndex + " started.");
        pool.execute(new ProcessReferencesRunnable(suite, statusUpdater, refStore, delete, latch));
    }

    try {
        latch.await();
        pool.shutdown();
    } catch (InterruptedException e) {
        throw new CollectorException(e);
    }
}
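
Here the CountDownLatch already guarantees that every worker body has finished before shutdown() is called, so the shutdown merely releases the idle pool threads. Note that if latch.await() is interrupted, pool.shutdown() is skipped and the threads leak; moving the shutdown into a finally block avoids that. A latch-free sketch of the same wait (the one-hour timeout is an arbitrary illustration):

pool.shutdown();
// awaitTermination blocks until all submitted runnables have completed.
if (!pool.awaitTermination(1, TimeUnit.HOURS)) {
    LOG.warn(getId() + ": crawler threads did not finish in time.");
}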

From source file:com.yoshio3.modules.AzureADServerAuthModule.java

private AuthenticationResult getAccessTokenFromRefreshToken(String refreshToken, String currentUri)
        throws Throwable {
    AuthenticationContext context;
    AuthenticationResult result;
    ExecutorService service = null;
    try {
        service = Executors.newFixedThreadPool(1);
        context = new AuthenticationContext(authority + tenant + "/", true, service);
        Future<AuthenticationResult> future = context.acquireTokenByRefreshToken(refreshToken,
                new ClientCredential(clientId, secretKey), null, null);
        result = future.get();
    } catch (ExecutionException e) {
        throw e.getCause();
    } finally {
        if (service != null) {
            service.shutdown();
        }
    }

    if (result == null) {
        throw new ServiceUnavailableException("authentication result was null");
    }
    return result;
}
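
The try/finally above guarantees the single-thread pool is released even when token acquisition fails; without the shutdown, the pool's non-daemon thread would keep the JVM alive. On Java 19 and later, ExecutorService implements AutoCloseable (close() shuts down and waits for termination), so the same guarantee can be written as a sketch of this shape:

// Java 19+: close() is called on exit and waits for running tasks.
try (ExecutorService service = Executors.newFixedThreadPool(1)) {
    AuthenticationContext context = new AuthenticationContext(authority + tenant + "/", true, service);
    // ... acquire and return the token as above ...
}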

From source file:com.idocbox.flame.Helios.java

/**
 * Fire the job: split the data source, run the mappers, then reduce.
 * @param jobConfig the job configuration (data source, spliter, mapper, reducer, keeper).
 * @return the collector holding the reduced result, or null if mapping produced nothing.
 */
public Collector<Map<K, V>> fire(JobConfig<K, V, T> jobConfig) {

    long start = System.currentTimeMillis();

    Collector<Map<K, V>> resultCollector = null;

    // data source.
    DataSource<T> dataSource = jobConfig.getDataSource();
    // data source spliter.
    DataSourceSpliter<T> dataSourceSpliter = jobConfig.getDataSourceSpliter();
    // mapper worker. root mapper worker.
    MapperWorker<K, V, T> mapperWorker = jobConfig.getMapperWorker();
    // reducer worker. root reducer worker.
    ReducerWorker<K, V> reducerWorker = jobConfig.getReducerWorker();
    // mapper.
    Mapper<K, V, T> mapper = jobConfig.getMapper();
    // reducer.
    Reducer<K, V> reducer = jobConfig.getReducer();
    // keeper.
    Keeper<Collector<Map<K, V>>> keeper = jobConfig.getKeeper();

    // splitting phase.

    // split the data source into several smaller data sources.
    log.info("splitting datasource ...");
    Map<String, DataSource<T>> dsMap = dataSourceSpliter.split(dataSource);

    long m1 = System.currentTimeMillis();
    long cost1 = m1 - start;
    double seconds1 = cost1 / 1000.0; // 1000.0: avoid integer truncation
    log.info("splitting datasource: cost " + seconds1 + " s");

    // abort if splitting produced nothing.
    if (null == dsMap || dsMap.isEmpty()) {
        log.info("Split data source is empty! Exiting flame!");
        return null;
    }

    // mapping phase.

    // generate mapper worker.
    log.info("mapping && reducing ...");
    Set<String> dsKeys = dsMap.keySet();
    // mapper thread pool size: never more threads than data source splits.
    int mapperThreadSize = Math.min(dsKeys.size(), jobConfig.getMaxMapperWorker());
    //create mapper worker thread pool.
    ExecutorService mapperWorkerThreadPool = Executors.newFixedThreadPool(mapperThreadSize);
    int dataSourceSize = 0;
    for (String key : dsKeys) {
        //create mapper worker baby.
        MapperWorker<K, V, T> mapperWorkerBaby = mapperWorker.create(key);

        //assign data source and run the worker.
        DataSource<T> dsUnit = dsMap.get(key);
        if (null != dsUnit) {
            //execute mapper work in thread pool.
            mapperWorkerThreadPool
                    .execute(new MapperWorkerRunable<K, V, T>(mapperWorkerBaby, dsUnit, mapper, keeper));

            dataSourceSize++;
        }
    }
    //shutdown executor service.
    mapperWorkerThreadPool.shutdown();

    // reduce phase.

    //generate reducer worker, assign mapper worker's compute result
    // to reducer worker.

    // create reducer worker thread pool.
    ExecutorService reducerWorkerThreadPool = Executors.newFixedThreadPool(jobConfig.getMaxReducerWorker());

    // take 2 collectors, merge them into one, and pass the result to a reducer.
    Set<ReducerWorker<K, V>> reducerWorkers = new HashSet<ReducerWorker<K, V>>();
    int j = 0;
    int expectedReduceCount = dataSourceSize - 1; // merging pairwise, N collectors need N - 1 reduce steps
    while (true) { // reduce while more than one collector remains in the keeper.
        if (mapperWorkerThreadPool.isTerminated()) {
            int count = keeper.count();
            if (count == 0) {//no mapped result.
                log.info("there is no result given by mapper. exit!");
                return null;
            }
        }
        if (j == expectedReduceCount) {
            log.info("complete reduce. exit flame.");
            break;
        }

        Set<Collector<Map<K, V>>> collectors = new HashSet<Collector<Map<K, V>>>(2);
        collectors.add(keeper.take());
        collectors.add(keeper.take());

        // get an idle worker.
        ReducerWorker<K, V> reducerWorkerBaby = chooseIdle(reducerWorkers, reducerWorker);

        log.info("reducing, collector size = " + keeper.size());

        reducerWorkerThreadPool
                .execute(new ReducerWorkerRunnable<K, V>(reducerWorkerBaby, collectors, reducer, keeper));

        j++;
    }

    //shutdown reducer worker thread pool.
    reducerWorkerThreadPool.shutdown();

    // collect result phase: block until every reducer task has finished.
    try {
        reducerWorkerThreadPool.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (null != keeper && keeper.size() == 1) {
        resultCollector = keeper.poll();
    } else { // error occurred.
        int size = 0;
        if (null != keeper) {
            size = keeper.size();
        }
        log.info("after reduce, the result collector is not expected! collector size is " + size);
    }

    long end = System.currentTimeMillis();
    long cost = end - m1;
    double seconds = cost / 1000.0; // 1000.0: avoid integer truncation
    log.info("mapping & reducing: cost " + seconds + " s");

    return resultCollector;
}
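
A detail this example leans on: isTerminated() can only become true after shutdown() or shutdownNow() has been called, which is why the loop consults it only once mapperWorkerThreadPool.shutdown() has been issued; the mapper pool keeps draining its already-submitted work while the reducers consume results. A tiny standalone illustration (a fragment, assuming InterruptedException is handled by the caller):

ExecutorService es = Executors.newCachedThreadPool();
System.out.println(es.isTerminated()); // false: shutdown has not been requested
es.shutdown();
es.awaitTermination(1, TimeUnit.SECONDS);
System.out.println(es.isTerminated()); // true: shut down and nothing left to run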

From source file:org.ligoj.app.plugin.prov.azure.AbstractAzureToolPluginResource.java

/**
 * Get the Azure bearer token from the authority.
 */
private String getAccessTokenFromUserCredentials(final String tenant, final String principal,
        final String key) {
    final ExecutorService service = newExecutorService();
    try {
        final AuthenticationContext context = newAuthenticationContext(tenant, service);
        /*
         * Replace {client_id} with ApplicationID and {password} with password that were used to create Service
         * Principal above.
         */
        final ClientCredential credential = new ClientCredential(principal, key);
        return context.acquireToken(getManagementUrl(), credential, null).get().getAccessToken();
    } catch (final ExecutionException | InterruptedException | MalformedURLException e) {
        // Authentication failed
        log.info("Azure authentication failed for tenant {} and principal {}", tenant, principal, e);
    } finally {
        service.shutdown();
    }
    return null;
}
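
Because acquireToken(...).get() blocks, the finally block runs only once the request has completed or failed, and the shutdown there matters: the AuthenticationContext is handed an executor that the caller owns, so an un-shut-down pool of non-daemon threads would keep the JVM from exiting. One nit, as a sketch: when InterruptedException is merely logged, restoring the interrupt flag preserves it for callers.

} catch (final InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve the interrupt for callers
    log.info("Azure authentication interrupted for tenant {}", tenant, e);
}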

From source file:edu.cmu.lti.oaqa.bioqa.providers.kb.TmToolConceptProvider.java

@Override
public List<Concept> getConcepts(List<JCas> jcases) throws AnalysisEngineProcessException {
    // send request
    List<String> normalizedTexts = jcases.stream().map(JCas::getDocumentText)
            .map(PubAnnotationConvertUtil::normalizeText).collect(toList());
    ListMultimap<Integer, PubAnnotation.Denotation> index2denotations = Multimaps
            .synchronizedListMultimap(ArrayListMultimap.create());
    ExecutorService es = Executors.newCachedThreadPool();
    for (String trigger : triggers) {
        es.submit(() -> {
            try {
                List<String> denotationStrings = requestConcepts(normalizedTexts, trigger);
                assert denotationStrings.size() == jcases.size();
                for (int i = 0; i < jcases.size(); i++) {
                    PubAnnotation.Denotation[] denotations = gson.fromJson(denotationStrings.get(i),
                            PubAnnotation.Denotation[].class);
                    index2denotations.putAll(i, Arrays.asList(denotations));
                }
            } catch (Exception e) {
                throw TmToolConceptProviderException.unknownException(trigger, e);
            }
        });
    }
    es.shutdown();
    try {
        boolean status = es.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        if (!status) {
            throw new AnalysisEngineProcessException();
        }
    } catch (InterruptedException e) {
        throw new AnalysisEngineProcessException(e);
    }
    // convert denotation strings
    List<Concept> concepts = new ArrayList<>();
    for (int i = 0; i < jcases.size(); i++) {
        JCas jcas = jcases.get(i);
        List<PubAnnotation.Denotation> denotations = index2denotations.get(i);
        try {
            concepts.addAll(PubAnnotationConvertUtil.convertDenotationsToConcepts(jcas, denotations));
        } catch (StringIndexOutOfBoundsException e) {
            throw TmToolConceptProviderException.offsetOutOfBounds(jcas.getDocumentText(), denotations, e);
        }
    }
    return concepts;
}
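
awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS) is the usual wait-indefinitely idiom: Long.MAX_VALUE nanoseconds is roughly 292 years, so the timeout branch is unreachable in practice, yet checking the returned status keeps the code honest should the timeout ever be lowered.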

From source file:com.nts.alphamale.handler.ExecutorHandler.java

/**
 * Execute a list of command lines synchronously, in parallel.
 * @param cmdList the command lines to run
 * @param timeoutSecond per-task timeout, in seconds
 * @return one result map per completed command
 */
public List<Map<String, Object>> executeParallel(List<CommandLine> cmdList, int timeoutSecond) {
    ExecutorService executor = Executors.newCachedThreadPool();
    List<Future<Map<String, Object>>> resultList;
    List<Map<String, Object>> results = new ArrayList<Map<String, Object>>();
    List<SynchronousTask> taskList = new ArrayList<SynchronousTask>();
    for (CommandLine cmd : cmdList) {
        taskList.add(new SynchronousTask(cmd, timeoutSecond * 1000));
    }
    try {
        resultList = executor.invokeAll(taskList, timeoutSecond + 10, TimeUnit.SECONDS);
        for (Future<Map<String, Object>> result : resultList) {
            results.add(result.get());
        }
    } catch (InterruptedException e) {
        log.error(e.getMessage());
    } catch (ExecutionException e) {
        log.error(e.getMessage());
    }
    if (!executor.isShutdown()) {
        executor.shutdown();
    }
    return results;
}
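
invokeAll already blocks until every task has completed or the overall timeout has elapsed, so the pool is idle by the time shutdown() is reached; the isShutdown() guard is harmless but redundant, since repeated shutdown() calls have no additional effect. One caveat: tasks cancelled by the invokeAll timeout make result.get() throw CancellationException, an unchecked exception neither catch block above covers. A sketch of a guard for that case:

for (Future<Map<String, Object>> result : resultList) {
    if (!result.isCancelled()) { // skip tasks cancelled by the invokeAll timeout
        results.add(result.get());
    }
}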

From source file:it.wami.map.mongodeploy.OsmSaxHandler.java

/**
 * Build a geometry runnable for the given way and flush the current chunk when full.
 * @param way the Way
 */
private void populateWayGeo(Way way) {
    Runnable r = new WayRunnable(db, way, waysQueue);

    waysRunnables.add(r);
    int current = (int) (readWays % WAYS_CHUNK);

    if (current == WAYS_CHUNK - 1) {
        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);
        for (Runnable currentRunnable : waysRunnables) {
            executorService.execute(currentRunnable);
        }
        waysRunnables = Collections.synchronizedList(new ArrayList<Runnable>());
        executorService.shutdown();
        // Block until the chunk is drained instead of busy-spinning on isTerminated().
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }

        saveEntry(waysQueue, COLL_WAYS);
    }
}

From source file:it.wami.map.mongodeploy.OsmSaxHandler.java

private void populateRelation(Relation relation) {
    Runnable r = new RelationRunnable(db, relation, relationsQueue);

    relationRunnables.add(r);
    int current = (int) (readRelations % RELATIONS_CHUNK);

    if (current == RELATIONS_CHUNK - 1) {
        int cores = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(cores);
        for (Runnable currentRunnable : relationRunnables) {
            executorService.execute(currentRunnable);
        }
        relationRunnables = Collections.synchronizedList(new ArrayList<Runnable>());
        executorService.shutdown();
        // Block until the chunk is drained instead of busy-spinning on isTerminated().
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }

        saveEntry(relationsQueue, COLL_RELATIONS);
    }
}
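
Both chunk flushes above build a throwaway fixed pool, run it, and wait for it to drain. A sketch of an alternative that reuses one long-lived pool and blocks via invokeAll (pool is a stand-in field; Executors.callable adapts each Runnable):

// invokeAll returns only when the whole chunk has finished.
final List<Callable<Object>> batch = new ArrayList<>();
for (Runnable task : waysRunnables) {
    batch.add(Executors.callable(task));
}
pool.invokeAll(batch); // throws InterruptedException if the wait is interrupted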

From source file:com.googlecode.jcasockets.perf.Client.java

public void execute() throws InterruptedException, ExecutionException {
    int numberOfThreads = clientOptions.getNumberOfThreads();
    String ipAddress = clientOptions.getIpAddress();
    List<Integer> ports = clientOptions.getPorts();
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfThreads);
    try {
        Collection<Callable<ExecutionStatistics>> senderTestRunners = new ArrayList<Callable<ExecutionStatistics>>(
                numberOfThreads);
        for (Integer port : ports) {
            for (int i = 0; i < numberOfThreads; i++) {
                SocketSender socketSender = socketSenderFactory.createSocketSender(ipAddress, port);
                senderTestRunners.add(new SenderTestRunner(clientOptions, socketSender));
            }
        }
        List<Future<ExecutionStatistics>> executionStatisticsFutures = executorService
                .invokeAll(senderTestRunners);
        executionStatistics = new ExecutionStatistics(null);
        for (Future<ExecutionStatistics> future : executionStatisticsFutures) {
            ExecutionStatistics that = future.get();
            executionStatistics.combine(that);
        }
    } finally {
        executorService.shutdown();
    }
}
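
Because this invokeAll call has no timeout, it returns only once every SenderTestRunner has completed, so each future.get() in the loop returns immediately; a task failure resurfaces there as an ExecutionException, which this method propagates. Keeping executorService.shutdown() in the finally block guarantees the pool is released on both the success and failure paths.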