Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

This page collects example usages of java.util.concurrent.ExecutorService.shutdownNow() drawn from open-source projects.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
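
Before the project examples, here is a minimal, self-contained sketch of the shutdown sequence recommended by the ExecutorService Javadoc: request an orderly shutdown() first and fall back to shutdownNow() only if tasks do not finish within a deadline. The class name, pool size, and timeouts below are illustrative and not taken from any of the projects that follow.

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownNowExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        pool.submit(() -> {
            try {
                Thread.sleep(60_000); // simulate a long-running task
            } catch (InterruptedException e) {
                // shutdownNow() delivers this interrupt; restore the flag and stop
                Thread.currentThread().interrupt();
            }
        });

        pool.shutdown(); // stop accepting new tasks, let running ones finish
        if (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
            // The deadline passed: interrupt the workers and collect the tasks
            // that were still waiting in the queue.
            List<Runnable> neverStarted = pool.shutdownNow();
            System.out.println(neverStarted.size() + " task(s) never started");
        }
    }
}

Note that shutdownNow() only attempts to stop the running tasks, typically by interrupting their threads; a task that never checks its interrupt status may keep running after the call returns.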

Usage

From source file:com.asakusafw.runtime.stage.launcher.LauncherOptionsParser.java
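
A fixed-size pool of daemon threads resolves library cache entries concurrently; shutdownNow() in the finally block releases the pool's threads whether or not resolution succeeds.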

private Map<Path, Path> processLibraryCache(List<Path> libraryPaths) throws IOException, InterruptedException {
    boolean useCache = computeEnabled();
    if (useCache) {
        Path repositoryPath = computeRepositoryPath();
        File temporary = computeTemporaryDirectory();
        int threads = Math.max(configuration.getInt(KEY_MAX_THREADS, DEFAULT_MAX_THREADS), MINIMUM_MAX_THREADS);
        int retryCount = configuration.getInt(KEY_CACHE_RETRY_COUNT, DEFAULT_CACHE_RETRY_COUNT);
        long retryInterval = configuration.getLong(KEY_CACHE_RETRY_INTERVAL, DEFAULT_CACHE_RETRY_INTERVAL);
        FileCacheRepository unit = new HadoopFileCacheRepository(configuration, repositoryPath,
                new LocalFileLockProvider<Path>(new File(temporary, PATH_LOCK_DIRECTORY)),
                new ConstantRetryStrategy(retryCount, retryInterval));
        ExecutorService executor = Executors.newFixedThreadPool(threads, DAEMON_THREAD_FACTORY);
        try {
            BatchFileCacheRepository repo = new ConcurrentBatchFileCacheRepository(unit, executor);
            return repo.resolve(libraryPaths);
        } finally {
            executor.shutdownNow();
        }
    } else {
        return new NullBatchFileCacheRepository().resolve(libraryPaths);
    }
}

From source file:org.apache.bookkeeper.common.util.OrderedExecutor.java
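
An ordered executor backed by several single-thread executors forwards shutdownNow() to each of them and aggregates the lists of tasks that were still awaiting execution.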

/**
 * {@inheritDoc}
 */
@Override
public List<Runnable> shutdownNow() {
    List<Runnable> runnables = new ArrayList<Runnable>();
    for (ExecutorService executor : threads) {
        runnables.addAll(executor.shutdownNow());
    }
    return runnables;
}

From source file:com.googlecode.concurrentlinkedhashmap.MultiThreadedTest.java
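
When a multi-threaded cache test times out, the harness dumps every thread's stack trace, calls shutdownNow(), waits briefly for termination, and then reports the cache state and recorded failures.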

private void handleTimout(ConcurrentLinkedHashMap<?, ?> cache, ExecutorService es, TimeoutException e) {
    for (StackTraceElement[] trace : Thread.getAllStackTraces().values()) {
        for (StackTraceElement element : trace) {
            info("\tat " + element);
        }
        if (trace.length > 0) {
            info("------");
        }
    }
    es.shutdownNow();
    try {
        es.awaitTermination(10, SECONDS);
    } catch (InterruptedException ex) {
        fail("", ex);
    }

    // Print the state of the cache
    debug("Cached Elements: %s", cache.toString());
    debug("Deque Forward:\n%s", ascendingToString(cache));
    debug("Deque Backward:\n%s", descendingToString(cache));

    // Print the recorded failures
    for (String failure : failures) {
        debug(failure);
    }
    fail("Spun forever", e);
}

From source file:org.csc.phynixx.connection.MTPooledConnectionIT.java
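
The test submits its workers, requests an orderly shutdown(), and waits with awaitTermination(); shutdownNow() is only used as a fallback when the pool fails to finish in time.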

private void startRunners(IActOnConnection actOnConnection, int numThreads) throws Exception {
    exceptions.clear();
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    for (int i = 0; i < numThreads; i++) {
        Callable<Object> task = new Caller(actOnConnection);
        executorService.submit(task);
    }

    executorService.shutdown();

    // allow up to 10 seconds per pooled connection
    boolean inTime = executorService.awaitTermination(10L * CONNECTION_POOL_SIZE, TimeUnit.SECONDS);
    if (!inTime) {
        if (!executorService.isShutdown()) {
            List<Runnable> runnables = executorService.shutdownNow();
        }
        throw new IllegalStateException(
                "Execution was stopped after " + 10 * CONNECTION_POOL_SIZE + " seconds");
    }
    if (exceptions.size() > 0) {
        for (int i = 0; i < exceptions.size(); i++) {
            Exception ex = (Exception) exceptions.get(i);
            ex.printStackTrace();
        }
        throw new IllegalStateException("Error occurred", exceptions.get(0));
    }
}

From source file:com.liveramp.hank.partition_server.UpdateManager.java
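
One executor per data directory runs partition update tasks; shutdownNow() aborts the update when the manager is interrupted and also cleans up all executors once the updates have completed.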

@Override
public void update() throws IOException {
    HankTimer timer = new HankTimer();
    try {

        // Delete unknown files
        deleteUnknownFiles();
        // Perform update
        Semaphore concurrentUpdatesSemaphore = new Semaphore(configurator.getNumConcurrentUpdates());
        List<Throwable> encounteredThrowables = new ArrayList<Throwable>();
        PartitionUpdateTaskStatisticsAggregator partitionUpdateTaskStatisticsAggregator = new PartitionUpdateTaskStatisticsAggregator();
        Map<String, Queue<PartitionUpdateTask>> dataDirectoryToUpdateTasks = new HashMap<String, Queue<PartitionUpdateTask>>();
        List<PartitionUpdateTask> allUpdateTasks = buildPartitionUpdateTasks(
                partitionUpdateTaskStatisticsAggregator, encounteredThrowables);
        // Build and organize update tasks per data directory
        for (PartitionUpdateTask updateTask : allUpdateTasks) {
            String dataDirectory = updateTask.getDataDirectory();
            Queue<PartitionUpdateTask> updateTasks = dataDirectoryToUpdateTasks.get(dataDirectory);
            if (updateTasks == null) {
                updateTasks = new LinkedList<PartitionUpdateTask>();
                dataDirectoryToUpdateTasks.put(dataDirectory, updateTasks);
            }
            updateTasks.add(updateTask);
        }

        // Logging
        LOG.info("Number of update tasks: " + allUpdateTasks.size());
        for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
            LOG.info("Number of update tasks scheduled in " + entry.getKey() + ": " + entry.getValue().size());
        }

        // Build executor services
        Map<String, ExecutorService> dataDirectoryToExecutorService = new HashMap<String, ExecutorService>();
        for (String dataDirectory : dataDirectoryToUpdateTasks.keySet()) {
            dataDirectoryToExecutorService.put(dataDirectory,
                    new UpdateThreadPoolExecutor(configurator.getMaxConcurrentUpdatesPerDataDirectory(),
                            new UpdaterThreadFactory(dataDirectory), concurrentUpdatesSemaphore));
        }

        LOG.info("Submitting update tasks for " + dataDirectoryToUpdateTasks.size() + " directories.");

        // Execute tasks. We execute one task for each data directory and loop around so that the tasks
        // attempt to acquire the semaphore in a reasonable order.
        boolean remaining = true;
        while (remaining) {
            remaining = false;
            for (Map.Entry<String, Queue<PartitionUpdateTask>> entry : dataDirectoryToUpdateTasks.entrySet()) {
                // Pop next task
                Queue<PartitionUpdateTask> partitionUpdateTasks = entry.getValue();
                if (!partitionUpdateTasks.isEmpty()) {
                    PartitionUpdateTask partitionUpdateTask = partitionUpdateTasks.remove();
                    // Execute task
                    dataDirectoryToExecutorService.get(entry.getKey()).execute(partitionUpdateTask);
                }
                if (!partitionUpdateTasks.isEmpty()) {
                    remaining = true;
                }
            }
        }

        LOG.info("All update tasks submitted, shutting down executor services");

        // Shutdown executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdown();
        }

        LOG.info("Waiting for executors to finish.");

        // Wait for executors to finish
        for (Map.Entry<String, ExecutorService> entry : dataDirectoryToExecutorService.entrySet()) {
            String directory = entry.getKey();
            ExecutorService executorService = entry.getValue();

            boolean keepWaiting = true;
            while (keepWaiting) {
                try {
                    LOG.info("Waiting for updates to complete on data directory: " + directory);
                    boolean terminated = executorService.awaitTermination(
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_VALUE,
                            UPDATE_EXECUTOR_TERMINATION_CHECK_TIMEOUT_UNIT);
                    if (terminated) {
                        // We finished executing all tasks
                        // Otherwise, timeout elapsed and current thread was not interrupted. Keep waiting.
                        LOG.info("Finished updates for directory: " + directory);
                        keepWaiting = false;
                    }
                    // Record update ETA
                    Hosts.setUpdateETA(host, partitionUpdateTaskStatisticsAggregator.computeETA());
                } catch (InterruptedException e) {
                    // Received interruption (stop request).
                    // Swallow the interrupted state and ask the executor to shutdown immediately. Also, keep waiting.
                    LOG.info(
                            "The update manager was interrupted. Stopping the update process (stop executing new partition update tasks"
                                    + " and wait for those that were running to finish).");
                    // Shutdown all executors
                    for (ExecutorService otherExecutorService : dataDirectoryToExecutorService.values()) {
                        otherExecutorService.shutdownNow();
                    }
                    // Record failed update exception (we need to keep waiting)
                    encounteredThrowables.add(
                            new IOException("Failed to complete update: update interruption was requested."));
                }
            }
        }

        LOG.info("All executors have finished updates");

        // Shutdown all executors
        for (ExecutorService executorService : dataDirectoryToExecutorService.values()) {
            executorService.shutdownNow();
        }

        LOG.info("Finished with " + encounteredThrowables.size() + " errors.");

        // Detect failures
        if (!encounteredThrowables.isEmpty()) {
            LOG.error(String.format("%d exceptions encountered while running partition update tasks:",
                    encounteredThrowables.size()));
            int i = 0;
            for (Throwable t : encounteredThrowables) {
                LOG.error(String.format("Exception %d/%d:", ++i, encounteredThrowables.size()), t);
            }
            throw new IOException(String.format(
                    "Failed to complete update: %d exceptions encountered while running partition update tasks.",
                    encounteredThrowables.size()));
        }

        // Garbage collect useless host domains
        garbageCollectHostDomains(host);

        // Log statistics
        partitionUpdateTaskStatisticsAggregator.logStats();

    } catch (IOException e) {
        LOG.info("Update failed and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
        throw e;
    }
    LOG.info("Update succeeded and took " + FormatUtils.formatSecondsDuration(timer.getDurationMs() / 1000));
}

From source file:eu.cloud4soa.tests.TestReqSec_Deploy.java
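
Deployment requests are executed with invokeAll(), and the pool is torn down with shutdownNow() after all futures have been collected.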

public void deploy() {
    Collection<CallableNode> children = new ArrayList<CallableNode>();
    for (String applicationInstanceName : applicationInstances.keySet()) {
        String applicationInstanceUriId = applicationInstances.get(applicationInstanceName);
        // Thread creation...
        CallableNode callableNode = new CallableNode(BASE_URI, applicationInstanceUriId,
                getPaaSInstanceUriId(selectedPaaS), getPublicKey(), getSecretKey());
        children.add(callableNode);
    }
    ExecutorService executor = Executors.newFixedThreadPool(numberTests);
    try {
        List<Future<Boolean>> invokeAll = executor.invokeAll(children);
        for (Future<Boolean> future : invokeAll) {
            while (!future.isDone())
                ;
            Boolean get = future.get();
        }
        System.out.print("All " + numberTests + " deploy requests are completed!");
        executor.shutdownNow();
    } catch (InterruptedException ex) {
        Logger.getLogger(TestReqSec_Deploy.class.getName()).log(Level.SEVERE, null, ex);
    } catch (ExecutionException ex) {
        Logger.getLogger(TestReqSec_Deploy.class.getName()).log(Level.SEVERE, null, ex);
    }

    //            //Applications
    //            try {
    //                deployApplication(applicationInstanceUriId);
    //            } catch (FileNotFoundException ex) {
    //                Logger.getLogger(TestReqSec_Deploy.class.getName()).log(Level.SEVERE, null, ex);
    //            }
}

From source file:io.specto.hoverfly.junit.core.Hoverfly.java
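
A single-thread executor waits up to five seconds for the Hoverfly process to exit, and shutdownNow() then discards the waiting task regardless of whether the process terminated in time.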

private void cleanUp() {
    LOGGER.info("Destroying hoverfly process");

    if (startedProcess != null) {
        Process process = startedProcess.getProcess();
        process.destroy();

        // Some platforms (e.g. Windows) terminate the process asynchronously, so a synchronous file deletion
        // cannot be guaranteed to acquire the file lock
        ExecutorService executorService = Executors.newSingleThreadExecutor();
        Future<Integer> future = executorService.submit((Callable<Integer>) process::waitFor);
        try {
            future.get(5, TimeUnit.SECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            LOGGER.warn("Timeout when waiting for hoverfly process to terminate.");
        }
        executorService.shutdownNow();
    }

    proxyConfigurer.restoreProxySystemProperties();
    // TODO: reset default SslContext?
    tempFileManager.purge();
}

From source file:org.apache.camel.impl.DefaultExecutorServiceStrategy.java
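
This strategy method wraps shutdownNow() with a null check, skips executors that are already shut down, logs the call, and returns the list of tasks that never started.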

public List<Runnable> shutdownNow(ExecutorService executorService) {
    ObjectHelper.notNull(executorService, "executorService");

    if (executorService.isShutdown()) {
        return null;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("ShutdownNow ExecutorService: " + executorService);
    }
    List<Runnable> answer = executorService.shutdownNow();
    if (LOG.isTraceEnabled()) {
        LOG.trace("ShutdownNow ExecutorService: " + executorService + " complete.");
    }

    return answer;
}

From source file:org.onosproject.demo.DemoInstaller.java
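
This is the shutdown pattern recommended by the ExecutorService Javadoc: an orderly shutdown(), a bounded wait, and shutdownNow() as the fallback, with the interrupt status restored if the wait itself is interrupted.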

/**
 * Shut down a pool cleanly if possible.
 *
 * @param pool an executorService
 */
private void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // Disable new tasks from being submitted
    try {
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // Cancel currently executing tasks
            // Wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
                log.error("Pool did not terminate");
            }
        }
    } catch (Exception ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}

From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer.java
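
The localizer downloads resources through a dedicated thread pool, and the finally block calls shutdownNow() so that in-flight downloads are cancelled before the file systems are closed.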

@SuppressWarnings("deprecation")
public void runLocalization(final InetSocketAddress nmAddr) throws IOException, InterruptedException {
    // load credentials
    initDirs(conf, user, appId, lfs, localDirs, userFolder);
    final Credentials creds = new Credentials();
    DataInputStream credFile = null;
    try {
        // assume credentials in cwd
        // TODO: Fix
        Path tokenPath = new Path(String.format(TOKEN_FILE_NAME_FMT, localizerId));
        credFile = lfs.open(tokenPath);
        creds.readTokenStorageStream(credFile);
        // Explicitly deleting token file.
        lfs.delete(tokenPath, false);
    } finally {
        if (credFile != null) {
            credFile.close();
        }
    }
    // create localizer context
    UserGroupInformation remoteUser = UserGroupInformation.createRemoteUser(user);
    remoteUser.addToken(creds.getToken(LocalizerTokenIdentifier.KIND));
    final LocalizationProtocol nodeManager = remoteUser.doAs(new PrivilegedAction<LocalizationProtocol>() {
        @Override
        public LocalizationProtocol run() {
            return getProxy(nmAddr);
        }
    });

    // create user context
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
        ugi.addToken(token);
    }

    ExecutorService exec = null;
    try {
        exec = createDownloadThreadPool();
        CompletionService<Path> ecs = createCompletionService(exec);
        localizeFiles(nodeManager, ecs, ugi);
        return;
    } catch (Throwable e) {
        throw new IOException(e);
    } finally {
        try {
            if (exec != null) {
                exec.shutdownNow();
            }
            LocalDirAllocator.removeContext(appCacheDirContextName);
        } finally {
            closeFileSystems(ugi);
        }
    }
}