Example usage for java.util.concurrent ExecutorService shutdownNow

Introduction

On this page you can find usage examples for java.util.concurrent ExecutorService shutdownNow.

Prototype

List<Runnable> shutdownNow();

Document

Attempts to stop all actively executing tasks, halts the processing of waiting tasks, and returns a list of the tasks that were awaiting execution.
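
Before the project-specific examples below, here is a minimal sketch of the two-phase shutdown idiom, modeled on the usage example in the ExecutorService Javadoc: shutdown() stops intake of new tasks, awaitTermination() grants a grace period, and shutdownNow() is the forceful fallback that interrupts running workers and returns the tasks that never started. The timeouts are illustrative, and the sketch assumes java.util.List and java.util.concurrent (ExecutorService, TimeUnit) are imported.

void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdown(); // disable submission of new tasks
    try {
        // give in-flight tasks a grace period to complete
        if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
            // force shutdown: interrupts workers and returns the queued tasks
            List<Runnable> neverCommenced = pool.shutdownNow();
            System.err.println(neverCommenced.size() + " task(s) never commenced");
            // wait a while for tasks to respond to being cancelled
            if (!pool.awaitTermination(60, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    } catch (InterruptedException ie) {
        // (re-)cancel if the current thread was interrupted while waiting
        pool.shutdownNow();
        // preserve the interrupt status
        Thread.currentThread().interrupt();
    }
}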

Usage

From source file: com.legstar.host.server.PoolingTest.java
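
This test schedules work against a nonexistent mainframe endpoint, stops the engine, and then calls shutdownNow() on the client thread pool during teardown, without waiting for the pool to terminate.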

/**
 * Address a request to an invalid address.
 * @throws Exception if the test fails
 */
public void testScheduleWorkInvalidAddress() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(CLIENT_THREADS);
    WorkManager wm = new WorkManagerImpl(executor);
    EngineHandler engHandler = new EngineHandler(getPoolingEngineConfig());
    engHandler.init();

    LegStarAddress address = new LegStarAddress("ThereIsNoSuchMainframe");
    address.setHostUserID(HOST_USERID);
    address.setHostPassword(HOST_PASSWORD);

    LegStarRequest request = new LegStarRequest("Request01", address, getLsfileaeRequestMessage());
    Client client = new Client(engHandler.getEngine(), "Client01", request);
    wm.schedule(client, new ClientListener());

    Thread.sleep(5000L);
    engHandler.stop();
    executor.shutdownNow();
    assertEquals(
            "com.legstar.pool.manager.ConnectionPoolException:"
                    + " No host endpoints matches Address=[hostEndpoint=ThereIsNoSuchMainframe,"
                    + "hostCharset=null," + "hostUserID=P390," + "hostTraceMode=false]",
            request.getException().getMessage());

}

From source file: org.atmosphere.cpr.PoolableBroadcasterFactoryTest.java
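
This test performs 100 broadcaster lookups concurrently on a cached thread pool; the finally block calls shutdownNow() so the pool is released even if an assertion fails.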

@Test
public void concurrentLookupTest() throws InterruptedException {
    String id = "id";
    final CountDownLatch latch = new CountDownLatch(100);
    final AtomicInteger created = new AtomicInteger();

    factory.addBroadcasterListener(new BroadcasterListenerAdapter() {
        @Override
        public void onPostCreate(Broadcaster b) {
            created.incrementAndGet();
        }

        @Override
        public void onComplete(Broadcaster b) {

        }

        @Override
        public void onPreDestroy(Broadcaster b) {

        }
    });

    final ConcurrentLinkedQueue<Broadcaster> c = new ConcurrentLinkedQueue<Broadcaster>();
    ExecutorService r = Executors.newCachedThreadPool();
    for (int i = 0; i < 100; i++) {
        r.submit(new Runnable() {
            @Override
            public void run() {
                c.add(factory.lookup("name" + UUID.randomUUID().toString(), true));
                latch.countDown();
            }
        });
    }

    try {
        assertTrue(latch.await(20, TimeUnit.SECONDS));
        assertEquals(created.get(), 100);
        assertEquals(c.size(), 100);

        for (Broadcaster b : c) {
            b.destroy();
        }

        assertNotNull(factory.lookup("name" + UUID.randomUUID().toString(), true).broadcast("test"));

        assertEquals(factory.poolableProvider().poolSize(), 100);

    } finally {
        factory.destroy();
        r.shutdownNow();
    }

}

From source file: io.anserini.index.IndexClueWeb09b.java
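
This indexer submits one task per WARC file to a fixed thread pool, shuts the pool down in an orderly fashion with shutdown() and awaitTermination(), and reserves shutdownNow() for the case where the waiting thread is itself interrupted.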

public int indexWithThreads(int numThreads) throws IOException, InterruptedException {

    System.out.println(
            "Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);

    final IndexWriterConfig iwc = new IndexWriterConfig(analyzer());

    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(256.0);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);

    final ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    List<Path> warcFiles = discoverWarcFiles(docDir);
    if (doclimit > 0 && warcFiles.size() < doclimit)
        warcFiles = warcFiles.subList(0, doclimit);

    for (Path f : warcFiles)
        executor.execute(new IndexerThread(writer, f));

    // wait briefly so that threads spawned by the scheduler can start
    Thread.sleep(30000);
    executor.shutdown(); // Disable new tasks from being submitted

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(5, TimeUnit.MINUTES)) {
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }

    return numIndexed;
}

From source file: org.apache.cassandra.hadoop.ColumnFamilyInputFormat.java
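
This InputFormat computes input splits for each token range in parallel and calls shutdownNow() in a finally block once every future has been collected or an error has been raised.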

public List<InputSplit> getSplits(JobContext context) throws IOException {
    Configuration conf = context.getConfiguration();

    validateConfiguration(conf);

    // canonical ranges and nodes holding replicas
    List<TokenRange> masterRangeNodes = getRangeMap(conf);

    keyspace = ConfigHelper.getInputKeyspace(context.getConfiguration());
    cfName = ConfigHelper.getInputColumnFamily(context.getConfiguration());

    // canonical ranges, split into pieces, fetching the splits in parallel
    ExecutorService executor = Executors.newCachedThreadPool();
    List<InputSplit> splits = new ArrayList<InputSplit>();

    try {
        List<Future<List<InputSplit>>> splitfutures = new ArrayList<Future<List<InputSplit>>>();
        for (TokenRange range : masterRangeNodes) {
            // for each range, pick a live owner and ask it to compute bite-sized splits
            splitfutures.add(executor.submit(new SplitCallable(range, conf)));
        }

        // wait until we have all the results back
        for (Future<List<InputSplit>> futureInputSplits : splitfutures) {
            try {
                splits.addAll(futureInputSplits.get());
            } catch (Exception e) {
                throw new IOException("Could not get input splits", e);
            }
        }
    } finally {
        executor.shutdownNow();
    }

    assert splits.size() > 0;
    Collections.shuffle(splits, new Random(System.nanoTime()));
    return splits;
}

From source file: org.jahia.services.SpringContextSingleton.java
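
This lookup polls for a missing Spring bean on a single-thread executor; the finally block calls shutdownNow() so the polling task cannot outlive the bounded wait.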

private static Object getBeanInModulesContext(final String beanId, long waitTimeout) {

    for (JahiaTemplatesPackage aPackage : ServicesRegistry.getInstance().getJahiaTemplateManagerService()
            .getAvailableTemplatePackages()) {
        if (aPackage.getContext() != null && aPackage.getContext().containsBean(beanId)) {
            return aPackage.getContext().getBean(beanId);
        }
    }

    // Waiting for a missing bean only makes sense in case it is a part of application context initialization
    // during module startup, because there is a chance for the bean to appear later. Otherwise, multiple threads
    // waiting for missing beans could cause application collapse.
    if (waitTimeout > 0 && isApplicationContextInitializationInProgress()) {

        ExecutorService executor = Executors.newSingleThreadExecutor();

        Future<Object> future = executor.submit(new Callable<Object>() {

            @Override
            public Object call() throws Exception {
                while (true) {
                    Thread.sleep(100);
                    try {
                        return getBeanInModulesContext(beanId, 0);
                    } catch (NoSuchBeanDefinitionException e) {
                        logger.debug("Bean '{}' not found by the task loop, will retry in 100 ms", beanId);
                    }
                }
            }
        });

        if (SettingsBean.getInstance().isDevelopmentMode()) {
            logger.warn(
                    "Detected call to SpringContextSingleton.getBeanInModulesContext(...) for bean '{}' during module startup."
                            + "Since 7.2.0.0 modules spring contexts are started independently, and beans may not be available."
                            + "We recommend to use OSGI services instead of spring beans to communicate between modules.",
                    beanId);
        }
        logger.info("Bean '{}' not found yet, will wait for its availability max {} seconds...", beanId,
                waitTimeout);

        try {
            return future.get(waitTimeout, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            future.cancel(true);
            logger.debug("Waiting for bean '{}' timed out", beanId);
        } catch (InterruptedException | ExecutionException e) {
            throw new JahiaRuntimeException(e);
        } finally {
            executor.shutdownNow();
        }

        logger.info("Bean '{}' not found in module contexts", beanId);
    }

    throw new NoSuchBeanDefinitionException(beanId);
}

From source file: org.apache.hadoop.fs.FCStatisticsBaseTest.java
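
This test stops the pool with shutdownNow() followed by awaitTermination() so that the worker threads can be garbage-collected, allowing their thread-local statistics data to be cleaned up.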

@Test(timeout = 70000)
public void testStatisticsThreadLocalDataCleanUp() throws Exception {
    final Statistics stats = new Statistics("test");
    // create a small thread pool to test the statistics
    final int size = 2;
    ExecutorService es = Executors.newFixedThreadPool(size);
    List<Callable<Boolean>> tasks = new ArrayList<Callable<Boolean>>(size);
    for (int i = 0; i < size; i++) {
        tasks.add(new Callable<Boolean>() {
            public Boolean call() {
                // this populates the data set in statistics
                stats.incrementReadOps(1);
                return true;
            }
        });
    }
    // run the threads
    es.invokeAll(tasks);
    // assert that the data size is exactly the number of threads
    final AtomicInteger allDataSize = new AtomicInteger(0);
    allDataSize.set(stats.getAllThreadLocalDataSize());
    Assert.assertEquals(size, allDataSize.get());
    Assert.assertEquals(size, stats.getReadOps());
    // force the GC to collect the threads by shutting down the thread pool
    es.shutdownNow();
    es.awaitTermination(1, TimeUnit.MINUTES);
    es = null;
    System.gc(); // force GC to garbage collect threads

    // wait for up to 60 seconds
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            int size = stats.getAllThreadLocalDataSize();
            allDataSize.set(size);
            if (size == 0) {
                return true;
            }
            LOG.warn(
                    "not all references have been cleaned up; still " + allDataSize.get() + " references left");
            LOG.warn("triggering another GC");
            System.gc();
            return false;
        }
    }, 500, 60 * 1000);
    Assert.assertEquals(0, allDataSize.get());
    Assert.assertEquals(size, stats.getReadOps());
}

From source file: com.asakusafw.runtime.util.cache.HadoopFileCacheRepositoryTest.java
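
This test resolves the same cache path from ten threads at once; an orderly shutdown is attempted with a 30-second limit, and the finally block calls shutdownNow() as a safety net.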

/**
 * Conflicting cache creation.
 * @throws Exception if the test fails
 */
@Test
public void conflict() throws Exception {
    File source = folder.newFile();
    byte[] bytes = new byte[1024 * 1024];
    try (OutputStream output = new FileOutputStream(source)) {
        for (int i = 0, n = 50; i < n; i++) {
            output.write(bytes);
        }
    }

    Path path = path(source);
    File cacheRepo = folder.newFolder();
    Configuration configuration = new ConfigurationProvider().newInstance();
    LockProvider<Path> locks = new LocalFileLockProvider<>(folder.newFolder());
    RetryStrategy retrier = new ConstantRetryStrategy(30, 100, 200);
    FileCacheRepository cache = new HadoopFileCacheRepository(configuration, path(cacheRepo), locks, retrier);

    List<Future<Path>> futures = new ArrayList<>();
    int count = 10;
    CountDownLatch latch = new CountDownLatch(count);
    ExecutorService executor = Executors.newFixedThreadPool(count);
    try {
        for (int i = 0; i < count; i++) {
            String label = String.format("thread-%d", i);
            futures.add(executor.submit(() -> {
                LOG.info("Wait: resolve @" + label);
                latch.countDown();
                if (latch.await(5, TimeUnit.SECONDS) == false) {
                    throw new TimeoutException();
                }
                LOG.info("Start: resolve @" + label);
                Path result = cache.resolve(path);

                LOG.info("Finish: resolve @" + label);
                return result;
            }));
        }
        executor.shutdown();
        if (executor.awaitTermination(30, TimeUnit.SECONDS) == false) {
            throw new TimeoutException();
        }
    } finally {
        executor.shutdownNow();
    }
    for (Future<Path> future : futures) {
        future.get();
    }
}

From source file: org.apache.hive.hcatalog.templeton.tool.LaunchMapper.java
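
This mapper grants the output-watcher pool a bounded grace period after the child process exits and escalates to shutdownNow() if the pool has not terminated by then.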

@Override
public void run(Context context) throws IOException, InterruptedException {

    Configuration conf = context.getConfiguration();
    LauncherDelegator.JobType jobType = LauncherDelegator.JobType.valueOf(conf.get(JOB_TYPE));
    String statusdir = conf.get(STATUSDIR_NAME);
    if (statusdir != null) {
        try {
            statusdir = TempletonUtils.addUserHomeDirectoryIfApplicable(statusdir, conf.get("user.name"));
        } catch (URISyntaxException e) {
            String msg = "Invalid status dir URI";
            LOG.error(msg, e);
            throw new IOException(msg, e);
        }
    }

    // Try to reconnect to a child job if one is found
    if (tryReconnectToRunningJob(conf, context, jobType, statusdir)) {
        return;
    }

    // Kill previously launched child MR jobs started by this launcher to prevent having
    // same jobs running side-by-side
    killLauncherChildJobs(conf, context.getJobID().toString());

    // Start the job
    Process proc = startJob(conf, context.getJobID().toString(), conf.get("user.name"),
            conf.get(OVERRIDE_CLASSPATH));

    ExecutorService pool = Executors.newCachedThreadPool();
    executeWatcher(pool, conf, context.getJobID(), proc.getInputStream(), statusdir, STDOUT_FNAME);
    executeWatcher(pool, conf, context.getJobID(), proc.getErrorStream(), statusdir, STDERR_FNAME);
    KeepAlive keepAlive = startCounterKeepAlive(pool, context);

    proc.waitFor();
    keepAlive.sendReport = false;
    pool.shutdown();
    if (!pool.awaitTermination(WATCHER_TIMEOUT_SECS, TimeUnit.SECONDS)) {
        pool.shutdownNow();
    }

    updateJobStateToDoneAndWriteExitValue(conf, statusdir, context.getJobID().toString(), proc.exitValue());

    Boolean enablelog = Boolean.parseBoolean(conf.get(ENABLE_LOG));
    if (enablelog && TempletonUtils.isset(statusdir)) {
        LOG.info("templeton: collecting logs for " + context.getJobID().toString() + " to " + statusdir
                + "/logs");
        LogRetriever logRetriever = new LogRetriever(statusdir, jobType, conf);
        logRetriever.run();
    }
}

From source file: org.apache.bookkeeper.tools.perf.dlog.PerfSegmentReader.java
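
This benchmark distributes log-segment splits across a fixed thread pool and, once statistics reporting completes, attempts an orderly shutdown with a five-second grace period before falling back to shutdownNow().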

@Override
protected void execute(Namespace namespace) throws Exception {
    List<DistributedLogManager> managers = new ArrayList<>(flags.numLogs);
    for (int i = 0; i < flags.numLogs; i++) {
        String logName = String.format(flags.logName, i);
        managers.add(namespace.openLog(logName));
    }
    log.info("Successfully opened {} logs", managers.size());

    // Get all the log segments
    final List<Pair<DistributedLogManager, LogSegmentMetadata>> segments = managers.stream()
            .flatMap(manager -> {
                try {
                    return manager.getLogSegments().stream().map(segment -> Pair.of(manager, segment));
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }).collect(Collectors.toList());

    final List<Split> splits = segments.stream()
            .flatMap(entry -> getNumSplits(entry.getLeft(), entry.getRight()).stream())
            .collect(Collectors.toList());

    // register shutdown hook to aggregate stats
    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
        isDone.set(true);
        printAggregatedStats(cumulativeRecorder);
    }));

    ExecutorService executor = Executors.newFixedThreadPool(flags.numThreads);
    try {
        for (int i = 0; i < flags.numThreads; i++) {
            final int idx = i;
            final List<Split> splitsThisThread = splits.stream()
                    .filter(split -> splits.indexOf(split) % flags.numThreads == idx)
                    .collect(Collectors.toList());
            executor.submit(() -> {
                try {
                    read(splitsThisThread);
                } catch (Exception e) {
                    log.error("Encountered error at writing records", e);
                }
            });
        }
        log.info("Started {} write threads", flags.numThreads);
        reportStats();
    } finally {
        executor.shutdown();
        if (!executor.awaitTermination(5, TimeUnit.SECONDS)) {
            executor.shutdownNow();
        }
        managers.forEach(manager -> manager.asyncClose());
    }
}

From source file: org.atmosphere.cpr.PoolableBroadcasterFactoryTest.java
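
This test issues 1000 concurrent get() calls for the same broadcaster id and, as in the earlier lookup test, releases the cached thread pool with shutdownNow() in a finally block.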

@Test
public void concurrentAccessLookupTest() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1000);
    final AtomicInteger created = new AtomicInteger();
    factory.poolableProvider(new UnboundedApachePoolableProvider());
    factory.addBroadcasterListener(new BroadcasterListenerAdapter() {
        @Override
        public void onPostCreate(Broadcaster b) {
            created.incrementAndGet();
        }

        @Override
        public void onComplete(Broadcaster b) {

        }

        @Override
        public void onPreDestroy(Broadcaster b) {

        }
    });

    final ConcurrentLinkedQueue<Broadcaster> c = new ConcurrentLinkedQueue<Broadcaster>();
    ExecutorService r = Executors.newCachedThreadPool();
    final String me = new String("me");
    for (int i = 0; i < 1000; i++) {
        r.submit(new Runnable() {
            @Override
            public void run() {
                c.add(factory.get(me));
                latch.countDown();
            }
        });

    }
    try {
        assertTrue(latch.await(20, TimeUnit.SECONDS));
        assertEquals(latch.getCount(), 0);
        assertEquals(c.size(), 1000);
        assertEquals(created.get(), 1000);

        for (Broadcaster b : c) {
            b.destroy();
        }

        assertNotNull(factory.lookup("name" + UUID.randomUUID().toString(), true).broadcast("test"));

        assertEquals(factory.poolableProvider().poolSize(), 1000);

    } finally {
        factory.destroy();
        r.shutdownNow();
    }

}