List of usage examples for java.util.concurrent ExecutorService awaitTermination
boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;
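Before the project-specific examples, here is a minimal hedged sketch of the shutdown sequence they all share: stop accepting new work with shutdown(), block in awaitTermination() for a bounded time, and fall back to shutdownNow() if the timeout elapses. The pool size, task body, and 30-second timeout below are illustrative assumptions, not taken from any of the listed projects.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4); // illustrative pool size
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            pool.submit(() -> System.out.println("task " + taskId)); // placeholder work
        }
        pool.shutdown(); // reject new tasks; already-submitted tasks keep running
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) { // block up to the timeout
            pool.shutdownNow(); // timed out: interrupt whatever is still running
        }
    }
}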
From source file:com.emc.vipr.sync.CasMigrationTest.java
protected List<String> createTestClips(FPPool pool, int maxBlobSize, int thisMany, Writer summaryWriter)
        throws Exception {
    ExecutorService service = Executors.newFixedThreadPool(CAS_SETUP_THREADS);
    System.out.print("Creating clips");
    List<String> clipIds = Collections.synchronizedList(new ArrayList<String>());
    List<String> summaries = Collections.synchronizedList(new ArrayList<String>());
    for (int clipIdx = 0; clipIdx < thisMany; clipIdx++) {
        service.submit(new ClipWriter(pool, clipIds, maxBlobSize, summaries));
    }
    service.shutdown();
    service.awaitTermination(CAS_SETUP_WAIT_MINUTES, TimeUnit.MINUTES);
    service.shutdownNow();

    Collections.sort(summaries);
    for (String summary : summaries) {
        summaryWriter.append(summary);
    }

    System.out.println();
    return clipIds;
}
From source file:guru.nidi.languager.check.LinkChecker.java
public List<FindResult<String>> findBrokenLinks() {
    ExecutorService executor = Executors.newCachedThreadPool();
    final List<FindResult<String>> res = Collections.synchronizedList(new ArrayList<FindResult<String>>());
    final Set<String> urls = new HashSet<>();
    int lineNum = 1;
    for (MessageLine line : contents.subList(1, contents.size())) {
        lineNum++;
        int col = 1;
        int elemNum = 0;
        for (String element : line) {
            final Matcher matcher = LINK_PATTERN.matcher(element);
            while (matcher.find()) {
                final String url = matcher.group();
                if (!urls.contains(url)) {
                    urls.add(url);
                    executor.submit(new LinkValidator(res, url,
                            new SourcePosition(file, lineNum, col + elemNum + matcher.start())));
                }
            }
            elemNum++;
            col += element.length();
        }
    }
    executor.shutdown();
    try {
        executor.awaitTermination(20, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // ignore
    }
    return res;
}
From source file:org.mousephenotype.dcc.crawler.Downloader.java
private void downloadFiles() throws InterruptedException {
    ExecutorService workers = Executors.newFixedThreadPool(poolSize);
    while (true) {
        SortedSet<FileSourceHasZip> f = getActionAndSources();
        if (f == null || f.isEmpty()) {
            logger.info("No download pending; downloader will now exit");
            break;
        } else {
            ZipDownload zd = attemptDownload(f);
            if (zd != null) {
                logger.debug("Successfully downloaded '{}'... will now extract contents",
                        zd.getZfId().getZaId().getZipId().getFileName());
                workers.submit(new XmlExtractor(backupDir, zd));
            }
        }
    }
    workers.shutdown();
    workers.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
}
From source file:org.apache.hadoop.hive.llap.daemon.impl.TaskExecutorService.java
private void shutdownExecutor(ExecutorService executorService) {
    executorService.shutdown();
    try {
        if (!executorService.awaitTermination(1, TimeUnit.MINUTES)) {
            executorService.shutdownNow();
        }
    } catch (InterruptedException e) {
        executorService.shutdownNow();
    }
}
From source file:org.opennms.netmgt.events.commands.StressCommand.java
@Override
protected Object doExecute() {
    // Apply sane lower bounds to all of the configurable options
    eventsPerSecondPerThread = Math.max(1, eventsPerSecondPerThread);
    numberOfThreads = Math.max(1, numberOfThreads);
    numSeconds = Math.max(1, numSeconds);
    reportIntervalInSeconds = Math.max(1, reportIntervalInSeconds);
    batchSize = Math.max(1, batchSize);
    boolean useJexl = jexlExpressions != null && jexlExpressions.size() > 0;

    // Display the effective settings and rates
    double eventsPerSecond = eventsPerSecondPerThread * numberOfThreads;
    System.out.printf("Generating %d events per second accross %d threads for %d seconds\n",
            eventsPerSecondPerThread, numberOfThreads, numSeconds);
    System.out.printf("\t with UEI: %s\n", eventUei);
    System.out.printf("\t with batch size: %d\n", batchSize);
    System.out.printf("\t with synchronous calls: %s\n", isSynchronous);
    System.out.printf("Which will yield an effective\n");
    System.out.printf("\t %.2f events per second\n", eventsPerSecond);
    System.out.printf("\t %.2f total events\n", eventsPerSecond * numSeconds);
    if (useJexl) {
        System.out.printf("Using JEXL expressions:\n");
        for (String jexlExpression : jexlExpressions) {
            System.out.printf("\t%s\n", jexlExpression);
        }
    }

    // Setup the reporter
    ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build();

    // Setup the executor
    final ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("Event Generator #%d").build();
    final ExecutorService executor = Executors.newFixedThreadPool(numberOfThreads, threadFactory);

    System.out.println("Starting.");
    try {
        reporter.start(reportIntervalInSeconds, TimeUnit.SECONDS);
        for (int i = 0; i < numberOfThreads; i++) {
            final EventGenerator eventGenerator = useJexl ? new JexlEventGenerator(jexlExpressions)
                    : new EventGenerator();
            executor.execute(eventGenerator);
        }
        System.out.println("Started.");

        // Wait until we timeout or get interrupted
        try {
            Thread.sleep(SECONDS.toMillis(numSeconds));
        } catch (InterruptedException e) {
        }

        // Stop!
        try {
            System.out.println("Stopping.");
            executor.shutdownNow();
            if (!executor.awaitTermination(2, TimeUnit.MINUTES)) {
                System.err.println("The threads did not stop in time.");
            } else {
                System.out.println("Stopped.");
            }
        } catch (InterruptedException e) {
        }
    } finally {
        // Make sure we always stop the reporter
        reporter.stop();
    }

    // And display one last report...
    reporter.report();
    return null;
}
From source file:org.rivalry.core.datacollector.DefaultDataCollector.java
@Override
public void fetchData(final DCSpec dcSpec, final String username, final String password,
        final RivalryData rivalryData) {
    final long start = System.currentTimeMillis();

    if (_maxThreads == 1) {
        final WebDriver webDriver = createWebDriver();
        try {
            for (final Candidate candidate : rivalryData.getCandidates()) {
                fetchData(webDriver, dcSpec, rivalryData, candidate);
            }
        } catch (final Exception e) {
            e.printStackTrace();
        }
    } else {
        final ExecutorService executorService = Executors.newFixedThreadPool(_maxThreads);
        for (final Candidate candidate : rivalryData.getCandidates()) {
            final Runnable task = new Runnable() {
                @Override
                public void run() {
                    final WebDriver webDriver = createWebDriver();
                    fetchData(webDriver, dcSpec, rivalryData, candidate);
                }
            };
            System.out.println(candidate.getName() + " submit");
            executorService.submit(task);
        }
        executorService.shutdown();
        try {
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (final InterruptedException e) {
            e.printStackTrace();
        }
    }

    _dataPostProcessor.postProcess(rivalryData);

    final Candidate averageCandidate = createAverageCandidate(rivalryData);
    final Candidate medianCandidate = createMedianCandidate(rivalryData);
    if (averageCandidate != null) {
        rivalryData.getCandidates().add(averageCandidate);
    }
    if (medianCandidate != null) {
        rivalryData.getCandidates().add(medianCandidate);
    }

    final long end = System.currentTimeMillis();
    logTiming("0 fetchData()", start, end);
}
From source file:fi.jumi.launcher.JumiLauncherBuilder.java
public JumiLauncher build() {
    ExecutorService actorsThreadPool = createActorsThreadPool();
    ProcessStarter processStarter = createProcessStarter();
    NetworkServer networkServer = createNetworkServer();
    OutputStream daemonOutputListener = createDaemonOutputListener();

    Actors actors = new MultiThreadedActors(actorsThreadPool, new DynamicEventizerProvider(),
            new PrintStreamFailureLogger(System.out), new NullMessageListener());
    ActorThread actorThread = startActorThread(actors);

    ActorRef<DaemonSummoner> daemonSummoner = actorThread.bindActor(DaemonSummoner.class,
            new ProcessStartingDaemonSummoner(new DirBasedSteward(new EmbeddedDaemonJar()),
                    processStarter, networkServer, daemonOutputListener));
    ActorRef<SuiteLauncher> suiteLauncher = actorThread.bindActor(SuiteLauncher.class,
            new RemoteSuiteLauncher(actorThread, daemonSummoner));

    return new JumiLauncher(suiteLauncher, () -> {
        networkServer.close();
        actorThread.stop();
        actorsThreadPool.shutdown();
        try {
            actorsThreadPool.awaitTermination(1, TimeUnit.MINUTES);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    });
}
From source file:org.repodriller.RepositoryMining.java
private void processRepos(SCMRepository repo) {
    log.info("Git repository in " + repo.getPath());

    List<ChangeSet> allCs = range.get(repo.getScm());
    if (!reverseOrder)
        Collections.reverse(allCs);

    log.info("Total of commits: " + allCs.size());
    log.info("Starting threads: " + threads);

    ExecutorService exec = Executors.newFixedThreadPool(threads);
    List<List<ChangeSet>> partitions = Lists.partition(allCs, threads);
    for (List<ChangeSet> partition : partitions) {
        exec.submit(() -> {
            for (ChangeSet cs : partition) {
                try {
                    processChangeSet(repo, cs);
                } catch (OutOfMemoryError e) {
                    System.err.println("Commit " + cs.getId() + " in " + repo.getLastDir() + " caused OOME");
                    e.printStackTrace();
                    System.err.println("goodbye :/");
                    log.fatal("Commit " + cs.getId() + " in " + repo.getLastDir() + " caused OOME", e);
                    log.fatal("Goodbye! ;/");
                    System.exit(-1);
                } catch (Throwable t) {
                    log.error(t);
                }
            }
        });
    }
    try {
        exec.shutdown();
        exec.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        log.error("error waiting for threads to terminate in " + repo.getLastDir(), e);
    }
}
From source file:com.dtolabs.rundeck.core.execution.impl.jsch.JschNodeExecutor.java
/**
 * Shutdown the ExecutorService
 */
void shutdownAndAwaitTermination(ExecutorService pool) {
    pool.shutdownNow(); // Disable new tasks from being submitted
    try {
        logger.debug("Waiting up to 30 seconds for ExecutorService to shut down");
        // Wait a while for existing tasks to terminate
        if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
            logger.debug("Pool did not terminate");
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        pool.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }
}
From source file:org.springframework.integration.ftp.session.SessionFactoryTests.java
@Test
@Ignore
public void testConnectionLimit() throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    DefaultFtpSessionFactory sessionFactory = new DefaultFtpSessionFactory();
    sessionFactory.setHost("192.168.28.143");
    sessionFactory.setPassword("password");
    sessionFactory.setUsername("user");
    final CachingSessionFactory factory = new CachingSessionFactory(sessionFactory, 2);

    final Random random = new Random();
    final AtomicInteger failures = new AtomicInteger();
    for (int i = 0; i < 30; i++) {
        executor.execute(new Runnable() {
            public void run() {
                try {
                    Session session = factory.getSession();
                    Thread.sleep(random.nextInt(5000));
                    session.close();
                } catch (Exception e) {
                    e.printStackTrace();
                    failures.incrementAndGet();
                }
            }
        });
    }
    executor.shutdown();
    executor.awaitTermination(10000, TimeUnit.SECONDS);
    assertEquals(0, failures.get());
}