List of usage examples for java.util.concurrent ExecutorService awaitTermination
boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException;
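awaitTermination blocks until all tasks have completed execution after a shutdown request, until the timeout elapses, or until the current thread is interrupted, whichever happens first. It returns true if the executor terminated and false if the timeout elapsed; note that it does not itself initiate shutdown, so shutdown() (or shutdownNow()) must be called first. Before the examples from real projects below, here is a minimal self-contained sketch of the usual shutdown-then-await idiom, close to the two-phase shutdown pattern described in the ExecutorService Javadoc (the pool size and timeouts are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class AwaitTerminationSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 8; i++) {
            final int task = i;
            pool.execute(() -> System.out.println("task " + task));
        }
        pool.shutdown(); // stop accepting new tasks; already-submitted tasks keep running
        // Wait up to 10 seconds for the queued tasks to finish.
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // timed out: interrupt still-running tasks
            // Give the cancelled tasks a moment to respond to interruption.
            if (!pool.awaitTermination(1, TimeUnit.SECONDS)) {
                System.err.println("Pool did not terminate");
            }
        }
    }
}

Several of the examples below skip the shutdownNow() branch and ignore the boolean result entirely, which is common in tests but discards the timeout signal.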
From source file: org.apache.hadoop.util.TestStringUtils.java

@Test
// Multithreaded Test GetFormattedTimeWithDiff()
public void testGetFormattedTimeWithDiff() throws InterruptedException {
    ExecutorService executorService = Executors.newFixedThreadPool(16);
    final CyclicBarrier cyclicBarrier = new CyclicBarrier(10);
    for (int i = 0; i < 10; i++) {
        executorService.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    cyclicBarrier.await();
                } catch (InterruptedException | BrokenBarrierException e) {
                    // Ignored
                }
                final long end = System.currentTimeMillis();
                final long start = end - 30000;
                String formattedTime1 = StringUtils.getFormattedTimeWithDiff(FAST_DATE_FORMAT, start, end);
                String formattedTime2 = StringUtils.getFormattedTimeWithDiff(FAST_DATE_FORMAT, start, end);
                assertTrue("Method returned inconsistent results indicative of"
                        + " a race condition", formattedTime1.equals(formattedTime2));
            }
        });
    }
    executorService.shutdown();
    executorService.awaitTermination(50, TimeUnit.SECONDS);
}
From source file: ch.jamiete.hilda.plugins.PluginManager.java

public void disablePlugins() {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    synchronized (this.plugins) {
        final Iterator<HildaPlugin> iterator = this.plugins.iterator();
        while (iterator.hasNext()) {
            final HildaPlugin entry = iterator.next();
            Future<?> future = executor.submit(() -> {
                try {
                    entry.onDisable();
                } catch (final Exception e) {
                    Hilda.getLogger().log(Level.WARNING,
                            "Encountered an exception while disabling plugin "
                                    + entry.getPluginData().getName(), e);
                }
            });
            try {
                future.get(30, TimeUnit.SECONDS);
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                Hilda.getLogger().log(Level.WARNING, "Plugin " + entry.getPluginData().getName()
                        + " took too long disabling; ceased executing its code", e);
            }
        }
    }
    executor.shutdown();
    try {
        executor.awaitTermination(5, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        Hilda.getLogger().log(Level.WARNING,
                "Encountered an exception during the plugin disable grace period", e);
    }
}
From source file: com.linkedin.pinot.tools.admin.command.CreateSegmentCommand.java

@Override
public boolean execute() throws Exception {
    LOGGER.info("Executing command: {}", toString());

    // Load generator config if it exists.
    final SegmentGeneratorConfig segmentGeneratorConfig;
    if (_generatorConfigFile != null) {
        segmentGeneratorConfig = new ObjectMapper().readValue(new File(_generatorConfigFile),
                SegmentGeneratorConfig.class);
    } else {
        segmentGeneratorConfig = new SegmentGeneratorConfig();
    }

    // Load config from segment generator config.
    String configDataDir = segmentGeneratorConfig.getDataDir();
    if (_dataDir == null) {
        if (configDataDir == null) {
            throw new RuntimeException("Must specify dataDir.");
        }
        _dataDir = configDataDir;
    } else {
        if (configDataDir != null && !configDataDir.equals(_dataDir)) {
            LOGGER.warn("Find dataDir conflict in command line and config file, use config in command line: {}",
                    _dataDir);
        }
    }

    FileFormat configFormat = segmentGeneratorConfig.getFormat();
    if (_format == null) {
        if (configFormat == null) {
            throw new RuntimeException("Format cannot be null in config file.");
        }
        _format = configFormat;
    } else {
        if (configFormat != _format && configFormat != FileFormat.AVRO) {
            LOGGER.warn("Find format conflict in command line and config file, use config in command line: {}",
                    _format);
        }
    }

    String configOutDir = segmentGeneratorConfig.getOutDir();
    if (_outDir == null) {
        if (configOutDir == null) {
            throw new RuntimeException("Must specify outDir.");
        }
        _outDir = configOutDir;
    } else {
        if (configOutDir != null && !configOutDir.equals(_outDir)) {
            LOGGER.warn("Find outDir conflict in command line and config file, use config in command line: {}",
                    _outDir);
        }
    }

    if (segmentGeneratorConfig.isOverwrite()) {
        _overwrite = true;
    }

    String configTableName = segmentGeneratorConfig.getTableName();
    if (_tableName == null) {
        if (configTableName == null) {
            throw new RuntimeException("Must specify tableName.");
        }
        _tableName = configTableName;
    } else {
        if (configTableName != null && !configTableName.equals(_tableName)) {
            LOGGER.warn("Find tableName conflict in command line and config file, use config in command line: {}",
                    _tableName);
        }
    }

    String configSegmentName = segmentGeneratorConfig.getSegmentName();
    if (_segmentName == null) {
        if (configSegmentName == null) {
            throw new RuntimeException("Must specify segmentName.");
        }
        _segmentName = configSegmentName;
    } else {
        if (configSegmentName != null && !configSegmentName.equals(_segmentName)) {
            LOGGER.warn("Find segmentName conflict in command line and config file, use config in command line: {}",
                    _segmentName);
        }
    }

    // Filter out all input files.
    File dir = new File(_dataDir);
    if (!dir.exists() || !dir.isDirectory()) {
        throw new RuntimeException("Data directory " + _dataDir + " not found.");
    }
    File[] files = dir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.toLowerCase().endsWith(_format.toString().toLowerCase());
        }
    });
    if ((files == null) || (files.length == 0)) {
        throw new RuntimeException("Data directory " + _dataDir + " does not contain "
                + _format.toString().toUpperCase() + " files.");
    }

    // Make sure output directory does not already exist, or can be overwritten.
    File outDir = new File(_outDir);
    if (outDir.exists()) {
        if (!_overwrite) {
            throw new IOException("Output directory " + _outDir + " already exists.");
        } else {
            FileUtils.deleteDirectory(outDir);
        }
    }

    // Set other generator configs from command line.
    segmentGeneratorConfig.setDataDir(_dataDir);
    segmentGeneratorConfig.setFormat(_format);
    segmentGeneratorConfig.setOutDir(_outDir);
    segmentGeneratorConfig.setOverwrite(_overwrite);
    segmentGeneratorConfig.setTableName(_tableName);
    segmentGeneratorConfig.setSegmentName(_segmentName);
    if (_schemaFile != null) {
        if (segmentGeneratorConfig.getSchemaFile() != null
                && !segmentGeneratorConfig.getSchemaFile().equals(_schemaFile)) {
            LOGGER.warn("Find schemaFile conflict in command line and config file, use config in command line: {}",
                    _schemaFile);
        }
        segmentGeneratorConfig.setSchemaFile(_schemaFile);
    }
    if (_readerConfigFile != null) {
        if (segmentGeneratorConfig.getReaderConfigFile() != null
                && !segmentGeneratorConfig.getReaderConfigFile().equals(_readerConfigFile)) {
            LOGGER.warn("Find readerConfigFile conflict in command line and config file, use config in command line: {}",
                    _readerConfigFile);
        }
        segmentGeneratorConfig.setReaderConfigFile(_readerConfigFile);
    }
    if (_enableStarTreeIndex) {
        segmentGeneratorConfig.setEnableStarTreeIndex(true);
    }
    if (_starTreeIndexSpecFile != null) {
        if (segmentGeneratorConfig.getStarTreeIndexSpecFile() != null
                && !segmentGeneratorConfig.getStarTreeIndexSpecFile().equals(_starTreeIndexSpecFile)) {
            LOGGER.warn("Find starTreeIndexSpecFile conflict in command line and config file, use config in command line: {}",
                    _starTreeIndexSpecFile);
        }
        segmentGeneratorConfig.setStarTreeIndexSpecFile(_starTreeIndexSpecFile);
    }

    ExecutorService executor = Executors.newFixedThreadPool(_numThreads);
    int cnt = 0;
    for (final File file : files) {
        final int segCnt = cnt;
        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    SegmentGeneratorConfig config = new SegmentGeneratorConfig(segmentGeneratorConfig);
                    config.setInputFilePath(file.getAbsolutePath());
                    config.setSegmentName(_segmentName + "_" + segCnt);
                    config.loadConfigFiles();
                    final SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
                    driver.init(config);
                    driver.build();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        });
        cnt += 1;
    }
    executor.shutdown();
    return executor.awaitTermination(1, TimeUnit.HOURS);
}
From source file: org.apache.reef.io.network.NetworkConnectionServiceTest.java

@Test
public void testMultithreadedSharedConnMessagingNetworkConnServiceRate() throws Exception {
    Assume.assumeFalse("Use log level INFO to run benchmarking", LOG.isLoggable(Level.FINEST));
    LOG.log(Level.FINEST, name.getMethodName());

    final int[] messageSizes = { 2000 }; // {1, 16, 32, 64, 512, 64*1024, 1024*1024};

    for (final int size : messageSizes) {
        final String message = StringUtils.repeat('1', size);
        final int numMessages = 300000 / (Math.max(1, size / 512));
        final int numThreads = 2;
        final int totalNumMessages = numMessages * numThreads;
        final Monitor monitor = new Monitor();
        final Codec<String> codec = new StringCodec();
        try (final NetworkMessagingTestService messagingTestService =
                new NetworkMessagingTestService(localAddress)) {
            messagingTestService.registerTestConnectionFactory(groupCommClientId, totalNumMessages, monitor,
                    codec);
            final ExecutorService e = Executors.newCachedThreadPool();
            try (final Connection<String> conn = messagingTestService
                    .getConnectionFromSenderToReceiver(groupCommClientId)) {
                final long start = System.currentTimeMillis();
                for (int i = 0; i < numThreads; i++) {
                    e.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                conn.open();
                                for (int count = 0; count < numMessages; ++count) {
                                    // Send messages to the receiver.
                                    conn.write(message);
                                }
                            } catch (final Exception e) {
                                throw new RuntimeException(e);
                            }
                        }
                    });
                }
                e.shutdown();
                e.awaitTermination(30, TimeUnit.SECONDS);
                monitor.mwait();
                final long end = System.currentTimeMillis();
                final double runtime = ((double) end - start) / 1000;
                LOG.log(Level.INFO, "size: " + size + "; messages/s: " + totalNumMessages / runtime
                        + " bandwidth(bytes/s): " + ((double) totalNumMessages * 2 * size) / runtime); // x2 for unicode chars
            }
        }
    }
}
From source file: org.apache.hadoop.hdfs.server.blockmanagement.TestBlockManager.java

@Test
public void testRemoveBlocks() throws IOException, InterruptedException, ExecutionException {
    List<DatanodeDescriptor> testNodes = new ArrayList<>();
    testNodes.add(nodes.get(0));
    testNodes.add(nodes.get(1));
    ExecutorService executor = Executors.newFixedThreadPool(10);
    List<Future<Object>> futures = new ArrayList<>();
    long blockId = 0;
    List<Long> blockIds = new ArrayList<>();
    for (int i = 2; i < 4000 + 2; i++) {
        for (int j = 0; j < 2; j++) {
            blockIds.add(blockId);
            futures.add(executor.submit(new SliceRunner(blockId++, testNodes, i)));
        }
    }
    for (Future<Object> future : futures) {
        future.get();
    }
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.SECONDS);

    // Check that the replica info is present in the database.
    for (int sid : testNodes.get(0).getSidsOnNode()) {
        checkNbReplicas(sid, blockIds.size());
    }
    for (int sid : testNodes.get(1).getSidsOnNode()) {
        checkNbReplicas(sid, blockIds.size());
    }

    bm.removeBlocks(blockIds, testNodes.get(0));

    // The replicas should have been removed from node 0 but not from node 1.
    for (int sid : testNodes.get(0).getSidsOnNode()) {
        checkNbReplicas(sid, 0);
    }
    for (int sid : testNodes.get(1).getSidsOnNode()) {
        checkNbReplicas(sid, blockIds.size());
    }
}
From source file: com.gs.collections.impl.parallel.SerialParallelLazyPerformanceTest.java

private void toList(FastList<Integer> collection) {
    MutableList<Runnable> runnables = FastList.newList();
    runnables.add(() -> this.basicSerialToListPerformance(collection, SERIAL_RUN_COUNT));
    int cores = Runtime.getRuntime().availableProcessors();
    ExecutorService service = Executors.newFixedThreadPool(cores);
    runnables.add(() -> this.basicParallelLazyToListPerformance(collection, PARALLEL_RUN_COUNT, cores, service));
    runnables.add(() -> this.basicJava8ParallelLazyToListPerformance(collection, PARALLEL_RUN_COUNT));
    List<Integer> arrayList = new ArrayList<>(collection);
    runnables.add(() -> this.basicJava8ParallelLazyToListPerformance(arrayList, PARALLEL_RUN_COUNT));
    this.shuffleAndRun(runnables);
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
From source file: net.ychron.unirestins.test.http.UnirestInstTest.java

private void makeParallelRequests() throws InterruptedException {
    ExecutorService newFixedThreadPool = Executors.newFixedThreadPool(10);
    final AtomicInteger counter = new AtomicInteger(0);
    for (int i = 0; i < 200; i++) {
        newFixedThreadPool.execute(new Runnable() {
            public void run() {
                try {
                    unirestInst.get("http://httpbin.org/get")
                            .queryString("index", counter.incrementAndGet())
                            .asJson();
                } catch (UnirestException e) {
                    throw new RuntimeException(e);
                }
            }
        });
    }
    newFixedThreadPool.shutdown();
    newFixedThreadPool.awaitTermination(10, TimeUnit.MINUTES);
}
From source file: com.wavemaker.tools.apidocs.tools.spring.SpringSwaggerParserTest.java

@Test
public void testMultiThread2() throws InterruptedException {
    ExecutorService service = Executors.newFixedThreadPool(4);
    final Class<VacationController> controllerClass = VacationController.class;
    for (int i = 0; i < 10; i++) {
        final int finalI = i;
        service.execute(new Runnable() {
            public void run() {
                Swagger swagger;
                try {
                    swagger = runForSingleClass(controllerClass);
                } catch (SwaggerParserException e) {
                    throw new RuntimeException("Exception while parsing class:" + controllerClass.getName(), e);
                }
                Assert.assertNotNull(swagger);
                assertEquals(1, swagger.getTags().size());
                assertEquals(controllerClass.getName(), swagger.getTags().get(0).getFullyQualifiedName());
                try {
                    writeToFile(swagger, "mul_class" + controllerClass.getSimpleName() + "_" + finalI + ".json");
                } catch (IOException e) {
                    throw new RuntimeException("Error while writing to file", e);
                }
            }
        });
    }
    service.shutdown();
    service.awaitTermination(10, TimeUnit.SECONDS);
}
From source file: broadwick.montecarlo.MonteCarlo.java

@Override
public void run() {
    log.trace("Starting Monte Carlo results producer thread");
    try {
        final int poolSize = Runtime.getRuntime().availableProcessors();
        final ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("MCScenarioProducer-%d")
                .setDaemon(true).build();
        final ExecutorService es = Executors.newFixedThreadPool(poolSize, threadFactory);
        final RNG generator = new RNG(RNG.Generator.Well44497b);

        final StopWatch sw = new StopWatch();
        sw.start();
        for (int i = 0; i < numSimulations; i++) {
            es.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        log.trace("Monte Carlo producer: creating scenario object");
                        final MonteCarloScenario scenario = simulation.copyOf();
                        final MonteCarloResults results = scenario
                                .run(generator.getInteger(0, Integer.MAX_VALUE - 1));
                        log.trace("Monte Carlo producer: generated results {}", results.getExpectedValue());
                        queue.put(results);
                    } catch (Exception e) {
                        log.error("Error running Monte Carlo simulation {}",
                                Throwables.getStackTraceAsString(e));
                    }
                }
            });
        }
        es.shutdown();
        // Poll for termination in one-second slices until every scenario has finished.
        while (!es.isTerminated()) {
            es.awaitTermination(1, TimeUnit.SECONDS);
        }
        queue.put(new Poison());
        sw.stop();
        log.info("Finished {} simulations in {}.", numSimulations, sw);
    } catch (Exception ex) {
        log.error("Monte Carlo simulation error: {}", Throwables.getStackTraceAsString(ex));
    }
}
From source file: de.tudarmstadt.lt.seg.app.Segmenter.java

private void run_parallel() throws Exception {
    InputStream in = System.in;
    if (!"-".equals(_filename_in))
        in = new FileInputStream(_filename_in);
    Stream<String> liter = new BufferedReader(new InputStreamReader(in, Charset.defaultCharset())).lines();

    ThreadLocal<ISentenceSplitter> sentenceSplitter = ThreadLocal.withInitial(() -> {
        try {
            return newSentenceSplitter();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });
    ThreadLocal<ITokenizer> tokenizer = ThreadLocal.withInitial(() -> {
        try {
            return newTokenizer();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });

    // Init writers, one output file per worker slot.
    final PrintWriter[] w = new PrintWriter[_parallelism];
    for (int i = 0; i < _parallelism; i++) {
        OutputStream out = System.out;
        if (!"-".equals(_filename_out)) {
            out = new FileOutputStream(String.format("%s_%d", _filename_out, i));
        }
        w[i] = new PrintWriter(new OutputStreamWriter(out, Charset.defaultCharset()));
    }

    BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(_parallelism * 2, true);
    ExecutorService es = new ThreadPoolExecutor(_parallelism, _parallelism, 0L, TimeUnit.MILLISECONDS, queue);

    AtomicLong lc = new AtomicLong(0);
    liter.forEach((line) -> {
        // Don't submit new tasks; wait until the thread queue has some capacity again.
        while (queue.remainingCapacity() == 0) {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                /* ignore */
            }
        }
        es.submit(() -> {
            final long docid = lc.incrementAndGet();
            if (docid % 1000 == 0)
                System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
            final int w_i = (int) (docid % _parallelism);
            split_and_tokenize(new StringReader(line.trim()), String.format("%s:%d", _filename_in, docid),
                    sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
                    _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w[w_i]);
        });
    });
    es.shutdown();
    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);

    // TODO: the stream parallelism version does not work because it submits too many threads at once
    // AtomicLong lc = new AtomicLong(0);
    // ForkJoinPool forkJoinPool = new ForkJoinPool(_parallelism);
    // forkJoinPool.submit(() ->
    //     liter.parallel().forEach((line) -> {
    //         final long docid = lc.incrementAndGet();
    //         if (docid % 1000 == 0)
    //             System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
    //         String l = line.replace("\\t", "\t").replace("\\n", "\n");
    //         split_and_tokenize(new StringReader(l), String.format("%s:%d", _filename_in, docid),
    //                 sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
    //                 _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w);
    //     })).get();
}