List of usage examples for java.util.concurrent.ExecutorService.execute()
void execute(Runnable command);
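Before the project examples below, here is a minimal self-contained sketch of the basic pattern (the class name, pool size, and task body are illustrative only). execute() schedules the Runnable on a pool thread and returns immediately; unlike submit(), it returns no Future, so an exception thrown by the task goes to the thread's uncaught-exception handler rather than back to the caller.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ExecuteExample {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 10; i++) {
            final int taskId = i;
            // execute() queues the task; there is no Future to poll or cancel
            executor.execute(() -> System.out.println(
                    "task " + taskId + " on " + Thread.currentThread().getName()));
        }
        executor.shutdown();                                // stop accepting new tasks
        if (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
            executor.shutdownNow();                         // interrupt stragglers
        }
    }
}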
From source file:io.pcp.parfait.benchmark.CPUThreadTest.java
private void runBenchmark(boolean cpuTracingEnabled, CPUThreadTestRunner.CpuLookupMethod cpuLookupMethod) {
    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    List<CPUThreadTestRunner> executions = newArrayList();

    ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
    threadBean.setThreadCpuTimeEnabled(cpuTracingEnabled);
    threadBean.setThreadContentionMonitoringEnabled(true);

    long begin = currentTimeMillis();
    for (int i = 0; i < numThreads; i++) {
        CPUThreadTestRunner cpuThreadTestRunner = new CPUThreadTestRunner(iterations, cpuLookupMethod);
        executorService.execute(cpuThreadTestRunner);
        executions.add(cpuThreadTestRunner);
    }
    awaitExecutionCompletion(executorService);
    long end = currentTimeMillis();
    long timeTakenms = end - begin;

    report(executions, timeTakenms, iterations, cpuTracingEnabled, cpuLookupMethod);
}
From source file:io.nats.connector.plugins.redis.RedisPubSubPlugin.java
@Override
public boolean onNatsInitialized(NATSConnector connector) {
    this.connector = connector;

    if (subjectsToChannels == null && channelsToSubjects == null) {
        logger.error("No subject/channel mapping has been defined.");
        return false;
    }

    try {
        if (subjectsToChannels != null) {
            for (String s : subjectsToChannels.keySet()) {
                connector.subscribe(s);
            }
        }
    } catch (Exception e) {
        logger.error("NATS Subscription error", e);
        return false;
    }

    if (channelsToSubjects != null) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(new ListenForRedisUpdates());
    }

    return true;
}
From source file:kmi.taa.core.Crawler.java
public void crawlAll(TreeMap<Integer, String> targetUrls, String service, String proxy, String otfile) {
    SortedSet<Integer> results = Collections.synchronizedSortedSet(new TreeSet<Integer>());
    ExecutorService pool = Executors.newFixedThreadPool(100);
    int howManyUrls = targetUrls.size();
    System.out.println("total " + howManyUrls + " to be processed");
    List<String> output = Collections.synchronizedList(new ArrayList<String>());

    for (Integer targetId : targetUrls.navigableKeySet()) {
        String uri = targetUrls.get(targetId);
        pool.execute(new Explorer(targetId, uri, service, proxy, results, otfile, output));
    }
    pool.shutdown();

    while (results.size() < howManyUrls) {
        System.out.println("already processed " + results.size() + " subject links");
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            log.error("crawlAll error", e);
        }
    }
    resultToFile(output, otfile);
    System.out.println("already processed " + results.size() + " subject links");
}
From source file:hws.core.ExecutorThread.java
private void startExecutors(ExecutorService serverExecutor) {
    for (DefaultExecutor defaultExecutor : this.outputStartingOrder) {
        try {
            defaultExecutor.start();
        } catch (Exception e) {
            Logger.severe(e.toString());
        }
    }
    for (String channelName : this.executors.keySet()) {
        serverExecutor.execute(this.executors.get(channelName));
    }
}
From source file:org.apache.kylin.tool.StorageCleanupJob.java
private void cleanUnusedHBaseTables(Configuration conf) throws IOException {
    CubeManager cubeMgr = CubeManager.getInstance(KylinConfig.getInstanceFromEnv());

    // get all kylin hbase tables
    try (HBaseAdmin hbaseAdmin = new HBaseAdmin(conf)) {
        String tableNamePrefix = IRealizationConstants.SharedHbaseStorageLocationPrefix;
        HTableDescriptor[] tableDescriptors = hbaseAdmin.listTables(tableNamePrefix + ".*");
        List<String> allTablesNeedToBeDropped = new ArrayList<String>();
        for (HTableDescriptor desc : tableDescriptors) {
            String host = desc.getValue(IRealizationConstants.HTableTag);
            if (KylinConfig.getInstanceFromEnv().getMetadataUrlPrefix().equalsIgnoreCase(host)) {
                // only take care of htables that belong to this instance and were created more than 2 days ago
                allTablesNeedToBeDropped.add(desc.getTableName().getNameAsString());
            }
        }

        // remove every htable still referenced by a cube segment from the drop list
        for (CubeInstance cube : cubeMgr.listAllCubes()) {
            for (CubeSegment seg : cube.getSegments()) {
                String tablename = seg.getStorageLocationIdentifier();
                if (allTablesNeedToBeDropped.contains(tablename)) {
                    allTablesNeedToBeDropped.remove(tablename);
                    logger.info("Exclude table " + tablename + " from drop list, as the table belongs to cube "
                            + cube.getName() + " with status " + cube.getStatus());
                }
            }
        }

        if (delete) {
            // drop tables
            ExecutorService executorService = Executors.newSingleThreadExecutor();
            for (String htableName : allTablesNeedToBeDropped) {
                FutureTask futureTask = new FutureTask(new DeleteHTableRunnable(hbaseAdmin, htableName));
                executorService.execute(futureTask);
                try {
                    futureTask.get(deleteTimeout, TimeUnit.MINUTES);
                } catch (TimeoutException e) {
                    logger.warn("Failed to delete htable " + htableName + ": it took more than "
                            + deleteTimeout + " minutes!");
                    futureTask.cancel(true);
                } catch (Exception e) {
                    e.printStackTrace();
                    futureTask.cancel(true);
                }
            }
            executorService.shutdown();
        } else {
            System.out.println("--------------- Tables To Be Dropped ---------------");
            for (String htableName : allTablesNeedToBeDropped) {
                System.out.println(htableName);
            }
            System.out.println("----------------------------------------------------");
        }
    }
}
From source file:ubic.gemma.core.apps.ShellDelegatingBlat.java
/**
 * @param querySequenceFile query sequence file
 * @param outputPath        output path
 * @return processed results.
 */
private Collection<BlatResult> jniGfClientCall(final File querySequenceFile, final String outputPath,
        final int portToUse) throws IOException {
    try {
        ShellDelegatingBlat.log.debug("Starting blat run");

        FutureTask<Boolean> blatThread = new FutureTask<>(new Callable<Boolean>() {
            @Override
            public Boolean call() {
                ShellDelegatingBlat.this.GfClientCall(host, Integer.toString(portToUse), seqDir,
                        querySequenceFile.getPath(), outputPath);
                return true;
            }
        });

        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.execute(blatThread);
        executor.shutdown();

        // wait...
        StopWatch overallWatch = new StopWatch();
        overallWatch.start();
        while (!blatThread.isDone()) {
            try {
                Thread.sleep(ShellDelegatingBlat.BLAT_UPDATE_INTERVAL_MS);
            } catch (InterruptedException ie) {
                throw new RuntimeException(ie);
            }
            this.outputFile(outputPath, overallWatch);
        }
        overallWatch.stop();

        String minutes = TimeUtil.getMinutesElapsed(overallWatch);
        ShellDelegatingBlat.log.info("Blat took a total of " + minutes + " minutes");
    } catch (UnsatisfiedLinkError e) {
        ShellDelegatingBlat.log.error(e, e);
        ShellDelegatingBlat.log.info("Falling back on exec()");
        this.execGfClient(querySequenceFile, outputPath, portToUse);
    }
    return this.processPsl(outputPath, null);
}
From source file:edu.cmu.lti.oaqa.bioasq.concept.rerank.scorers.GoPubMedConceptRetrievalScorer.java
@Override
public void prepare(JCas jcas) throws AnalysisEngineProcessException {
    List<String> tokens = TypeUtil.getOrderedTokens(jcas).stream().map(Token::getCoveredText)
            .map(name -> name.replaceAll("[^A-Za-z0-9_\\-]+", " ").trim())
            .filter(name -> !name.isEmpty() && !stoplist.contains(name.toLowerCase())).collect(toList());
    List<String> wIdConceptNames = TypeUtil
            .getConcepts(jcas).stream().filter(concept -> !TypeUtil.getConceptIds(concept).isEmpty())
            .map(TypeUtil::getConceptNames).map(names -> names.stream()
                    .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(joining(" ")))
            .collect(toList());
    List<String> woIdConceptNames = TypeUtil
            .getConcepts(jcas).stream().filter(concept -> TypeUtil.getConceptIds(concept).isEmpty())
            .map(TypeUtil::getConceptNames).map(names -> names.stream()
                    .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(joining(" ")))
            .collect(toList());
    List<String> cmentionNames = TypeUtil.getConceptMentions(jcas).stream().map(ConceptMention::getMatchedName)
            .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(toList());
    ExecutorService es = Executors.newCachedThreadPool();

    // execute against all tokens
    String concatenatedTokens = String.join(" ", tokens);
    LOG.debug("Query string: {}", concatenatedTokens);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedTokens, pages, hits, ontology);
                String conf = "tokens_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    // execute against concatenated concept names
    String concatenatedConceptNames = String.join(" ", Iterables.concat(wIdConceptNames, woIdConceptNames));
    LOG.debug("Query string: {}", concatenatedConceptNames);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedConceptNames, pages, hits, ontology);
                String conf = "concept_names_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    // execute against concatenated concept mentions
    String concatenatedCmentions = String.join(" ", cmentionNames);
    LOG.debug("Query string: {}", concatenatedCmentions);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedCmentions, pages, hits, ontology);
                String conf = "cmention_names_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    // execute against each concept name that has an ID
    for (String conceptName : wIdConceptNames) {
        LOG.debug("Query string: {}", conceptName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, conceptName,
                            pages, hits, ontology);
                    String conf = "w_id_concept_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }

    // execute against each concept name that has no ID
    for (String conceptName : woIdConceptNames) {
        LOG.debug("Query string: {}", conceptName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, conceptName,
                            pages, hits, ontology);
                    String conf = "wo_id_concept_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }

    // execute against each concept mention
    for (String cmentionName : cmentionNames) {
        LOG.debug("Query string: {}", cmentionName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, cmentionName,
                            pages, hits, ontology);
                    String conf = "cmention_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }

    es.shutdown();
    try {
        if (!es.awaitTermination(timeout, TimeUnit.MINUTES)) {
            LOG.warn("Timeout occurred for one or more concept retrieval services.");
        }
    } catch (InterruptedException e) {
        throw new AnalysisEngineProcessException(e);
    }
    confs = uri2conf2score.columnKeySet();
}
From source file:info.pancancer.arch3.test.TestWorker.java
@Test
public void testWorker_endless() throws Exception {
    byte[] body = setupMessage();
    Delivery testDelivery = new Delivery(mockEnvelope, mockProperties, body);
    setupMockQueue(testDelivery);
    Mockito.when(Utilities.parseJSONStr(anyString())).thenCallRealMethod();
    Mockito.when(Utilities.parseConfig(anyString())).thenCallRealMethod();

    // Because the cleanup code calls resultHandler.waitFor(), we need to actually execute
    // something, even if it does nothing.
    Mockito.doNothing().when(mockExecutor).execute(any(CommandLine.class),
            any(DefaultExecuteResultHandler.class));

    // This mocks the cleanup command - we don't really want to execute the command that deletes
    // the contents of /datastore, at least not when unit testing on a workstation!
    PowerMockito.whenNew(DefaultExecutor.class).withNoArguments().thenReturn(mockExecutor);
    Mockito.when(mockExecHandler.hasResult()).thenReturn(true);
    PowerMockito.whenNew(DefaultExecuteResultHandler.class).withNoArguments().thenReturn(mockExecHandler);

    final FutureTask<String> tester = new FutureTask<>(new Callable<String>() {
        @Override
        public String call() {
            LOG.debug("tester thread started");
            try {
                Worker.main(new String[] { "--config", "src/test/resources/workerConfig.ini", "--uuid",
                        "vm123456", "--endless", "--pidFile", "/var/run/arch3_worker.pid" });
            } catch (CancellationException | InterruptedException e) {
                LOG.error("Exception caught: " + e.getMessage());
                return e.getMessage();
            } catch (Exception e) {
                e.printStackTrace();
                fail("Unexpected exception");
                return null;
            } finally {
                Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
                String s = appendEventsIntoString(argCaptor.getAllValues());
                return s;
            }
        }
    });

    final Thread killer = new Thread(new Runnable() {
        @Override
        public void run() {
            LOG.debug("killer thread started");
            try {
                // The endless worker will not end on its own (because it's endless), so we need to
                // wait a little bit (2.5 seconds) and then kill it as if it were killed by the
                // command-line script (kill_worker_daemon.sh).
                Thread.sleep(2500);
            } catch (InterruptedException e) {
                e.printStackTrace();
                LOG.error(e.getMessage());
            }
            tester.cancel(true);
        }
    });

    ExecutorService es = Executors.newFixedThreadPool(2);
    es.execute(tester);
    es.execute(killer);

    try {
        tester.get();
    } catch (CancellationException e) {
        Mockito.verify(mockAppender, Mockito.atLeastOnce()).doAppend(argCaptor.capture());
        List<LoggingEvent> tmpList = new LinkedList<>(argCaptor.getAllValues());
        String output = this.appendEventsIntoString(tmpList);
        assertTrue(output.contains("The \"--endless\" flag was set, this worker will run endlessly!"));
        int numJobsPulled = StringUtils.countMatches(output, " WORKER IS PREPARING TO PULL JOB FROM QUEUE ");
        LOG.info("Number of jobs attempted: " + numJobsPulled);
        assertTrue("number of jobs attempted > 1", numJobsPulled > 1);
    } catch (Exception e) {
        e.printStackTrace();
        fail();
    }
}
From source file:org.apache.kylin.common.restclient.Broadcaster.java
private Broadcaster() {
    Executors.newSingleThreadExecutor().execute(new Runnable() {
        @Override
        public void run() {
            final String[] nodes = KylinConfig.getInstanceFromEnv().getRestServers();
            if (nodes == null || nodes.length < 1) {
                // TODO: if the node count is greater than 1, it means it is a cluster
                logger.warn("There is no available rest server; check the 'kylin.rest.servers' config");
                return;
            }
            final List<RestClient> restClients = Lists.newArrayList();
            for (String node : nodes) {
                restClients.add(new RestClient(node));
            }
            final ExecutorService wipingCachePool = Executors.newFixedThreadPool(restClients.size());
            while (true) {
                try {
                    final BroadcastEvent broadcastEvent = broadcastEvents.takeFirst();
                    logger.info("new broadcast event:" + broadcastEvent);
                    for (final RestClient restClient : restClients) {
                        wipingCachePool.execute(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    restClient.wipeCache(broadcastEvent.getType(), broadcastEvent.getAction(),
                                            broadcastEvent.getName());
                                } catch (IOException e) {
                                    logger.warn("Thread failed during wipe cache at " + broadcastEvent);
                                }
                            }
                        });
                    }
                } catch (Exception e) {
                    logger.error("error running wiping", e);
                }
            }
        }
    });
}
From source file:com.yahoo.ads.pb.PistachiosServer.java
public PistachiosServer() {
    Configuration conf = ConfigurationManager.getConfiguration();
    int numBatchThread = conf.getInt("Profile.Process.Number.Batch.Thread", 64);
    // the original calls omitted the {} placeholder, so the values were never logged
    logger.info("numBatchThread={}", numBatchThread);

    ExecutorService executorService = Executors.newFixedThreadPool(numBatchThread);
    executorService.execute(new Runnable() {
        @Override
        public void run() {
            while (true) {
                logger.info("before process synchronousQueue.size()={}", synchronousQueue.size());
                try {
                    PistachiosMessage message = synchronousQueue.take();
                    instance.handler.process(message.partition, ByteBuffer.wrap(message.value));
                    logger.info("after process synchronousQueue.size()={}", synchronousQueue.size());
                } catch (NoSuchElementException e) {
                    logger.info("error: {}", e);
                } catch (InterruptedException e) {
                    logger.info("error: {}", e);
                } catch (TException e) {
                    logger.info("error: {}", e);
                }
            }
        }
    });
    // executorService.shutdown();
}