List of usage examples for java.util.concurrent Executors newCachedThreadPool
public static ExecutorService newCachedThreadPool()
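The source-file examples below all follow the same basic shape: create the pool with Executors.newCachedThreadPool(), hand it Runnable or Callable tasks, then shut it down. As a minimal, self-contained sketch of that pattern (the class name, task body, and await timeout here are illustrative assumptions, not taken from any of the projects below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CachedThreadPoolExample {
    public static void main(String[] args) throws InterruptedException {
        // A cached pool reuses idle worker threads and creates new ones on demand;
        // idle threads are terminated after 60 seconds.
        ExecutorService pool = Executors.newCachedThreadPool();
        for (int i = 0; i < 5; i++) {
            final int taskId = i;
            pool.execute(() -> System.out.println("task " + taskId + " on " + Thread.currentThread().getName()));
        }
        // Stop accepting new tasks, then wait for the submitted ones to finish.
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}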
From source file:edu.cmu.lti.oaqa.bioasq.concept.rerank.scorers.GoPubMedConceptRetrievalScorer.java
@Override
public void prepare(JCas jcas) throws AnalysisEngineProcessException {
    List<String> tokens = TypeUtil.getOrderedTokens(jcas).stream().map(Token::getCoveredText)
            .map(name -> name.replaceAll("[^A-Za-z0-9_\\-]+", " ").trim())
            .filter(name -> !name.isEmpty() && !stoplist.contains(name.toLowerCase())).collect(toList());
    List<String> wIdConceptNames = TypeUtil.getConcepts(jcas).stream()
            .filter(concept -> !TypeUtil.getConceptIds(concept).isEmpty())
            .map(TypeUtil::getConceptNames)
            .map(names -> names.stream()
                    .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(joining(" ")))
            .collect(toList());
    List<String> woIdConceptNames = TypeUtil.getConcepts(jcas).stream()
            .filter(concept -> TypeUtil.getConceptIds(concept).isEmpty())
            .map(TypeUtil::getConceptNames)
            .map(names -> names.stream()
                    .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(joining(" ")))
            .collect(toList());
    List<String> cmentionNames = TypeUtil.getConceptMentions(jcas).stream()
            .map(ConceptMention::getMatchedName)
            .map(GoPubMedConceptRetrievalScorer::normalizeQuoteName).collect(toList());
    ExecutorService es = Executors.newCachedThreadPool();
    // execute against all tokens
    String concatenatedTokens = String.join(" ", tokens);
    LOG.debug("Query string: {}", concatenatedTokens);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedTokens, pages, hits, ontology);
                String conf = "tokens_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }
    // execute against concatenated concept names
    String concatenatedConceptNames = String.join(" ", Iterables.concat(wIdConceptNames, woIdConceptNames));
    LOG.debug("Query string: {}", concatenatedConceptNames);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedConceptNames, pages, hits, ontology);
                String conf = "concept_names_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }
    // execute against concatenated concept mentions
    String concatenatedCmentions = String.join(" ", cmentionNames);
    LOG.debug("Query string: {}", concatenatedCmentions);
    for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
        es.execute(() -> {
            try {
                List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas,
                        concatenatedCmentions, pages, hits, ontology);
                String conf = "cmention_names_concatenated@" + ontology.name();
                updateFeatureTable(results, conf);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }
    // execute against each concept name that has an ID
    for (String conceptName : wIdConceptNames) {
        LOG.debug("Query string: {}", conceptName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, conceptName,
                            pages, hits, ontology);
                    String conf = "w_id_concept_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }
    // execute against each concept name that has no ID
    for (String conceptName : woIdConceptNames) {
        LOG.debug("Query string: {}", conceptName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, conceptName,
                            pages, hits, ontology);
                    String conf = "wo_id_concept_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }
    // execute against each concept mention
    for (String cmentionName : cmentionNames) {
        LOG.debug("Query string: {}", cmentionName);
        for (BioASQUtil.Ontology ontology : BioASQUtil.Ontology.values()) {
            es.execute(() -> {
                try {
                    List<ConceptSearchResult> results = BioASQUtil.searchOntology(service, jcas, cmentionName,
                            pages, hits, ontology);
                    String conf = "cmention_names_individual@" + ontology.name();
                    updateFeatureTable(results, conf);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
        }
    }
    es.shutdown();
    try {
        if (!es.awaitTermination(timeout, TimeUnit.MINUTES)) {
            LOG.warn("Timeout occurs for one or some concept retrieval services.");
        }
    } catch (InterruptedException e) {
        throw new AnalysisEngineProcessException(e);
    }
    confs = uri2conf2score.columnKeySet();
}
From source file:name.yumao.douyu.http.PlaylistDownloader.java
public static void go(/** final String name, */ final String num) {
    // PlaylistDownloader loader = new PlaylistDownloader("http://");
    ExecutorService service = Executors.newCachedThreadPool();
    // GetList producer = new GetList();
    roomnum = num;
    // Down consumer = new Down();
    // final String id = "";
    service.execute(new Runnable() {
        public void run() {
            while (true) {
                // ZhanqiApiVo vo = HttpClientFromZhanqi.QueryZhanqiDownloadUrl(inNum.getText());
                try {
                    String url = HttpClientFromDouyu.getHTML5DownUrl(num);
                    if (!url.equals("")) {
                        fetchsubPlaylist(new URL(url));
                    } else {
                        logger.info("error");
                        outFile = getPath();
                    }
                } catch (MalformedURLException e1) {
                    // TODO Auto-generated catch block
                    e1.printStackTrace();
                }
                try {
                    Thread.sleep(5000);
                } catch (InterruptedException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
            }
        }
    });
    service.execute(new Runnable() {
        public void run() {
            while (true) {
                logger.info("down ......................");
                URL down = null;
                while (true) {
                    try {
                        down = basket.poll();
                        logger.debug("down:" + down);
                        if (down != null)
                            downloadInternal(down);
                        Thread.sleep(500);
                    } catch (Exception e) {
                        // TODO Auto-generated catch block
                        e.printStackTrace();
                    }
                }
                // download("outtest");
            }
        }
    });
}
From source file:voldemort.store.readonly.swapper.StoreSwapperTest.java
@Test
public void testAdminStoreSwapperWithoutRollback() throws Exception {
    ExecutorService executor = Executors.newCachedThreadPool();
    try {
        // Use the admin store swapper
        StoreSwapper swapper = new AdminStoreSwapper(cluster, executor, adminClient, 1000000, false, false);
        testFetchSwapWithoutRollback(swapper);
    } finally {
        executor.shutdown();
    }
}
From source file:com.twitter.distributedlog.auditor.DLAuditor.java
public Pair<Set<Long>, Set<Long>> collectLedgers(List<URI> uris, List<List<String>> allocationPaths)
        throws IOException {
    Preconditions.checkArgument(uris.size() > 0, "No uri provided to audit");
    String zkServers = validateAndGetZKServers(uris);
    RetryPolicy retryPolicy = new BoundExponentialBackoffRetryPolicy(conf.getZKRetryBackoffStartMillis(),
            conf.getZKRetryBackoffMaxMillis(), Integer.MAX_VALUE);
    ZooKeeperClient zkc = ZooKeeperClientBuilder.newBuilder().name("DLAuditor-ZK").zkServers(zkServers)
            .sessionTimeoutMs(conf.getZKSessionTimeoutMilliseconds()).retryPolicy(retryPolicy)
            .zkAclId(conf.getZkAclId()).build();
    ExecutorService executorService = Executors.newCachedThreadPool();
    try {
        BKDLConfig bkdlConfig = resolveBKDLConfig(zkc, uris);
        logger.info("Resolved bookkeeper config : {}", bkdlConfig);
        BookKeeperClient bkc = BookKeeperClientBuilder.newBuilder().name("DLAuditor-BK").dlConfig(conf)
                .zkServers(bkdlConfig.getBkZkServersForWriter()).ledgersPath(bkdlConfig.getBkLedgersPath())
                .build();
        try {
            Set<Long> bkLedgers = collectLedgersFromBK(bkc, executorService);
            Set<Long> dlLedgers = collectLedgersFromDL(uris, allocationPaths);
            return Pair.of(bkLedgers, dlLedgers);
        } finally {
            bkc.close();
        }
    } finally {
        zkc.close();
        executorService.shutdown();
    }
}
From source file:de.javakaffee.web.msm.integration.NonStickySessionsIntegrationTest.java
@BeforeMethod
public void setUp() throws Throwable {
    final InetSocketAddress address1 = new InetSocketAddress("localhost", MEMCACHED_PORT_1);
    _daemon1 = createDaemon(address1);
    _daemon1.start();

    final InetSocketAddress address2 = new InetSocketAddress("localhost", MEMCACHED_PORT_2);
    _daemon2 = createDaemon(address2);
    _daemon2.start();

    try {
        _tomcat1 = startTomcat(TC_PORT_1);
        _tomcat2 = startTomcat(TC_PORT_2);
    } catch (final Throwable e) {
        LOG.error("could not start tomcat.", e);
        throw e;
    }

    final MemcachedNodesManager nodesManager = MemcachedNodesManager.createFor(MEMCACHED_NODES, null,
            _memcachedClientCallback);
    _client = new MemcachedClient(
            new SuffixLocatorConnectionFactory(nodesManager, nodesManager.getSessionIdFormat(),
                    Statistics.create(), 1000, 1000),
            Arrays.asList(address1, address2));

    final SchemeRegistry schemeRegistry = new SchemeRegistry();
    schemeRegistry.register(new Scheme("http", PlainSocketFactory.getSocketFactory(), 80));
    _httpClient = new DefaultHttpClient(new ThreadSafeClientConnManager(schemeRegistry));

    _executor = Executors.newCachedThreadPool();
}
From source file:com.amazonaws.services.kinesis.multilang.MessageReaderTest.java
@Test
public void messageReaderBuilderTest() {
    InputStream stream = new ByteArrayInputStream("".getBytes());
    MessageReader reader = new MessageReader().initialize(stream, shardId, new ObjectMapper(),
            Executors.newCachedThreadPool());
    Assert.assertNotNull(reader);
}
From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessReadWriteLock.java
@Test
public void testBasic() throws Exception {
    final int CONCURRENCY = 8;
    final int ITERATIONS = 100;

    final Random random = new Random();
    final AtomicInteger concurrentCount = new AtomicInteger(0);
    final AtomicInteger maxConcurrentCount = new AtomicInteger(0);
    final AtomicInteger writeCount = new AtomicInteger(0);
    final AtomicInteger readCount = new AtomicInteger(0);

    List<Future<Void>> futures = Lists.newArrayList();
    ExecutorService service = Executors.newCachedThreadPool();
    for (int i = 0; i < CONCURRENCY; ++i) {
        Future<Void> future = service.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(),
                        new RetryOneTime(1));
                client.start();
                try {
                    InterProcessReadWriteLock lock = new InterProcessReadWriteLock(client, "/lock");
                    for (int i = 0; i < ITERATIONS; ++i) {
                        if (random.nextInt(100) < 10) {
                            doLocking(lock.writeLock(), concurrentCount, maxConcurrentCount, random, 1);
                            writeCount.incrementAndGet();
                        } else {
                            doLocking(lock.readLock(), concurrentCount, maxConcurrentCount, random,
                                    Integer.MAX_VALUE);
                            readCount.incrementAndGet();
                        }
                    }
                } finally {
                    IOUtils.closeQuietly(client);
                }
                return null;
            }
        });
        futures.add(future);
    }

    for (Future<Void> future : futures) {
        future.get();
    }

    System.out.println("Writes: " + writeCount.get() + " - Reads: " + readCount.get() + " - Max Reads: "
            + maxConcurrentCount.get());

    Assert.assertTrue(writeCount.get() > 0);
    Assert.assertTrue(readCount.get() > 0);
    Assert.assertTrue(maxConcurrentCount.get() > 1);
}
From source file:com.alliander.osgp.acceptancetests.config.OslpConfig.java
@Bean(destroyMethod = "releaseExternalResources")
public ServerBootstrap serverBootstrap() {
    final ChannelFactory factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(),
            Executors.newCachedThreadPool());

    final ServerBootstrap bootstrap = new ServerBootstrap(factory);

    bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
        @Override
        public ChannelPipeline getPipeline()
                throws InvalidKeySpecException, NoSuchAlgorithmException, IOException, NoSuchProviderException {
            final ChannelPipeline pipeline = Channels.pipeline();

            pipeline.addLast("oslpEncoder", new OslpEncoder());
            pipeline.addLast("oslpDecoder",
                    new OslpDecoder(OslpConfig.this.oslpSignature(), OslpConfig.this.oslpSignatureProvider()));
            pipeline.addLast("oslpSecurity", OslpConfig.this.oslpSecurityHandler());
            pipeline.addLast("oslpChannelHandler", OslpConfig.this.oslpChannelHandlerServer());

            return pipeline;
        }
    });

    bootstrap.setOption("child.tcpNoDelay", true);
    bootstrap.setOption("child.keepAlive", false);

    return bootstrap;
}
From source file:it.anyplace.sync.bep.BlockPusher.java
public FileUploadObserver pushFile(final DataSource dataSource, @Nullable FileInfo fileInfo, final String folder,
        final String path) {
    checkArgument(connectionHandler.hasFolder(folder),
            "supplied connection handler %s will not share folder %s", connectionHandler, folder);
    checkArgument(fileInfo == null || equal(fileInfo.getFolder(), folder));
    checkArgument(fileInfo == null || equal(fileInfo.getPath(), path));
    try {
        final ExecutorService monitoringProcessExecutorService = Executors.newCachedThreadPool();
        final long fileSize = dataSource.getSize();
        final Set<String> sentBlocks = Sets.newConcurrentHashSet();
        final AtomicReference<Exception> uploadError = new AtomicReference<>();
        final AtomicBoolean isCompleted = new AtomicBoolean(false);
        final Object updateLock = new Object();
        final Object listener = new Object() {
            @Subscribe
            public void handleRequestMessageReceivedEvent(RequestMessageReceivedEvent event) {
                BlockExchageProtos.Request request = event.getMessage();
                if (equal(request.getFolder(), folder) && equal(request.getName(), path)) {
                    try {
                        final String hash = BaseEncoding.base16().encode(request.getHash().toByteArray());
                        logger.debug("handling block request = {}:{}-{} ({})", request.getName(),
                                request.getOffset(), request.getSize(), hash);
                        byte[] data = dataSource.getBlock(request.getOffset(), request.getSize(), hash);
                        checkNotNull(data, "data not found for hash = %s", hash);
                        final Future future = connectionHandler.sendMessage(
                                Response.newBuilder().setCode(BlockExchageProtos.ErrorCode.NO_ERROR)
                                        .setData(ByteString.copyFrom(data)).setId(request.getId()).build());
                        monitoringProcessExecutorService.submit(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    future.get();
                                    sentBlocks.add(hash);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                    // TODO retry on error, register error and throw on watcher
                                } catch (InterruptedException ex) {
                                    // return and do nothing
                                } catch (ExecutionException ex) {
                                    uploadError.set(ex);
                                    synchronized (updateLock) {
                                        updateLock.notifyAll();
                                    }
                                }
                            }
                        });
                    } catch (Exception ex) {
                        logger.error("error handling block request", ex);
                        connectionHandler.sendMessage(Response.newBuilder()
                                .setCode(BlockExchageProtos.ErrorCode.GENERIC).setId(request.getId()).build());
                        uploadError.set(ex);
                        synchronized (updateLock) {
                            updateLock.notifyAll();
                        }
                    }
                }
            }
        };
        connectionHandler.getEventBus().register(listener);
        logger.debug("send index update for file = {}", path);
        final Object indexListener = new Object() {
            @Subscribe
            public void handleIndexRecordAquiredEvent(IndexHandler.IndexRecordAquiredEvent event) {
                if (equal(event.getFolder(), folder)) {
                    for (FileInfo fileInfo : event.getNewRecords()) {
                        if (equal(fileInfo.getPath(), path)
                                && equal(fileInfo.getHash(), dataSource.getHash())) { // TODO check not invalid
                            // sentBlocks.addAll(dataSource.getHashes());
                            isCompleted.set(true);
                            synchronized (updateLock) {
                                updateLock.notifyAll();
                            }
                        }
                    }
                }
            }
        };
        if (indexHandler != null) {
            indexHandler.getEventBus().register(indexListener);
        }
        final IndexUpdate indexUpdate = sendIndexUpdate(folder,
                BlockExchageProtos.FileInfo.newBuilder().setName(path).setSize(fileSize)
                        .setType(BlockExchageProtos.FileInfoType.FILE).addAllBlocks(dataSource.getBlocks()),
                fileInfo == null ? null : fileInfo.getVersionList()).getRight();
        final FileUploadObserver messageUploadObserver = new FileUploadObserver() {

            @Override
            public void close() {
                logger.debug("closing upload process");
                try {
                    connectionHandler.getEventBus().unregister(listener);
                    monitoringProcessExecutorService.shutdown();
                    if (indexHandler != null) {
                        indexHandler.getEventBus().unregister(indexListener);
                    }
                } catch (Exception ex) {
                }
                if (closeConnection && connectionHandler != null) {
                    connectionHandler.close();
                }
                if (indexHandler != null) {
                    FileInfo fileInfo = indexHandler.pushRecord(indexUpdate.getFolder(),
                            Iterables.getOnlyElement(indexUpdate.getFilesList()));
                    logger.info("sent file info record = {}", fileInfo);
                }
            }

            @Override
            public double getProgress() {
                return isCompleted() ? 1d : sentBlocks.size() / ((double) dataSource.getHashes().size());
            }

            @Override
            public String getProgressMessage() {
                return (Math.round(getProgress() * 1000d) / 10d) + "% " + sentBlocks.size() + "/"
                        + dataSource.getHashes().size();
            }

            @Override
            public boolean isCompleted() {
                // return sentBlocks.size() == dataSource.getHashes().size();
                return isCompleted.get();
            }

            @Override
            public double waitForProgressUpdate() throws InterruptedException {
                synchronized (updateLock) {
                    updateLock.wait();
                }
                if (uploadError.get() != null) {
                    throw new RuntimeException(uploadError.get());
                }
                return getProgress();
            }

            @Override
            public DataSource getDataSource() {
                return dataSource;
            }
        };
        return messageUploadObserver;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
From source file:com.reversemind.hypergate.server.HyperGateServer.java
public void start() {
    // Configure the server.
    this.serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
            Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));

    this.handler = new ServerHandler(payloadProcessor, metrics, keepClientAlive);
    // TODO #6 KryoSerializer
    // this.handler = new ServerHandler(payloadProcessor, metrics, keepClientAlive, this.kryoDeserializer);

    // Set up the pipeline factory
    // TODO add Kryo serializer
    this.serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
        public ChannelPipeline getPipeline() throws Exception {
            return Channels.pipeline(
                    // new KryoObjectEncoder(),
                    // new KryoObjectDecoder(ClassResolvers.cacheDisabled(getClass().getClassLoader())),
                    // TODO #6
                    new ObjectEncoder(),
                    new ObjectDecoder(ClassResolvers.cacheDisabled(getClass().getClassLoader())),
                    handler);
        }
    });

    // Bind and start to accept incoming connections.
    this.serverBootstrap.bind(new InetSocketAddress(port));

    // TODO use CORRECT LOGGING
    LOG.debug(this.toString());
    LOG.warn("\n\nServer started\n\n");

    // TODO need to detect HOST NAME
    this.setHost(this.getIpAddress());
    this.running = true;
}