List of usage examples for java.util.concurrent Executors newCachedThreadPool
public static ExecutorService newCachedThreadPool()
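A cached pool creates a new worker thread whenever no idle one is available, reuses threads that finished earlier tasks, and terminates workers that have been idle for 60 seconds. A minimal, self-contained sketch of the typical submit/shutdown lifecycle (the class name and task body are illustrative, not from any of the projects below):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    public class CachedPoolSketch {
        public static void main(String[] args) throws Exception {
            ExecutorService pool = Executors.newCachedThreadPool();
            // submit returns a Future; get() blocks until the task completes
            Future<String> result = pool.submit(() -> "ran on " + Thread.currentThread().getName());
            System.out.println(result.get());
            // stop accepting new tasks, then wait for running ones to drain
            pool.shutdown();
            pool.awaitTermination(10, TimeUnit.SECONDS);
        }
    }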
From source file:com.quinsoft.zeidon.standardoe.DefaultJavaOeConfiguration.java
    @Override
    public ExecutorService getActivateThreadPool() {
        if (activatePoolThread == null)
            activatePoolThread = Executors.newCachedThreadPool();
        return activatePoolThread;
    }
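Note that the lazy initialization above is unsynchronized, so two threads calling getActivateThreadPool() concurrently could each create a pool. A minimal thread-safe variant (a sketch mirroring the names above, not the original project's code):

    private ExecutorService activatePoolThread;

    @Override
    public synchronized ExecutorService getActivateThreadPool() {
        if (activatePoolThread == null)
            activatePoolThread = Executors.newCachedThreadPool();
        return activatePoolThread;
    }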
From source file:org.usergrid.websocket.WebSocketServer.java
    public void startServer() {
        logger.info("Starting Usergrid WebSocket Server");
        if (realm != null) {
            securityManager = new DefaultSecurityManager(realm);
        }
        ServerBootstrap bootstrap = new ServerBootstrap(new NioServerSocketChannelFactory(
                Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));
        // Set up the pipeline factory.
        ExecutionHandler executionHandler = new ExecutionHandler(
                new OrderedMemoryAwareThreadPoolExecutor(16, 1048576, 1048576));
        // Set up the event pipeline factory.
        bootstrap.setPipelineFactory(
                new WebSocketServerPipelineFactory(emf, smf, management, securityManager, executionHandler, ssl));
        // Bind and start to accept incoming connections.
        channel = bootstrap.bind(new InetSocketAddress(8088));
        logger.info("Usergrid WebSocket Server started...");
    }
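In this Netty 3.x API, NioServerSocketChannelFactory takes two executors: the first cached pool supplies the boss threads that accept connections, and the second supplies the worker threads that perform I/O. A cached pool fits well here because Netty itself bounds how many threads it actually requests from each.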
From source file:com.linkedin.pinot.integration.tests.MetadataAndDictionaryAggregationPlanClusterIntegrationTest.java
    private void createAndUploadSegments(List<File> avroFiles, String tableName, boolean createStarTreeIndex,
            List<String> rawIndexColumns, Schema pinotSchema) throws Exception {
        TestUtils.ensureDirectoriesExistAndEmpty(_segmentDir, _tarDir);
        ExecutorService executor = Executors.newCachedThreadPool();
        ClusterIntegrationTestUtils.buildSegmentsFromAvro(avroFiles, 0, _segmentDir, _tarDir, tableName,
                createStarTreeIndex, rawIndexColumns, pinotSchema, executor);
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.MINUTES);
        uploadSegments(_tarDir);
    }
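shutdown() stops the executor from accepting new work but lets the in-flight segment builds finish; awaitTermination(10, TimeUnit.MINUTES) then blocks until they drain or the timeout elapses. Its boolean result is ignored here, so a timeout would fall through silently to uploadSegments. A sketch that surfaces the timeout instead (illustrative, not from the Pinot sources):

    executor.shutdown();
    if (!executor.awaitTermination(10, TimeUnit.MINUTES)) {
        throw new IllegalStateException("segment build did not finish within 10 minutes");
    }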
From source file:edu.cmu.lti.oaqa.bioqa.providers.kb.TmToolConceptProvider.java
    @Override
    public List<Concept> getConcepts(List<JCas> jcases) throws AnalysisEngineProcessException {
        // send request
        List<String> normalizedTexts = jcases.stream().map(JCas::getDocumentText)
                .map(PubAnnotationConvertUtil::normalizeText).collect(toList());
        ListMultimap<Integer, PubAnnotation.Denotation> index2denotations = Multimaps
                .synchronizedListMultimap(ArrayListMultimap.create());
        ExecutorService es = Executors.newCachedThreadPool();
        for (String trigger : triggers) {
            es.submit(() -> {
                try {
                    List<String> denotationStrings = requestConcepts(normalizedTexts, trigger);
                    assert denotationStrings.size() == jcases.size();
                    for (int i = 0; i < jcases.size(); i++) {
                        PubAnnotation.Denotation[] denotations = gson.fromJson(denotationStrings.get(i),
                                PubAnnotation.Denotation[].class);
                        index2denotations.putAll(i, Arrays.asList(denotations));
                    }
                } catch (Exception e) {
                    throw TmToolConceptProviderException.unknownException(trigger, e);
                }
            });
        }
        es.shutdown();
        try {
            boolean status = es.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            if (!status) {
                throw new AnalysisEngineProcessException();
            }
        } catch (InterruptedException e) {
            throw new AnalysisEngineProcessException(e);
        }
        // convert denotation strings
        List<Concept> concepts = new ArrayList<>();
        for (int i = 0; i < jcases.size(); i++) {
            JCas jcas = jcases.get(i);
            List<PubAnnotation.Denotation> denotations = index2denotations.get(i);
            try {
                concepts.addAll(PubAnnotationConvertUtil.convertDenotationsToConcepts(jcas, denotations));
            } catch (StringIndexOutOfBoundsException e) {
                throw TmToolConceptProviderException.offsetOutOfBounds(jcas.getDocumentText(), denotations, e);
            }
        }
        return concepts;
    }
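One caveat worth noting: exceptions thrown inside the submitted lambdas are captured in the Futures that es.submit returns, and those Futures are discarded, so the TmToolConceptProviderException raised in the catch block never propagates to the caller. A sketch that would surface task failures (a hypothetical rework, not the project's code):

    List<Future<?>> futures = new ArrayList<>();
    for (String trigger : triggers) {
        futures.add(es.submit(() -> { /* request and index denotations for this trigger */ }));
    }
    es.shutdown();
    for (Future<?> future : futures) {
        future.get(); // rethrows any task failure wrapped in an ExecutionException
    }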
From source file:com.linkedin.pinot.integration.tests.StarTreeClusterIntegrationTest.java
    /**
     * Generate the reference and star tree indexes and upload to corresponding tables.
     * @param avroFiles
     * @param tableName
     * @param starTree
     * @throws IOException
     * @throws ArchiveException
     * @throws InterruptedException
     */
    private void generateAndUploadSegments(List<File> avroFiles, String tableName, boolean starTree)
            throws IOException, ArchiveException, InterruptedException {
        BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_segmentsDir);
        BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_tarredSegmentsDir);
        ExecutorService executor = Executors.newCachedThreadPool();
        BaseClusterIntegrationTest.buildSegmentsFromAvro(avroFiles, executor, 0, _segmentsDir,
                _tarredSegmentsDir, tableName, starTree, getSingleValueColumnsSchema());
        executor.shutdown();
        executor.awaitTermination(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS);
        for (String segmentName : _tarredSegmentsDir.list()) {
            LOGGER.info("Uploading segment {}", segmentName);
            File file = new File(_tarredSegmentsDir, segmentName);
            FileUploadUtils.sendSegmentFile(ControllerTestUtils.DEFAULT_CONTROLLER_HOST,
                    ControllerTestUtils.DEFAULT_CONTROLLER_API_PORT, segmentName, new FileInputStream(file),
                    file.length());
        }
    }
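The build-then-drain pattern is the same as in the previous example; here each tarred segment is then streamed to the controller via FileUploadUtils. Note that File.list() returns null when the path is not a readable directory, so the for-each loop relies on the build step having populated _tarredSegmentsDir.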
From source file:com.amazonaws.services.kinesis.multilang.MessageReaderTest.java
    @Test
    public void unexcpectedStatusFailure() {
        BufferedReader bufferReader = Mockito.mock(BufferedReader.class);
        try {
            Mockito.doAnswer(new Answer() {
                private boolean returnedOnce = false;

                @Override
                public Object answer(InvocationOnMock invocation) throws Throwable {
                    if (returnedOnce) {
                        return "{\"action\":\"status\",\"responseFor\":\"processRecords\"}";
                    } else {
                        returnedOnce = true;
                        return "{\"action\":\"shutdown\",\"reason\":\"ZOMBIE\"}";
                    }
                }
            }).when(bufferReader).readLine();
        } catch (IOException e) {
            Assert.fail("There shouldn't be an exception while setting up this mock.");
        }

        MessageReader reader = new MessageReader().initialize(bufferReader, shardId, new ObjectMapper(),
                Executors.newCachedThreadPool());
        try {
            reader.getNextMessageFromSTDOUT().get();
        } catch (Exception e) {
            e.printStackTrace();
            Assert.fail("MessageReader should have handled the bad message gracefully");
        }
    }
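Here the cached pool is passed to MessageReader.initialize and backs the asynchronous read: getNextMessageFromSTDOUT() returns a future whose get() blocks the test thread. The mock yields the shutdown line on the first readLine() call and the mismatched status line on every later call; the test passes as long as the reader handles that out-of-order status without throwing.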
From source file:com.ottogroup.bi.asap.pipeline.MicroPipeline.java
    /**
     * Initializes the pipeline instance using the provided input. The newly created micro pipeline will use an
     * {@link ExecutorService} created through {@link Executors#newCachedThreadPool()} as runtime environment
     * for all {@link SourceExecutor}, {@link OperatorExecutor} and {@link EmitterExecutor} instances.
     * @param id unique identifier used to reference the micro pipeline (required)
     * @param description pipeline description (optional)
     * @throws RequiredInputMissingException thrown in case any of the required parameters show no value
     */
    public MicroPipeline(final String id, final String description) throws RequiredInputMissingException {
        ///////////////////////////////////////////////////////////////////
        // validate input
        if (StringUtils.isBlank(id))
            throw new RequiredInputMissingException("Missing required input for 'id'");
        ///////////////////////////////////////////////////////////////////

        this.id = id;
        this.description = description;
        this.executorService = Executors.newCachedThreadPool();
        this.externalExecutorService = false;

        if (logger.isDebugEnabled())
            logger.debug("micro pipeline instantiated [id=" + id + ", description=" + description
                    + ", executorService=internal]");
    }
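The externalExecutorService = false flag records that the pool was created internally rather than injected, presumably so the pipeline knows it owns the executor and is responsible for shutting it down itself.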
From source file:com.couchbase.client.core.endpoint.query.QueryHandlerTest.java
    @Before
    @SuppressWarnings("unchecked")
    public void setup() {
        responseBuffer = new Disruptor<ResponseEvent>(new EventFactory<ResponseEvent>() {
            @Override
            public ResponseEvent newInstance() {
                return new ResponseEvent();
            }
        }, 1024, Executors.newCachedThreadPool());
        firedEvents = Collections.synchronizedList(new ArrayList<CouchbaseMessage>());
        latch = new CountDownLatch(1);
        responseBuffer.handleEventsWith(new EventHandler<ResponseEvent>() {
            @Override
            public void onEvent(ResponseEvent event, long sequence, boolean endOfBatch) throws Exception {
                firedEvents.add(event.getMessage());
                latch.countDown();
            }
        });
        responseRingBuffer = responseBuffer.start();

        CoreEnvironment environment = mock(CoreEnvironment.class);
        when(environment.scheduler()).thenReturn(Schedulers.computation());
        when(environment.queryEnabled()).thenReturn(Boolean.TRUE);
        when(environment.maxRequestLifetime()).thenReturn(10000L);
        when(environment.autoreleaseAfter()).thenReturn(2000L);
        endpoint = mock(AbstractEndpoint.class);
        when(endpoint.environment()).thenReturn(environment);
        when(environment.userAgent()).thenReturn("Couchbase Client Mock");
        queue = new ArrayDeque<QueryRequest>();
        handler = new QueryHandler(endpoint, responseRingBuffer, queue, false);
        channel = new EmbeddedChannel(handler);
    }
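The Disruptor constructor used here (the pre-4.x LMAX API) takes an Executor from which it claims one thread per registered event handler; a cached pool suits that pattern because it grows on demand and never rejects a submission while running.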
From source file:com.chess.genesis.net.SyncClient.java
    private void sync_active(final JSONObject json) {
        try {
            final ArrayList<String> list_need = getNeedList(json.getJSONArray("gameids"));
            final ExecutorService pool = Executors.newCachedThreadPool();

            for (final String item : list_need) {
                if (error)
                    return;
                final NetworkClient nc = new NetworkClient(context, handle);
                nc.game_info(item);
                pool.submit(nc);
                lock++;
            }

            // don't save time if only syncing active
            if (syncType == ACTIVE_SYNC)
                return;

            // Save sync time
            final long time = json.getLong("time");
            final PrefEdit pref = new PrefEdit(context);
            pref.putLong(R.array.pf_lastgamesync, time);
            pref.commit();
        } catch (final JSONException e) {
            throw new RuntimeException(e.getMessage(), e);
        }
    }
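The pool here is never shut down explicitly; this leans on cached-pool semantics, since idle workers terminate after 60 seconds, so no threads linger long after the NetworkClient tasks complete. The lock++ counter appears to track outstanding requests so the handler knows when the sync has drained.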