List of usage examples for java.util.concurrent.ExecutorService.submit
Future<?> submit(Runnable task);
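Before the real-world examples below, here is a minimal, self-contained sketch of the basic pattern: submit a task, keep the returned Future, and call get() to wait for completion. Note that ExecutorService also offers Callable-based overloads of submit that return a value-carrying Future. The pool size, class name, and the work done inside the tasks are placeholders, not taken from any of the projects listed here.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class SubmitSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // submit(Runnable): the Future carries no value; get() returns null once the task completes
        Future<?> done = pool.submit(() -> System.out.println("side-effect-only task"));
        done.get(); // blocks until the Runnable finishes, rethrowing any failure as ExecutionException

        // submit(Callable<T>): the Future carries the computed result
        Future<Integer> sum = pool.submit(() -> 1 + 2);
        System.out.println("result = " + sum.get());

        // orderly shutdown: stop accepting new tasks, then wait for running ones to finish
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);
    }
}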
From source file:com.espertech.esper.example.marketdatafeed.FeedSimMain.java
public void run() {
    if (isWaitKeypress) {
        System.out.println("...press enter to start simulation...");
        try {
            System.in.read();
        } catch (IOException e) {
            log.error("Exception reading keyboard input: " + e.getMessage(), e);
        }
    }

    // Configure engine with event names to make the statements more readable.
    // This could also be done in a configuration file.
    Configuration configuration = new Configuration();
    configuration.addEventType("MarketDataEvent", MarketDataEvent.class.getName());

    // Get engine instance
    EPServiceProvider epService = EPServiceProviderManager.getProvider(engineURI, configuration);

    // Set up statements
    TicksPerSecondStatement tickPerSecStmt = new TicksPerSecondStatement(epService.getEPAdministrator());
    tickPerSecStmt.addListener(new RateReportingListener());
    TicksFalloffStatement falloffStmt = new TicksFalloffStatement(epService.getEPAdministrator());
    falloffStmt.addListener(new RateFalloffAlertListener());

    // For continuous non-ending simulation
    if (continuousSimulation) {
        new MarketDataSendRunnable(epService, true).run();
    } else {
        // Send events
        ExecutorService threadPool = Executors.newFixedThreadPool(numberOfThreads);
        MarketDataSendRunnable runnables[] = new MarketDataSendRunnable[numberOfThreads];
        for (int i = 0; i < numberOfThreads; i++) {
            runnables[i] = new MarketDataSendRunnable(epService, false);
            threadPool.submit(runnables[i]);
        }

        int seconds = 0;
        Random random = new Random();
        while (seconds < numSeconds) {
            seconds++;
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                log.info("Interrupted", e);
                break;
            }

            FeedEnum feedToDropOff;
            if (random.nextDouble() * 100 < dropProbability) {
                feedToDropOff = FeedEnum.FEED_A;
                if (random.nextBoolean()) {
                    feedToDropOff = FeedEnum.FEED_B;
                }
                log.info("Setting drop-off for feed " + feedToDropOff);
            } else {
                feedToDropOff = null;
            }
            for (int i = 0; i < runnables.length; i++) {
                runnables[i].setRateDropOffFeed(feedToDropOff);
            }
        }

        log.info("Shutting down threadpool");
        for (int i = 0; i < runnables.length; i++) {
            runnables[i].setShutdown();
        }
        threadPool.shutdown();
        try {
            threadPool.awaitTermination(10, TimeUnit.SECONDS);
        } catch (InterruptedException e) {
            // no action
        }
    }
}
From source file:com.kurento.kmf.test.base.GridBrowserMediaApiTest.java
public void runParallel(List<Node> nodeList, Runnable myFunc)
        throws InterruptedException, ExecutionException {
    ExecutorService exec = Executors.newFixedThreadPool(nodes.size());
    List<Future<?>> results = new ArrayList<>();
    for (int i = 0; i < nodes.size(); i++) {
        results.add(exec.submit(myFunc));
    }
    for (Future<?> r : results) {
        r.get();
    }
}
From source file:com.pinterest.rocksplicator.controller.tasks.LoadSSTTask.java
private void doLoadSST(ExecutorService executor, SegmentBean segment, Role role)
        throws ExecutionException, InterruptedException {
    List<Future<Boolean>> futures = new ArrayList<>(segment.getHosts().size());
    for (HostBean host : segment.getHosts()) {
        Future<Boolean> future = executor.submit(() -> loadSSTFromS3(segment.getName(), host, role));
        futures.add(future);
    }
    for (Future<?> future : futures) {
        future.get();
    }
}
From source file:com.quixey.hadoop.fs.oss.MultiPartUploader.java
@SuppressWarnings("unchecked")
private List<PartETag> uploadParts(final String key, final File file, final String uploadId, int parts)
        throws IOException {
    // construct thread pool
    ExecutorService pool = newExecutorService(file, parts);
    final Future<PartETag>[] futures = new Future[parts];

    for (int i = 0; i < parts; i++) {
        final int partNum = i;
        futures[i] = pool.submit(new Callable<PartETag>() {
            @Override
            public PartETag call() throws Exception {
                return uploadPart(key, file, uploadId, partNum);
            }
        });
    }

    pool.shutdown();

    // wait for uploads to complete
    awaitTermination(pool);

    // retrieve etags and verify uploads
    PartETag[] eTags = new PartETag[parts];
    int i = 0;
    for (Future<PartETag> future : futures) {
        try {
            eTags[i++] = future.get();
        } catch (InterruptedException | ExecutionException e) {
            throw new IOException("Unable to upload part " + i, e);
        }
    }
    return Arrays.asList(eTags);
}
From source file:com.gooddata.http.client.GoodDataHttpClientIntegrationTest.java
@Test
public void shouldRefreshTTConcurrent() throws Exception {
    mock401OnProjects();

    // this serves to block second thread, until the first one gets 401 on projects, which causes TT refresh
    // the test aims to test the second thread is not cycling on 401 and cumulating wrong TT headers
    final Semaphore semaphore = new Semaphore(1);

    requestOnPath(GDC_PROJECTS_PATH, "TT1").respondUsing(new Responder() {
        boolean first = true;

        @Override
        public StubResponse nextResponse(Request request) {
            if (first) {
                first = false;
                return StubResponse.builder().status(200).body(BODY_PROJECTS, CHARSET)
                        .header(CONTENT_HEADER, CONTENT_TYPE_JSON_UTF).build();
            } else {
                semaphore.release();
                return StubResponse.builder().status(401).body(BODY_401, CHARSET)
                        .header(CONTENT_HEADER, CONTENT_TYPE_JSON_UTF)
                        .header(WWW_AUTHENTICATE_HEADER, GOODDATA_REALM + " " + TT_COOKIE)
                        .delay(5, TimeUnit.SECONDS).build();
            }
        }
    });
    mock200OnProjects("TT2");

    mock401OnPath(GDC_PROJECTS2_PATH, "TT1");
    mock200OnPath(GDC_PROJECTS2_PATH, "TT2");

    mock401OnToken();
    respond200OnToken(mock200OnToken("TT1").thenRespond(), "TT2");

    mockLogin();

    final HttpClient client = createGoodDataClient(jadlerLogin, jadlerPassword, jadlerHost);

    // one get at the beginning causing successful login
    performGet(client, jadlerHost, GDC_PROJECTS_PATH, 200);

    // to be able to finish when both threads finished
    final CountDownLatch countDown = new CountDownLatch(2);

    final ExecutorService executor = Executors.newFixedThreadPool(2);
    semaphore.acquire(); // will be released in jadler
    executor.submit(new PerformGetWithCountDown(client, GDC_PROJECTS_PATH, countDown));

    semaphore.acquire(); // causes waiting
    executor.submit(new PerformGetWithCountDown(client, GDC_PROJECTS2_PATH, countDown));

    countDown.await(10, TimeUnit.SECONDS);

    verifyThatRequest().havingMethodEqualTo("GET").havingPathEqualTo(GDC_TOKEN_PATH)
            .havingHeaderEqualTo(SST_HEADER, "SST")
            // if received more than twice, it means the second thread didn't wait, while the first was refreshing TT
            .receivedTimes(2);

    verifyThatRequest().havingMethodEqualTo("GET").havingPathEqualTo(GDC_PROJECTS2_PATH)
            .havingHeaderEqualTo(TT_HEADER, "TT1")
            // the second thread should try only once with expired TT1
            .receivedOnce();

    verifyThatRequest().havingMethodEqualTo("GET").havingPathEqualTo(GDC_PROJECTS2_PATH)
            .havingHeaderEqualTo(TT_HEADER, "TT1").havingHeaderEqualTo(TT_HEADER, "TT2")
            // the second thread should not set more than one X-GDC-AuthTT header
            .receivedNever();
}
From source file:org.jboss.additional.testsuite.jdkall.past.eap_6_4_x.clustering.cluster.web.ClusteredWebSimpleTestCase.java
private void abstractGracefulServe(URL baseURL1, boolean undeployOnly)
        throws IllegalStateException, IOException, InterruptedException, Exception {

    final DefaultHttpClient client = HttpClientUtils.relaxedCookieHttpClient();
    String url1 = baseURL1.toString() + SimpleServlet.URL;

    // Make sure a normal request will succeed
    HttpResponse response = client.execute(new HttpGet(url1));
    Assert.assertEquals(HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
    response.getEntity().getContent().close();

    // Send a long request - in parallel
    String longRunningUrl = url1 + "?" + SimpleServlet.REQUEST_DURATION_PARAM + "=" + REQUEST_DURATION;
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<HttpResponse> future = executor.submit(new RequestTask(client, longRunningUrl));

    // Make sure long request has started
    Thread.sleep(1000);

    if (undeployOnly) {
        // Undeploy the app only.
        undeploy(DEPLOYMENT_1);
    } else {
        // Shutdown server.
        stop(CONTAINER_1);
    }

    // Get result of long request
    // This request should succeed since it initiated before server shutdown
    try {
        response = future.get();
        Assert.assertEquals("Request should succeed since it initiated before undeply or shutdown.",
                HttpServletResponse.SC_OK, response.getStatusLine().getStatusCode());
        response.getEntity().getContent().close();
    } catch (ExecutionException e) {
        e.printStackTrace(System.err);
        Assert.fail(e.getCause().getMessage());
    }

    if (undeployOnly) {
        // If we are only undeploying, then subsequent requests should return 404.
        response = client.execute(new HttpGet(url1));
        Assert.assertEquals("If we are only undeploying, then subsequent requests should return 404.",
                HttpServletResponse.SC_NOT_FOUND, response.getStatusLine().getStatusCode());
        response.getEntity().getContent().close();
    }

    // Cleanup after test.
    if (undeployOnly) {
        // Redeploy the app only.
        deploy(DEPLOYMENT_1);
    } else {
        // Startup server.
        start(CONTAINER_1);
    }
}
From source file:net.jotel.ws.client.WebSocketClientTest.java
@Test
public void reconnect() throws Exception {
    final List<Exception> exceptions = new ArrayList<Exception>();

    URI uri = new URI("ws://not-existing-domain-name:8080/websocket/ws/subscribe");

    final WebSocketClient c = new WebSocketClient();
    c.setWebSocketUri(uri);
    c.setReconnectEnabled(true);
    c.setReconnectInterval(100L);
    c.setReconnectAttempts(2);
    c.addListener(new WebSocketListener() {
        @Override
        public void onMessage(String message) {
        }

        @Override
        public void onMessage(byte[] message) {
        }

        @Override
        public void onError(Exception ex) {
            exceptions.add(ex);
        }

        @Override
        public void onClose(Integer statusCode, String message) {
            // TODO Auto-generated method stub
        }

        @Override
        public void onConnect() {
            // TODO Auto-generated method stub
        }
    });

    try {
        c.connect();
        fail("Expected WebSocketException");
    } catch (WebSocketException ex) {
        // expected
        assertEquals(3, exceptions.size());
        for (Exception e : exceptions) {
            Throwable rootCause = ExceptionUtils.getRootCause(e);
            if (rootCause == null) {
                rootCause = e;
            }
            assertTrue(rootCause instanceof UnknownHostException);
        }
    }

    exceptions.clear();
    c.setReconnectAttempts(0);

    try {
        c.connect();
        fail("Expected WebSocketException");
    } catch (WebSocketException ex) {
        // expected
        assertEquals(1, exceptions.size());
        for (Exception e : exceptions) {
            Throwable rootCause = ExceptionUtils.getRootCause(e);
            if (rootCause == null) {
                rootCause = e;
            }
            assertTrue(rootCause instanceof UnknownHostException);
        }
    }

    exceptions.clear();
    c.setReconnectAttempts(-1);

    ExecutorService executor = Executors.newSingleThreadExecutor();
    Future<?> future = executor.submit(new Runnable() {
        @Override
        public void run() {
            try {
                c.connect();
                fail("Expected WebSocketException");
            } catch (WebSocketException ex) {
                throw new UnhandledException(ex);
            }
        }
    });

    Thread.sleep(2000L);

    c.setReconnectEnabled(false);

    Thread.sleep(2000L);

    executor.shutdown();
    assertTrue(executor.awaitTermination(1, TimeUnit.SECONDS));

    try {
        future.get();
        fail("Expected WebSocketException");
    } catch (Exception ex) {
        // expected
        assertTrue(exceptions.size() > 1);
        for (Exception e : exceptions) {
            Throwable rootCause = ExceptionUtils.getRootCause(e);
            if (rootCause == null) {
                rootCause = e;
            }
            assertTrue(rootCause instanceof UnknownHostException);
        }
    }
}
From source file:com.olacabs.fabric.compute.pipelined.ComuptationPipelineTest.java
@Test
public void testCheck() throws Exception {
    Properties properties = new Properties();
    properties.put("processor.counter_1.triggering_frequency", "1000");
    properties.put("processor.summer_1.triggering_frequency", "1000");
    properties.put("computation.shutdown.wait_time_in_seconds", "1");
    properties.put("computation.channel.channel_type", " disruptor");
    properties.put("computation.disruptor.buffer_size", "64");
    properties.put("computation.disruptor.wait_strategy", "Yield ");

    final String sourceId = "source_1";
    final String pid1 = "summer_1";
    final String pid2 = "counter_1";
    final String pid3 = "printer_1";

    RegisteringLoader loader = RegisteringLoader.builder()
            .source("memory", new MemoryBasedPipelineStreamPipelineSource())
            .stage("printer", new PrinterStreamingProcessor())
            .stage("summer", new SummingProcessor())
            .stage("counter", new CountingProcessor())
            .build();

    ComputationSpec spec = ComputationSpec.builder().name("test-pipeline")
            .source(ComponentInstance.builder().id(sourceId)
                    .meta(ComponentMetadata.builder().type(ComponentType.SOURCE).id(sourceId).name("memory")
                            .build())
                    .build())
            .processor(ComponentInstance.builder().id(pid1)
                    .meta(ComponentMetadata.builder().type(ComponentType.PROCESSOR).id(pid1).name("summer")
                            .build())
                    .build())
            .processor(ComponentInstance.builder().id(pid2)
                    .meta(ComponentMetadata.builder().type(ComponentType.PROCESSOR).id(pid2).name("counter")
                            .build())
                    .build())
            .processor(ComponentInstance.builder().id(pid3)
                    .meta(ComponentMetadata.builder().type(ComponentType.PROCESSOR).id(pid3).name("printer")
                            .build())
                    .build())
            .connection(Connection.builder().fromType(ComponentType.SOURCE).from(sourceId).to(pid1).build())
            .connection(Connection.builder().fromType(ComponentType.SOURCE).from(sourceId).to(pid2).build())
            .connection(Connection.builder().fromType(ComponentType.SOURCE).from(sourceId).to(pid3).build())
            .connection(Connection.builder().fromType(ComponentType.PROCESSOR).from(pid1).to(pid3).build())
            .connection(Connection.builder().fromType(ComponentType.PROCESSOR).from(pid2).to(pid3).build())
            .properties(properties).build();

    System.out.println(new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(spec));

    Linker linker = new Linker(loader);
    ComputationPipeline pipeline = linker.build(spec);
    pipeline.initialize(properties);

    ExecutorService executor = Executors.newSingleThreadExecutor();

    ConsoleReporter reporter = ConsoleReporter
            .forRegistry(SharedMetricRegistries.getOrCreate("metrics-registry"))
            .convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS).build();
    reporter.start(1, TimeUnit.SECONDS);

    executor.submit(pipeline::start);
    Thread.sleep(2000);
    pipeline.stop();

    reporter.stop();
    executor.shutdownNow();
}
From source file:org.apache.camel.component.http4.HttpConcurrentTest.java
private void doSendMessages(int files, int poolSize) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(poolSize);
    // we access the responses Map below only inside the main thread,
    // so no need for a thread-safe Map implementation
    Map<Integer, Future<String>> responses = new HashMap<Integer, Future<String>>();

    for (int i = 0; i < files; i++) {
        final int index = i;
        Future<String> out = executor.submit(new Callable<String>() {
            public String call() throws Exception {
                return template.requestBody("http4://" + getHostName() + ":" + getPort(), null, String.class);
            }
        });
        responses.put(index, out);
    }

    assertEquals(files, responses.size());

    // get all responses
    Set<String> unique = new HashSet<String>();
    for (Future<String> future : responses.values()) {
        unique.add(future.get());
    }

    // should be 'files' unique responses
    assertEquals("Should be " + files + " unique responses", files, unique.size());

    executor.shutdownNow();
}
From source file:com.github.tteofili.looseen.Test20NewsgroupsClassification.java
private void testClassifier(final IndexReader ar, long startTime, IndexReader testReader,
        ExecutorService service, List<Future<String>> futures, Classifier<BytesRef> classifier) {
    futures.add(service.submit(() -> {
        ConfusionMatrixGenerator.ConfusionMatrix confusionMatrix;
        if (split) {
            confusionMatrix = ConfusionMatrixGenerator.getConfusionMatrix(testReader, classifier,
                    CATEGORY_FIELD, BODY_FIELD, 60000 * 30);
        } else {
            confusionMatrix = ConfusionMatrixGenerator.getConfusionMatrix(ar, classifier,
                    CATEGORY_FIELD, BODY_FIELD, 60000 * 30);
        }

        final long endTime = System.currentTimeMillis();
        final int elapse = (int) (endTime - startTime) / 1000;

        return " * " + classifier + " \n * accuracy = " + confusionMatrix.getAccuracy()
                + "\n * precision = " + confusionMatrix.getPrecision()
                + "\n * recall = " + confusionMatrix.getRecall()
                + "\n * f1-measure = " + confusionMatrix.getF1Measure()
                + "\n * avgClassificationTime = " + confusionMatrix.getAvgClassificationTime()
                + "\n * time = " + elapse + " (sec)\n ";
    }));
}
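Most of the examples above share the same fan-out/fan-in shape: submit every task, keep each returned Future, call get() on each to collect results or surface failures, then shut the pool down and wait for termination. The sketch below distills that pattern under stated assumptions; the FanOutFanIn class name, the doWork(int) task, the pool size of 4, and the count of 10 tasks are placeholders, not taken from any of the projects listed here.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class FanOutFanIn {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        List<Future<Integer>> futures = new ArrayList<>();

        // fan out: submit one Callable per unit of work and collect the Futures
        for (int i = 0; i < 10; i++) {
            final int n = i;
            futures.add(pool.submit(() -> doWork(n)));
        }

        // fan in: get() blocks until each task is done and rethrows task failures as ExecutionException
        int total = 0;
        for (Future<Integer> f : futures) {
            total += f.get();
        }
        System.out.println("total = " + total);

        // orderly shutdown: stop accepting new tasks, then wait a bounded time for running ones
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }

    // placeholder task; the real examples above send market data, upload file parts, run HTTP requests, etc.
    private static int doWork(int n) {
        return n * n;
    }
}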