List of usage examples for java.util.concurrent ExecutorService submit
Future<?> submit(Runnable task);
<T> Future<T> submit(Runnable task, T result);
<T> Future<T> submit(Callable<T> task);
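A minimal, self-contained sketch of the difference between the Runnable and Callable overloads (all names are illustrative, not from any of the projects below):

import java.util.concurrent.*;

public class SubmitBasics {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        try {
            // submit(Runnable): the Future carries no value; get() returns null on success
            Future<?> fire = pool.submit(() -> System.out.println("side effect"));
            fire.get(); // blocks until the Runnable has completed

            // submit(Callable): the Future carries the computed value
            Future<Integer> answer = pool.submit(() -> 6 * 7);
            System.out.println(answer.get()); // prints 42
        } finally {
            pool.shutdown();
        }
    }
}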
From source file:com.ebay.jetstream.event.processor.esper.raw.EsperTest.java
@Ignore
public void multithreadingTest() {
    Configuration configuration = new Configuration();
    configuration.configure(
            new File("src/test/java/com/ebay/jetstream/event/processor/esper/raw/EsperTestConfig.xml"));
    EPServiceProvider epService = EPServiceProviderManager.getProvider("EsperTest", configuration);
    EsperTestStatement esperStmt = new EsperTestStatement(epService.getEPAdministrator());

    EsperTestSubscriber subscriber = new EsperTestSubscriber();
    EsperTestListener listener = new EsperTestListener();
    esperStmt.setSubscriber(subscriber);
    esperStmt.addListener(listener);

    ExecutorService threadPool = Executors.newCachedThreadPool(new EsperTestThreadFactory());
    EsperTestRunnable[] runnables = new EsperTestRunnable[THREADS_NUM];
    try {
        for (int i = 0; i < THREADS_NUM; i++) {
            runnables[i] = new EsperTestRunnable(epService, i);
            threadPool.submit(runnables[i]);
        }
        threadPool.shutdown();
        threadPool.awaitTermination(200, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        fail("InterruptedException: " + e.getMessage());
    }
    assertTrue("ExecutorService failed to shut down properly", threadPool.isShutdown());

    log.info("[" + subscriber.getIds().first() + "," + subscriber.getIds().last() + "]");
    assertEquals(THREADS_NUM, subscriber.getCount());
    log.info("[" + listener.getIds().first() + "," + listener.getIds().last() + "]");
    assertEquals(THREADS_NUM, listener.getCount());
    assertEquals(THREADS_NUM, listener.getNewCount());
    assertEquals(0, listener.getOldCount());
}
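The shutdown() followed by awaitTermination() sequence above is the standard way to wait for all submitted tasks to finish. A stripped-down sketch of just that pattern (task bodies and counts are illustrative):

import java.util.concurrent.*;

public class AwaitAllTasks {
    public static void main(String[] args) throws InterruptedException {
        final int TASKS = 8;
        ExecutorService pool = Executors.newCachedThreadPool();
        for (int i = 0; i < TASKS; i++) {
            final int id = i;
            pool.submit(() -> System.out.println("task " + id + " on " + Thread.currentThread().getName()));
        }
        pool.shutdown(); // stop accepting new tasks; already-submitted tasks still run
        if (!pool.awaitTermination(10, TimeUnit.SECONDS)) {
            pool.shutdownNow(); // timed out: interrupt whatever is still running
        }
        System.out.println("terminated: " + pool.isTerminated());
    }
}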
From source file:jenkins.plugins.elanceodesk.workplace.notifier.HttpWorkerTest.java
@Test
public void testSendingMultipleWebhooks() throws IOException, InterruptedException {
    ExecutorService executorService = Executors.newCachedThreadPool();
    HttpWorker worker1 = new HttpWorker("http://localhost:8000/test1", "test1body", 30000, 1,
            Mockito.mock(PrintStream.class));
    HttpWorker worker2 = new HttpWorker("http://localhost:8000/test2", "test2body", 30000, 1,
            Mockito.mock(PrintStream.class));
    executorService.submit(worker1);
    executorService.submit(worker2);
    executorService.shutdown();
    executorService.awaitTermination(5, TimeUnit.SECONDS);
    Assert.assertTrue(MyHandler.getTest1Result());
    Assert.assertTrue(MyHandler.getTest2Result());
}
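One thing to note: when the Future returned by submit is discarded, as here, an exception thrown inside the worker is stored in the Future and never reaches an uncaught-exception handler. A sketch of surfacing it via get() (the worker body is an illustrative stand-in):

import java.util.concurrent.*;

public class SurfaceTaskFailure {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newCachedThreadPool();
        Callable<String> task = () -> {
            throw new IllegalStateException("worker failed"); // stand-in for a real worker error
        };
        Future<String> future = pool.submit(task);
        try {
            future.get(); // rethrows the task's exception wrapped in ExecutionException
        } catch (ExecutionException e) {
            System.err.println("task failed: " + e.getCause());
        } finally {
            pool.shutdown();
        }
    }
}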
From source file:sample.ui.mvc.MessageController.java
private void doClick(Message message) {
    Date fabiaoTime = message.getBidOpenTime();
    // get bid id
    String bidId = "";
    while (true) {
        if (System.currentTimeMillis() < fabiaoTime.getTime()) {
            try {
                Thread.sleep(1000);
                continue;
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        bidId = getBidId(message);
        if ("".equals(bidId)) {
            continue;
        } else {
            break;
        }
    }
    String bidMima = "";
    logger.info("bidId=" + bidId);
    // bidId="346";
    if (!"".equals(bidId)) {
        bidMima = getBidMima(bidId);
    }
    logger.info("Bid Mima=" + bidMima);
    message.setBidid(Integer.parseInt(bidId));
    message.setMima(bidMima);
    Worker w = new Worker(message, "3000", ZHANGDAIYIXIAN);
    Worker w2 = new Worker(message, "3000", ZHANGDAIYIXIAN);
    Worker w3 = new Worker(message, "3000", ZHANGDAIYIXIAN);
    // Worker w4 = new Worker(message, "5000", ZHANGDAIYIXIAN);
    // Worker w5 = new Worker(message, "5000", ZHANGDAIYIXIAN);
    // Worker w6 = new Worker(message, "1000", ZHANGDAIYIXIAN);
    // Worker w7 = new Worker(message, "5000", ZHANGHUIFENG);
    ExecutorService service = Executors.newFixedThreadPool(7);
    logger.info("submit");
    service.submit(w);
    service.submit(w2);
    service.submit(w3);
    // service.submit(w4);
    // service.submit(w5);
    // service.submit(w6);
    // service.submit(w7);
}
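Worth noting above: the pool is sized for seven workers but only three are submitted, and the executor is never shut down, so its non-daemon threads keep the JVM alive. For a homogeneous batch like this, invokeAll submits everything and blocks until all tasks complete in one call; a sketch under that assumption (the Callable bodies stand in for the Worker class):

import java.util.*;
import java.util.concurrent.*;

public class BatchSubmit {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService service = Executors.newFixedThreadPool(3);
        List<Callable<String>> workers = Arrays.asList(
                () -> "worker 1 bid placed",
                () -> "worker 2 bid placed",
                () -> "worker 3 bid placed");
        // invokeAll blocks until every task completes, then returns their futures
        List<Future<String>> results = service.invokeAll(workers);
        for (Future<String> f : results) {
            try {
                System.out.println(f.get());
            } catch (ExecutionException e) {
                System.err.println("worker failed: " + e.getCause());
            }
        }
        service.shutdown();
    }
}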
From source file:biz.fstechnology.micro.server.jms.AbstractJmsService.java
/**
 * @see javax.jms.MessageListener#onMessage(javax.jms.Message)
 */
@Override
public void onMessage(Message message) {
    try {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        if (((ObjectMessage) message).getObject() instanceof Result) {
            // no op
            return;
        }
        Request<?> request = (Request<?>) ((ObjectMessage) message).getObject();
        // cast hell...
        Future<Request<?>> preProcessFuture = executor.submit(() -> onPreProcessRequest(request));
        Future<Result<?>> resultFuture = executor.submit(() -> processRequest(preProcessFuture.get()));
        Future<Result<?>> postProcessFuture = executor
                .submit(() -> onPostProcessRequest(request, resultFuture.get()));
        executor.shutdown();

        Result<?> result = postProcessFuture.get();
        ResponseMessageCreator messageCreator = new ResponseMessageCreator();
        messageCreator.setContents(result);
        messageCreator.setRequestId(message.getJMSCorrelationID());
        replyProducer.send(message.getJMSReplyTo(), messageCreator.createMessage(session));
    } catch (JMSException | InterruptedException | ExecutionException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
        Result<Object> result = new Result<>(e);
        try {
            ResponseMessageCreator messageCreator = new ResponseMessageCreator();
            messageCreator.setContents(result);
            messageCreator.setRequestId(message.getJMSCorrelationID());
            replyProducer.send(message.getJMSReplyTo(), messageCreator.createMessage(session));
        } catch (JmsException | JMSException e1) {
            e1.printStackTrace();
        }
    }
}
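Chaining stages by calling get() on an earlier Future from inside a later task is safe here only because a single-thread executor runs tasks strictly in submission order; on a bounded multi-thread pool the same shape can deadlock. A sketch of the same three-stage pipeline using CompletableFuture composition instead (stage names and payloads are illustrative):

import java.util.concurrent.*;

public class PipelineSketch {
    static String preProcess(String req) { return req.trim(); }
    static String process(String req) { return "result(" + req + ")"; }
    static String postProcess(String res) { return res.toUpperCase(); }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        CompletableFuture<String> pipeline = CompletableFuture
                .supplyAsync(() -> preProcess("  hello  "), executor)
                .thenApply(PipelineSketch::process)
                .thenApply(PipelineSketch::postProcess);
        System.out.println(pipeline.get()); // RESULT(HELLO)
        executor.shutdown();
    }
}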
From source file:com.splout.db.integration.TestMultiThreadedFailover.java
@Test
public void test() throws Throwable {
    FileUtils.deleteDirectory(new File(TMP_FOLDER));
    new File(TMP_FOLDER).mkdirs();

    createSploutEnsemble(N_QNODES, N_DNODES);
    String[] qNodeAddresses = new String[N_QNODES];
    for (int i = 0; i < N_QNODES; i++) {
        qNodeAddresses[i] = getqNodes().get(i).getAddress();
    }

    final SploutClient client = new SploutClient(qNodeAddresses);
    final Tablespace testTablespace = createTestTablespace(N_DNODES);
    final Random random = new Random(SEED);
    final AtomicBoolean failed = new AtomicBoolean(false);

    deployIteration(0, random, client, testTablespace);

    for (QNode qnode : getqNodes()) {
        // Make sure all QNodes are aware of the first deploy.
        // There might be some delay as they have to receive notifications via Hazelcast etc.
        long waitedSoFar = 0;
        QueryStatus status = null;
        SploutClient perQNodeClient = new SploutClient(qnode.getAddress());
        do {
            status = perQNodeClient.query(TABLESPACE, "0", "SELECT * FROM " + TABLE + ";", null);
            Thread.sleep(100);
            waitedSoFar += 100;
            if (waitedSoFar > 5000) {
                throw new AssertionError("Waiting too much on a test condition");
            }
        } while (status == null || status.getError() != null);
        log.info("QNode [" + qnode.getAddress() + "] is ready to serve deploy 0.");
    }

    try {
        // Business logic here
        ExecutorService service = Executors.newFixedThreadPool(N_THREADS);

        // This thread deliberately brings DNodes down, then brings them up again.
        service.submit(new Runnable() {
            @Override
            public void run() {
                while (true) {
                    try {
                        Thread.sleep(1000);
                        log.info("Time to kill some DNode...");
                        int whichOne = (int) (Math.random() * getdNodes().size());
                        getdNodes().get(whichOne).testCommand(TestCommands.SHUTDOWN.toString());
                        Thread.sleep(1000);
                        log.info("Time to bring the DNode back to life...");
                        getdNodes().get(whichOne).testCommand(TestCommands.RESTART.toString());
                    } catch (InterruptedException e) {
                        log.info("MFT - Bye bye!");
                    } catch (DNodeException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    } catch (TException e) {
                        failed.set(true);
                        e.printStackTrace();
                        throw new RuntimeException(e);
                    }
                }
            }
        });

        // These threads will continuously perform queries and check that the
        // results are consistent.
        for (int i = 0; i < N_THREADS; i++) {
            service.submit(new Runnable() {
                @SuppressWarnings("unchecked")
                @Override
                public void run() {
                    try {
                        while (true) {
                            int randomDNode = Math.abs(random.nextInt()) % N_DNODES;
                            QueryStatus status = client.query(TABLESPACE, ((randomDNode * 10) - 1) + "",
                                    "SELECT * FROM " + TABLE + ";", null);
                            log.info("Query status -> " + status);
                            assertEquals(1, status.getResult().size());
                            Map<String, Object> jsonResult = (Map<String, Object>) status.getResult().get(0);
                            assertEquals(randomDNode, jsonResult.get("dnode"));
                            Thread.sleep(100);
                        }
                    } catch (InterruptedException ie) {
                        // Bye bye
                        log.info("Bye bye!");
                    } catch (Throwable e) {
                        e.printStackTrace();
                        failed.set(true);
                    }
                }
            });
        }

        Thread.sleep(15000);
        assertEquals(false, failed.get());
    } finally {
        closeSploutEnsemble();
        FileUtils.deleteDirectory(new File(TMP_FOLDER));
    }
}
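A detail worth highlighting in this test: an assertion failure inside a submitted Runnable only completes that task's Future exceptionally and would otherwise go unnoticed, which is why failures are funnelled through an AtomicBoolean that the main thread asserts on. The pattern distilled (names and the failing check are illustrative):

import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class FailureFlag {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean failed = new AtomicBoolean(false);
        ExecutorService service = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 2; i++) {
            service.submit(() -> {
                try {
                    if (System.nanoTime() % 2 == 0) { // stand-in for a failing consistency check
                        throw new AssertionError("inconsistent result");
                    }
                } catch (Throwable t) {
                    failed.set(true); // record the failure for the main thread to assert on
                }
            });
        }
        service.shutdown();
        service.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println(failed.get() ? "some worker failed" : "all workers passed");
    }
}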
From source file:com.easarrive.image.thumbor.executer.service.impl.SQSNotificationHandlerForThumbor.java
private List<NotificationHandleResult<Message, Boolean>> ergodicS3EventMessageRecord(final Message message,
        S3EventMessageContent messageContent) throws AWSPluginException {
    List<S3EventMessageRecord> recordList = messageContent.getRecords();
    if (recordList == null) {
        throw new AWSPluginException("The S3EventMessageRecord list is null");
    }
    if (recordList.size() < 1) {
        throw new AWSPluginException("The S3EventMessageRecord list is empty");
    }
    ExecutorService executorService = Executors.newCachedThreadPool();
    List<Future<NotificationHandleResult<Message, Boolean>>> futureList = new ArrayList<Future<NotificationHandleResult<Message, Boolean>>>();
    for (final S3EventMessageRecord s3EventMessageRecord : recordList) {
        Future<NotificationHandleResult<Message, Boolean>> future = executorService.submit(
                new SQSNotificationHandlerForThumborCallable(message, s3EventMessageRecord, generatePicture));
        futureList.add(future);
    }
    List<NotificationHandleResult<Message, Boolean>> resultList = new ArrayList<NotificationHandleResult<Message, Boolean>>();
    // Collect each handler's result; stop at the first record that failed.
    for (Future<NotificationHandleResult<Message, Boolean>> fs : futureList) {
        try {
            NotificationHandleResult<Message, Boolean> result = fs.get();
            if (logger.isInfoEnabled()) {
                logger.info("The SQS message record ({}) result to handle is {}", result.getMessageId(),
                        result.getData());
            }
            resultList.add(result);
            if (!result.getData()) {
                break;
            }
        } catch (Exception e) {
            if (logger.isErrorEnabled()) {
                logger.error(e.getMessage(), e);
            }
        }
    }
    // Shut down the executor so its worker threads can exit.
    executorService.shutdown();
    return resultList;
}
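The shape above — submit every record, then get() each Future in submission order — is the basic fan-out/fan-in. When results should be consumed as they complete rather than in submission order, ExecutorCompletionService is the standard alternative; a minimal sketch (task payloads are illustrative):

import java.util.concurrent.*;

public class GatherAsCompleted {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newCachedThreadPool();
        CompletionService<String> completion = new ExecutorCompletionService<>(pool);
        int[] delaysMs = { 300, 100, 200 };
        for (int delay : delaysMs) {
            completion.submit(() -> {
                Thread.sleep(delay);
                return "slept " + delay + "ms";
            });
        }
        // take() hands back futures in completion order: 100, 200, 300
        for (int i = 0; i < delaysMs.length; i++) {
            System.out.println(completion.take().get());
        }
        pool.shutdown();
    }
}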
From source file:edu.cornell.med.icb.R.TestRConnectionPool.java
/**
 * Checks that two threads actually get the same connection pool.
 * @throws InterruptedException if the threads are interrupted during the test
 */
@Test
public void validateSingleton() throws InterruptedException {
    final RConnectionPool[] pools = new RConnectionPool[2];
    final CountDownLatch latch = new CountDownLatch(2);
    final ExecutorService threadPool = Executors.newCachedThreadPool();
    try {
        threadPool.submit(new Callable<Boolean>() {
            public Boolean call() {
                pools[0] = RConnectionPool.getInstance();
                latch.countDown();
                return true;
            }
        });
        threadPool.submit(new Callable<Boolean>() {
            public Boolean call() {
                pools[1] = RConnectionPool.getInstance();
                latch.countDown();
                return true;
            }
        });
        latch.await();
        assertNotNull("Connection pool should never be null", pools[0]);
        assertNotNull("Connection pool should never be null", pools[1]);
        assertEquals("Pools should be the same", pools[0], pools[1]);
    } finally {
        threadPool.shutdown();
        if (pools[0] != null) {
            pools[0].close();
        }
        if (pools[1] != null) {
            pools[1].close();
        }
    }
}
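Since submit returns a Future, the same rendezvous can be written without the latch by blocking on get(); a sketch of that variant (names illustrative):

import java.util.concurrent.*;

public class FutureRendezvous {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newCachedThreadPool();
        Future<String> first = pool.submit(() -> Thread.currentThread().getName());
        Future<String> second = pool.submit(() -> Thread.currentThread().getName());
        // get() blocks until each task has run, replacing the CountDownLatch
        System.out.println(first.get() + " / " + second.get());
        pool.shutdown();
    }
}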
From source file:eu.esdihumboldt.hale.io.wfs.AbstractWFSWriter.java
@Override
public IOReport execute(ProgressIndicator progress) throws IOProviderConfigurationException, IOException {
    progress.begin("WFS Transaction", ProgressIndicator.UNKNOWN);

    // configure internal provider
    internalProvider.setDocumentWrapper(createTransaction());

    final PipedInputStream pIn = new PipedInputStream();
    PipedOutputStream pOut = new PipedOutputStream(pIn);
    currentExecuteStream = pOut;

    Future<Response> futureResponse = null;
    IOReporter reporter = createReporter();
    ExecutorService executor = Executors.newSingleThreadExecutor();
    try {
        // read the stream (in another thread)
        futureResponse = executor.submit(new Callable<Response>() {

            @Override
            public Response call() throws Exception {
                Proxy proxy = ProxyUtil.findProxy(targetWfs.getLocation());
                Request request = Request.Post(targetWfs.getLocation()).bodyStream(pIn,
                        ContentType.APPLICATION_XML);
                Executor executor = FluentProxyUtil.setProxy(request, proxy);

                // authentication
                String user = getParameter(PARAM_USER).as(String.class);
                String password = getParameter(PARAM_PASSWORD).as(String.class);

                if (user != null) {
                    // target host
                    int port = targetWfs.getLocation().getPort();
                    String hostName = targetWfs.getLocation().getHost();
                    String scheme = targetWfs.getLocation().getScheme();
                    HttpHost host = new HttpHost(hostName, port, scheme);

                    // add credentials
                    Credentials cred = ClientProxyUtil.createCredentials(user, password);
                    executor.auth(new AuthScope(host), cred);
                    executor.authPreemptive(host);
                }

                try {
                    return executor.execute(request);
                } finally {
                    pIn.close();
                }
            }
        });

        // write the stream
        SubtaskProgressIndicator subprogress = new SubtaskProgressIndicator(progress);
        reporter = (IOReporter) super.execute(subprogress);
    } finally {
        executor.shutdown();
    }

    try {
        Response response = futureResponse.get();
        HttpResponse res = response.returnResponse();
        int statusCode = res.getStatusLine().getStatusCode();
        XPathFactory xPathfactory = XPathFactory.newInstance();
        XPath xpath = xPathfactory.newXPath();
        if (statusCode >= 200 && statusCode < 300) {
            // success
            reporter.setSuccess(reporter.isSuccess());

            // construct summary from response
            try {
                Document responseDoc = parseResponse(res.getEntity());

                // totalInserted
                String inserted = xpath.compile("//TransactionSummary/totalInserted").evaluate(responseDoc);
                // XXX totalUpdated
                // XXX totalReplaced
                // XXX totalDeleted

                reporter.setSummary("Inserted " + inserted + " features.");
            } catch (XPathExpressionException e) {
                log.error("Error in XPath used to evaluate service response");
            } catch (ParserConfigurationException | SAXException e) {
                reporter.error(new IOMessageImpl(MessageFormat.format(
                        "Server returned status code {0}, but could not parse server response", statusCode),
                        e));
                reporter.setSuccess(false);
            }
        } else {
            // failure
            reporter.error(new IOMessageImpl("Server reported failure with code "
                    + res.getStatusLine().getStatusCode() + ": " + res.getStatusLine().getReasonPhrase(),
                    null));
            reporter.setSuccess(false);

            try {
                Document responseDoc = parseResponse(res.getEntity());
                String errorText = xpath.compile("//ExceptionText/text()").evaluate(responseDoc);
                reporter.setSummary("Request failed: " + errorText);
            } catch (XPathExpressionException e) {
                log.error("Error in XPath used to evaluate service response");
            } catch (ParserConfigurationException | SAXException e) {
                reporter.error(new IOMessageImpl("Could not parse server response", e));
                reporter.setSuccess(false);
            }
        }
    } catch (ExecutionException | InterruptedException e) {
        reporter.error(new IOMessageImpl("Failed to execute WFS-T request", e));
        reporter.setSuccess(false);
    }

    progress.end();
    return reporter;
}
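The key move in this example is the PipedInputStream/PipedOutputStream pair: the submitted Callable consumes the request body on its own thread while the current thread produces it, which matters because piped streams deadlock if both ends run on one thread. A stripped-down sketch of the pattern (payload and names are illustrative):

import java.io.*;
import java.util.concurrent.*;

public class PipedStreamSketch {
    public static void main(String[] args) throws Exception {
        PipedInputStream pIn = new PipedInputStream();
        PipedOutputStream pOut = new PipedOutputStream(pIn);

        ExecutorService executor = Executors.newSingleThreadExecutor();
        // The reader runs in another thread, standing in for the HTTP client
        Future<String> consumed = executor.submit(() -> {
            try (BufferedReader reader = new BufferedReader(new InputStreamReader(pIn))) {
                return reader.readLine();
            }
        });

        // The writer side runs on the current thread
        try (Writer writer = new OutputStreamWriter(pOut)) {
            writer.write("<Transaction/>\n");
        }

        System.out.println("consumed: " + consumed.get());
        executor.shutdown();
    }
}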
From source file:com.googlecode.concurrentlinkedhashmap.MultiThreadedTest.java
private void executeWithTimeOut(ConcurrentLinkedHashMap<?, ?> map, Callable<Long> task) {
    ExecutorService es = Executors.newSingleThreadExecutor();
    Future<Long> future = es.submit(task);
    try {
        long timeNS = future.get(timeOut, SECONDS);
        debug("\nExecuted in %d second(s)", NANOSECONDS.toSeconds(timeNS));
        assertThat(map, is(valid()));
    } catch (ExecutionException e) {
        fail("Exception during test: " + e.toString(), e);
    } catch (TimeoutException e) {
        handleTimout(map, es, e);
    } catch (InterruptedException e) {
        fail("", e);
    }
}
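Future.get(timeout, unit) is the usual way to bound how long a submitted task may run before the caller steps in. A minimal sketch with cancellation added (durations are illustrative):

import java.util.concurrent.*;

public class TimedGet {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService es = Executors.newSingleThreadExecutor();
        Future<String> future = es.submit(() -> {
            Thread.sleep(5_000); // deliberately slower than the timeout below
            return "done";
        });
        try {
            System.out.println(future.get(1, TimeUnit.SECONDS));
        } catch (TimeoutException e) {
            future.cancel(true); // interrupt the straggler
            System.out.println("timed out, cancelled: " + future.isCancelled());
        } catch (ExecutionException e) {
            System.err.println("task failed: " + e.getCause());
        } finally {
            es.shutdown();
        }
    }
}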
From source file:com.twitter.graphjet.bipartite.GraphConcurrentTestHelper.java
/**
 * This helper method sets up a concurrent read-write situation with a single writer and multiple
 * readers that access the same underlying bipartiteGraph, and tests for correct edge access after
 * every single edge write via latches. This helps test write flushing after every edge insertion.
 *
 * @param graph is the underlying {@link BipartiteGraph}
 * @param edgesToAdd is a list of edges to add in the graph
 */
public static <T extends BipartiteGraph & DynamicBipartiteGraph> void testConcurrentReadWriteThreads(T graph,
        List<Pair<Long, Long>> edgesToAdd) {
    int numReaders = edgesToAdd.size(); // start reading after first edge is written
    ExecutorService executor = Executors.newFixedThreadPool(2 * (2 * numReaders) + 1);

    List<CountDownLatch> readerStartLatches = Lists.newArrayListWithCapacity(numReaders);
    List<CountDownLatch> readerDoneLatches = Lists.newArrayListWithCapacity(numReaders);
    List<BipartiteGraphReader> leftReaders = Lists.newArrayListWithCapacity(numReaders);
    List<BipartiteGraphReader> rightReaders = Lists.newArrayListWithCapacity(numReaders);

    for (Pair<Long, Long> edge : edgesToAdd) {
        CountDownLatch startLatch = new CountDownLatch(1);
        CountDownLatch doneLatch = new CountDownLatch(2);
        // Each time, get edges for the node added in the previous step
        BipartiteGraphReader leftReader = new BipartiteGraphReader(graph, startLatch, doneLatch,
                edge.getLeft(), true, 0);
        BipartiteGraphReader rightReader = new BipartiteGraphReader(graph, startLatch, doneLatch,
                edge.getRight(), false, 0);
        leftReaders.add(leftReader);
        executor.submit(leftReader);
        rightReaders.add(rightReader);
        executor.submit(rightReader);
        readerStartLatches.add(startLatch);
        readerDoneLatches.add(doneLatch);
    }

    /**
     * The start/done latches achieve the following execution order: writer, then reader 1, then
     * writer, then reader 2, and so on. As a concrete example, suppose we have two readers and a
     * writer; then the start/done latches are used as follows:
     * Initial latches state:
     * s1 = 1, d1 = 1
     * s2 = 1, d2 = 1
     * Execution steps:
     * - writer writes edge 1, sets s1 = 0 and waits on d1
     * - reader 1 reads since s1 == 0 and sets d1 = 0
     * - writer writes edge 2, sets s2 = 0 and waits on d2
     * - reader 2 reads since s2 == 0 and sets d2 = 0
     *
     * One detail to note is that here we have two readers (one for left, one for right) so the done
     * latches are initialized to value 2 so that both readers complete the read before moving on.
     */
    List<WriterInfo> writerInfo = Lists.newArrayListWithCapacity(numReaders);
    for (int i = 0; i < numReaders; i++) {
        // Start writing immediately at first, but then write an edge once the reader finishes
        // reading the previous edge
        CountDownLatch startLatch = (i > 0) ? readerDoneLatches.get(i - 1) : new CountDownLatch(0);
        // Release the next reader
        CountDownLatch doneLatch = readerStartLatches.get(i);
        writerInfo.add(new WriterInfo(edgesToAdd.get(i).getLeft(), edgesToAdd.get(i).getRight(), startLatch,
                doneLatch));
    }
    executor.submit(new BipartiteGraphWriter(graph, writerInfo));

    // Wait for all the threads to finish and then confirm that they worked as expected
    try {
        readerDoneLatches.get(numReaders - 1).await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for last reader was interrupted: ", e);
    }

    // Now we test the readers
    Long2ObjectMap<LongArrayList> leftSideGraph = new Long2ObjectOpenHashMap<LongArrayList>(numReaders);
    Long2ObjectMap<LongArrayList> rightSideGraph = new Long2ObjectOpenHashMap<LongArrayList>(numReaders);
    for (int i = 0; i < numReaders; i++) {
        long leftNode = edgesToAdd.get(i).getLeft();
        long rightNode = edgesToAdd.get(i).getRight();
        // Add edges to the graph
        if (!leftSideGraph.containsKey(leftNode)) {
            leftSideGraph.put(leftNode, new LongArrayList(new long[] { rightNode }));
        } else {
            leftSideGraph.get(leftNode).add(rightNode);
        }
        if (!rightSideGraph.containsKey(rightNode)) {
            rightSideGraph.put(rightNode, new LongArrayList(new long[] { leftNode }));
        } else {
            rightSideGraph.get(rightNode).add(leftNode);
        }
        // Check the read info
        assertEquals(leftSideGraph.get(leftNode).size(), leftReaders.get(i).getQueryNodeDegree());
        assertEquals(leftSideGraph.get(leftNode), leftReaders.get(i).getQueryNodeEdges());
        assertEquals(rightSideGraph.get(rightNode).size(), rightReaders.get(i).getQueryNodeDegree());
        assertEquals(rightSideGraph.get(rightNode), rightReaders.get(i).getQueryNodeEdges());
    }
}
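The latch handoff used above, distilled: the writer releases a reader's start latch after each write and blocks on that reader's done latch before writing again. A two-step sketch (all names illustrative):

import java.util.concurrent.*;

public class LatchHandoff {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newCachedThreadPool();
        CountDownLatch readerStart = new CountDownLatch(1);
        CountDownLatch readerDone = new CountDownLatch(1);

        executor.submit(() -> { // writer
            System.out.println("writer: wrote edge 1");
            readerStart.countDown();          // release the reader
            readerDone.await();               // wait until the reader confirms the read
            System.out.println("writer: wrote edge 2");
            return null;
        });
        executor.submit(() -> { // reader
            readerStart.await();              // wait until the write has been flushed
            System.out.println("reader: saw edge 1");
            readerDone.countDown();
            return null;
        });

        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
    }
}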