Usage examples for java.util.concurrent ExecutorService.submit, collected from open-source projects.
Future<?> submit(Runnable task);
<T> Future<T> submit(Runnable task, T result);
<T> Future<T> submit(Callable<T> task);
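Before the project examples, a minimal self-contained sketch of the two overloads used throughout this page (the class name and values are illustrative only, not taken from any project below):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SubmitSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        try {
            // submit(Runnable): the Future carries no value; get() returns null on success
            // and rethrows any task exception wrapped in an ExecutionException
            Future<?> done = pool.submit(() -> System.out.println("side effect"));
            done.get(); // blocks until the task completes

            // submit(Callable<T>): the Future carries the task's return value
            Future<Integer> answer = pool.submit(() -> 6 * 7);
            System.out.println(answer.get()); // prints 42
        } finally {
            pool.shutdown(); // accept no new tasks; already-submitted tasks still run
        }
    }
}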
From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessSemaphore.java
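Two Callable tasks share a cached thread pool: the first acquires a semaphore lease twice, while the second waits on a latch and then raises the shared lease count so the first task's second acquire can succeed. Calling get() on both futures blocks until the tasks finish and rethrows any assertion failure from the worker threads.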
@Test
public void testThreadedLeaseIncrease() throws Exception {
    final Timing timing = new Timing();
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    try {
        client.start();

        final SharedCount count = new SharedCount(client, "/foo/count", 1);
        count.start();

        final InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test", count);

        ExecutorService service = Executors.newCachedThreadPool();

        final CountDownLatch latch = new CountDownLatch(1);
        Future<Object> future1 = service.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                Lease lease = semaphore.acquire(timing.seconds(), TimeUnit.SECONDS);
                Assert.assertNotNull(lease);
                latch.countDown();
                lease = semaphore.acquire(timing.seconds(), TimeUnit.SECONDS);
                Assert.assertNotNull(lease);
                return null;
            }
        });
        Future<Object> future2 = service.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                Assert.assertTrue(latch.await(timing.seconds(), TimeUnit.SECONDS));
                timing.sleepABit(); // make sure second acquire is waiting
                Assert.assertTrue(count.trySetCount(2));
                return null;
            }
        });

        future1.get();
        future2.get();
    } finally {
        IOUtils.closeQuietly(client);
    }
}
From source file:broadwick.montecarlo.MonteCarlo.java
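A producer thread submits one Runnable per simulation to a fixed pool sized to the number of available processors, then uses shutdown() plus an awaitTermination() loop to block until every scenario has pushed its results onto a queue, finishing with a poison-pill marker for the consumer.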
@Override
public void run() {
    log.trace("Starting Monte Carlo results producer thread");
    try {
        final int poolSize = Runtime.getRuntime().availableProcessors();
        final ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("MCScenarioProducer-%d")
                .setDaemon(true).build();
        final ExecutorService es = Executors.newFixedThreadPool(poolSize, threadFactory);
        final RNG generator = new RNG(RNG.Generator.Well44497b);

        final StopWatch sw = new StopWatch();
        sw.start();
        for (int i = 0; i < numSimulations; i++) {
            es.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        log.trace("Monte Carlo producer: creating scenario object");
                        final MonteCarloScenario scenario = simulation.copyOf();
                        final MonteCarloResults results = scenario
                                .run(generator.getInteger(0, Integer.MAX_VALUE - 1));
                        log.trace("Monte Carlo producer: generated results {}", results.getExpectedValue());
                        queue.put(results);
                    } catch (Exception e) {
                        log.error("Error running Monte Carlo simulation {}",
                                Throwables.getStackTraceAsString(e));
                    }
                }
            });
        }
        es.shutdown();
        while (!es.isTerminated()) {
            es.awaitTermination(1, TimeUnit.SECONDS);
        }
        queue.put(new Poison());
        sw.stop();
        log.info("Finished {} simulations in {}.", numSimulations, sw);
    } catch (Exception ex) {
        log.error("Monte Carlo simulation error: {}", Throwables.getStackTraceAsString(ex));
    }
}
From source file:com.espertech.esper.multithread.TestMTDeterminismInsertInto.java
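This determinism test submits one Callable per sender thread while holding a shared write lock, releases the lock so all threads start at once, and later checks each task's Boolean result via Future.get() after shutting the pool down.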
private void tryMultiInsertGroup(int numThreads, int numStatements, int numEvents) throws Exception {
    Configuration config = SupportConfigFactory.getConfiguration();
    // This should fail all tests in this class:
    // config.getEngineDefaults().getThreading().setInsertIntoDispatchPreserveOrder(false);

    EPServiceProvider engine = EPServiceProviderManager.getDefaultProvider(config);
    engine.initialize();

    // set up statements
    EPStatement[] insertIntoStmts = new EPStatement[numStatements];
    for (int i = 0; i < numStatements; i++) {
        insertIntoStmts[i] = engine.getEPAdministrator().createEPL("insert into MyStream select " + i
                + " as ident,count(*) as cnt from " + SupportBean.class.getName());
    }

    EPStatement stmtInsertTwo = engine.getEPAdministrator()
            .createEPL("select ident, sum(cnt) as mysum from MyStream group by ident");
    SupportUpdateListener listener = new SupportUpdateListener();
    stmtInsertTwo.addListener(listener);

    // execute
    ExecutorService threadPool = Executors.newFixedThreadPool(numThreads);
    Future future[] = new Future[numThreads];
    ReentrantReadWriteLock sharedStartLock = new ReentrantReadWriteLock();
    sharedStartLock.writeLock().lock();
    for (int i = 0; i < numThreads; i++) {
        future[i] = threadPool.submit(
                new SendEventRWLockCallable(i, sharedStartLock, engine, new GeneratorIterator(numEvents)));
    }
    Thread.sleep(100);
    sharedStartLock.writeLock().unlock();

    threadPool.shutdown();
    threadPool.awaitTermination(10, TimeUnit.SECONDS);

    for (int i = 0; i < numThreads; i++) {
        assertTrue((Boolean) future[i].get());
    }

    // assert result
    EventBean newEvents[] = listener.getNewDataListFlattened();
    ArrayList resultsPerIdent[] = new ArrayList[numStatements];
    for (EventBean theEvent : newEvents) {
        int ident = (Integer) theEvent.get("ident");
        if (resultsPerIdent[ident] == null) {
            resultsPerIdent[ident] = new ArrayList();
        }
        long mysum = (Long) theEvent.get("mysum");
        resultsPerIdent[ident].add(mysum);
    }

    for (int statement = 0; statement < numStatements; statement++) {
        for (int i = 0; i < numEvents - 1; i++) {
            long expected = total(i + 1);
            assertEquals(expected, resultsPerIdent[statement].get(i));
        }
    }

    // destroy
    for (int i = 0; i < numStatements; i++) {
        insertIntoStmts[i].destroy();
    }
    stmtInsertTwo.destroy();
}
From source file:gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java
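Each crawl job is submitted as a Runnable that rate-limits itself, queries the Google Search Console API for pages, and either records the results or re-queues the job (or expanded sub-jobs) for the next round when it fails or hits the API row limit.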
private void submitJob(final Pair<String, FilterOperator> job, final ApiDimensionFilter countryFilter,
        final String startDate, final String endDate, final List<Dimension> dimensions, ExecutorService es,
        final ConcurrentLinkedDeque<String> allPages,
        final ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound) {
    es.submit(new Runnable() {
        @Override
        public void run() {
            try {
                LIMITER.acquirePermits(1);
            } catch (InterruptedException e) {
                throw new RuntimeException("RateBasedLimiter got interrupted.", e);
            }

            String countryString = countryFilterToString(countryFilter);
            List<ApiDimensionFilter> filters = new LinkedList<>();
            filters.add(countryFilter);

            String prefix = job.getLeft();
            FilterOperator operator = job.getRight();
            String jobString = String.format("job(prefix: %s, operator: %s)", prefix, operator);
            filters.add(GoogleWebmasterFilter.pageFilter(operator, prefix));

            List<String> pages;
            try {
                pages = _client.getPages(_siteProperty, startDate, endDate, countryString,
                        GoogleWebmasterClient.API_ROW_LIMIT, dimensions, filters, 0);
                log.debug(String.format("%d pages fetched for %s market-%s from %s to %s.", pages.size(),
                        jobString, countryString, startDate, endDate));
            } catch (IOException e) {
                log.debug(String.format("%s failed due to %s. Retrying...", jobString, e.getMessage()));
                nextRound.add(job);
                return;
            }

            // If the number of pages is at the LIMIT, it must be a "CONTAINS" job.
            // We need to create sub-tasks, and check the current page with "EQUALS".
            if (pages.size() == GoogleWebmasterClient.API_ROW_LIMIT) {
                log.info(String.format("Expanding the prefix '%s'", prefix));
                expandJobs(nextRound, prefix);
                nextRound.add(Pair.of(prefix, FilterOperator.EQUALS));
            } else {
                // Otherwise, we're done with the current job.
                allPages.addAll(pages);
            }
        }
    });
}
From source file:com.btoddb.fastpersitentqueue.flume.FpqChannelTest.java
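A stress test that submits pusher and popper Runnables to one shared pool, collects their Futures in a set, and loops on Future.get() until all of them complete, then compares the pushed and popped checksums.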
@Test
public void testThreading() throws Exception {
    final int numEntries = 1000;
    final int numPushers = 4;
    final int numPoppers = 4;
    final int entrySize = 1000;
    channel.setMaxTransactionSize(2000);
    final int popBatchSize = 100;
    channel.setMaxMemorySegmentSizeInBytes(10000000);
    channel.setMaxJournalFileSize(10000000);
    channel.setMaxJournalDurationInMs(30000);
    channel.setFlushPeriodInMs(1000);
    channel.setNumberOfFlushWorkers(4);

    final Random pushRand = new Random(1000L);
    final Random popRand = new Random(1000000L);
    final AtomicInteger pusherFinishCount = new AtomicInteger();
    final AtomicInteger numPops = new AtomicInteger();
    final AtomicLong counter = new AtomicLong();
    final AtomicLong pushSum = new AtomicLong();
    final AtomicLong popSum = new AtomicLong();

    channel.start();

    ExecutorService execSrvc = Executors.newFixedThreadPool(numPushers + numPoppers);

    Set<Future> futures = new HashSet<Future>();

    // start pushing
    for (int i = 0; i < numPushers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                for (int i = 0; i < numEntries; i++) {
                    try {
                        long x = counter.getAndIncrement();
                        pushSum.addAndGet(x);
                        ByteBuffer bb = ByteBuffer.wrap(new byte[entrySize]);
                        bb.putLong(x);

                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        MyEvent event1 = new MyEvent();
                        event1.addHeader("x", String.valueOf(x)).setBody(new byte[numEntries - 8]); // take out size of long
                        channel.put(event1);
                        tx.commit();
                        tx.close();

                        Thread.sleep(pushRand.nextInt(5));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
                pusherFinishCount.incrementAndGet();
            }
        });
        futures.add(future);
    }

    // start popping
    for (int i = 0; i < numPoppers; i++) {
        Future future = execSrvc.submit(new Runnable() {
            @Override
            public void run() {
                while (pusherFinishCount.get() < numPushers || !channel.isEmpty()) {
                    try {
                        Transaction tx = channel.getTransaction();
                        tx.begin();
                        Event event;
                        int count = popBatchSize;
                        while (null != (event = channel.take()) && count-- > 0) {
                            popSum.addAndGet(Long.valueOf(event.getHeaders().get("x")));
                            numPops.incrementAndGet();
                        }
                        tx.commit();
                        tx.close();

                        Thread.sleep(popRand.nextInt(10));
                    } catch (Exception e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        futures.add(future);
    }

    boolean finished = false;
    while (!finished) {
        try {
            for (Future f : futures) {
                f.get();
            }
            finished = true;
        } catch (InterruptedException e) {
            // ignore
            Thread.interrupted();
        }
    }

    assertThat(numPops.get(), is(numEntries * numPushers));
    assertThat(channel.isEmpty(), is(true));
    assertThat(pushSum.get(), is(popSum.get()));
}
From source file:io.undertow.server.handlers.proxy.LoadBalancingProxyHTTP2TestCase.java
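One hundred Callables are submitted to a ten-thread pool, each issuing an HTTP/2 request over a single shared client connection; completion is tracked through Undertow's FutureResult handles rather than the Futures returned by submit().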
@Test
public void testHttp2ClientMultipleStreamsThreadSafety() throws IOException, URISyntaxException,
        ExecutionException, InterruptedException, TimeoutException {
    // not actually a proxy test, but convenient to put it here
    UndertowXnioSsl ssl = new UndertowXnioSsl(DefaultServer.getWorker().getXnio(), OptionMap.EMPTY,
            DefaultServer.SSL_BUFFER_POOL, DefaultServer.createClientSslContext());
    final UndertowClient client = UndertowClient.getInstance();
    final ClientConnection connection = client.connect(
            new URI("https", null, DefaultServer.getHostAddress(), DefaultServer.getHostPort() + 1, "/", null,
                    null),
            DefaultServer.getWorker(), ssl, DefaultServer.getBufferPool(),
            OptionMap.create(UndertowOptions.ENABLE_HTTP2, true)).get();
    final ExecutorService service = Executors.newFixedThreadPool(10);
    try {
        Deque<FutureResult<String>> futures = new ArrayDeque<>();
        for (int i = 0; i < 100; ++i) {
            final FutureResult<String> future = new FutureResult<>();
            futures.add(future);
            service.submit(new Callable<String>() {
                @Override
                public String call() throws Exception {
                    ClientRequest cr = new ClientRequest().setMethod(Methods.GET).setPath("/path")
                            .setProtocol(Protocols.HTTP_1_1);
                    connection.sendRequest(cr, new ClientCallback<ClientExchange>() {
                        @Override
                        public void completed(ClientExchange result) {
                            result.setResponseListener(new ClientCallback<ClientExchange>() {
                                @Override
                                public void completed(ClientExchange result) {
                                    new StringReadChannelListener(DefaultServer.getBufferPool()) {
                                        @Override
                                        protected void stringDone(String string) {
                                            future.setResult(string);
                                        }

                                        @Override
                                        protected void error(IOException e) {
                                            future.setException(e);
                                        }
                                    }.setup(result.getResponseChannel());
                                }

                                @Override
                                public void failed(IOException e) {
                                    future.setException(e);
                                }
                            });
                        }

                        @Override
                        public void failed(IOException e) {
                            future.setException(e);
                        }
                    });
                    return null;
                }
            });
        }
        while (!futures.isEmpty()) {
            FutureResult<String> future = futures.poll();
            Assert.assertNotEquals(IoFuture.Status.WAITING,
                    future.getIoFuture().awaitInterruptibly(10, TimeUnit.SECONDS));
            Assert.assertEquals("/path", future.getIoFuture().get());
        }
    } finally {
        service.shutdownNow();
    }
}
From source file:es.upm.oeg.tools.quality.ldsniffer.eval.Evaluation.java
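Each IRI is checked on a five-thread pool by a lambda passed to submit(): the task issues a HEAD request (falling back to GET on 405/500/501 responses) and records the outcome, with shutdown() and awaitTermination() bounding the whole evaluation.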
public void calculateMetrics() {
    Model model = ModelFactory.createDefaultModel();
    //TODO try to limit the size of the file being loaded
    try {
        model.read(url);
    } catch (RiotException e) {
        model = RDFDataMgr.loadModel(url + ".rdf");
    } catch (Exception e) {
        throw new BadRequestException(
                String.format("Unable to read the content from the uri - %s \n(%s)", url, e.getMessage()), e);
    }

    totalTriples = QueryUtils.getCountAsC(model, TOTAL_TRIPLES);
    iriSubjects = QueryUtils.getIriList(model, DISTINCT_IRI_SUBJECTS);
    iriPredicates = QueryUtils.getIriList(model, DISTINCT_IRI_PREDICATES);
    iriObjects = QueryUtils.getIriList(model, DISTINCT_IRI_OBJECTS);

    iriSet.addAll(iriSubjects);
    iriSet.addAll(iriPredicates);
    iriSet.addAll(iriObjects);

    final ExecutorService executor = Executors.newFixedThreadPool(5);

    iriSet.forEach(iri -> {
        executor.submit(() -> {
            HttpResponse cachedResponse = Executor.getCachedResponse(iri);
            if (cachedResponse == null) {
                Date date = new Date();
                CloseableHttpClient httpclient = HttpClients.createDefault();
                HttpHead head = new HttpHead(iri);
                String method = "HEAD";

                try (CloseableHttpResponse response = httpclient.execute(head)) {
                    StatusLine statusLine = response.getStatusLine();
                    int statusCode = statusLine.getStatusCode();
                    if (statusCode == HttpStatus.SC_METHOD_NOT_ALLOWED
                            || statusCode == HttpStatus.SC_INTERNAL_SERVER_ERROR
                            || statusCode == HttpStatus.SC_NOT_IMPLEMENTED) {
                        HttpGet get = new HttpGet(iri);
                        method = "GET";
                        try (CloseableHttpResponse getResponse = httpclient.execute(get)) {
                            statusLine = getResponse.getStatusLine();
                            statusCode = statusLine.getStatusCode();
                        }
                    }
                    responseMap.put(iri,
                            new HttpResponse(iri, method, statusCode, statusLine.getReasonPhrase(), date, false));
                    if (statusCode >= 200 && statusCode < 300) {
                        derefIriCount.getAndIncrement();
                    }
                    Executor.putCachedResponse(iri,
                            new HttpResponse(iri, method, statusCode, statusLine.getReasonPhrase(), date, true));
                } catch (ConnectException e) {
                    logger.error("Connection timed out ...", e);
                    responseMap.put(iri, new HttpResponse(iri, method, -1, e.getMessage(), date, false));
                } catch (IOException e) {
                    logger.error("IO error occurred ..", e);
                    responseMap.put(iri, new HttpResponse(iri, method, -1, e.getMessage(), date, false));
                }
            } else {
                responseMap.put(iri, cachedResponse);
            }
        });
    });

    executor.shutdown();

    try {
        executor.awaitTermination(LDSnifferApp.getEvaluationTimeout(), TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new ServerError("Interrupted ...");
    }

    iriSubjects.forEach(subject -> incrementDerefCount(subject, derefIriSubjectCount));
    iriPredicates.forEach(predicate -> incrementDerefCount(predicate, derefIriPredicateCount));
    iriObjects.forEach(object -> incrementDerefCount(object, derefIriObjectCount));
}
From source file:com.quartzdesk.executor.core.job.LocalCommandExecutorJob.java
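Here submit() hands a Callable that drains a child process's stdout to a separate executor, so the job thread can block in Process.waitFor() without the pipe's buffer filling up; the captured output is then retrieved with Future.get().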
@Override
protected void executeJob(JobExecutionContext context) throws JobExecutionException {
    log.debug("Inside job: {}", context.getJobDetail().getKey());

    JobDataMap jobDataMap = context.getMergedJobDataMap();

    // command
    String command = jobDataMap.getString(JDM_KEY_COMMAND);
    if (command == null) {
        throw new JobExecutionException("Missing required '" + JDM_KEY_COMMAND + "' job data map parameter.");
    }

    // command arguments (optional)
    String commandArgs = jobDataMap.getString(JDM_KEY_COMMAND_ARGS);

    // command work directory (optional)
    String commandWorkDir = jobDataMap.getString(JDM_KEY_COMMAND_WORK_DIR);
    File commandWorkDirFile = null;
    if (commandWorkDir != null) {
        commandWorkDirFile = new File(commandWorkDir);
        if (!commandWorkDirFile.exists() || !commandWorkDirFile.isDirectory()) {
            throw new JobExecutionException("Command work directory '" + commandWorkDirFile.getAbsolutePath()
                    + "' specified in the '" + JDM_KEY_COMMAND_WORK_DIR
                    + "' job data map parameter does not exist.");
        }
    }

    // execute the command
    List<String> commandLine = prepareCommandLine(command, commandArgs);
    ProcessBuilder processBuilder = new ProcessBuilder(commandLine);
    processBuilder.redirectErrorStream(true);

    // set the process work directory if specified; otherwise the default work directory is used
    if (commandWorkDirFile != null) {
        processBuilder.directory(commandWorkDirFile);
    }

    // we could possibly set the process environment here
    //processBuilder.environment()

    try {
        log.info("Executing local command using command line: {}", commandLine);

        ExecutorService standardOutputExecutor = getProcessOutputExecutor(context);

        Process process = processBuilder.start();

        StandardOutputReaderCallable stdOutCallable = new StandardOutputReaderCallable(
                process.getInputStream());
        Future<String> stdOutDataFuture = standardOutputExecutor.submit(stdOutCallable);

        int exitCode = process.waitFor(); // wait for the process to finish
        log.debug("Local command finished with exit code: {}", exitCode);

        context.setResult(exitCode); // exit code is used as the job's execution result (visible in the QuartzDesk GUI)

        try {
            String output = stdOutDataFuture.get();
            if (StringUtils.isBlank(output)) {
                log.info("Local command produced no output.");
            } else {
                log.info("Local command produced the following output:{}{}", CommonConst.NL, output);
            }
        } catch (Exception e) { // CancellationException, ExecutionException, InterruptedException
            log.warn("Error getting process data.", e);
        }

        // if exitCode != 0, we typically want to throw JobExecutionException indicating a job execution failure
        if (exitCode != 0) {
            throw new JobExecutionException("Command finished with non-zero exit code: " + exitCode);
        }
    } catch (IOException e) {
        throw new JobExecutionException("Error starting command process.", e);
    } catch (InterruptedException e) {
        throw new JobExecutionException("Command process has been interrupted.", e);
    }
}
From source file:com.linkedin.pinot.integration.tests.UploadRefreshDeleteIntegrationTest.java
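Random segment uploads and deletes are submitted as Runnables each iteration; the test then relies on shutdown() and awaitTermination() instead of Futures to wait for completion before verifying row counts.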
@Test(enabled = false)
public void testUploadRefreshDelete() throws Exception {
    final int THREAD_COUNT = 1;
    final int SEGMENT_COUNT = 5;
    final int MIN_ROWS_PER_SEGMENT = 500;
    final int MAX_ROWS_PER_SEGMENT = 1000;
    final int OPERATIONS_PER_ITERATION = 10;
    final int ITERATION_COUNT = 5;
    final double UPLOAD_PROBABILITY = 0.8d;

    final String[] segmentNames = new String[SEGMENT_COUNT];
    final int[] segmentRowCounts = new int[SEGMENT_COUNT];
    for (int i = 0; i < SEGMENT_COUNT; i++) {
        segmentNames[i] = "segment_" + i;
        segmentRowCounts[i] = 0;
    }

    for (int i = 0; i < ITERATION_COUNT; i++) {
        // Create THREAD_COUNT threads
        ExecutorService executorService = Executors.newFixedThreadPool(THREAD_COUNT);

        // Submit OPERATIONS_PER_ITERATION uploads/deletes
        for (int j = 0; j < OPERATIONS_PER_ITERATION; j++) {
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    try {
                        ThreadLocalRandom random = ThreadLocalRandom.current();

                        // Pick a random segment
                        int segmentIndex = random.nextInt(SEGMENT_COUNT);
                        String segmentName = segmentNames[segmentIndex];

                        // Pick a random operation
                        if (random.nextDouble() < UPLOAD_PROBABILITY) {
                            // Upload this segment
                            LOGGER.info("Will upload segment {}", segmentName);

                            synchronized (segmentName) {
                                // Create a segment with a random number of rows
                                int segmentRowCount = random.nextInt(MIN_ROWS_PER_SEGMENT,
                                        MAX_ROWS_PER_SEGMENT);
                                LOGGER.info("Generating and uploading segment {} with {} rows", segmentName,
                                        segmentRowCount);
                                generateAndUploadRandomSegment(segmentName, segmentRowCount);

                                // Store the number of rows
                                LOGGER.info("Uploaded segment {} with {} rows", segmentName, segmentRowCount);
                                segmentRowCounts[segmentIndex] = segmentRowCount;
                            }
                        } else {
                            // Delete this segment
                            LOGGER.info("Will delete segment {}", segmentName);

                            synchronized (segmentName) {
                                // Delete this segment
                                LOGGER.info("Deleting segment {}", segmentName);
                                String reply = sendDeleteRequest(
                                        ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL)
                                                .forSegmentDelete("myresource", segmentName));
                                LOGGER.info("Deletion returned {}", reply);

                                // Set the number of rows to zero
                                LOGGER.info("Deleted segment {}", segmentName);
                                segmentRowCounts[segmentIndex] = 0;
                            }
                        }
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            });
        }

        // Wait for all tasks to complete
        executorService.shutdown();
        executorService.awaitTermination(5L, TimeUnit.MINUTES);

        // Count the number of expected rows
        int expectedRowCount = 0;
        for (int segmentRowCount : segmentRowCounts) {
            expectedRowCount += segmentRowCount;
        }

        // Wait for up to one minute for the row count to match the expected row count
        LOGGER.info("Awaiting for the row count to match {}", expectedRowCount);
        int pinotRowCount = (int) getCurrentServingNumDocs();
        long timeInOneMinute = System.currentTimeMillis() + 60 * 1000L;
        while (System.currentTimeMillis() < timeInOneMinute && pinotRowCount != expectedRowCount) {
            LOGGER.info("Row count is {}, expected {}, awaiting for row count to match", pinotRowCount,
                    expectedRowCount);
            Thread.sleep(5000L);

            try {
                pinotRowCount = (int) getCurrentServingNumDocs();
            } catch (Exception e) {
                LOGGER.warn("Caught exception while sending query to Pinot, retrying", e);
            }
        }

        // Compare row counts
        Assert.assertEquals(pinotRowCount, expectedRowCount,
                "Expected and actual row counts don't match after waiting one minute");
    }
}
From source file:com.chess.genesis.net.SyncClient.java
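Each needed game id gets its own NetworkClient task submitted directly to a cached pool, with a counter tracking the outstanding requests.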
private void sync_archive(final JSONObject json) {
    try {
        final ArrayList<String> list_need = getNeedList(json.getJSONArray("gameids"));
        final ExecutorService pool = Executors.newCachedThreadPool();

        for (final String item : list_need) {
            if (error)
                return;
            final NetworkClient nc = new NetworkClient(context, handle);
            nc.game_data(item);
            pool.submit(nc);
            lock++;
        }
    } catch (final JSONException e) {
        throw new RuntimeException(e.getMessage(), e);
    }
}