List of usage examples for java.util.concurrent CompletionService submit
Future<V> submit(Callable<V> task);
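Before the project excerpts below, a minimal, self-contained sketch of the basic submit/take pattern may help; the class name, pool size, and task bodies here are illustrative assumptions, not taken from any of the listed projects. submit hands each Callable to the underlying executor and, as tasks finish, their futures become available through take() in completion order rather than submission order.

import java.util.concurrent.*;

// Hypothetical demo class, not part of any project below.
public class CompletionServiceDemo {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        CompletionService<Integer> completionService = new ExecutorCompletionService<>(executor);

        // Submit several independent tasks; each submit() returns immediately with a Future.
        int taskCount = 10;
        for (int i = 0; i < taskCount; i++) {
            final int n = i;
            completionService.submit(() -> n * n);
        }

        // take() blocks until some task has finished, so results arrive as they complete.
        for (int i = 0; i < taskCount; i++) {
            Future<Integer> done = completionService.take();
            System.out.println("Result: " + done.get());
        }

        executor.shutdown();
    }
}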
From source file:com.mgmtp.jfunk.core.JFunk.java
/**
 * Executes the jFunk test. A thread pool ({@link ExecutorService}) is created with the number
 * of configured threads, which handles concurrent script execution.
 */
@Override
protected void doExecute() throws Exception {
    ExecutorService execService = createExecutorService();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<>(execService);

    for (final File script : scripts) {
        completionService.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() {
                boolean success = false;
                StopWatch stopWatch = new StopWatch();
                stopWatch.start();
                RESULT_LOG.info("Thread " + Thread.currentThread().getName()
                        + ": starting execution of script " + script.getName());
                try {
                    success = scriptExecutor.executeScript(script, scriptProperties);
                } catch (Throwable th) {
                    LOG.error(th.getMessage(), th);
                } finally {
                    LOG.info("SCRIPT EXECUTION " + (success ? "SUCCESSFUL" : "FAILED") + " (" + script + ")");
                    RESULT_LOG.info("Thread " + Thread.currentThread().getName()
                            + ": finished execution of script " + script.getName()
                            + " (took " + stopWatch + " H:mm:ss.SSS)");
                }
                return success;
            }
        });
    }

    boolean overallResult = true;
    for (int i = 0, size = scripts.size(); i < size; ++i) {
        if (!completionService.take().get()) {
            overallResult = false;
        }
    }
    shutDownExecutorService(execService);

    if (!overallResult) {
        throw new JFunkExecutionException();
    }
}
From source file:com.laudandjolynn.mytv.proxy.MyTvProxyManager.java
public void prepareProxies(ProxyProvider... providers) {
    int length = providers == null ? 0 : providers.length;
    if (length > 0) {
        int maxThreadNum = Constant.CPU_PROCESSOR_NUM;
        ThreadFactory threadFactory = new BasicThreadFactory.Builder()
                .namingPattern("MyTv_Find_Proxies_%d").build();
        ExecutorService executorService = Executors
                .newFixedThreadPool(length > maxThreadNum ? maxThreadNum : length, threadFactory);
        CompletionService<List<Proxy>> completionService = new ExecutorCompletionService<List<Proxy>>(
                executorService);
        providerList.clear();
        for (int i = 0; i < length; i++) {
            final ProxyProvider provider = providers[i];
            providerList.add(provider);
            completionService.submit(new Callable<List<Proxy>>() {
                @Override
                public List<Proxy> call() throws Exception {
                    return provider.getProxies();
                }
            });
        }
        executorService.shutdown();

        int count = 0;
        List<Proxy> resultList = new ArrayList<Proxy>();
        while (count < length) {
            try {
                Future<List<Proxy>> future = completionService.take();
                List<Proxy> proxies = future.get();
                if (proxies != null) {
                    resultList.addAll(proxies);
                }
            } catch (InterruptedException e) {
                logger.error("get proxies thread was interrupted.", e);
            } catch (ExecutionException e) {
                logger.error("get proxies task failed.", e);
            }
            count++;
        }
        resultList.add(LOCALHOST_PROXY);
        PROXY_QUEUE.clear();
        PROXY_QUEUE.addAll(resultList);
    }
}
From source file:org.springframework.integration.jdbc.store.channel.AbstractTxTimeoutMessageStoreTests.java
public void testInt2993IdCacheConcurrency() throws InterruptedException, ExecutionException {
    final String groupId = "testInt2993Group";
    for (int i = 0; i < 100; i++) {
        this.jdbcChannelMessageStore.addMessageToGroup(groupId,
                new GenericMessage<String>("testInt2993Message"));
    }

    ExecutorService executorService = Executors.newCachedThreadPool();
    CompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(executorService);

    final int concurrency = 5;
    final TransactionTemplate transactionTemplate = new TransactionTemplate(transactionManager);

    for (int i = 0; i < concurrency; i++) {
        completionService.submit(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                for (int i = 0; i < 100; i++) {
                    boolean result = transactionTemplate.execute(new TransactionCallback<Boolean>() {
                        @Override
                        public Boolean doInTransaction(TransactionStatus status) {
                            Message<?> message = null;
                            try {
                                message = jdbcChannelMessageStore.pollMessageFromGroup(groupId);
                            } catch (Exception e) {
                                log.error("IdCache race condition.", e);
                                return false;
                            }
                            try {
                                Thread.sleep(10);
                            } catch (InterruptedException e) {
                                log.error(e);
                            }
                            if (message != null) {
                                jdbcChannelMessageStore
                                        .removeFromIdCache(message.getHeaders().getId().toString());
                            }
                            return true;
                        }
                    });
                    if (!result) {
                        return false;
                    }
                }
                return true;
            }
        });
    }

    for (int j = 0; j < concurrency; j++) {
        assertTrue(completionService.take().get());
    }

    executorService.shutdown();
    assertTrue(executorService.awaitTermination(5, TimeUnit.SECONDS));
}
From source file:org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.TestContainerLocalizer.java
@SuppressWarnings("unchecked") // mocked generics private ContainerLocalizer setupContainerLocalizerForTest() throws Exception { // don't actually create dirs doNothing().when(spylfs).mkdir(isA(Path.class), isA(FsPermission.class), anyBoolean()); Configuration conf = new Configuration(); FileContext lfs = FileContext.getFileContext(spylfs, conf); localDirs = new ArrayList<Path>(); for (int i = 0; i < 4; ++i) { localDirs.add(lfs.makeQualified(new Path(basedir, i + ""))); }/* w ww .j a v a 2 s . c o m*/ RecordFactory mockRF = getMockLocalizerRecordFactory(); ContainerLocalizer concreteLoc = new ContainerLocalizer(lfs, appUser, appId, containerId, localDirs, mockRF, appUserFolder); ContainerLocalizer localizer = spy(concreteLoc); // return credential stream instead of opening local file random = new Random(); long seed = random.nextLong(); System.out.println("SEED: " + seed); random.setSeed(seed); DataInputBuffer appTokens = createFakeCredentials(random, 10); tokenPath = lfs.makeQualified(new Path(String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT, containerId))); doReturn(new FSDataInputStream(new FakeFSDataInputStream(appTokens))).when(spylfs).open(tokenPath); nmProxy = mock(LocalizationProtocol.class); doReturn(nmProxy).when(localizer).getProxy(nmAddr); doNothing().when(localizer).sleep(anyInt()); // return result instantly for deterministic test ExecutorService syncExec = mock(ExecutorService.class); CompletionService<Path> cs = mock(CompletionService.class); when(cs.submit(isA(Callable.class))).thenAnswer(new Answer<Future<Path>>() { @Override public Future<Path> answer(InvocationOnMock invoc) throws Throwable { Future<Path> done = mock(Future.class); when(done.isDone()).thenReturn(true); FakeDownload d = (FakeDownload) invoc.getArguments()[0]; when(done.get()).thenReturn(d.call()); return done; } }); doReturn(syncExec).when(localizer).createDownloadThreadPool(); doReturn(cs).when(localizer).createCompletionService(syncExec); return localizer; }
From source file:org.apache.syncope.core.provisioning.java.propagation.PriorityPropagationTaskExecutor.java
@Override
protected void doExecute(final Collection<PropagationTask> tasks, final PropagationReporter reporter,
        final boolean nullPriorityAsync) {

    List<PropagationTask> prioritizedTasks = CollectionUtils.select(tasks, new Predicate<PropagationTask>() {
        @Override
        public boolean evaluate(final PropagationTask task) {
            return task.getResource().getPropagationPriority() != null;
        }
    }, new ArrayList<PropagationTask>());
    Collections.sort(prioritizedTasks, new PriorityComparator());
    LOG.debug("Propagation tasks sorted by priority, for serial execution: {}", prioritizedTasks);

    Collection<PropagationTask> concurrentTasks = CollectionUtils.subtract(tasks, prioritizedTasks);
    LOG.debug("Propagation tasks for concurrent execution: {}", concurrentTasks);

    // first process priority resources sequentially and fail as soon as any propagation failure is reported
    for (PropagationTask task : prioritizedTasks) {
        TaskExec execution = null;
        PropagationTaskExecStatus execStatus;
        try {
            execution = newPropagationTaskCallable(task, reporter).call();
            execStatus = PropagationTaskExecStatus.valueOf(execution.getStatus());
        } catch (Exception e) {
            LOG.error("Unexpected exception", e);
            execStatus = PropagationTaskExecStatus.FAILURE;
        }
        if (execStatus != PropagationTaskExecStatus.SUCCESS) {
            throw new PropagationException(task.getResource().getKey(),
                    execution == null ? null : execution.getMessage());
        }
    }

    // then process non-priority resources concurrently...
    final CompletionService<TaskExec> completionService = new ExecutorCompletionService<>(executor);
    Map<PropagationTask, Future<TaskExec>> nullPriority = new HashMap<>(concurrentTasks.size());
    for (PropagationTask task : concurrentTasks) {
        try {
            nullPriority.put(task, completionService.submit(newPropagationTaskCallable(task, reporter)));
        } catch (Exception e) {
            LOG.error("Unexpected exception", e);
        }
    }

    // ...waiting for all callables to complete, if async processing was not required
    if (!nullPriority.isEmpty()) {
        if (nullPriorityAsync) {
            for (Map.Entry<PropagationTask, Future<TaskExec>> entry : nullPriority.entrySet()) {
                reporter.onSuccessOrNonPriorityResourceFailures(entry.getKey(),
                        PropagationTaskExecStatus.CREATED, null, null, null);
            }
        } else {
            final Set<Future<TaskExec>> nullPriorityFutures = new HashSet<>(nullPriority.values());
            try {
                executor.submit(new Runnable() {
                    @Override
                    public void run() {
                        while (!nullPriorityFutures.isEmpty()) {
                            try {
                                nullPriorityFutures.remove(completionService.take());
                            } catch (Exception e) {
                                LOG.error("Unexpected exception", e);
                            }
                        }
                    }
                }).get(60, TimeUnit.SECONDS);
            } catch (Exception e) {
                LOG.error("Unexpected exception", e);
            } finally {
                for (Future<TaskExec> future : nullPriorityFutures) {
                    future.cancel(true);
                }
                nullPriorityFutures.clear();
                nullPriority.clear();
            }
        }
    }
}
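One design detail worth noting in the non-priority branch above: instead of calling get() with a timeout on each Future individually, the code submits a single watchdog Runnable that drains the completion service, removing each Future that take() hands back from the pending set; the whole wait is then bounded by get(60, TimeUnit.SECONDS) on that watchdog, and anything still outstanding when the timeout fires is cancelled in the finally block. This works because ExecutorCompletionService.take() returns the same Future instances that submit() returned.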
From source file:com.opengamma.integration.viewer.status.impl.ViewStatusCalculationWorker.java
public ViewStatusResultAggregator run() {
    ViewStatusResultAggregator aggregator = new ViewStatusResultAggregatorImpl();
    CompletionService<PerViewStatusResult> completionService = new ExecutorCompletionService<PerViewStatusResult>(
            _executor);

    // submit tasks to the executor, partitioned by security type
    for (String securityType : _valueRequirementBySecType.keySet()) {
        Collection<String> valueRequirements = _valueRequirementBySecType.get(securityType);
        completionService.submit(new ViewStatusCalculationTask(_toolContext, _portfolioId, _user,
                securityType, valueRequirements, _marketDataSpecification));
    }
    try {
        // process each task as it completes
        for (int i = 0; i < _valueRequirementBySecType.size(); i++) {
            Future<PerViewStatusResult> futureTask = completionService.take();
            PerViewStatusResult perViewStatusResult = futureTask.get();
            for (ViewStatusKey viewStatusKey : perViewStatusResult.keySet()) {
                aggregator.putStatus(viewStatusKey, perViewStatusResult.get(viewStatusKey));
            }
        }
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
    } catch (ExecutionException ex) {
        throw new OpenGammaRuntimeException("Error running View status report", ex.getCause());
    }
    return aggregator;
}
From source file:pl.edu.icm.cermine.libsvm.SVMParameterFinder.java
public void run(String inputFile, String ext, int threads, int kernel, int degree)
        throws AnalysisException, IOException, TransformationException, CloneNotSupportedException,
        InterruptedException, ExecutionException {
    List<TrainingSample<BxZoneLabel>> samples = getSamples(inputFile, ext);

    // size the pool from the threads parameter
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    CompletionService<EvaluationParams> completionService = new ExecutorCompletionService<EvaluationParams>(
            executor);

    double bestRate = 0;
    int bestclog = 0;
    int bestglog = 0;

    int submitted = 0;
    for (int clog = -5; clog <= 15; clog++) {
        for (int glog = 3; glog >= -15; glog--) {
            completionService.submit(new Evaluator(samples, new EvaluationParams(clog, glog), kernel, degree));
            submitted++;
        }
    }

    while (submitted > 0) {
        Future<EvaluationParams> f1 = completionService.take();
        EvaluationParams p = f1.get();
        if (p.rate > bestRate) {
            bestRate = p.rate;
            bestclog = p.clog;
            bestglog = p.glog;
        }
        System.out.println("Gamma: " + p.glog + ", C: " + p.clog + ", rate: " + p.rate
                + " (Best: " + bestglog + " " + bestclog + " " + bestRate + ")");
        submitted--;
    }
    executor.shutdown();
}
From source file:pl.edu.icm.cermine.libsvm.parameters.SVMParameterFinder.java
public void run(String inputFile, String ext, int threads, int kernel, int degree, int minc, int maxc,
        int ming, int maxg) throws AnalysisException, InterruptedException, ExecutionException {
    List<TrainingSample<BxZoneLabel>> samples = getSamples(inputFile, ext);

    // size the pool from the threads parameter
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    CompletionService<EvaluationParams> completionService = new ExecutorCompletionService<EvaluationParams>(
            executor);

    double bestRate = 0;
    int bestclog = 0;
    int bestglog = 0;

    int submitted = 0;
    for (int clog = minc; clog <= maxc; clog++) {
        for (int glog = maxg; glog >= ming; glog--) {
            completionService.submit(new Evaluator(samples, new EvaluationParams(clog, glog), kernel, degree));
            submitted++;
        }
    }

    while (submitted > 0) {
        Future<EvaluationParams> f1 = completionService.take();
        EvaluationParams p = f1.get();
        if (p.rate > bestRate) {
            bestRate = p.rate;
            bestclog = p.clog;
            bestglog = p.glog;
        }
        System.out.println("Gamma: " + p.glog + ", C: " + p.clog + ", rate: " + p.rate
                + " (Best: " + bestglog + " " + bestclog + " " + bestRate + ")");
        submitted--;
    }
    executor.shutdown();
}
From source file:net.arp7.HdfsPerfTest.WriteFile.java
private static void writeFiles(final Configuration conf, final FileIoStats stats)
        throws InterruptedException, IOException {
    final FileSystem fs = FileSystem.get(conf);
    final AtomicLong filesLeft = new AtomicLong(params.getNumFiles());
    final long runId = abs(rand.nextLong());
    final byte[] data = new byte[params.getIoSize()];
    Arrays.fill(data, (byte) 65);

    // Start the writers.
    final ExecutorService executor = Executors.newFixedThreadPool((int) params.getNumThreads());
    final CompletionService<Object> ecs = new ExecutorCompletionService<>(executor);
    LOG.info("NumFiles=" + params.getNumFiles() + ", FileSize="
            + FileUtils.byteCountToDisplaySize(params.getFileSize()) + ", IoSize="
            + FileUtils.byteCountToDisplaySize(params.getIoSize()) + ", BlockSize="
            + FileUtils.byteCountToDisplaySize(params.getBlockSize()) + ", ReplicationFactor="
            + params.getReplication() + ", isThrottled=" + (params.maxWriteBps() > 0));
    LOG.info("Starting " + params.getNumThreads() + " writer thread"
            + (params.getNumThreads() > 1 ? "s" : "") + ".");

    final long startTime = System.nanoTime();
    for (long t = 0; t < params.getNumThreads(); ++t) {
        final long threadIndex = t;
        Callable<Object> c = new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                long fileIndex = 0;
                while (filesLeft.addAndGet(-1) >= 0) {
                    final String fileName = "WriteFile-" + runId + "-" + (threadIndex + 1) + "-"
                            + (++fileIndex);
                    writeOneFile(new Path(params.getOutputDir(), fileName), fs, data, stats);
                }
                return null;
            }
        };
        ecs.submit(c);
    }

    // And wait for all writers to complete.
    for (long t = 0; t < params.getNumThreads(); ++t) {
        ecs.take();
    }
    final long endTime = System.nanoTime();
    stats.setElapsedTime(endTime - startTime);
    executor.shutdown();
}
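A subtlety in the wait loop above: each ecs.take() blocks until some writer finishes, but the Future it returns is discarded, so an exception thrown inside call() is never rethrown and a failed writer is indistinguishable from a successful one here. Calling get() on each taken Future (and handling or declaring ExecutionException) would propagate the first writer failure instead.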
From source file:io.prestosql.plugin.accumulo.index.ColumnCardinalityCache.java
/**
 * Gets the cardinality for each {@link AccumuloColumnConstraint}.
 * Given constraints are expected to be indexed.
 *
 * @param schema Schema name
 * @param table Table name
 * @param auths Scan authorizations
 * @param idxConstraintRangePairs Mapping of all ranges for a given constraint
 * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete
 * @param pollingDuration Duration for polling the cardinality completion service
 * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest
 * @throws TableNotFoundException If the metrics table does not exist
 * @throws ExecutionException If an error occurs while fetching a cardinality
 */
public Multimap<Long, AccumuloColumnConstraint> getCardinalities(String schema, String table,
        Authorizations auths, Multimap<AccumuloColumnConstraint, Range> idxConstraintRangePairs,
        long earlyReturnThreshold, Duration pollingDuration) {
    // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary
    CompletionService<Pair<Long, AccumuloColumnConstraint>> executor = new ExecutorCompletionService<>(
            executorService);
    idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> {
        long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(),
                value);
        LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality);
        return Pair.of(cardinality, key);
    }));

    // Create a multimap sorted by cardinality
    ListMultimap<Long, AccumuloColumnConstraint> cardinalityToConstraints = MultimapBuilder.treeKeys()
            .arrayListValues().build();
    try {
        boolean earlyReturn = false;
        int numTasks = idxConstraintRangePairs.asMap().entrySet().size();
        do {
            // Sleep for the polling duration to allow concurrent tasks to run for this time
            Thread.sleep(pollingDuration.toMillis());

            // Poll each task, retrieving the result if it is done
            for (int i = 0; i < numTasks; ++i) {
                Future<Pair<Long, AccumuloColumnConstraint>> futureCardinality = executor.poll();
                if (futureCardinality != null && futureCardinality.isDone()) {
                    Pair<Long, AccumuloColumnConstraint> columnCardinality = futureCardinality.get();
                    cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight());
                }
            }

            // If the smallest cardinality is present and below the threshold, set the earlyReturn flag
            Optional<Entry<Long, AccumuloColumnConstraint>> smallestCardinality = cardinalityToConstraints
                    .entries().stream().findFirst();
            if (smallestCardinality.isPresent()) {
                if (smallestCardinality.get().getKey() <= earlyReturnThreshold) {
                    LOG.info("Cardinality %s is below the threshold; returning early while other tasks finish",
                            smallestCardinality);
                    earlyReturn = true;
                }
            }
        }
        while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks);
    }
    catch (ExecutionException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            Thread.currentThread().interrupt();
        }
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e);
    }

    // Return an immutable copy of the cardinality mapping
    return ImmutableMultimap.copyOf(cardinalityToConstraints);
}
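The example above emulates a timed wait by sleeping for pollingDuration and then draining the queue with the non-blocking poll(). CompletionService also offers a timed poll(long, TimeUnit) that blocks for at most the given duration and returns null on timeout; below is a minimal sketch of that variant, where the class name, task delays, and timeout are illustrative assumptions rather than code from the project above.

import java.util.concurrent.*;

// Hypothetical demo class showing the timed-poll variant.
public class TimedPollDemo {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        CompletionService<Long> completionService = new ExecutorCompletionService<>(executor);

        // Illustrative tasks with varying latency.
        for (long delay : new long[] { 50, 150, 400 }) {
            completionService.submit(() -> {
                Thread.sleep(delay);
                return delay;
            });
        }

        int received = 0;
        while (received < 3) {
            // Timed poll: returns null if no task completes within 100 ms,
            // letting the caller do other work (or return early) between waits.
            Future<Long> done = completionService.poll(100, TimeUnit.MILLISECONDS);
            if (done == null) {
                System.out.println("nothing finished yet; caller could do bookkeeping here");
                continue;
            }
            System.out.println("task took " + done.get() + " ms");
            received++;
        }
        executor.shutdown();
    }
}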