List of usage examples for java.util.concurrent Future.get()
V get() throws InterruptedException, ExecutionException;
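Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: submit a Callable to an ExecutorService and block on Future.get(), handling InterruptedException and ExecutionException. The class name, pool size, and task body below are illustrative assumptions, not taken from any of the projects listed on this page.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FutureGetSketch {
    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        // Submit a task; the returned Future completes when the Callable returns or throws.
        Future<Integer> future = executor.submit(new Callable<Integer>() {
            @Override
            public Integer call() {
                return 6 * 7; // placeholder computation
            }
        });
        try {
            // get() blocks the calling thread until the result is available.
            Integer result = future.get();
            System.out.println("Result: " + result);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can still observe it.
            Thread.currentThread().interrupt();
        } catch (ExecutionException e) {
            // The task itself failed; the original error is available as the cause.
            e.getCause().printStackTrace();
        } finally {
            executor.shutdown();
        }
    }
}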
From source file:com.vmware.photon.controller.api.client.resource.TasksRestApi.java
/**
 * Get task details synchronously.
 *
 * @param taskId
 * @return
 * @throws IOException
 */
@Override
public Task getTask(final String taskId) throws IOException {
    String path = getBasePath() + "/" + taskId;

    Future<HttpResponse> response = this.restClient.performAsync(Method.GET, path, null /* payload */,
            null /* callback */);

    try {
        return parseGetTaskHttpResponse(response.get());
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
From source file:com.ras.updater.Downloader.java
/**
 * This method will check for updates on all {@link #m_fileProviders} and download anything with an update.
 *
 * @return true if at least one file was updated or false if no files were updated
 */
public boolean update() {
    ArrayList<Future<Boolean>> results = new ArrayList<Future<Boolean>>();
    ExecutorService es = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
    for (IFileProvider fileProvider : m_fileProviders) {
        FileUpdaterCallable task = new FileUpdaterCallable(fileProvider);
        results.add(es.submit(task));
    }
    es.shutdown();
    try {
        if (!es.awaitTermination(m_downloadTimeout, m_downloadTimeUnit))
            es.shutdownNow();
    } catch (InterruptedException e) {
        m_statusCallback.handleError(e);
        es.shutdownNow();
        Thread.currentThread().interrupt();
    }

    // Loop through the results for update values
    for (Future<Boolean> result : results) {
        try {
            if (result.isDone() && result.get() != null && result.get())
                return true;
        } catch (InterruptedException e) {
            // This should never happen
            m_statusCallback.handleError(e);
        } catch (ExecutionException e) {
            m_statusCallback.handleError(e);
        }
    }
    return false;
}
From source file:com.amazonaws.services.kinesis.multilang.MessageReaderTest.java
@Test
public void noMoreMessagesTest() throws InterruptedException {
    InputStream stream = new ByteArrayInputStream("".getBytes());
    MessageReader reader = new MessageReader().initialize(stream, shardId, new ObjectMapper(),
            Executors.newCachedThreadPool());

    Future<Message> future = reader.getNextMessageFromSTDOUT();

    try {
        future.get();
        Assert.fail("There should have been an execution exception if there were no more messages to get.");
    } catch (ExecutionException e) {
        // Good path.
    }
}
From source file:com.subgraph.vega.internal.http.requests.HttpRequestEngine.java
@Override
public IHttpResponse sendRequest(HttpUriRequest request, HttpContext context) throws RequestEngineException {
    final HttpContext requestContext = (context == null) ? (new BasicHttpContext()) : (context);
    requestContext.setAttribute(ClientContext.COOKIE_STORE, config.getCookieStore());
    Future<IHttpResponse> future = executor
            .submit(new RequestTask(client, rateLimit, request, requestContext, config, htmlParser));
    try {
        return future.get();
    } catch (InterruptedException e) {
        logger.info("Request " + request.getURI() + " was interrupted before completion");
    } catch (ExecutionException e) {
        throw translateException(request, e.getCause());
    }
    return null;
}
From source file:de.zib.gndms.infra.system.DummyTaskActionTest.java
@Test(groups = { "db", "sys", "action", "task" }) @SuppressWarnings({ "FeatureEnvy", "MagicNumber" }) public void runTwoDummyActions() throws ExecutionException, InterruptedException, ResourceException { synchronized (lock) { eraseDatabase();/* w w w . ja v a 2 s. c o m*/ runDatabase(); final EntityManager em = getSys().getEntityManagerFactory().createEntityManager(); final DummyTaskAction action = new DummyTaskAction(em, createInitialTask(nextUUID())); // action.setClosingEntityManagerOnCleanup(false); action.setSuccessRate(1.0d); action.setSleepInProgress(4000L); final Future<AbstractTask> serializableFuture = getSys().submitAction(action, log); final EntityManager em2 = getSys().getEntityManagerFactory().createEntityManager(); final DummyTaskAction action2 = new DummyTaskAction(em2, createInitialTask(nextUUID())); action2.setSleepInProgress(4000L); action2.setSuccessRate(0.0d); // action2.setClosingEntityManagerOnCleanup(false); final Future<AbstractTask> serializableFuture2 = getSys().submitAction(action2, log); assert serializableFuture2.get().getState().equals(TaskState.FAILED); assert serializableFuture.get().getState().equals(TaskState.FINISHED); shutdownDatabase(); } }
From source file:com.clustercontrol.monitor.util.EventSearchRunUtil.java
public Map<String, ViewListInfo> searchInfo(List<String> managerList, String facilityId, EventFilterInfo filter,
        int messages) {
    Map<String, ViewListInfo> dispDataMap = new ConcurrentHashMap<>();
    Map<String, String> errMsgs = new ConcurrentHashMap<>();
    long start = System.currentTimeMillis();

    try {
        String threadName = Thread.currentThread().getName() + "-EventSearch";
        List<EventSearchTask> searchList = new ArrayList<EventSearchTask>();
        for (String managerName : managerList) {
            EventSearchTask task = null;
            task = new EventSearchTask(threadName, managerName, facilityId, filter, messages,
                    ContextProvider.getContext());
            searchList.add(task);
        }

        List<Future<Map<String, List<?>>>> list = getExecutorService().invokeAll(searchList);
        for (Future<Map<String, List<?>>> future : list) {
            if (future == null || future.get() == null) {
                continue;
            }
            Map<String, List<?>> map = future.get();
            for (Map.Entry<String, List<?>> entry : map.entrySet()) {
                String managerName = entry.getKey();
                List<?> ret = entry.getValue();

                if (ret.get(POS_INFO) != null && ret.get(POS_INFO) instanceof ViewListInfo) {
                    ViewListInfo infoList = (ViewListInfo) ret.get(POS_INFO);
                    dispDataMap.put(managerName, infoList);
                }
                if (ret.get(POS_ERROR) != null && ret.get(POS_ERROR) instanceof String) {
                    String err = (String) ret.get(POS_ERROR);
                    errMsgs.put(managerName, err);
                }
            }
        }
    } catch (InterruptedException e) {
        m_log.error(e.getMessage() + e.getClass().getName());
    } catch (ExecutionException e) {
        m_log.error(e.getMessage() + e.getClass().getName());
    }

    if (0 < errMsgs.size()) {
        UIManager.showMessageBox(errMsgs, true);
    }

    long end = System.currentTimeMillis();
    m_log.debug("time=" + (end - start));

    return dispDataMap;
}
From source file:io.ecarf.core.cloud.task.processor.reason.phase3.DoReasonTask10.java
@Override
protected int inferAndSaveTriplesToFile(QueryStats stats, Set<Long> productiveTerms, int processors,
        Set<String> inferredTriplesFiles) throws IOException {

    log.info("********************** Starting Inference Round **********************");

    int inferredTriples = 0;
    boolean compressed = stats.getTotalRows().intValue() > this.ddLimit;

    List<String> files = stats.getOutputFiles();

    if (!files.isEmpty()) {

        String outFile;

        if ((processors == 1) || (files.size() == 1)) {
            // if one file or one processor then reason serially
            for (String file : files) {
                outFile = file + Constants.DOT_INF;

                int inferred = ReasonUtils.reason(file, outFile, compressed, schemaTerms, productiveTerms,
                        duplicatesBuster);

                if (inferred > 0) {
                    inferredTriplesFiles.add(outFile);
                }
                inferredTriples += inferred;
            }

        } else {
            // multiple cores
            List<ReasonSubTask> tasks = new ArrayList<>();

            for (String file : files) {
                tasks.add(new ReasonSubTask(compressed, file, schemaTerms, duplicatesBuster));
            }

            try {
                List<Future<ReasonResult>> results = executor.invokeAll(tasks);

                for (Future<ReasonResult> result : results) {
                    ReasonResult reasonResult = result.get();

                    outFile = reasonResult.getOutFile();
                    int inferred = reasonResult.getInferred();

                    productiveTerms.addAll(reasonResult.getProductiveTerms());

                    if (inferred > 0) {
                        inferredTriplesFiles.add(outFile);
                    }
                    inferredTriples += inferred;
                }

            } catch (Exception e) {
                log.error("Failed to run reasoning job in parallel", e);
                executor.shutdown();
                throw new IOException(e);
            }
        }
    }

    log.info("Total Rows: " + stats.getTotalRows() + ", Total Processed Bytes: "
            + stats.getTotalProcessedGBytes() + " GB" + ", Inferred: " + inferredTriples + ", compressed = "
            + compressed + ", out files: " + inferredTriplesFiles.size());

    log.info("********************** Completed Inference Round **********************");

    return inferredTriples;
}
From source file:org.robobninjas.riemann.spring.RiemannIT.java
private boolean sendWithAck() {
    final Future<Boolean> isOk = tcpConnection.sendWithAck(createEvent());
    try {
        return isOk.get();
    } catch (InterruptedException e) {
        throw Throwables.propagate(e);
    } catch (ExecutionException e) {
        throw Throwables.propagate(e);
    }
}
From source file:ch.cyberduck.core.s3.S3MultipartCopyFeature.java
@Override
protected void copy(final Path source, final S3Object destination, final TransferStatus status)
        throws BackgroundException {
    try {
        final List<MultipartPart> completed = new ArrayList<MultipartPart>();
        // ID for the initiated multipart upload.
        final MultipartUpload multipart = session.getClient().multipartStartUpload(destination.getBucketName(),
                destination);
        if (log.isDebugEnabled()) {
            log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(),
                    multipart.getUploadId()));
        }
        final long size = status.getLength();
        long remaining = size;
        long offset = 0;
        final List<Future<MultipartPart>> parts = new ArrayList<Future<MultipartPart>>();
        for (int partNumber = 1; remaining > 0; partNumber++) {
            // Last part can be less than 5 MB. Adjust part size.
            final Long length = Math.min(
                    Math.max((size / S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS), partsize), remaining);
            // Submit to queue
            parts.add(this.submit(source, multipart, partNumber, offset, length));
            remaining -= length;
            offset += length;
        }
        for (Future<MultipartPart> future : parts) {
            try {
                completed.add(future.get());
            } catch (InterruptedException e) {
                log.error("Part upload failed with interrupt failure");
                throw new ConnectionCanceledException(e);
            } catch (ExecutionException e) {
                log.warn(String.format("Part upload failed with execution failure %s", e.getMessage()));
                if (e.getCause() instanceof BackgroundException) {
                    throw (BackgroundException) e.getCause();
                }
                throw new BackgroundException(e.getCause());
            }
        }
        // Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
        // could take several minutes to complete. Because a request could fail after the initial 200 OK response
        // has been sent, it is important that you check the response body to determine whether the request succeeded.
        final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
        if (log.isDebugEnabled()) {
            log.debug(String.format("Completed multipart upload for %s with checksum %s",
                    complete.getObjectKey(), complete.getEtag()));
        }
    } catch (ServiceException e) {
        throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source);
    } finally {
        pool.shutdown(false);
    }
}
From source file:org.createnet.raptor.indexer.impl.es.ElasticSearchIndexAdmin.java
public boolean exists(String name) {
    Future<IndicesExistsResponse> req = getClient().admin().indices().exists(new IndicesExistsRequest(name));
    IndicesExistsResponse res;
    try {
        res = req.get();
    } catch (InterruptedException | ExecutionException ex) {
        throw new IndexAdminException(ex);
    }
    logger.debug("Index {} does {} exists", name, res.isExists() ? "" : "not");
    return res.isExists();
}