List of usage examples for java.util.concurrent.CompletableFuture.supplyAsync(Supplier, Executor)
public static <U> CompletableFuture<U> supplyAsync(Supplier<U> supplier, Executor executor)
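Before the project-specific examples below, here is a minimal, self-contained sketch of this two-argument overload: the supplier runs on the given executor rather than on the common ForkJoinPool, and the returned future completes with the supplier's result. The class name, task body, and pool size are illustrative only and are not taken from any of the projects below.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SupplyAsyncSketch {
    public static void main(String[] args) {
        // Dedicated pool: the supplier runs here instead of ForkJoinPool.commonPool().
        ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            CompletableFuture<String> greeting = CompletableFuture.supplyAsync(
                    () -> "hello from " + Thread.currentThread().getName(), executor);
            // join() waits for completion and returns the value (or throws CompletionException).
            System.out.println(greeting.join());
        } finally {
            executor.shutdown();
        }
    }
}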
From source file:org.apache.flink.runtime.blob.BlobServerPutTest.java
/**
 * [FLINK-6020]
 * Tests that concurrent put operations will only upload the file once to the {@link BlobStore}
 * and that the files are not corrupt at any time.
 *
 * @param jobId
 *         job ID to use (or <tt>null</tt> if job-unrelated)
 * @param blobType
 *         whether the BLOB should become permanent or transient
 */
private void testConcurrentPutOperations(@Nullable final JobID jobId, final BlobKey.BlobType blobType)
        throws IOException, InterruptedException, ExecutionException {
    final Configuration config = new Configuration();
    config.setString(BlobServerOptions.STORAGE_DIRECTORY, temporaryFolder.newFolder().getAbsolutePath());

    BlobStore blobStore = mock(BlobStore.class);

    int concurrentPutOperations = 2;
    int dataSize = 1024;

    final CountDownLatch countDownLatch = new CountDownLatch(concurrentPutOperations);
    final byte[] data = new byte[dataSize];

    ArrayList<CompletableFuture<BlobKey>> allFutures = new ArrayList<>(concurrentPutOperations);

    ExecutorService executor = Executors.newFixedThreadPool(concurrentPutOperations);

    try (final BlobServer server = new BlobServer(config, blobStore)) {
        server.start();

        for (int i = 0; i < concurrentPutOperations; i++) {
            CompletableFuture<BlobKey> putFuture = CompletableFuture.supplyAsync(() -> {
                try {
                    BlockingInputStream inputStream = new BlockingInputStream(countDownLatch, data);
                    BlobKey uploadedKey = put(server, jobId, inputStream, blobType);
                    // check the uploaded file's contents (concurrently)
                    verifyContents(server, jobId, uploadedKey, data);
                    return uploadedKey;
                } catch (IOException e) {
                    throw new CompletionException(new FlinkException("Could not upload blob.", e));
                }
            }, executor);

            allFutures.add(putFuture);
        }

        FutureUtils.ConjunctFuture<Collection<BlobKey>> conjunctFuture = FutureUtils.combineAll(allFutures);

        // wait until all operations have completed and check that no exception was thrown
        Collection<BlobKey> blobKeys = conjunctFuture.get();

        Iterator<BlobKey> blobKeyIterator = blobKeys.iterator();
        assertTrue(blobKeyIterator.hasNext());
        BlobKey blobKey = blobKeyIterator.next();

        // make sure that all blob keys are the same
        while (blobKeyIterator.hasNext()) {
            verifyKeyDifferentHashEquals(blobKey, blobKeyIterator.next());
        }

        // check the uploaded file's contents
        verifyContents(server, jobId, blobKey, data);

        // check that we only uploaded the file once to the blob store
        if (blobType == PERMANENT_BLOB) {
            verify(blobStore, times(1)).put(any(File.class), eq(jobId), eq(blobKey));
        } else {
            // can't really verify much in the other cases other than that the put operations should
            // work and not corrupt files
            verify(blobStore, times(0)).put(any(File.class), eq(jobId), eq(blobKey));
        }
    } finally {
        executor.shutdownNow();
    }
}
From source file:org.apache.james.blob.objectstorage.ObjectStorageBlobsDAO.java
private CompletableFuture<BlobId> updateBlobId(BlobId from, BlobId to) {
    String containerName = this.containerName.value();
    return CompletableFuture
            .supplyAsync(() -> blobStore.copyBlob(containerName, from.asString(), containerName,
                    to.asString(), CopyOptions.NONE), executor)
            .thenAcceptAsync(any -> blobStore.removeBlob(containerName, from.asString()))
            .thenApply(any -> to);
}
From source file:org.apache.james.blob.objectstorage.ObjectStorageBlobsDAO.java
private CompletableFuture<BlobId> save(InputStream data, BlobId id) {
    String containerName = this.containerName.value();
    HashingInputStream hashingInputStream = new HashingInputStream(Hashing.sha256(), data);
    Payload payload = payloadCodec.write(hashingInputStream);
    Blob blob = blobStore.blobBuilder(id.asString()).payload(payload).build();

    return CompletableFuture.supplyAsync(() -> blobStore.putBlob(containerName, blob), executor)
            .thenApply(any -> blobIdFactory.from(hashingInputStream.hash().toString()));
}
From source file:org.apache.james.blob.objectstorage.ObjectStorageBlobsDAO.java
@Override
public CompletableFuture<byte[]> readBytes(BlobId blobId) {
    return CompletableFuture
            .supplyAsync(Throwing.supplier(() -> IOUtils.toByteArray(read(blobId))).sneakyThrow(), executor);
}
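The readBytes example above leans on a Throwing.supplier(...).sneakyThrow() helper because Supplier cannot declare checked exceptions. A plain-Java alternative is to wrap the checked IOException in a CompletionException so it surfaces as the future's failure cause when the result is joined. The sketch below is hypothetical: it assumes it lives inside the same DAO, where read(blobId) returns an InputStream, executor is the class field, and IOUtils comes from Apache Commons IO.

public CompletableFuture<byte[]> readBytesWithoutHelper(BlobId blobId) {
    return CompletableFuture.supplyAsync(() -> {
        try {
            // Assumes read(blobId) returns an InputStream, as in the DAO above.
            return IOUtils.toByteArray(read(blobId));
        } catch (IOException e) {
            // CompletionException is rethrown to callers of join()/get() as the failure cause.
            throw new CompletionException(e);
        }
    }, executor);
}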
From source file:org.apache.sling.commons.messaging.mail.internal.SimpleMailService.java
@Override
public CompletableFuture<Result> send(@Nonnull final String message, @Nonnull final String recipient,
        @Nonnull final Map data) {
    return CompletableFuture.supplyAsync(() -> sendMail(message, recipient, data, mailBuilder),
            runnable -> threadPool.submit(runnable));
}
From source file:org.apache.storm.localizer.AsyncLocalizer.java
public synchronized CompletableFuture<Void> requestDownloadTopologyBlobs(final LocalAssignment assignment,
        final int port, final BlobChangingCallback cb) throws IOException {
    final String topologyId = assignment.get_topology_id();
    CompletableFuture<Void> baseBlobs = requestDownloadBaseTopologyBlobs(assignment, port, cb);
    return baseBlobs.thenComposeAsync((v) -> {
        LocalDownloadedResource localResource = blobPending.get(topologyId);
        if (localResource == null) {
            Supplier<Void> supplier = new DownloadBlobs(topologyId, assignment.get_owner());
            localResource = new LocalDownloadedResource(CompletableFuture.supplyAsync(supplier, execService));
            blobPending.put(topologyId, localResource);
        }
        CompletableFuture<Void> r = localResource.reserve(port, assignment);
        LOG.debug("Reserved blobs {} {}", topologyId, localResource);
        return r;
    });
}
From source file:org.jboss.set.aphrodite.Aphrodite.java
/**
 * Retrieve all issues associated with the provided URLs. This method simply logs any issue URLs
 * that cannot be retrieved from an <code>IssueTrackerServer</code>. If the provided URLs
 * collection is empty, or no issues are found, then an empty List is returned.
 *
 * @param urls a collection of issue URLs.
 * @return a list of <code>Issue</code> objects associated with the provided urls.
 */
public List<Issue> getIssues(Collection<URL> urls) {
    Objects.requireNonNull(urls, "the collection of urls cannot be null");
    if (urls.isEmpty())
        return new ArrayList<>();

    List<CompletableFuture<List<Issue>>> requests = issueTrackers.values().stream()
            .map(tracker -> CompletableFuture.supplyAsync(() -> tracker.getIssues(urls), executorService))
            .collect(Collectors.toList());

    return requests.stream().map(CompletableFuture::join).flatMap(Collection::stream)
            .collect(Collectors.toList());
}
From source file:org.jboss.set.aphrodite.Aphrodite.java
/**
 * Return all issues, across all Issue Trackers, which match the passed <code>SearchCriteria</code>.
 *
 * @param searchCriteria all set fields will be searched for.
 * @return a list of all <code>Issue</code> objects which match the specified searchCriteria,
 *         or an empty list if no issues match the searched criteria.
 */
public List<Issue> searchIssues(SearchCriteria searchCriteria) {
    Objects.requireNonNull(searchCriteria, "searchCriteria cannot be null");
    checkIssueTrackerExists();

    if (searchCriteria.isEmpty())
        return new ArrayList<>();

    List<CompletableFuture<List<Issue>>> searchRequests = issueTrackers.values().stream()
            .map(tracker -> CompletableFuture
                    .supplyAsync(() -> tracker.searchIssues(searchCriteria), executorService))
            .collect(Collectors.toList());

    return searchRequests.stream().map(CompletableFuture::join).flatMap(Collection::stream)
            .collect(Collectors.toList());
}
From source file:org.jboss.set.aphrodite.issue.trackers.bugzilla.BugzillaClient.java
public boolean postComment(Map<Issue, Comment> commentMap) {
    List<CompletableFuture<Boolean>> requests = commentMap.entrySet().stream()
            .map(entry -> CompletableFuture.supplyAsync(
                    () -> postCommentAndLogExceptions(entry.getKey(), entry.getValue()), executorService))
            .collect(Collectors.toList());
    return requests.stream().map(CompletableFuture::join).noneMatch(failed -> !failed);
}
From source file:org.jboss.set.aphrodite.issue.trackers.bugzilla.BugzillaClient.java
public boolean postComment(Collection<Issue> issues, Comment comment) {
    List<CompletableFuture<Boolean>> requests = issues.stream()
            .map(issue -> CompletableFuture
                    .supplyAsync(() -> postCommentAndLogExceptions(issue, comment), executorService))
            .collect(Collectors.toList());
    return requests.stream().map(CompletableFuture::join).noneMatch(failed -> !failed);
}
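The Aphrodite and BugzillaClient examples above all follow the same fan-out pattern: map each work item to a supplyAsync future on a shared executor, then join the futures and aggregate the results. The following stripped-down, generic version of that pattern is only a sketch; the class, method, and helper names are invented for illustration and do not appear in any of the projects above.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;
import java.util.stream.Collectors;

public class FanOutJoinSketch {

    // Run 'task' for every input on 'executor', then block and collect all results in input order.
    static <T, R> List<R> mapConcurrently(List<T> inputs, Function<T, R> task, ExecutorService executor) {
        List<CompletableFuture<R>> futures = inputs.stream()
                .map(input -> CompletableFuture.supplyAsync(() -> task.apply(input), executor))
                .collect(Collectors.toList());
        // join() rethrows any task failure as an unchecked CompletionException.
        return futures.stream().map(CompletableFuture::join).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            List<Integer> lengths =
                    mapConcurrently(Arrays.asList("one", "two", "three"), String::length, executor);
            System.out.println(lengths); // prints [3, 3, 5]
        } finally {
            executor.shutdown();
        }
    }
}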