List of usage examples for java.util.concurrent.CompletableFuture.completedFuture
public static <U> CompletableFuture<U> completedFuture(U value)
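completedFuture returns a CompletableFuture that is already complete with the given value. It is typically used to satisfy an asynchronous return type from a code path that has nothing asynchronous left to do, which is the pattern most of the examples below follow. A minimal, self-contained sketch of that idiom (the lookup method and its cache are illustrative assumptions, not taken from any of the examples below):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class CompletedFutureExample {

    // Hypothetical cache used only for illustration.
    private static final Map<String, String> CACHE = new ConcurrentHashMap<>();

    /**
     * Returns the cached value immediately (wrapped in an already-completed future),
     * or falls back to an asynchronous computation on a cache miss.
     */
    static CompletableFuture<String> lookup(String key) {
        String cached = CACHE.get(key);
        if (cached != null) {
            // No async work needed: hand back an already-completed future.
            return CompletableFuture.completedFuture(cached);
        }
        return CompletableFuture.supplyAsync(() -> {
            String value = "value-for-" + key; // stand-in for a real remote lookup
            CACHE.put(key, value);
            return value;
        });
    }

    public static void main(String[] args) {
        lookup("a").thenAccept(System.out::println).join();
        lookup("a").thenAccept(System.out::println).join(); // second call hits the cache
    }
}

The first call computes the value asynchronously; the second returns immediately from the already-completed future, and callers compose on the result the same way in both cases.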
From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java
/**
 * Takes a collection of results from the management side-channel, and uses it to update a harvest node
 * @param id - source key / bucket id
 * @param status_messages
 * @param share_db
 * @return true - if share updated with errors, false otherwise
 */
protected static CompletableFuture<Boolean> updateV1ShareErrorStatus(final Date main_date, final String id,
        final Collection<BasicMessageBean> status_messages,
        final IManagementCrudService<SharedLibraryBean> library_mgmt, final ICrudService<JsonNode> share_db,
        final boolean create_not_update) {
    final String message_block = status_messages.stream().map(msg -> {
        return "[" + msg.date() + "] " + msg.source() + " (" + msg.command() + "): "
                + (msg.success() ? "INFO" : "ERROR") + ": " + msg.message();
    }).collect(Collectors.joining("\n"));
    final boolean any_errors = status_messages.stream().anyMatch(msg -> !msg.success());
    // Only going to do something if we have errors:
    if (any_errors) {
        _logger.warn(ErrorUtils.get("Error creating/updating shared library bean: {0} error= {1}", id,
                message_block.replace("\n", "; ")));
        return share_db.getObjectById(new ObjectId(id), Arrays.asList("title", "description"), true)
                .thenCompose(jsonopt -> {
                    if (jsonopt.isPresent()) { // (else share has vanished, nothing to do)
                        final CommonUpdateComponent<JsonNode> v1_update = Optional
                                .of(CrudUtils.update().set("description",
                                        safeJsonGet("description", jsonopt.get()).asText() + "\n\n" + message_block))
                                // If shared lib already exists then can't update the title (or the existing lib bean will get deleted)
                                .map(c -> create_not_update
                                        ? c.set("title", "ERROR:" + safeJsonGet("title", jsonopt.get()).asText())
                                        : c)
                                .get();

                        @SuppressWarnings("unchecked")
                        final CompletableFuture<Boolean> v2_res = Lambdas.get(() -> {
                            if (!create_not_update) {
                                // also make a token effort to update the timestamp on the shared lib bean, so the same error doesn't keep getting repeated
                                final CommonUpdateComponent<SharedLibraryBean> v2_update = CrudUtils
                                        .update(SharedLibraryBean.class)
                                        .set(SharedLibraryBean::modified, new Date());

                                //(need to do this because as of Aug 2015, the updateObjectById isn't plumbed in)
                                final ICrudService<SharedLibraryBean> library_service = (ICrudService<SharedLibraryBean>) (ICrudService<?>) library_mgmt
                                        .getUnderlyingPlatformDriver(ICrudService.class, Optional.empty())
                                        .get();

                                return library_service.updateObjectById("v1_" + id, v2_update); // (just fire this off and forget about it)
                            } else
                                return CompletableFuture.completedFuture(true);
                        });

                        final CompletableFuture<Boolean> update_res = v2_res.thenCompose(b -> {
                            if (b) {
                                return share_db.updateObjectById(new ObjectId(id), v1_update);
                            } else {
                                _logger.warn(ErrorUtils
                                        .get("Error creating/updating v2 library bean: {0} unknown error", id));
                                return CompletableFuture.completedFuture(false);
                            }
                        }).exceptionally(t -> {
                            _logger.warn(ErrorUtils.getLongForm(
                                    "Error creating/updating shared library bean: {1} error= {0}", t, id));
                            return false;
                        });

                        return update_res;
                    } else {
                        return CompletableFuture.completedFuture(false);
                    }
                });
    } else {
        return CompletableFuture.completedFuture(false);
    }
}
From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java
private CompletableFuture<Void> checkReady() {
    if (!ready) {
        return FutureHelpers.failedFuture(new IllegalStateException(getClass().getName() + " not yet ready"));
    } else {
        return CompletableFuture.completedFuture(null);
    }
}
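FutureHelpers.failedFuture in the example above is a Pravega utility; the same precondition-check idiom can be expressed with the JDK alone. A minimal sketch (the ready flag and class are illustrative), using completeExceptionally so it also works on Java 8, where CompletableFuture.failedFuture is not yet available (it was added in Java 9):

import java.util.concurrent.CompletableFuture;

public class ReadinessCheck {

    private volatile boolean ready;

    /** Fails fast with an already-failed future, or returns an already-completed one. */
    CompletableFuture<Void> checkReady() {
        if (!ready) {
            CompletableFuture<Void> failed = new CompletableFuture<>();
            failed.completeExceptionally(new IllegalStateException(getClass().getName() + " not yet ready"));
            return failed;
        }
        return CompletableFuture.completedFuture(null);
    }

    public static void main(String[] args) {
        ReadinessCheck check = new ReadinessCheck();
        check.checkReady().whenComplete((v, t) -> System.out.println(t != null ? "not ready: " + t : "ready"));
        check.ready = true;
        check.checkReady().whenComplete((v, t) -> System.out.println(t != null ? "not ready: " + t : "ready"));
    }
}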
From source file:io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java
/**
 * Requests all updates from each peer in the provided list of peers.
 * <p>
 * The returned future will be completed once at least one peer bootstraps this map or bootstrap requests to all peers
 * fail.
 *
 * @param peers the list of peers from which to request updates
 * @return a future to be completed once updates have been received from at least one peer
 */
private CompletableFuture<Void> requestBootstrapFromPeers(List<MemberId> peers) {
    if (peers.isEmpty()) {
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    final int totalPeers = peers.size();
    AtomicBoolean successful = new AtomicBoolean();
    AtomicInteger totalCount = new AtomicInteger();
    AtomicReference<Throwable> lastError = new AtomicReference<>();

    // Iterate through all of the peers and send a bootstrap request. On the first peer that returns
    // a successful bootstrap response, complete the future. Otherwise, if no peers respond with any
    // successful bootstrap response, the future will be completed with the last exception.
    for (MemberId peer : peers) {
        requestBootstrapFromPeer(peer).whenComplete((result, error) -> {
            if (error == null) {
                if (successful.compareAndSet(false, true)) {
                    future.complete(null);
                } else if (totalCount.incrementAndGet() == totalPeers) {
                    Throwable e = lastError.get();
                    if (e != null) {
                        future.completeExceptionally(e);
                    }
                }
            } else {
                if (!successful.get() && totalCount.incrementAndGet() == totalPeers) {
                    future.completeExceptionally(error);
                } else {
                    lastError.set(error);
                }
            }
        });
    }
    return future;
}
From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java
private void setupStorageGetHandler(TestContext context, Set<String> storageSegments,
        Function<String, SegmentProperties> infoGetter) {
    context.storage.getInfoHandler = segmentName -> {
        synchronized (storageSegments) {
            if (!storageSegments.contains(segmentName)) {
                return FutureHelpers.failedFuture(new StreamSegmentNotExistsException(segmentName));
            } else {
                return CompletableFuture.completedFuture(infoGetter.apply(segmentName));
            }
        }
    };
}
From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java
/**
 * Helper method to complete scale operation. It tries to optimistically complete the scale operation if no transaction is running
 * against previous epoch. If so, it will proceed to seal old segments and then complete partial metadata records.
 * @param scope scope
 * @param stream stream
 * @param epoch epoch
 * @param context operation context
 * @return returns true if it was able to complete scale. false otherwise
 */
public CompletableFuture<Boolean> tryCompleteScale(String scope, String stream, int epoch,
        OperationContext context) {
    // Note: if we cant delete old epoch -- txns against old segments are ongoing..
    // if we can delete old epoch, then only do we proceed to subsequent steps
    return withRetries(
            () -> streamMetadataStore.tryDeleteEpochIfScaling(scope, stream, epoch, context, executor),
            executor).thenCompose(response -> {
                if (!response.isDeleted()) {
                    return CompletableFuture.completedFuture(false);
                }
                assert !response.getSegmentsCreated().isEmpty() && !response.getSegmentsSealed().isEmpty();

                long scaleTs = response.getSegmentsCreated().get(0).getStart();

                return notifySealedSegments(scope, stream, response.getSegmentsSealed())
                        .thenCompose(y -> withRetries(() -> streamMetadataStore.scaleSegmentsSealed(scope, stream,
                                response.getSegmentsSealed(), response.getSegmentsCreated(), epoch, scaleTs,
                                context, executor), executor).thenApply(z -> {
                                    log.info("scale processing for {}/{} epoch {} completed.", scope, stream, epoch);
                                    return true;
                                }));
            });
}
From source file:io.pravega.controller.store.stream.PersistentStreamBase.java
@Override
public CompletableFuture<TxnStatus> commitTransaction(final int epoch, final UUID txId) {
    return verifyLegalState().thenCompose(v -> checkTransactionStatus(epoch, txId)).thenApply(x -> {
        switch (x) {
        // Only sealed transactions can be committed
        case COMMITTED:
        case COMMITTING:
            return x;
        case OPEN:
        case ABORTING:
        case ABORTED:
            throw StoreException.create(StoreException.Type.ILLEGAL_STATE, "Stream: " + getName()
                    + " Transaction: " + txId.toString() + " State: " + x.toString());
        case UNKNOWN:
        default:
            throw StoreException.create(StoreException.Type.DATA_NOT_FOUND,
                    "Stream: " + getName() + " Transaction: " + txId.toString());
        }
    }).thenCompose(x -> {
        if (x.equals(TxnStatus.COMMITTING)) {
            return createCompletedTxEntry(txId, TxnStatus.COMMITTED, System.currentTimeMillis());
        } else {
            return CompletableFuture.completedFuture(null); // already committed, do nothing
        }
    }).thenCompose(x -> removeActiveTxEntry(epoch, txId)).thenApply(x -> TxnStatus.COMMITTED);
}
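A recurring idiom in this and the surrounding examples: when one branch of a thenCompose has no asynchronous work left, it returns CompletableFuture.completedFuture(null) so that both branches yield a stage and the chain keeps composing. A stripped-down sketch of just that branching shape (the method and step names are illustrative, not from the Pravega code above):

import java.util.concurrent.CompletableFuture;

public class ConditionalComposeExample {

    // Pretend asynchronous step; stands in for a real store/network call.
    private static CompletableFuture<Void> writeCompletionRecord(String txId) {
        return CompletableFuture.runAsync(() -> System.out.println("wrote completion record for " + txId));
    }

    /**
     * Both branches of the condition must yield a CompletableFuture so the chain composes:
     * the "nothing to do" branch uses completedFuture(null) as an already-finished stage.
     */
    static CompletableFuture<String> commit(String txId, boolean alreadyCommitted) {
        return CompletableFuture.supplyAsync(() -> alreadyCommitted)
                .thenCompose(done -> {
                    if (done) {
                        return CompletableFuture.completedFuture(null); // already committed, nothing to write
                    }
                    return writeCompletionRecord(txId);
                })
                .thenApply(v -> "COMMITTED");
    }

    public static void main(String[] args) {
        System.out.println(commit("tx-1", false).join());
        System.out.println(commit("tx-2", true).join());
    }
}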
From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java
@Override
public CompletableFuture<Boolean> optimizeQuery(final List<String> ordered_field_list) {
    // (potentially in the future this could check the mapping and throw if the fields are not indexed?)
    return CompletableFuture.completedFuture(true);
}
From source file:io.pravega.controller.store.stream.PersistentStreamBase.java
@Override
public CompletableFuture<TxnStatus> abortTransaction(final int epoch, final UUID txId) {
    return verifyLegalState().thenCompose(v -> checkTransactionStatus(txId)).thenApply(x -> {
        switch (x) {
        case ABORTING:
        case ABORTED:
            return x;
        case OPEN:
        case COMMITTING:
        case COMMITTED:
            throw StoreException.create(StoreException.Type.ILLEGAL_STATE, "Stream: " + getName()
                    + " Transaction: " + txId.toString() + " State: " + x.name());
        case UNKNOWN:
        default:
            throw StoreException.create(StoreException.Type.DATA_NOT_FOUND,
                    "Stream: " + getName() + " Transaction: " + txId.toString());
        }
    }).thenCompose(x -> {
        if (x.equals(TxnStatus.ABORTING)) {
            return createCompletedTxEntry(txId, TxnStatus.ABORTED, System.currentTimeMillis());
        } else {
            return CompletableFuture.completedFuture(null); // already aborted, do nothing
        }
    }).thenCompose(y -> removeActiveTxEntry(epoch, txId)).thenApply(y -> TxnStatus.ABORTED);
}
From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java
@VisibleForTesting
CompletableFuture<CreateStreamStatus.Status> createStreamBody(String scope, String stream,
        StreamConfiguration config, long timestamp) {
    return this.streamMetadataStore.createStream(scope, stream, config, timestamp, null, executor)
            .thenComposeAsync(response -> {
                log.info("{}/{} created in metadata store", scope, stream);
                CreateStreamStatus.Status status = translate(response.getStatus());
                // only if its a new stream or an already existing non-active stream then we will create
                // segments and change the state of the stream to active.
                if (response.getStatus().equals(CreateStreamResponse.CreateStatus.NEW)
                        || response.getStatus().equals(CreateStreamResponse.CreateStatus.EXISTS_CREATING)) {
                    List<Integer> newSegments = IntStream
                            .range(0, response.getConfiguration().getScalingPolicy().getMinNumSegments())
                            .boxed().collect(Collectors.toList());
                    return notifyNewSegments(scope, stream, response.getConfiguration(), newSegments)
                            .thenCompose(y -> {
                                final OperationContext context = streamMetadataStore.createContext(scope, stream);

                                return withRetries(() -> {
                                    CompletableFuture<Void> future;
                                    if (config.getRetentionPolicy() != null) {
                                        future = streamMetadataStore.addUpdateStreamForAutoStreamCut(scope,
                                                stream, config.getRetentionPolicy(), context, executor);
                                    } else {
                                        future = CompletableFuture.completedFuture(null);
                                    }
                                    return future.thenCompose(v -> streamMetadataStore.setState(scope, stream,
                                            State.ACTIVE, context, executor));
                                }, executor).thenApply(z -> status);
                            });
                } else {
                    return CompletableFuture.completedFuture(status);
                }
            }, executor).handle((result, ex) -> {
                if (ex != null) {
                    Throwable cause = Exceptions.unwrap(ex);
                    if (cause instanceof StoreException.DataNotFoundException) {
                        return CreateStreamStatus.Status.SCOPE_NOT_FOUND;
                    } else {
                        log.warn("Create stream failed due to ", ex);
                        return CreateStreamStatus.Status.FAILURE;
                    }
                } else {
                    return result;
                }
            });
}
From source file:com.ikanow.aleph2.analytics.storm.services.MockAnalyticsContext.java
@Override
public CompletableFuture<?> flushBatchOutput(Optional<DataBucketBean> bucket, AnalyticThreadJobBean job) {
    return CompletableFuture.completedFuture(Unit.unit());
}