Example usage for java.util.concurrent CompletableFuture completedFuture

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture.completedFuture.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Document

Returns a new CompletableFuture that is already completed with the given value.
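
Before the usage examples, here is a minimal, self-contained sketch of the method itself (class and value names are illustrative):

import java.util.concurrent.CompletableFuture;

public class CompletedFutureDemo {
    public static void main(String[] args) {
        // Already complete at construction: no thread is spawned,
        // isDone() is true immediately, and join() does not block.
        CompletableFuture<String> future = CompletableFuture.completedFuture("hello");
        System.out.println(future.isDone()); // true
        System.out.println(future.join());   // hello

        // Dependent stages execute synchronously on the calling thread,
        // because the source future is already complete.
        System.out.println(future.thenApply(String::length).join()); // 5
    }
}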

Usage

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java

/** Create a new library bean
 * @param id
 * @param library_mgmt
 * @param create_not_update - true if create, false if update
 * @param share_db
 * @return
 */
protected static ManagementFuture<Supplier<Object>> createLibraryBean(final String id,
        final IManagementCrudService<SharedLibraryBean> library_mgmt, final IStorageService aleph2_fs,
        final boolean create_not_update, final ICrudService<JsonNode> share_db, final GridFS share_fs,
        final IServiceContext context) {
    if (create_not_update) {
        _logger.info(ErrorUtils.get("Found new share {0}, creating library bean", id));
    } else {
        _logger.info(ErrorUtils.get("Share {0} was modified, updating library bean", id));
    }

    // Create a status bean:

    final SingleQueryComponent<JsonNode> v1_query = CrudUtils.allOf().when(JsonUtils._ID, new ObjectId(id));
    return FutureUtils.denestManagementFuture(share_db.getObjectBySpec(v1_query)
            .<ManagementFuture<Supplier<Object>>>thenApply(Lambdas.wrap_u(jsonopt -> {
                final SharedLibraryBean new_object = getLibraryBeanFromV1Share(jsonopt.get());

                // Try to copy the file across before going crazy (going to leave this as single threaded for now, we'll live)
                final String binary_id = safeJsonGet("binaryId", jsonopt.get()).asText();
                if (!binary_id.isEmpty()) {
                    copyFile(binary_id, new_object.path_name(), aleph2_fs, share_fs);
                } else { // Check if it's a reference and if so copy from local to HDFS
                    final Optional<String> maybe_local_path = JsonUtils
                            .getProperty("documentLocation.collection", jsonopt.get())
                            .filter(j -> j.isTextual()).map(j -> j.asText());
                    maybe_local_path.ifPresent(Lambdas.wrap_consumer_u(
                            local_path -> copyFile(local_path, new_object.path_name(), aleph2_fs)));
                }

                final AuthorizationBean auth = new AuthorizationBean(new_object.owner_id());
                final ManagementFuture<Supplier<Object>> ret = library_mgmt.secured(context, auth)
                        .storeObject(new_object, !create_not_update);
                return ret;
            })).exceptionally(e -> {
                return FutureUtils
                        .<Supplier<Object>>createManagementFuture(
                                FutureUtils.returnError(new RuntimeException(e)),
                                CompletableFuture.completedFuture(Arrays.asList(new BasicMessageBean(new Date(),
                                        false, "IkanowV1SyncService_LibraryJars", "createLibraryBean", null,
                                        ErrorUtils.getLongForm("{0}", e), null))));
            }));
}
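
In the exceptionally handler above, completedFuture wraps the already-known list of error messages that accompanies the failed result in Aleph2's ManagementFuture. A loose sketch of that general shape, using a hypothetical ResultWithMessages pair rather than the real Aleph2 types:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

// Hypothetical pairing of a (possibly failed) result with diagnostic
// messages; the messages are known immediately, so they are wrapped
// with completedFuture while the result side carries the exception.
class ResultWithMessages<T> {
    final CompletableFuture<T> result;
    final CompletableFuture<List<String>> messages;

    ResultWithMessages(CompletableFuture<T> result, CompletableFuture<List<String>> messages) {
        this.result = result;
        this.messages = messages;
    }

    static <T> ResultWithMessages<T> failed(Throwable t) {
        CompletableFuture<T> error = new CompletableFuture<>();
        error.completeExceptionally(t);
        return new ResultWithMessages<>(error,
                CompletableFuture.completedFuture(Arrays.asList("failed: " + t.getMessage())));
    }
}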

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@Override
public CompletableFuture<Optional<O>> updateAndReturnObjectBySpec(QueryComponent<O> unique_spec,
        final Optional<Boolean> upsert, final UpdateComponent<O> update, final Optional<Boolean> before_updated,
        final List<String> field_list, final boolean include) {
    try {
        final Tuple2<DBObject, DBObject> query_and_meta = MongoDbUtils.convertToMongoQuery(unique_spec);
        final DBObject update_object = MongoDbUtils.createUpdateObject(update);

        final BasicDBObject fields = getFields(field_list, include);

        // ($unset: null removes the object, only possible via the UpdateComponent.deleteObject call) 
        final boolean do_remove = update_object.containsField("$unset")
                && (null == update_object.get("$unset"));

        final O ret_val = do_remove
                ? _state.coll.findAndModify(query_and_meta._1(), fields,
                        (DBObject) query_and_meta._2().get("$sort"), do_remove, (DBObject) null, false, false)
                : _state.coll.findAndModify(query_and_meta._1(), fields,
                        (DBObject) query_and_meta._2().get("$sort"), false, update_object,
                        !before_updated.orElse(false), upsert.orElse(false));

        return CompletableFuture.completedFuture(Optional.ofNullable(ret_val));
    } catch (Exception e) {
        return FutureUtils.<Optional<O>>returnError(e);
    }
}
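
This method shows a pattern that recurs throughout these sources: do the work synchronously, wrap the result with completedFuture so the method satisfies an asynchronous interface, and hand back an exceptionally completed future on failure. A minimal sketch of the same pattern with hypothetical names (blockingCount stands in for the real driver call, and the plain JDK completeExceptionally replaces FutureUtils.returnError):

import java.util.concurrent.CompletableFuture;

// Hypothetical repository: blocking work wrapped into pre-completed futures.
class SyncBackedRepository {
    CompletableFuture<Integer> countMatching(String filter) {
        try {
            int count = blockingCount(filter); // synchronous call, may throw
            return CompletableFuture.completedFuture(count);
        } catch (Exception e) {
            // Return an error future instead of throwing to the caller
            CompletableFuture<Integer> failed = new CompletableFuture<>();
            failed.completeExceptionally(e);
            return failed;
        }
    }

    private int blockingCount(String filter) {
        return filter.isEmpty() ? 0 : 1; // stand-in for a real query
    }
}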

From source file:com.ikanow.aleph2.analytics.storm.services.MockAnalyticsContext.java

@Override
public CompletableFuture<Map<String, String>> getAnalyticsLibraries(final Optional<DataBucketBean> bucket,
        final Collection<AnalyticThreadJobBean> jobs) {
    //(just return empty, only used with local storm controller)
    return CompletableFuture.completedFuture(Collections.emptyMap());
}
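
Here completedFuture lets a mock fulfil an asynchronous contract without doing any work. The same stubbing pattern in a small sketch, with a hypothetical interface:

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

// Hypothetical async interface plus a test stub that answers with a
// pre-completed, empty result, mirroring the mock context above.
interface ConfigSource {
    CompletableFuture<Map<String, String>> fetchConfig();
}

class StubConfigSource implements ConfigSource {
    @Override
    public CompletableFuture<Map<String, String>> fetchConfig() {
        return CompletableFuture.completedFuture(Collections.emptyMap());
    }
}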

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

/**
 * Seals a txn and transitions it to COMMITTING (resp. ABORTING) state if commit param is true (resp. false).
 *
 * Post-condition:
 * 1. If seal completes successfully, then
 *     (a) txn state is COMMITTING/ABORTING,
 *     (b) CommitEvent/AbortEvent is present in the commit stream/abort stream,
 *     (c) txn is removed from host-txn index,
 *     (d) txn is removed from the timeout service.
 *
 * 2. If process fails after transitioning txn to COMMITTING/ABORTING state, but before responding to client, then
 * since txn is present in the host-txn index, some other controller process shall put CommitEvent/AbortEvent to
 * commit stream/abort stream.
 *
 * @param host    host id. It is different from hostId iff invoked from TxnSweeper for aborting orphaned txn.
 * @param scope   scope name.
 * @param stream  stream name.
 * @param commit  boolean indicating whether to commit txn.
 * @param txnId   txn id.
 * @param version expected version of txn node in store.
 * @param ctx     context.
 * @return        Txn status after sealing it.
 */
CompletableFuture<TxnStatus> sealTxnBody(final String host, final String scope, final String stream,
        final boolean commit, final UUID txnId, final Integer version, final OperationContext ctx) {
    TxnResource resource = new TxnResource(scope, stream, txnId);
    Optional<Integer> versionOpt = Optional.ofNullable(version);

    // Step 1. Add txn to current host's index, if it is not already present
    CompletableFuture<Void> addIndex = host.equals(hostId) && !timeoutService.containsTxn(scope, stream, txnId)
            ?
            // PS: txn version in index does not matter, because if update is successful,
            // then txn would no longer be open.
            streamMetadataStore.addTxnToIndex(hostId, resource, Integer.MAX_VALUE)
            : CompletableFuture.completedFuture(null);

    addIndex.whenComplete((v, e) -> {
        if (e != null) {
            log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
        } else {
            log.debug("Txn={}, already present/newly added to host-txn index of host={}", txnId, hostId);
        }
    });

    // Step 2. Seal txn
    CompletableFuture<AbstractMap.SimpleEntry<TxnStatus, Integer>> sealFuture = addIndex.thenComposeAsync(
            x -> streamMetadataStore.sealTransaction(scope, stream, txnId, commit, versionOpt, ctx, executor),
            executor).whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed sealing txn", txnId);
                } else {
                    log.debug("Txn={}, sealed successfully, commit={}", txnId, commit);
                }
            });

    // Step 3. write event to corresponding stream.
    return sealFuture.thenComposeAsync(pair -> {
        TxnStatus status = pair.getKey();
        switch (status) {
        case COMMITTING:
            return writeCommitEvent(scope, stream, pair.getValue(), txnId, status);
        case ABORTING:
            return writeAbortEvent(scope, stream, pair.getValue(), txnId, status);
        case ABORTED:
        case COMMITTED:
            return CompletableFuture.completedFuture(status);
        case OPEN:
        case UNKNOWN:
        default:
            // Not possible after successful streamStore.sealTransaction call, because otherwise an
            // exception would be thrown.
            return CompletableFuture.completedFuture(status);
        }
    }, executor).thenComposeAsync(status -> {
        // Step 4. Remove txn from timeoutService, and from the index.
        timeoutService.removeTxn(scope, stream, txnId);
        log.debug("Txn={}, removed from timeout service", txnId);
        return streamMetadataStore.removeTxnFromIndex(host, resource, true).whenComplete((v, e) -> {
            if (e != null) {
                log.debug("Txn={}, failed removing txn from host-txn index of host={}", txnId, hostId);
            } else {
                log.debug("Txn={}, removed txn from host-txn index of host={}", txnId, hostId);
            }
        }).thenApply(x -> status);
    }, executor);
}
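
Step 1 above uses completedFuture(null) as the "nothing to do" branch of a conditional, so the rest of the pipeline composes over one future type whether or not the async step ran. A short illustrative sketch (the class and runSetup are hypothetical):

import java.util.concurrent.CompletableFuture;

class ConditionalStep {
    CompletableFuture<String> process(boolean needsSetup) {
        // Skippable stage: real async work, or an already-completed no-op
        CompletableFuture<Void> setup = needsSetup
                ? runSetup()
                : CompletableFuture.completedFuture(null);
        return setup.thenApply(v -> "processed");
    }

    private CompletableFuture<Void> runSetup() {
        return CompletableFuture.runAsync(() -> { /* expensive setup */ });
    }
}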

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_TestBuckets.java

/**
 * Logic that runs when we come across a new test object.
 * Kicks off a test by calling BucketTestService.test_bucket -> this typically calls CoreManagementService.test_bucket
 * Updates the status based on if the test_bucket started successfully
 *
 * @param data_bucket
 * @param new_test_source
 * @param bucket_test_service
 * @param source_test_db
 * @return
 */
private CompletableFuture<Boolean> handleNewTestSource(final DataBucketBean data_bucket,
        final TestQueueBean new_test_source, final BucketTestService bucket_test_service,
        final ICrudService<TestQueueBean> source_test_db) {
    //get the test params
    final ProcessingTestSpecBean test_spec = new_test_source.test_params();

    //try to test the bucket
    _logger.debug("Running bucket test");
    @SuppressWarnings("unchecked")
    final ICrudService<JsonNode> v1_output_db = _underlying_management_db.get()
            .getUnderlyingPlatformDriver(ICrudService.class, Optional.of("ingest." + data_bucket._id())).get();
    final CompletableFuture<Boolean> delete_datastore = v1_output_db.deleteDatastore(); //(this is done in a few other places, so just to be on the safe side here)
    final ManagementFuture<Boolean> test_res_future = bucket_test_service.test_bucket(_core_management_db.get(),
            data_bucket, test_spec);

    return delete_datastore.exceptionally(ex -> {
        _logger.error("Error trying to clear v1 output db before test run: ingest." + data_bucket._id(), ex);
        return false;
    }).thenCompose(y -> test_res_future.thenCompose(res -> {
        return test_res_future.getManagementResults().<Boolean>thenCompose(man_res -> {
            //return updateTestSourceStatus(new_test_source._id(), (res ? "in_progress" : "error"), source_test_db, Optional.of(new Date()), Optional.empty(), Optional.of(man_res.stream().map(
            return updateTestSourceStatus(new_test_source._id(),
                    (res ? TestStatus.in_progress : TestStatus.error), source_test_db, Optional.of(new Date()),
                    Optional.empty(), Optional.of(man_res.stream().map(msg -> {
                        return "[" + msg.date() + "] " + msg.source() + " (" + msg.command() + "): "
                                + (msg.success() ? "INFO" : "ERROR") + ": " + msg.message();
                    }).collect(Collectors.joining("\n"))));
        });
    }).exceptionally(t -> {
        updateTestSourceStatus(new_test_source._id(), TestStatus.error, source_test_db, Optional.of(new Date()),
                Optional.empty(), Optional.of(ErrorUtils.getLongForm("Error during test_bucket: {0}", t)))
                        .thenCompose(x -> {
                            if (!x)
                                _logger.error(
                                        "Had an error trying to update status of test object after having an error during test bucket, somethings gone horribly wrong");
                            return CompletableFuture.completedFuture(x); //this return doesn't matter
                        });
        return false;
    }));
}

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@SuppressWarnings("unchecked")
@Override
public CompletableFuture<Boolean> deleteObjectById(final Object id) {
    try {
        final WriteResult<O, K> wr = _state.coll.removeById((K) id);

        return CompletableFuture.completedFuture(wr.getN() > 0);
    } catch (Exception e) {
        return FutureUtils.<Boolean>returnError(e);
    }
}

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

public ManagementFuture<Boolean> deleteObjectById(final Object id) {
    final CompletableFuture<Optional<DataBucketBean>> result = _underlying_data_bucket_db.get()
            .getObjectById(id);
    try {
        if (result.get().isPresent()) {
            return this.deleteBucket(result.get().get());
        } else {
            return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(false));
        }
    } catch (Exception e) {
        // This is a serious enough exception that we'll just leave here
        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
}

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

private CompletableFuture<TxnStatus> checkTransactionStatus(final int epoch, final UUID txId) {
    return verifyLegalState().thenCompose(v -> getActiveTx(epoch, txId).handle((ok, ex) -> {
        if (ex != null && ExceptionHelpers.getRealException(ex) instanceof DataNotFoundException) {
            return TxnStatus.UNKNOWN;
        } else if (ex != null) {
            throw new CompletionException(ex);
        }
        return ActiveTxnRecord.parse(ok.getData()).getTxnStatus();
    }).thenCompose(x -> {
        if (x.equals(TxnStatus.UNKNOWN)) {
            return getCompletedTxnStatus(txId);
        } else {
            return CompletableFuture.completedFuture(x);
        }
    }));
}
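
checkTransactionStatus branches inside thenCompose: one arm performs a further asynchronous lookup, the other returns completedFuture so both arms yield the same future type. The same shape in a self-contained sketch (the cache and fetchRemote are hypothetical):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class CacheFirstLookup {
    private final Map<String, String> cache = new ConcurrentHashMap<>();

    CompletableFuture<String> lookup(String key) {
        return CompletableFuture.completedFuture(cache.get(key))
                .thenCompose(cached -> cached != null
                        ? CompletableFuture.completedFuture(cached) // hit: short-circuit
                        : fetchRemote(key));                        // miss: real async work
    }

    private CompletableFuture<String> fetchRemote(String key) {
        return CompletableFuture.supplyAsync(() -> "value-for-" + key);
    }
}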

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

public ManagementFuture<Boolean> deleteObjectBySpec(final QueryComponent<DataBucketBean> unique_spec) {
    final CompletableFuture<Optional<DataBucketBean>> result = _underlying_data_bucket_db.get()
            .getObjectBySpec(unique_spec);
    try {
        if (result.get().isPresent()) {
            return this.deleteBucket(result.get().get());
        } else {
            return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(false));
        }
    } catch (Exception e) {
        // This is a serious enough exception that we'll just leave here
        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
}

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@Override
public CompletableFuture<Long> deleteObjectsBySpec(final QueryComponent<O> spec) {
    try {
        final Tuple2<DBObject, DBObject> query_and_meta = MongoDbUtils.convertToMongoQuery(spec);
        final Long limit = (Long) query_and_meta._2().get("$limit");
        final DBObject sort = (DBObject) query_and_meta._2().get("$sort");

        if ((null == limit) && (null == sort)) { // Simple case, just delete as many docs as possible
            final WriteResult<O, K> wr = _state.coll.remove(query_and_meta._1());
            return CompletableFuture.completedFuture((Long) (long) wr.getN());
        } else {

            final com.mongodb.DBCursor cursor = Optional
                    .of(_state.orig_coll.find(query_and_meta._1(), new BasicDBObject(_ID, 1)))
                    // (now we're processing on a cursor "c")
                    .map(c -> {
                        return (null != sort) ? c.sort(sort) : c;
                    }).map(c -> {
                        return (null != limit) ? c.limit(limit.intValue()) : c;
                    }).get();

            final List<Object> ids = StreamSupport.stream(cursor.spliterator(), false).map(o -> o.get(_ID))
                    .collect(Collectors.toList());

            return deleteObjectsBySpec(emptyQuery(_state.bean_clazz).withAny(_ID, ids));
        }
    } catch (Exception e) {
        return FutureUtils.<Long>returnError(e);
    }
}