Example usage for java.util.concurrent CompletableFuture completedFuture

Introduction

On this page you can find usage examples for java.util.concurrent CompletableFuture completedFuture, drawn from several open-source projects.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Document

Returns a new CompletableFuture that is already completed with the given value.
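
Before the project examples, here is a minimal, self-contained sketch of the method on its own (the CompletedFutureSketch class below is illustrative and not taken from any of the source files): completedFuture wraps a value that is already available, so the returned future is done immediately and downstream stages compose without waiting on any asynchronous work.

import java.util.concurrent.CompletableFuture;

public class CompletedFutureSketch {
    public static void main(String[] args) {
        // Already completed with "hello"; no executor or callback is involved.
        CompletableFuture<String> ready = CompletableFuture.completedFuture("hello");
        System.out.println(ready.isDone()); // true
        System.out.println(ready.join());   // hello

        // Downstream stages still compose normally and run without blocking.
        CompletableFuture<Integer> length = ready.thenApply(String::length);
        System.out.println(length.join());  // 5
    }
}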

Usage

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, when dealing
 * with Storage failures (or inexistent StreamSegments).
 */
@Test
public void testGetOrAssignStreamSegmentIdWithFailures() {
    final String segmentName = "Segment";
    final String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
            UUID.randomUUID());

    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    storageSegments.add(transactionName);

    @Cleanup
    TestContext context = new TestContext();
    setupOperationLog(context);

    // 1. Unable to access storage.
    context.storage.getInfoHandler = sn -> FutureHelpers.failedFuture(new IntentionalException());
    AssertExtensions.assertThrows(
            "getOrAssignStreamSegmentId did not throw the right exception when the Storage access failed.",
            () -> context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT),
            ex -> ex instanceof IntentionalException);
    AssertExtensions.assertThrows(
            "getOrAssignStreamSegmentId did not throw the right exception when the Storage access failed.",
            () -> context.mapper.getOrAssignStreamSegmentId(transactionName, TIMEOUT),
            ex -> ex instanceof IntentionalException);

    // 2a. StreamSegmentNotExists (Stand-Alone segment)
    setupStorageGetHandler(context, storageSegments,
            sn -> new StreamSegmentInformation(sn, 0, false, false, new ImmutableDate()));
    AssertExtensions.assertThrows(
            "getOrAssignStreamSegmentId did not throw the right exception for a non-existent stand-alone StreamSegment.",
            () -> context.mapper.getOrAssignStreamSegmentId(segmentName + "foo", TIMEOUT),
            ex -> ex instanceof StreamSegmentNotExistsException);

    // 2b. Transaction does not exist.
    final String inexistentTransactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
            UUID.randomUUID());
    AssertExtensions.assertThrows(
            "getOrAssignStreamSegmentId did not throw the right exception for a non-existent Transaction.",
            () -> context.mapper.getOrAssignStreamSegmentId(inexistentTransactionName, TIMEOUT),
            ex -> ex instanceof StreamSegmentNotExistsException);

    // 2c. Transaction exists, but not its parent.
    final String noValidParentTransactionName = StreamSegmentNameUtils.getTransactionNameFromId("foo",
            UUID.randomUUID());
    storageSegments.add(noValidParentTransactionName);
    AssertExtensions.assertThrows(
            "getOrAssignStreamSegmentId did not throw the right exception for a Transaction with an inexistent parent.",
            () -> context.mapper.getOrAssignStreamSegmentId(noValidParentTransactionName, TIMEOUT),
            ex -> ex instanceof StreamSegmentNotExistsException);

    // 2d. Attribute fetch failure.
    val testStateStore = new TestStateStore();
    val badMapper = new StreamSegmentMapper(context.metadata, context.operationLog, testStateStore,
            context.noOpMetadataCleanup, context.storage, executorService());
    val segmentName2 = segmentName + "2";
    val transactionName2 = StreamSegmentNameUtils.getTransactionNameFromId(segmentName2, UUID.randomUUID());
    context.storage.getInfoHandler = sn -> CompletableFuture
            .completedFuture(new StreamSegmentInformation(sn, 0, false, false, new ImmutableDate()));
    testStateStore.getHandler = () -> FutureHelpers.failedFuture(new IntentionalException("intentional"));

    AssertExtensions.assertThrows(
            "getOrAssignStreamSegmentId did not throw the right exception for a Segment when attributes could not be retrieved.",
            () -> badMapper.getOrAssignStreamSegmentId(segmentName2, TIMEOUT),
            ex -> ex instanceof IntentionalException);

    AssertExtensions.assertThrows(
            "getOrAssignStreamSegmentId did not throw the right exception for a Transaction when attributes could not be retrieved.",
            () -> badMapper.getOrAssignStreamSegmentId(transactionName2, TIMEOUT),
            ex -> ex instanceof IntentionalException);
}
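
In this test, completedFuture supplies the success path for the stubbed Storage handler: getInfoHandler is first pointed at FutureHelpers.failedFuture to simulate inaccessible Storage, and later at CompletableFuture.completedFuture(new StreamSegmentInformation(...)) so that the segment metadata lookup succeeds and the failure is instead injected into the attribute fetch via the TestStateStore.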

From source file:io.sqp.client.impl.SqpConnectionImpl.java

CompletableFuture<Void> closeServerResources(Collection<CloseableServerResource> resources) {
    if (resources.isEmpty()) {
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();

    List<String> cursorIds = resources.stream().filter(r -> r instanceof CursorImpl)
            .map(CloseableServerResource::getId).collect(Collectors.toList());
    List<String> statementIds = resources.stream().filter(r -> r instanceof PreparedStatementImpl)
            .map(CloseableServerResource::getId).collect(Collectors.toList());
    // now set their status to closed
    resources.forEach(CloseableServerResource::setClosed);

    // remove them from the open resource list
    // TODO: wait for success?
    cursorIds.forEach(_openServerResources::remove);
    statementIds.forEach(_openServerResources::remove);

    send(new ReleaseMessage(cursorIds, statementIds), new ConfirmationResponseHandler(future,
            MessageType.ReleaseCompleteMessage, "waiting for a cursor/statement release confirmation"));
    return future;
}
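
completedFuture(null) is the early exit here: when there are no server resources to release, the caller still gets a CompletableFuture<Void> it can compose on, and no ReleaseMessage is sent.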

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

/**
 * Ping a txn thereby updating its timeout to current time + lease.
 *
 * Post-condition:
 * 1. If ping request completes successfully, then
 *     (a) txn timeout is set to lease + current time in timeout service,
 *     (b) txn version in timeout service equals version of txn node in store,
 *     (c) if txn's timeout was not previously tracked in timeout service of current process,
 *     then version of txn node in store is updated, thus fencing out other processes tracking timeout for this txn,
 *     (d) txn is present in the host-txn index of current host,
 *
 * 2. If process fails before responding to the client, then since txn is present in the host-txn index,
 * some other controller process shall abort the txn after maxLeaseValue
 *
 * Store read/update operation is not invoked on receiving ping request for a txn that is being tracked in the
 * timeout service. Otherwise, if the txn is not being tracked in the timeout service, txn node is read from
 * the store and updated.
 *
 * @param scope      scope name.
 * @param stream     stream name.
 * @param txnId      txn id.
 * @param lease      txn lease.
 * @param ctx        context.
 * @return           ping status.
 */
CompletableFuture<PingTxnStatus> pingTxnBody(final String scope, final String stream, final UUID txnId,
        final long lease, final OperationContext ctx) {
    if (!timeoutService.isRunning()) {
        return CompletableFuture.completedFuture(createStatus(Status.DISCONNECTED));
    }

    if (timeoutService.containsTxn(scope, stream, txnId)) {
        // If timeout service knows about this transaction, attempt to increase its lease.
        log.debug("Txn={}, extending lease in timeout service", txnId);
        return CompletableFuture.completedFuture(timeoutService.pingTxn(scope, stream, txnId, lease));
    } else {
        // Otherwise, fence other potential processes managing timeout for this txn, and update its lease.
        log.debug("Txn={}, updating txn node in store and extending lease", txnId);
        return fenceTxnUpdateLease(scope, stream, txnId, lease, ctx);
    }
}
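
Both fast paths wrap an already-known PingTxnStatus with completedFuture: one when the timeout service is not running (DISCONNECTED), and one when the service already tracks the transaction and only its lease needs extending; the remaining case falls through to the asynchronous fenceTxnUpdateLease.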

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public void onObjectBatch(final Stream<Tuple2<Long, IBatchRecord>> batch, final Optional<Integer> batch_size,
        final Optional<JsonNode> grouping_key) {
    if (_deduplication_is_disabled.get()) {
        // no deduplication, generally shouldn't be here...
        //.. but if we are, make do the best we can
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), Optional.empty()));
        return;
    }

    // Create big query

    final Tuple3<QueryComponent<JsonNode>, List<Tuple2<JsonNode, Tuple2<Long, IBatchRecord>>>, Either<String, List<String>>> fieldinfo_dedupquery_keyfields = getDedupQuery(
            batch, _dedup_fields.get(), _db_mapper.get());

    // Get duplicate results

    final Tuple2<List<String>, Boolean> fields_include = getIncludeFields(_policy.get(), _dedup_fields.get(),
            _timestamp_field.get());

    final CompletableFuture<Iterator<JsonNode>> dedup_res = fieldinfo_dedupquery_keyfields._2().isEmpty()
            ? CompletableFuture.completedFuture(Collections.<JsonNode>emptyList().iterator())
            : _dedup_context.get().getObjectsBySpec(fieldinfo_dedupquery_keyfields._1(), fields_include._1(),
                    fields_include._2()).thenApply(cursor -> cursor.iterator());

    // Wait for it to finish

    //(create handy results structure if so)
    final LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = fieldinfo_dedupquery_keyfields
            ._2().stream()
            .collect(Collector.of(
                    () -> new LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>>(),
                    (acc, t2) -> {
                        // (ie only the first element is added, duplicate elements are removed)
                        final Tuple3<Long, IBatchRecord, ObjectNode> t3 = Tuples._3T(t2._2()._1(), t2._2()._2(),
                                _mapper.createObjectNode());
                        acc.compute(t2._1(), (k, v) -> {
                            final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_list = (null == v)
                                    ? new LinkedList<>()
                                    : v;
                            new_list.add(t3);
                            return new_list;
                        });
                    }, (map1, map2) -> {
                        map1.putAll(map2);
                        return map1;
                    }));

    //TODO (ALEPH-20): add timestamps to annotation
    //TODO (ALEPH-20): support different timestamp fields for the different buckets
    //TODO (ALEPH-20): really need to support >1 current enrichment job 
    //                 ^^(Really really longer term you should be able to decide what objects you want and what you don't  <- NOTE: don't remember what i meant here)

    final Iterator<JsonNode> cursor = dedup_res.join();

    // Handle the results

    final Stream<JsonNode> records_to_delete = Lambdas.get(() -> {
        if (isCustom(_doc_schema.get().deduplication_policy())
                || _doc_schema.get().delete_unhandled_duplicates()) {
            return Optionals.streamOf(cursor, true)
                    .collect(Collectors.groupingBy(
                            ret_obj -> getKeyFieldsAgain(ret_obj, fieldinfo_dedupquery_keyfields._3())))
                    .entrySet().stream().<JsonNode>flatMap(kv -> {

                        final Optional<JsonNode> maybe_key = kv.getKey();
                        final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                                .map(key -> mutable_obj_map.get(key));

                        // Stats:
                        _mutable_stats.duplicate_keys++;
                        _mutable_stats.duplicates_existing += kv.getValue().size();
                        _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                        //DEBUG
                        //System.out.println("?? " + kv.getValue().size() + " vs " + maybe_key + " vs " + matching_records.map(x -> Integer.toString(x.size())).orElse("(no match)"));

                        return matching_records
                                .<Stream<JsonNode>>map(records -> handleDuplicateRecord(_doc_schema.get(),
                                        _custom_handler.optional().map(
                                                handler -> Tuples._2T(handler, this._custom_context.get())),
                                        _timestamp_field.get(), records, kv.getValue(), maybe_key.get(),
                                        mutable_obj_map))
                                .orElse(Stream.empty());
                    });
        } else {
            Optionals.streamOf(cursor, true).forEach(ret_obj -> {
                final Optional<JsonNode> maybe_key = getKeyFieldsAgain(ret_obj,
                        fieldinfo_dedupquery_keyfields._3());
                final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                        .map(key -> mutable_obj_map.get(key));

                //DEBUG
                //System.out.println("?? " + ret_obj + " vs " + maybe_key + " vs " + matching_record.map(x -> x._2().getJson().toString()).orElse("(no match)"));

                // Stats:
                _mutable_stats.duplicate_keys++;
                _mutable_stats.duplicates_existing++;
                _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                matching_records.ifPresent(records -> handleDuplicateRecord(_doc_schema.get(),
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        _timestamp_field.get(), records, Arrays.asList(ret_obj), maybe_key.get(),
                        mutable_obj_map));
            });
            return Stream.<JsonNode>empty();
        }
    });

    final List<Object> ids = records_to_delete.map(j -> jsonToObject(j)).filter(j -> null != j)
            .collect(Collectors.toList());

    if (!ids.isEmpty()) { // fire a bulk deletion request
        mutable_uncompleted_deletes.add(
                _dedup_context.get().deleteObjectsBySpec(CrudUtils.allOf().withAny(AnnotationBean._ID, ids)));

        _mutable_stats.deleted += ids.size();

        //(quickly see if we can reduce the number of outstanding requests)
        final Iterator<CompletableFuture<Long>> it = mutable_uncompleted_deletes.iterator();
        while (it.hasNext()) {
            final CompletableFuture<Long> cf = it.next();
            if (cf.isDone()) {
                it.remove();
            } else
                break; // ie stop as soon as we hit one that isn't complete)
        }
    }

    _mutable_stats.nonduplicate_keys += mutable_obj_map.size();

    if (Optional.ofNullable(_doc_schema.get().custom_finalize_all_objects()).orElse(false)) {
        mutable_obj_map.entrySet().stream()
                .forEach(kv -> handleCustomDeduplication(
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        kv.getValue(), Collections.emptyList(), kv.getKey()));
    } else { // Just emit the last element of each grouped object set
        mutable_obj_map.values().stream().map(t -> t.peekLast())
                .forEach(t -> _context.get().emitImmutableObject(t._1(), t._2().getJson(), Optional.of(t._3()),
                        Optional.empty(), Optional.empty()));
    }
}
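
The completedFuture call near the top is the relevant one: when there is nothing to look up, an empty iterator is wrapped with CompletableFuture.completedFuture so that the later dedup_res.join() behaves the same whether or not a real getObjectsBySpec query was issued.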

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@Override
public CompletableFuture<Cursor<O>> getObjectsBySpec(final QueryComponent<O> spec,
        final List<String> field_list, final boolean include) {
    try {
        final Tuple2<DBObject, DBObject> query_and_meta = MongoDbUtils.convertToMongoQuery(spec);
        final DBCursor<O> cursor = Optional.of(Patterns.match(query_and_meta).<DBCursor<O>>andReturn()
                .when(qm -> field_list.isEmpty(), qm -> _state.coll.find(qm._1())).otherwise(qm -> {
                    final BasicDBObject fields = getFields(field_list, include);
                    return _state.coll.find(qm._1(), fields);
                }))
                // (now we're processing on a cursor "c")
                .map(c -> {
                    final DBObject sort = (DBObject) query_and_meta._2().get("$sort");
                    return (null != sort) ? c.sort(sort) : c;
                }).map(c -> {
                    final Long limit = (Long) query_and_meta._2().get("$limit");
                    return (null != limit) ? c.limit(limit.intValue()) : c;
                }).get();

        return CompletableFuture.completedFuture(new MongoDbCursor<O>(cursor));
    } catch (Exception e) {
        return FutureUtils.<Cursor<O>>returnError(e);
    }
}
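
The underlying MongoDB find call is synchronous, so the resulting cursor is wrapped in completedFuture to satisfy the asynchronous getObjectsBySpec contract; any exception is converted into a failed future via FutureUtils.returnError instead.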

From source file:com.ikanow.aleph2.harvest.script.services.TestScriptHarvestService.java

private static IHarvestContext getFakeContext() {
    return new IHarvestContext() {

        @Override
        public <T> Optional<T> getUnderlyingPlatformDriver(Class<T> driver_class,
                Optional<String> driver_options) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public Collection<Object> getUnderlyingArtefacts() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public void sendObjectToStreamingPipeline(Optional<DataBucketBean> bucket,
                Either<JsonNode, Map<String, Object>> object) {
            // TODO Auto-generated method stub

        }

        @Override
        public void initializeNewContext(String signature) {
            // TODO Auto-generated method stub

        }

        @Override
        public String getTempOutputLocation(Optional<DataBucketBean> bucket) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public SharedLibraryBean getTechnologyLibraryConfig() {
            //            _globals.set(BeanTemplateUtils.from(Optional.ofNullable(context.getTechnologyLibraryConfig().library_config()).orElse(Collections.emptyMap()), ScriptHarvesterConfigBean.class).get());
            //            Map<String, Object> library_config = new HashMap<String, Object>();
            //            library_config.put("", "")
            return new SharedLibraryBean(null, null, null, null, null, null, null, null, null, null, null);
        }

        @Override
        public IServiceContext getServiceContext() {
            final MockServiceContext context = new MockServiceContext();
            context.addService(IStorageService.class, Optional.empty(), getFakeStorageService());
            context.addGlobals(BeanTemplateUtils.build(GlobalPropertiesBean.class)
                    .with(GlobalPropertiesBean::local_root_dir,
                            System.getProperty("java.io.tmpdir") + File.separator)
                    .done().get());
            return context;
        }

        @Override
        public <S> Optional<ICrudService<S>> getLibraryObjectStore(Class<S> clazz, String name_or_id,
                Optional<String> collection) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public Map<String, SharedLibraryBean> getLibraryConfigs() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public CompletableFuture<Map<String, String>> getHarvestLibraries(Optional<DataBucketBean> bucket) {
            return CompletableFuture.completedFuture(new HashMap<String, String>());
        }

        @Override
        public String getHarvestContextSignature(Optional<DataBucketBean> bucket,
                Optional<Set<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>>> services) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public List<String> getHarvestContextLibraries(
                Optional<Set<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>>> services) {
            return new ArrayList<String>();
        }

        @Override
        public <S> ICrudService<S> getGlobalHarvestTechnologyObjectStore(Class<S> clazz,
                Optional<String> collection) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public String getFinalOutputLocation(Optional<DataBucketBean> bucket) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public CompletableFuture<DataBucketStatusBean> getBucketStatus(Optional<DataBucketBean> bucket) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public <S> ICrudService<S> getBucketObjectStore(Class<S> clazz, Optional<DataBucketBean> bucket,
                Optional<String> collection, Optional<StateDirectoryType> type) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public Optional<DataBucketBean> getBucket() {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public CompletableFuture<?> flushBatchOutput(Optional<DataBucketBean> bucket) {
            // TODO Auto-generated method stub
            return null;
        }

        @Override
        public void emitObject(Optional<DataBucketBean> bucket, Either<JsonNode, Map<String, Object>> object) {
            // TODO Auto-generated method stub

        }

        @Override
        public void emergencyQuarantineBucket(Optional<DataBucketBean> bucket, String quarantine_duration) {
            // TODO Auto-generated method stub

        }

        @Override
        public void emergencyDisableBucket(Optional<DataBucketBean> bucket) {
            // TODO Auto-generated method stub

        }

        @Override
        public IBucketLogger getLogger(Optional<DataBucketBean> bucket) {
            // TODO Auto-generated method stub
            return null;
        }
    };
}
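
In this mock IHarvestContext, getHarvestLibraries returns completedFuture(new HashMap<String, String>()), so test code blocking on the future receives an empty map immediately, while most of the other methods are left as unimplemented stubs.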

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

private CompletableFuture<ImmutablePair<Integer, Integer>> isScaleRerun(final List<Integer> sealedSegments,
        final List<SimpleEntry<Double, Double>> newRanges, final Data<T> segmentTable,
        final Data<T> historyTable, final int activeEpoch) {
    int nextSegmentNumber;
    if (TableHelper.isRerunOf(sealedSegments, newRanges, historyTable.getData(), segmentTable.getData())) {
        // rerun means segment table is already updated. No need to do anything
        nextSegmentNumber = TableHelper.getSegmentCount(segmentTable.getData()) - newRanges.size();
        return CompletableFuture.completedFuture(new ImmutablePair<>(activeEpoch, nextSegmentNumber));
    } else {
        return FutureHelpers.failedFuture(new ScaleOperationExceptions.ScaleStartException());
    }
}
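
When the scale operation is a rerun, the segment table is already up to date, so the (active epoch, next segment number) pair is returned through completedFuture; otherwise the method returns a failed future carrying a ScaleStartException.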

From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java

/**
 * Delete a stream. Precondition for deleting a stream is that the stream should be sealed.
 *
 * @param scope      scope.
 * @param stream     stream name.
 * @param contextOpt optional context
 * @return delete status.
 */
public CompletableFuture<DeleteStreamStatus.Status> deleteStream(final String scope, final String stream,
        final OperationContext contextOpt) {
    final OperationContext context = contextOpt == null ? streamMetadataStore.createContext(scope, stream)
            : contextOpt;

    return streamMetadataStore.getState(scope, stream, false, context, executor).thenCompose(state -> {
        if (!state.equals(State.SEALED)) {
            return CompletableFuture.completedFuture(false);
        } else {
            return writeEvent(new DeleteStreamEvent(scope, stream)).thenApply(x -> true);
        }
    }).thenCompose(result -> {
        if (result) {
            return checkDone(() -> isDeleted(scope, stream)).thenApply(x -> DeleteStreamStatus.Status.SUCCESS);
        } else {
            return CompletableFuture.completedFuture(DeleteStreamStatus.Status.STREAM_NOT_SEALED);
        }
    }).exceptionally(ex -> {
        log.warn("Exception thrown while deleting stream: {}", ex.getMessage());
        return handleDeleteStreamError(ex);
    });
}
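
Within the thenCompose chain, completedFuture(false) and completedFuture(DeleteStreamStatus.Status.STREAM_NOT_SEALED) keep every branch returning a future of the same shape, so a stream that is not yet sealed short-circuits without writing a DeleteStreamEvent.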

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_Buckets.java

/** Create a new bucket
 * @param key
 * @param bucket_mgmt
 * @param bucket_status_mgmt
 * @param source_db
 * @return
 */
protected static ManagementFuture<Supplier<Object>> createNewBucket(final String key,
        final IManagementCrudService<DataBucketBean> bucket_mgmt,
        final IManagementCrudService<DataBucketStatusBean> underlying_bucket_status_mgmt,
        final ICrudService<JsonNode> source_db) {
    _logger.info(ErrorUtils.get("Found new source {0}, creating bucket", key));

    // Create a status bean:

    final SingleQueryComponent<JsonNode> v1_query = CrudUtils.allOf().when("key", key);
    return FutureUtils.denestManagementFuture(source_db.getObjectBySpec(v1_query)
            .<ManagementFuture<Supplier<Object>>>thenApply(Lambdas.wrap_u(jsonopt -> {
                final DataBucketBean new_object = getBucketFromV1Source(jsonopt.get());
                final boolean is_now_suspended = safeJsonGet("searchCycle_secs", jsonopt.get()).asInt(1) < 0;

                final DataBucketStatusBean status_bean = BeanTemplateUtils.build(DataBucketStatusBean.class)
                        .with(DataBucketStatusBean::_id, new_object._id())
                        .with(DataBucketStatusBean::bucket_path, new_object.full_name())
                        .with(DataBucketStatusBean::suspended, is_now_suspended).done().get();

                return FutureUtils.denestManagementFuture(
                        underlying_bucket_status_mgmt.storeObject(status_bean, true).thenApply(__ -> {
                            final ManagementFuture<Supplier<Object>> ret = bucket_mgmt.storeObject(new_object);
                            return ret;
                        }));
            })).exceptionally(e -> {
                return FutureUtils
                        .<Supplier<Object>>createManagementFuture(
                                FutureUtils.returnError(new RuntimeException(e)),
                                CompletableFuture.completedFuture(Arrays.asList(new BasicMessageBean(new Date(),
                                        false, "IkanowV1SyncService_Buckets", "createNewBucket", null,
                                        ErrorUtils.getLongForm("{0}", e), null))));
            }));
}
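
Here completedFuture feeds the side channel of the management future: on failure, the error result is paired with an already-completed list holding a single BasicMessageBean that describes what went wrong.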

From source file:io.flutter.inspector.DiagnosticsNode.java

public CompletableFuture<ArrayList<DiagnosticsNode>> getChildren() {
    if (children == null) {
        if (json.has("children")) {
            final JsonArray jsonArray = json.get("children").getAsJsonArray();
            final ArrayList<DiagnosticsNode> nodes = new ArrayList<>();
            for (JsonElement element : jsonArray) {
                nodes.add(new DiagnosticsNode(element.getAsJsonObject(), inspectorService, false));
            }
            children = CompletableFuture.completedFuture(nodes);
        } else if (hasChildren()) {
            children = inspectorService.getChildren(getDartDiagnosticRef(), isSummaryTree());
        } else {
            // Known to have no children so we can provide the children immediately.
            children = CompletableFuture.completedFuture(new ArrayList<>());
        }
    }
    return children;
}
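
The children future is computed once and cached: when the children are embedded in the JSON, or the node is known to have none, completedFuture supplies them immediately, and only the remaining case defers to inspectorService.getChildren.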