Example usage for java.util.concurrent.CompletableFuture: the CompletableFuture() constructor

List of usage examples for the no-argument constructor of java.util.concurrent.CompletableFuture

Introduction

This page collects usage examples for the no-argument constructor of java.util.concurrent.CompletableFuture, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
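
A typical use of the no-argument constructor is to bridge a callback-style API to a CompletableFuture: allocate an incomplete future, hand it to the producer, and complete it (normally or exceptionally) when the result arrives. The examples below all follow variations of that pattern. Here is a minimal, self-contained sketch of the idea; all names in it are illustrative and not taken from the examples.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class IncompleteFutureExample {

    // Create an incomplete future and let another thread complete it later.
    static CompletableFuture<String> fetchGreeting(ExecutorService executor) {
        CompletableFuture<String> future = new CompletableFuture<>();
        executor.execute(() -> {
            try {
                future.complete("hello");        // normal completion
            } catch (RuntimeException e) {
                future.completeExceptionally(e); // error completion
            }
        });
        return future;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        try {
            System.out.println(fetchGreeting(executor).get()); // prints "hello"
        } finally {
            executor.shutdown();
        }
    }
}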

Usage

From source file: com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> storeObjects(final List<O> new_objects,
        final boolean replace_if_present) {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "storeObjects");

        final BulkRequestBuilder brb = new_objects.stream()
                .reduce(_state.client.prepareBulk().setConsistencyLevel(WriteConsistencyLevel.ONE)
                        .setRefresh(CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy),
                        (acc, val) -> acc.add(singleObjectIndexRequest(Either.left(rw_context),
                                Either.left(val), replace_if_present, true)),
                        (acc1, acc2) -> {
                            throw new RuntimeException("Internal logic error - Parallel not supported");
                        });

        final BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>> action_handler = new BiConsumer<BulkResponse, CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>>() {
            // WARNING: mutable/imperative code ahead...
            long _curr_written = 0;
            List<Object> _id_list = null;
            HashMap<String, String> _mapping_failures = null;

            @Override
            public void accept(final BulkResponse result,
                    final CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>> future) {

                if (result.hasFailures() && (rw_context
                        .typeContext() instanceof ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext)) {
                    final ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext auto_context = (ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext) rw_context
                            .typeContext();
                    // Recursive builder in case I need to build a second batch of docs                        
                    BulkRequestBuilder brb2 = null;

                    if (null == _id_list) {
                        _id_list = new LinkedList<Object>();
                    }
                    HashMap<String, String> temp_mapping_failures = null;
                    final Iterator<BulkItemResponse> it = result.iterator();
                    while (it.hasNext()) {
                        final BulkItemResponse bir = it.next();
                        if (bir.isFailed()) {
                            if (bir.getFailure().getMessage().startsWith("MapperParsingException")) {
                                final Set<String> fixed_type_fields = rw_context.typeContext()
                                        .fixed_type_fields();
                                if (!fixed_type_fields.isEmpty()) {
                                    // Obtain the field name from the exception (if we fail then drop the record) 
                                    final String field = getFieldFromParsingException(
                                            bir.getFailure().getMessage());
                                    if ((null == field) || fixed_type_fields.contains(field)) {
                                        continue;
                                    }
                                } //(else roll on to...)                                                

                                // OK this is the case where I might be able to apply auto types:
                                if (null == brb2) {
                                    brb2 = _state.client.prepareBulk()
                                            .setConsistencyLevel(WriteConsistencyLevel.ONE).setRefresh(
                                                    CreationPolicy.AVAILABLE_IMMEDIATELY == _state.creation_policy);
                                }
                                String failed_json = null;
                                if (null == _mapping_failures) { // first time through, use item id to grab the objects from the original request
                                    if (null == temp_mapping_failures) {
                                        temp_mapping_failures = new HashMap<String, String>();
                                    }
                                    final ActionRequest<?> ar = brb.request().requests().get(bir.getItemId());
                                    if (ar instanceof IndexRequest) {
                                        IndexRequest ir = (IndexRequest) ar;
                                        failed_json = ir.source().toUtf8();
                                        temp_mapping_failures.put(bir.getId(), failed_json);
                                    }
                                } else { // have already grabbed all the failure _ids and stuck in a map
                                    failed_json = _mapping_failures.get(bir.getId());
                                }
                                if (null != failed_json) {
                                    brb2.add(singleObjectIndexRequest(
                                            Either.right(Tuples._2T(bir.getIndex(),
                                                    ElasticsearchContextUtils.getNextAutoType(
                                                            auto_context.getPrefix(), bir.getType()))),
                                            Either.right(Tuples._2T(bir.getId(), failed_json)), false, true));
                                }
                            }
                            // Ugh otherwise just silently fail I guess? 
                            //(should I also look for transient errors and resubmit them after a pause?!)
                        } else { // (this item worked)
                            _id_list.add(bir.getId());
                            _curr_written++;
                        }
                    }
                    if (null != brb2) { // found mapping errors to retry with
                        if (null == _mapping_failures) // (first level of recursion)
                            _mapping_failures = temp_mapping_failures;

                        // (note that if brb2.request().requests().isEmpty() this is an internal logic error, so it's OK to throw)
                        ElasticsearchFutureUtils.wrap(brb2.execute(), future, this, (error, future2) -> {
                            future2.completeExceptionally(error);
                        });
                    } else { // relative success, plus we've built the list anyway
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                } else { // No errors with this iteration of the bulk request         
                    _curr_written += result.getItems().length;

                    if (null == _id_list) { // This is the first bulk request, no recursion on failures, so can lazily create the list in case it isn't needed
                        final Supplier<List<Object>> get_objects = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).map(bir -> bir.getId())
                                    .collect(Collectors.toList());
                        };
                        final Supplier<Long> get_count_workaround = () -> {
                            return StreamSupport.stream(result.spliterator(), false)
                                    .filter(bir -> !bir.isFailed()).collect(Collectors.counting());
                        };
                        get_count_workaround.get();
                        future.complete(Tuples._2T(get_objects, get_count_workaround));
                    } else { // have already calculated everything so just return it                     
                        future.complete(Tuples._2T(() -> _id_list, () -> (Long) _curr_written));
                    }
                }
            }
        };

        return ElasticsearchFutureUtils.wrap(brb.execute(),
                new CompletableFuture<Tuple2<Supplier<List<Object>>, Supplier<Long>>>(), action_handler,
                (error, future) -> {
                    future.completeExceptionally(error);
                });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}
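
Stripped of the Elasticsearch specifics, storeObjects illustrates one recurring shape: allocate the result future up front, pass it into the asynchronous handler, and let the handler either complete it or launch another round (here, a retry bulk request) that completes the same future later. A reduced sketch of that retry-by-recursion shape, using purely illustrative names and a generic callback type rather than the project's ElasticsearchFutureUtils:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

public class RetryingFutureSketch {

    // Stand-in for an asynchronous, callback-driven operation.
    interface AsyncOp<T> {
        void execute(BiConsumer<T, Throwable> callback);
    }

    // Run op; on failure retry up to maxRetries times, surfacing the outcome
    // through one CompletableFuture allocated up front.
    static <T> CompletableFuture<T> runWithRetry(AsyncOp<T> op, int maxRetries) {
        CompletableFuture<T> result = new CompletableFuture<>();
        attempt(op, maxRetries, result);
        return result;
    }

    private static <T> void attempt(AsyncOp<T> op, int retriesLeft, CompletableFuture<T> result) {
        op.execute((value, error) -> {
            if (error == null) {
                result.complete(value);               // success: complete the shared future
            } else if (retriesLeft > 0) {
                attempt(op, retriesLeft - 1, result); // failure: recurse with the same future
            } else {
                result.completeExceptionally(error);  // out of retries: propagate the error
            }
        });
    }
}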

From source file: org.apache.pulsar.client.impl.ClientCnx.java

public CompletableFuture<Optional<SchemaInfo>> sendGetSchema(ByteBuf request, long requestId) {
    CompletableFuture<Optional<SchemaInfo>> future = new CompletableFuture<>();

    pendingGetSchemaRequests.put(requestId, future);

    ctx.writeAndFlush(request).addListener(writeFuture -> {
        if (!writeFuture.isSuccess()) {
            log.warn("{} Failed to send GetSchema request to broker: {}", ctx.channel(),
                    writeFuture.cause().getMessage());
            pendingGetSchemaRequests.remove(requestId); // remove from the same map the future was registered in
            future.completeExceptionally(writeFuture.cause());
        }
    });

    return future;
}
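
sendGetSchema shows the request/response correlation variant: a new incomplete future is registered under the request id before the write, the response handler later looks it up and completes it, and a failed write completes it exceptionally straight away. A small sketch of such a registry, with illustrative names only (not Pulsar's actual classes):

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class PendingRequestRegistry {

    private final Map<Long, CompletableFuture<String>> pending = new ConcurrentHashMap<>();

    // Register a future before sending the request so the response can find it.
    public CompletableFuture<String> register(long requestId) {
        CompletableFuture<String> future = new CompletableFuture<>();
        pending.put(requestId, future);
        return future;
    }

    // Called by the transport when the response (or an error) arrives.
    public void onResponse(long requestId, String payload, Throwable error) {
        CompletableFuture<String> future = pending.remove(requestId);
        if (future == null) {
            return; // unknown or already-failed request
        }
        if (error != null) {
            future.completeExceptionally(error);
        } else {
            future.complete(payload);
        }
    }
}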

From source file: com.yahoo.pulsar.broker.service.BrokerService.java

/**
 * Unload all the topics served by the broker service under the given service unit.
 *
 * @param serviceUnit
 *            the namespace bundle whose topics should be unloaded
 * @return a future completed with the number of topics that were unloaded
 */
public CompletableFuture<Integer> unloadServiceUnit(NamespaceBundle serviceUnit) {
    CompletableFuture<Integer> result = new CompletableFuture<Integer>();
    List<CompletableFuture<Void>> closeFutures = Lists.newArrayList();
    topics.forEach((name, topicFuture) -> {
        DestinationName topicName = DestinationName.get(name);
        if (serviceUnit.includes(topicName)) {
            // Topic needs to be unloaded
            log.info("[{}] Unloading topic", topicName);
            closeFutures.add(topicFuture.thenCompose(Topic::close));
        }
    });
    CompletableFuture<Void> aggregator = FutureUtil.waitForAll(closeFutures);
    aggregator.thenAccept(res -> result.complete(closeFutures.size())).exceptionally(ex -> {
        result.completeExceptionally(ex);
        return null;
    });
    return result;
}
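
unloadServiceUnit fans out one close future per matching topic, waits for all of them via FutureUtil.waitForAll, and then completes a hand-built future with a summary value (the count). The same shape can be expressed with the JDK's own CompletableFuture.allOf; a hedged sketch with illustrative names:

import java.util.List;
import java.util.concurrent.CompletableFuture;

public class AggregateAndCount {

    // Wait for every future in the list, then report how many there were.
    static CompletableFuture<Integer> closeAll(List<CompletableFuture<Void>> closeFutures) {
        CompletableFuture<Integer> result = new CompletableFuture<>();
        CompletableFuture
                .allOf(closeFutures.toArray(new CompletableFuture[0]))
                .whenComplete((ignored, error) -> {
                    if (error != null) {
                        result.completeExceptionally(error);
                    } else {
                        result.complete(closeFutures.size());
                    }
                });
        return result;
    }
}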

From source file: org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void manualTriggerWhileAutoInProgress() throws Exception {
    CompletableFuture<Void> slowOffload = new CompletableFuture<>();
    CountDownLatch offloadRunning = new CountDownLatch(1);
    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            offloadRunning.countDown();
            return slowOffload.thenCompose((res) -> super.offload(ledger, uuid, extraMetadata));
        }
    };

    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setOffloadAutoTriggerSizeThresholdBytes(100);
    config.setRetentionTime(10, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);

    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);

    // Ledger will roll twice, offload will run on first ledger after second closed
    for (int i = 0; i < 25; i++) {
        ledger.addEntry(buildEntry(10, "entry-" + i));
    }
    offloadRunning.await();

    for (int i = 0; i < 20; i++) {
        ledger.addEntry(buildEntry(10, "entry-" + i));
    }
    Position p = ledger.addEntry(buildEntry(10, "last-entry"));

    try {
        ledger.offloadPrefix(p);
        Assert.fail("Shouldn't have succeeded");
    } catch (ManagedLedgerException.OffloadInProgressException e) {
        // expected
    }

    slowOffload.complete(null);

    // eventually all ledgers over the threshold will be offloaded
    assertEventuallyTrue(() -> offloader.offloadedLedgers().size() == 3);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(2).getLedgerId()));

    // then a manual offload can run and offload the one ledger under the threshold
    ledger.offloadPrefix(p);

    Assert.assertEquals(offloader.offloadedLedgers().size(), 4);
    Assert.assertEquals(offloader.offloadedLedgers(),
            ImmutableSet.of(ledger.getLedgersInfoAsList().get(0).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(1).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(2).getLedgerId(),
                    ledger.getLedgersInfoAsList().get(3).getLedgerId()));
}
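
In this test the incomplete future slowOffload acts as a gate: the mocked offload chains onto it with thenCompose, so the offload cannot finish until the test calls slowOffload.complete(null), which keeps the automatic offload "in progress" long enough to provoke OffloadInProgressException. A minimal illustration of that gating trick, with hypothetical names:

import java.util.concurrent.CompletableFuture;

public class GateExample {
    public static void main(String[] args) {
        CompletableFuture<Void> gate = new CompletableFuture<>();

        // Work that must not proceed until the gate is opened.
        CompletableFuture<String> work = gate.thenApply(ignored -> "done");

        System.out.println(work.isDone()); // false: gate is still closed

        gate.complete(null);               // open the gate
        System.out.println(work.join());   // prints "done"
    }
}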

From source file: org.apache.bookkeeper.client.BookKeeper.java

/**
 * Synchronous open ledger call
 *
 * @see #asyncOpenLedger
 * @param lId
 *          ledger identifier
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @return a handle to the open ledger
 * @throws InterruptedException
 * @throws BKException
 */

public LedgerHandle openLedger(long lId, DigestType digestType, byte passwd[])
        throws BKException, InterruptedException {
    CompletableFuture<LedgerHandle> counter = new CompletableFuture<>();

    /*
     * Calls async open ledger
     */
    asyncOpenLedger(lId, digestType, passwd, new SyncOpenCallback(), counter);

    return SynchCallbackUtils.waitForResult(counter);
}
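
openLedger is the synchronous facade over asyncOpenLedger: the incomplete future acts as a one-shot latch that the callback completes and the caller blocks on (the real code blocks via SynchCallbackUtils.waitForResult). In plain JDK terms the shape looks roughly like this, with illustrative names:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Consumer;

public class SyncOverAsync {

    // Stand-in for an API that reports its result through a callback.
    static void openAsync(long id, Consumer<String> onDone) {
        new Thread(() -> onDone.accept("handle-" + id)).start();
    }

    // Synchronous wrapper: the future is the latch between callback and caller.
    static String openSync(long id) throws InterruptedException, ExecutionException {
        CompletableFuture<String> result = new CompletableFuture<>();
        openAsync(id, result::complete);
        return result.get(); // block until the callback completes the future
    }

    public static void main(String[] args) throws Exception {
        System.out.println(openSync(42L)); // prints "handle-42"
    }
}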

From source file: org.apache.hadoop.hbase.client.AsyncHBaseAdmin.java

private CompletableFuture<Void> waitProcedureResult(CompletableFuture<Long> procFuture) {
    CompletableFuture<Void> future = new CompletableFuture<>();
    procFuture.whenComplete((procId, error) -> {
        if (error != null) {
            future.completeExceptionally(error);
            return;
        }
        getProcedureResult(procId, future);
    });
    return future;
}
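
waitProcedureResult creates a fresh future because the final Void result is produced by a second asynchronous step (getProcedureResult) that only runs after the first future completes; whenComplete forwards failures immediately and otherwise hands the new future to that step. A compact sketch of the hand-off, with an illustrative placeholder for the second step:

import java.util.concurrent.CompletableFuture;

public class ForwardingSketch {

    // Derive a new future from 'source': failures are forwarded as-is, and a
    // successful value is handed to a second asynchronous step that will
    // eventually complete 'derived'.
    static CompletableFuture<Void> derive(CompletableFuture<Long> source) {
        CompletableFuture<Void> derived = new CompletableFuture<>();
        source.whenComplete((id, error) -> {
            if (error != null) {
                derived.completeExceptionally(error);
                return;
            }
            startSecondStep(id, derived); // completes 'derived' later
        });
        return derived;
    }

    // Placeholder for the follow-up asynchronous step.
    static void startSecondStep(long id, CompletableFuture<Void> result) {
        result.complete(null);
    }
}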

From source file: org.apache.pulsar.broker.service.persistent.PersistentTopic.java

/**
 * Delete the cursor ledger for a given subscription.
 *
 * @param subscriptionName
 *            Subscription for which the cursor ledger is to be deleted
 * @return a future indicating completion of the unsubscribe operation; completed exceptionally with a
 *         PersistenceException (wrapping the underlying ManagedLedgerException) if the cursor ledger delete fails
 */
@Override
public CompletableFuture<Void> unsubscribe(String subscriptionName) {
    CompletableFuture<Void> unsubscribeFuture = new CompletableFuture<>();

    ledger.asyncDeleteCursor(Codec.encode(subscriptionName), new DeleteCursorCallback() {
        @Override
        public void deleteCursorComplete(Object ctx) {
            if (log.isDebugEnabled()) {
                log.debug("[{}][{}] Cursor deleted successfully", topic, subscriptionName);
            }
            subscriptions.remove(subscriptionName);
            unsubscribeFuture.complete(null);
            lastActive = System.nanoTime();
        }

        @Override
        public void deleteCursorFailed(ManagedLedgerException exception, Object ctx) {
            if (log.isDebugEnabled()) {
                log.debug("[{}][{}] Error deleting cursor for subscription", topic, subscriptionName,
                        exception);
            }
            unsubscribeFuture.completeExceptionally(new PersistenceException(exception));
        }
    }, null);

    return unsubscribeFuture;
}

From source file: org.apache.bookkeeper.client.BookKeeper.java

/**
 * Synchronous, unsafe open ledger call
 *
 * @see #asyncOpenLedgerNoRecovery
 * @param lId
 *          ledger identifier
 * @param digestType
 *          digest type, either MAC or CRC32
 * @param passwd
 *          password
 * @return a handle to the open ledger
 * @throws InterruptedException
 * @throws BKException
 */

public LedgerHandle openLedgerNoRecovery(long lId, DigestType digestType, byte passwd[])
        throws BKException, InterruptedException {
    CompletableFuture<LedgerHandle> counter = new CompletableFuture<>();

    /*
     * Calls async open ledger
     */
    asyncOpenLedgerNoRecovery(lId, digestType, passwd, new SyncOpenCallback(), counter);

    return SynchCallbackUtils.waitForResult(counter);
}

From source file: org.apache.bookkeeper.client.LedgerHandle.java

public LedgerEntry readLastEntry() throws InterruptedException, BKException {
    long lastEntryId = getLastAddConfirmed();
    if (lastEntryId < 0) {
        // Ledger was empty, so there is no last entry to read
        throw new BKException.BKNoSuchEntryException();
    } else {
        CompletableFuture<Enumeration<LedgerEntry>> result = new CompletableFuture<>();
        asyncReadEntries(lastEntryId, lastEntryId, new SyncReadCallback(result), null);

        return SyncCallbackUtils.waitForResult(result).nextElement();
    }
}

From source file: org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

@Override
public synchronized void asyncOpenCursor(final String cursorName, final InitialPosition initialPosition,
        final OpenCursorCallback callback, final Object ctx) {
    try {
        checkManagedLedgerIsOpen();
        checkFenced();
    } catch (ManagedLedgerException e) {
        callback.openCursorFailed(e, ctx);
        return;
    }

    if (uninitializedCursors.containsKey(cursorName)) {
        uninitializedCursors.get(cursorName).thenAccept(cursor -> {
            callback.openCursorComplete(cursor, ctx);
        }).exceptionally(ex -> {
            callback.openCursorFailed((ManagedLedgerException) ex, ctx);
            return null;
        });
        return;
    }
    ManagedCursor cachedCursor = cursors.get(cursorName);
    if (cachedCursor != null) {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Cursor was already created {}", name, cachedCursor);
        }
        callback.openCursorComplete(cachedCursor, ctx);
        return;
    }

    // Create a new one and persist it
    if (log.isDebugEnabled()) {
        log.debug("[{}] Creating new cursor: {}", name, cursorName);
    }
    final ManagedCursorImpl cursor = new ManagedCursorImpl(bookKeeper, config, this, cursorName);
    CompletableFuture<ManagedCursor> cursorFuture = new CompletableFuture<>();
    uninitializedCursors.put(cursorName, cursorFuture);
    cursor.initialize(getLastPosition(), new VoidCallback() {
        @Override
        public void operationComplete() {
            log.info("[{}] Opened new cursor: {}", name, cursor);
            cursor.setActive();
            // Update the ack position (ignoring entries that were written while the cursor was being created)
            cursor.initializeCursorPosition(
                    initialPosition == InitialPosition.Latest ? getLastPositionAndCounter()
                            : getFirstPositionAndCounter());

            synchronized (this) {
                cursors.add(cursor);
                uninitializedCursors.remove(cursorName).complete(cursor);
            }
            callback.openCursorComplete(cursor, ctx);
        }

        @Override
        public void operationFailed(ManagedLedgerException exception) {
            log.warn("[{}] Failed to open cursor: {}", name, cursor);

            synchronized (this) {
                uninitializedCursors.remove(cursorName).completeExceptionally(exception);
            }
            callback.openCursorFailed(exception, ctx);
        }
    });
}
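
asyncOpenCursor also uses a map of CompletableFutures (uninitializedCursors) to ensure that concurrent opens of the same cursor share a single in-flight initialization: later callers attach to the existing future instead of starting a second initialization. A stripped-down sketch of that memoization pattern, with illustrative names:

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class InFlightCache {

    private final Map<String, CompletableFuture<String>> inFlight = new ConcurrentHashMap<>();

    // Start (or join) the initialization for a key; callers that arrive while it
    // is still running receive the same incomplete future.
    public CompletableFuture<String> open(String key) {
        return inFlight.computeIfAbsent(key, k -> new CompletableFuture<>());
    }

    // Called from the initialization callback when the work for 'key' finishes.
    public void finished(String key, String value, Throwable error) {
        CompletableFuture<String> future = inFlight.remove(key);
        if (future == null) {
            return; // nothing registered for this key
        }
        if (error != null) {
            future.completeExceptionally(error);
        } else {
            future.complete(value);
        }
    }
}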