Example usage for java.util.concurrent CompletableFuture isDone

List of usage examples for java.util.concurrent CompletableFuture isDone

Introduction

This page collects usage examples for java.util.concurrent CompletableFuture.isDone(), drawn from open source projects.

Prototype

public boolean isDone() 

Document

Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
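
Before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below) showing that isDone() reports true for all three completion modes, while companion methods distinguish them:

import java.util.concurrent.CompletableFuture;

public class IsDoneDemo {
    public static void main(String[] args) {
        // Normal completion
        CompletableFuture<String> normal = new CompletableFuture<>();
        normal.complete("value");

        // Exceptional completion
        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(new RuntimeException("boom"));

        // Cancellation
        CompletableFuture<String> cancelled = new CompletableFuture<>();
        cancelled.cancel(true);

        // isDone() is true in all three cases ...
        System.out.println(normal.isDone());    // true
        System.out.println(failed.isDone());    // true
        System.out.println(cancelled.isDone()); // true

        // ... but it does not say how the future completed:
        System.out.println(failed.isCompletedExceptionally()); // true
        System.out.println(cancelled.isCancelled());           // true
    }
}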

Usage

From source file:com.yahoo.pulsar.broker.service.ServerCnx.java

@Override
protected void handleCloseProducer(CommandCloseProducer closeProducer) {
    checkArgument(state == State.Connected);

    final long producerId = closeProducer.getProducerId();
    final long requestId = closeProducer.getRequestId();

    CompletableFuture<Producer> producerFuture = producers.get(producerId);
    if (producerFuture == null) {
        log.warn("[{}] Producer {} was not registered on the connection", remoteAddress, producerId);
        ctx.writeAndFlush(Commands.newError(requestId, ServerError.UnknownError,
                "Producer was not registered on the connection"));
        return;
    }

    if (!producerFuture.isDone() && producerFuture
            .completeExceptionally(new IllegalStateException("Closed producer before creation was complete"))) {
        // We have received a request to close the producer before its creation completed; we mark the
        // producer future as failed and tell the client the close succeeded. When the pending
        // create operation eventually completes, the new producer will be discarded.
        log.info("[{}] Closed producer {} before its creation was completed", remoteAddress, producerId);
        ctx.writeAndFlush(Commands.newSuccess(requestId));
        return;
    } else if (producerFuture.isCompletedExceptionally()) {
        log.info("[{}] Closed producer {} that already failed to be created", remoteAddress, producerId);
        ctx.writeAndFlush(Commands.newSuccess(requestId));
        return;
    }

    // Proceed with normal close; the producer was created successfully.
    Producer producer = producerFuture.getNow(null);
    log.info("[{}][{}] Closing producer on cnx {}", producer.getTopic(), producer.getProducerName(),
            remoteAddress);

    producer.close().thenAccept(v -> {
        log.info("[{}][{}] Closed producer on cnx {}", producer.getTopic(), producer.getProducerName(),
                remoteAddress);
        ctx.writeAndFlush(Commands.newSuccess(requestId));
        producers.remove(producerId, producerFuture);
    });
}
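
Note the idiom above: isDone() is only a cheap pre-check, and the atomic decision is the boolean returned by completeExceptionally(), which is true only for the caller that actually transitioned the future. A minimal standalone sketch of the same race (the names are illustrative, not from Pulsar):

import java.util.concurrent.CompletableFuture;

public class CloseBeforeCreateDemo {
    public static void main(String[] args) {
        CompletableFuture<String> producerFuture = new CompletableFuture<>();

        // A close request arrives while creation is still pending.
        if (!producerFuture.isDone() && producerFuture
                .completeExceptionally(new IllegalStateException("Closed before creation completed"))) {
            // This branch wins the race: the creation path will observe
            // the failed future and discard its result.
            System.out.println("Close won the race");
        }

        // The (slower) creation path now tries to complete the future;
        // complete() returns false because the future is already done.
        boolean created = producerFuture.complete("producer");
        System.out.println("Creation completed the future: " + created); // false
    }
}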

From source file:org.apache.distributedlog.TestTruncate.java

@Test(timeout = 60000)
public void testPurgeLogs() throws Exception {
    String name = "distrlog-purge-logs";
    URI uri = createDLMURI("/" + name);

    populateData(new HashMap<Long, DLSN>(), conf, name, 10, 10, false);

    DistributedLogManager distributedLogManager = createNewDLM(conf, name);

    List<LogSegmentMetadata> segments = distributedLogManager.getLogSegments();
    LOG.info("Segments before modifying completion time : {}", segments);

    ZooKeeperClient zkc = TestZooKeeperClientBuilder.newBuilder(conf).uri(uri).build();

    // Update completion time of first 5 segments
    long newTimeMs = System.currentTimeMillis() - 60 * 60 * 1000 * 2;
    for (int i = 0; i < 5; i++) {
        LogSegmentMetadata segment = segments.get(i);
        updateCompletionTime(zkc, segment, newTimeMs + i);
    }
    zkc.close();

    segments = distributedLogManager.getLogSegments();
    LOG.info("Segments after modifying completion time : {}", segments);

    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setRetentionPeriodHours(1);
    confLocal.setExplicitTruncationByApplication(false);

    DistributedLogManager dlm = createNewDLM(confLocal, name);
    AsyncLogWriter writer = dlm.startAsyncLogSegmentNonPartitioned();
    long txid = 1 + 10 * 10;
    for (int j = 1; j <= 10; j++) {
        Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(txid++)));
    }

    // Wait until the truncation task completes.
    BKAsyncLogWriter bkLogWriter = (BKAsyncLogWriter) writer;
    CompletableFuture<List<LogSegmentMetadata>> truncationAttempt = bkLogWriter.getLastTruncationAttempt();
    while (truncationAttempt == null || !truncationAttempt.isDone()) {
        TimeUnit.MILLISECONDS.sleep(20);
        truncationAttempt = bkLogWriter.getLastTruncationAttempt();
    }

    assertEquals(6, distributedLogManager.getLogSegments().size());

    Utils.close(writer);
    dlm.close();

    distributedLogManager.close();
}
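
Polling isDone() in a sleep loop, as above, is a simple way to wait for a task that may not even exist yet (the future reference itself can be null at first). A stripped-down sketch of the same wait loop; taskFuture() and the background scheduling are hypothetical stand-ins for getLastTruncationAttempt():

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class PollUntilDoneDemo {
    private static final AtomicReference<CompletableFuture<Void>> TASK = new AtomicReference<>();

    // Hypothetical accessor: returns the latest attempt, or null if none has started yet.
    static CompletableFuture<Void> taskFuture() {
        return TASK.get();
    }

    public static void main(String[] args) throws InterruptedException {
        // Simulate a task that is scheduled a little later and then finishes.
        CompletableFuture.runAsync(() -> {
            CompletableFuture<Void> task = new CompletableFuture<>();
            TASK.set(task);
            task.complete(null);
        });

        // Wait until the task exists and has completed, polling every 20 ms.
        CompletableFuture<Void> attempt = taskFuture();
        while (attempt == null || !attempt.isDone()) {
            TimeUnit.MILLISECONDS.sleep(20);
            attempt = taskFuture();
        }
        System.out.println("Task completed");
    }
}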

From source file:org.apache.flink.runtime.rest.RestServerEndpointITCase.java

/**
 * Tests that after calling {@link RestServerEndpoint#closeAsync()}, the handlers are closed
 * first, and we wait for in-flight requests to finish. As long as not all handlers are closed,
 * HTTP requests should be served.
 */
@Test
public void testShouldWaitForHandlersWhenClosing() throws Exception {
    testHandler.closeFuture = new CompletableFuture<>();
    final HandlerBlocker handlerBlocker = new HandlerBlocker(timeout);
    testHandler.handlerBody = id -> {
        // Intentionally schedule the work on a different thread. This is to simulate
        // handlers where the CompletableFuture is finished by the RPC framework.
        return CompletableFuture.supplyAsync(() -> {
            handlerBlocker.arriveAndBlock();
            return new TestResponse(id);
        });
    };

    // Initiate closing RestServerEndpoint but the test handler should block.
    final CompletableFuture<Void> closeRestServerEndpointFuture = serverEndpoint.closeAsync();
    assertThat(closeRestServerEndpointFuture.isDone(), is(false));

    final CompletableFuture<TestResponse> request = sendRequestToTestHandler(new TestRequest(1));
    handlerBlocker.awaitRequestToArrive();

    // Allow handler to close but there is still one in-flight request which should prevent
    // the RestServerEndpoint from closing.
    testHandler.closeFuture.complete(null);
    assertThat(closeRestServerEndpointFuture.isDone(), is(false));

    // Finish the in-flight request.
    handlerBlocker.unblockRequest();

    request.get(timeout.getSize(), timeout.getUnit());
    closeRestServerEndpointFuture.get(timeout.getSize(), timeout.getUnit());
}

From source file:org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

private void failOne(Action action, int tries, Throwable error, long currentTime, String extras) {
    CompletableFuture<T> future = action2Future.get(action);
    if (future.isDone()) {
        return;
    }
    ThrowableWithExtraContext errorWithCtx = new ThrowableWithExtraContext(error, currentTime, extras);
    List<ThrowableWithExtraContext> errors = removeErrors(action);
    if (errors == null) {
        errors = Collections.singletonList(errorWithCtx);
    } else {
        errors.add(errorWithCtx);
    }
    future.completeExceptionally(new RetriesExhaustedException(tries - 1, errors));
}

From source file:org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

private void failAll(Stream<Action> actions, int tries) {
    actions.forEach(action -> {
        CompletableFuture<T> future = action2Future.get(action);
        if (future.isDone()) {
            return;
        }
        future.completeExceptionally(new RetriesExhaustedException(tries,
                Optional.ofNullable(removeErrors(action)).orElse(Collections.emptyList())));
    });
}
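
In both failOne and failAll, the isDone() check is a fast-path guard: it skips building the error list for futures that already completed. It is not required for correctness, because completeExceptionally() is itself atomic and simply returns false on an already-completed future. A sketch of the guard, with a hypothetical buildError() standing in for RetriesExhaustedException:

import java.util.concurrent.CompletableFuture;

public class FailIfPendingDemo {

    // Hypothetical error factory; building the real error list may be expensive.
    static Throwable buildError() {
        return new RuntimeException("retries exhausted");
    }

    static void failOne(CompletableFuture<?> future) {
        if (future.isDone()) {
            return; // fast path: skip the error construction entirely
        }
        future.completeExceptionally(buildError());
    }

    public static void main(String[] args) {
        CompletableFuture<String> pending = new CompletableFuture<>();
        CompletableFuture<String> done = CompletableFuture.completedFuture("ok");

        failOne(pending); // fails the pending future
        failOne(done);    // returns early, leaves the result intact

        System.out.println(pending.isCompletedExceptionally()); // true
        System.out.println(done.join());                        // ok
    }
}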

From source file:org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.java

private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future, HRegionLocation loc) {
    if (future.isDone()) {
        return true;
    }
    boolean completed;
    if (req.locateType.equals(RegionLocateType.BEFORE)) {
        // For locating the row before the current row, the common case is to find the previous
        // region in a reverse scan, so we check the endKey first. In general, the condition should
        // be startKey < req.row and endKey >= req.row. Here we split it into endKey == req.row ||
        // (endKey > req.row && startKey < req.row). The two conditions are equivalent since
        // startKey < endKey.
        int c = Bytes.compareTo(loc.getRegionInfo().getEndKey(), req.row);
        completed = c == 0 || (c > 0 && Bytes.compareTo(loc.getRegionInfo().getStartKey(), req.row) < 0);
    } else {
        completed = loc.getRegionInfo().containsRow(req.row);
    }
    if (completed) {
        future.complete(loc);
        return true;
    } else {
        return false;
    }
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<CompactionState> getCompactionState(TableName tableName, CompactType compactType) {
    CompletableFuture<CompactionState> future = new CompletableFuture<>();

    switch (compactType) {
    case MOB:
        addListener(connection.registry.getMasterAddress(), (serverName, err) -> {
            if (err != null) {
                future.completeExceptionally(err);
                return;
            }
            RegionInfo regionInfo = RegionInfo.createMobRegionInfo(tableName);

            addListener(this.<GetRegionInfoResponse>newAdminCaller().serverName(serverName)
                    .action((controller, stub) -> this
                            .<GetRegionInfoRequest, GetRegionInfoResponse, GetRegionInfoResponse>adminCall(
                                    controller, stub,
                                    RequestConverter.buildGetRegionInfoRequest(regionInfo.getRegionName(),
                                            true),
                                    (s, c, req, done) -> s.getRegionInfo(controller, req, done), resp -> resp))
                    .call(), (resp2, err2) -> {
                        if (err2 != null) {
                            future.completeExceptionally(err2);
                        } else {
                            if (resp2.hasCompactionState()) {
                                future.complete(ProtobufUtil.createCompactionState(resp2.getCompactionState()));
                            } else {
                                future.complete(CompactionState.NONE);
                            }
                        }
                    });
        });
        break;
    case NORMAL:
        addListener(getTableHRegionLocations(tableName), (locations, err) -> {
            if (err != null) {
                future.completeExceptionally(err);
                return;
            }
            ConcurrentLinkedQueue<CompactionState> regionStates = new ConcurrentLinkedQueue<>();
            List<CompletableFuture<CompactionState>> futures = new ArrayList<>();
            locations.stream().filter(loc -> loc.getServerName() != null).filter(loc -> loc.getRegion() != null)
                    .filter(loc -> !loc.getRegion().isOffline()).map(loc -> loc.getRegion().getRegionName())
                    .forEach(region -> {
                        futures.add(getCompactionStateForRegion(region).whenComplete((regionState, err2) -> {
                            // If any region's compaction state is MAJOR_AND_MINOR,
                            // the table's compaction state is MAJOR_AND_MINOR, too.
                            if (err2 != null) {
                                future.completeExceptionally(unwrapCompletionException(err2));
                            } else if (regionState == CompactionState.MAJOR_AND_MINOR) {
                                future.complete(regionState);
                            } else {
                                regionStates.add(regionState);
                            }
                        }));
                    });
            addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[futures.size()])),
                    (ret, err3) -> {
                        // If the future is not completed yet, aggregate all regions' compaction states.
                        if (!future.isCompletedExceptionally() && !future.isDone()) {
                            CompactionState state = CompactionState.NONE;
                            for (CompactionState regionState : regionStates) {
                                switch (regionState) {
                                case MAJOR:
                                    if (state == CompactionState.MINOR) {
                                        future.complete(CompactionState.MAJOR_AND_MINOR);
                                    } else {
                                        state = CompactionState.MAJOR;
                                    }
                                    break;
                                case MINOR:
                                    if (state == CompactionState.MAJOR) {
                                        future.complete(CompactionState.MAJOR_AND_MINOR);
                                    } else {
                                        state = CompactionState.MINOR;
                                    }
                                    break;
                                case NONE:
                                default:
                                }
                            }
                            if (!future.isDone()) {
                                future.complete(state);
                            }
                        }
                    });
        });
        break;
    default:
        throw new IllegalArgumentException("Unknown compactType: " + compactType);
    }

    return future;
}
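
The NORMAL branch is a first-wins aggregation: any sub-future may complete the parent early (on an error, or on a MAJOR_AND_MINOR result), and after allOf the aggregate is applied only if nothing got there first, guarded by isDone(). A reduced sketch of that shape, with hypothetical integer inputs in place of region compaction states:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentLinkedQueue;

public class FirstWinsAggregationDemo {
    public static void main(String[] args) {
        CompletableFuture<Integer> result = new CompletableFuture<>();
        Queue<Integer> partials = new ConcurrentLinkedQueue<>();

        // Hypothetical per-region computations standing in for getCompactionStateForRegion.
        List<CompletableFuture<Integer>> inputs = Arrays.asList(
                CompletableFuture.supplyAsync(() -> 1),
                CompletableFuture.supplyAsync(() -> 2),
                CompletableFuture.supplyAsync(() -> 3));

        // As in the HBase code, collect the futures *returned by whenComplete*, so that
        // allOf also waits for the callbacks themselves to have run.
        List<CompletableFuture<Integer>> callbacks = new ArrayList<>();
        for (CompletableFuture<Integer> f : inputs) {
            callbacks.add(f.whenComplete((value, err) -> {
                if (err != null) {
                    result.completeExceptionally(err); // first error wins
                } else if (value == 42) {
                    result.complete(value);            // a short-circuiting value wins
                } else {
                    partials.add(value);               // otherwise collect for aggregation
                }
            }));
        }

        CompletableFuture.allOf(callbacks.toArray(new CompletableFuture<?>[0]))
                .whenComplete((ignored, err) -> {
                    // Apply the aggregate only if no callback already completed the result.
                    if (!result.isDone()) {
                        result.complete(partials.stream().mapToInt(Integer::intValue).sum());
                    }
                });

        System.out.println(result.join()); // 6
    }
}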

From source file:org.apache.hadoop.hbase.client.ZKAsyncRegistry.java

@Override
public CompletableFuture<RegionLocations> getMetaRegionLocation() {
    CompletableFuture<RegionLocations> future = new CompletableFuture<>();
    HRegionLocation[] locs = new HRegionLocation[znodePaths.metaReplicaZNodes.size()];
    MutableInt remaining = new MutableInt(locs.length);
    znodePaths.metaReplicaZNodes.forEach((replicaId, path) -> {
        if (replicaId == DEFAULT_REPLICA_ID) {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (error != null) {
                    future.completeExceptionally(error);
                    return;
                }
                if (proto == null) {
                    future.completeExceptionally(new IOException("Meta znode is null"));
                    return;
                }
                Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                    future.completeExceptionally(
                            new IOException("Meta region is in state " + stateAndServerName.getFirst()));
                    return;
                }
                locs[DEFAULT_REPLICA_ID] = new HRegionLocation(
                        getRegionInfoForDefaultReplica(FIRST_META_REGIONINFO), stateAndServerName.getSecond());
                tryComplete(remaining, locs, future);
            });
        } else {
            exec(zk.getData(), path, ZKAsyncRegistry::getMetaProto).whenComplete((proto, error) -> {
                if (future.isDone()) {
                    return;
                }
                if (error != null) {
                    LOG.warn("Failed to fetch " + path, error);
                    locs[replicaId] = null;
                } else if (proto == null) {
                    LOG.warn("Meta znode for replica " + replicaId + " is null");
                    locs[replicaId] = null;
                } else {
                    Pair<RegionState.State, ServerName> stateAndServerName = getStateAndServerName(proto);
                    if (stateAndServerName.getFirst() != RegionState.State.OPEN) {
                        LOG.warn("Meta region for replica " + replicaId + " is in state "
                                + stateAndServerName.getFirst());
                        locs[replicaId] = null;
                    } else {
                        locs[replicaId] = new HRegionLocation(
                                getRegionInfoForReplica(FIRST_META_REGIONINFO, replicaId),
                                stateAndServerName.getSecond());
                    }
                }
                tryComplete(remaining, locs, future);
            });
        }
    });
    return future;
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

private CompletableFuture<? extends Subscription> getNonDurableSubscription(String subscriptionName,
        MessageId startMessageId) {
    CompletableFuture<Subscription> subscriptionFuture = new CompletableFuture<>();
    log.info("[{}][{}] Creating non-durable subscription at msg id {}", topic, subscriptionName,
            startMessageId);

    // Create a new non-durable cursor only for the first consumer that connects
    Subscription subscription = subscriptions.computeIfAbsent(subscriptionName, name -> {
        MessageIdImpl msgId = startMessageId != null ? (MessageIdImpl) startMessageId
                : (MessageIdImpl) MessageId.latest;

        long ledgerId = msgId.getLedgerId();
        long entryId = msgId.getEntryId();
        if (msgId instanceof BatchMessageIdImpl) {
            // When the start message id points into a batch, we need to step back to the previous
            // message, because the batch might not have been consumed in its entirety.
            // The client will then be able to discard the first messages in the batch.
            if (((BatchMessageIdImpl) msgId).getBatchIndex() >= 0) {
                entryId = msgId.getEntryId() - 1;
            }
        }
        Position startPosition = new PositionImpl(ledgerId, entryId);
        ManagedCursor cursor = null;
        try {
            cursor = ledger.newNonDurableCursor(startPosition);
        } catch (ManagedLedgerException e) {
            subscriptionFuture.completeExceptionally(e);
        }

        return new PersistentSubscription(this, subscriptionName, cursor);
    });

    if (!subscriptionFuture.isDone()) {
        subscriptionFuture.complete(subscription);
    } else {
        // failed to initialize managed-cursor: clean up created subscription
        subscriptions.remove(subscriptionName);
    }

    return subscriptionFuture;
}
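
The interesting move here is using isDone() after computeIfAbsent to detect whether the factory lambda failed: the lambda cannot rethrow the checked ManagedLedgerException out of computeIfAbsent, so it reports failure by completing the future exceptionally, and the caller inspects the future's state afterwards. A compact sketch of that signalling pattern, with hypothetical names:

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

public class FactoryFailureSignalDemo {
    private static final Map<String, String> SUBSCRIPTIONS = new ConcurrentHashMap<>();

    static CompletableFuture<String> getOrCreate(String name, boolean failCreation) {
        CompletableFuture<String> future = new CompletableFuture<>();

        String subscription = SUBSCRIPTIONS.computeIfAbsent(name, n -> {
            if (failCreation) {
                // The lambda cannot throw a checked exception out of computeIfAbsent,
                // so it signals failure through the future instead.
                future.completeExceptionally(new IllegalStateException("cursor creation failed"));
            }
            return "subscription-" + n;
        });

        if (!future.isDone()) {
            future.complete(subscription); // creation succeeded
        } else {
            SUBSCRIPTIONS.remove(name);    // creation failed: undo the mapping
        }
        return future;
    }

    public static void main(String[] args) {
        System.out.println(getOrCreate("a", false).join());                    // subscription-a
        System.out.println(getOrCreate("b", true).isCompletedExceptionally()); // true
    }
}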

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

public LongRunningProcessStatus compactionStatus() {
    final CompletableFuture<Long> current;
    synchronized (this) {
        current = currentCompaction;
    }
    if (!current.isDone()) {
        return LongRunningProcessStatus.forStatus(LongRunningProcessStatus.Status.RUNNING);
    } else {
        try {
            if (current.join() == COMPACTION_NEVER_RUN) {
                return LongRunningProcessStatus.forStatus(LongRunningProcessStatus.Status.NOT_RUN);
            } else {
                return LongRunningProcessStatus.forStatus(LongRunningProcessStatus.Status.SUCCESS);
            }
        } catch (CancellationException | CompletionException e) {
            return LongRunningProcessStatus.forError(e.getMessage());
        }
    }
}
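
This last example pairs isDone() with join(): once isDone() has returned true, join() cannot block, so together they give a non-blocking way to classify a task as running, finished, or failed. A minimal sketch of the same status probe, with a hypothetical Status enum in place of LongRunningProcessStatus:

import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class StatusProbeDemo {

    enum Status { RUNNING, SUCCESS, ERROR }

    static Status status(CompletableFuture<Long> task) {
        if (!task.isDone()) {
            return Status.RUNNING;
        }
        try {
            task.join(); // cannot block: the future is already done
            return Status.SUCCESS;
        } catch (CancellationException | CompletionException e) {
            return Status.ERROR;
        }
    }

    public static void main(String[] args) {
        CompletableFuture<Long> running = new CompletableFuture<>();
        CompletableFuture<Long> done = CompletableFuture.completedFuture(1L);
        CompletableFuture<Long> failed = new CompletableFuture<>();
        failed.completeExceptionally(new RuntimeException("boom"));

        System.out.println(status(running)); // RUNNING
        System.out.println(status(done));    // SUCCESS
        System.out.println(status(failed));  // ERROR
    }
}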