Example usage for java.util.concurrent.CompletableFuture CompletableFuture()

Introduction

On this page you can find example usage for the java.util.concurrent.CompletableFuture no-argument constructor, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
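
The constructor only allocates an empty, incomplete future; some other piece of code must later call complete() or completeExceptionally() on it, and callers can wait with join() or get(). A minimal sketch of that hand-off; the class, thread, and value below are illustrative only and are not taken from any of the projects listed under Usage:

import java.util.concurrent.CompletableFuture;

public class ManualCompletionSketch {
    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();

        // Another thread eventually supplies the value or the failure.
        new Thread(() -> {
            try {
                String value = "done"; // stand-in for real work
                future.complete(value);
            } catch (Throwable t) {
                future.completeExceptionally(t); // propagate any failure to waiters
            }
        }).start();

        // join() blocks until complete(...) or completeExceptionally(...) is called.
        System.out.println(future.join());
    }
}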

Usage

From source file:org.springframework.ide.eclipse.boot.dash.test.CloudFoundryClientTest.java

private Future<Void> doAsync(Thunk task) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    Job job = new Job("Async task") {
        protected IStatus run(IProgressMonitor monitor) {
            try {
                task.call();
                result.complete(null);
            } catch (Throwable e) {
                result.completeExceptionally(e);
            }
            return Status.OK_STATUS;
        }
    };
    job.schedule();
    return result;
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

/**
 * Close this topic - close all producers and subscriptions associated with this topic
 *
 * @return Completable future indicating completion of close operation
 */
@Override
public CompletableFuture<Void> close() {
    CompletableFuture<Void> closeFuture = new CompletableFuture<>();

    lock.writeLock().lock();
    try {
        if (!isFenced) {
            isFenced = true;
        } else {
            log.warn("[{}] Topic is already being closed or deleted", topic);
            closeFuture.completeExceptionally(new TopicFencedException("Topic is already fenced"));
            return closeFuture;
        }
    } finally {
        lock.writeLock().unlock();
    }

    List<CompletableFuture<Void>> futures = Lists.newArrayList();

    replicators.forEach((cluster, replicator) -> futures.add(replicator.disconnect()));
    producers.forEach(producer -> futures.add(producer.disconnect()));
    subscriptions.forEach((s, sub) -> futures.add(sub.disconnect()));

    FutureUtil.waitForAll(futures).thenRun(() -> {
        // After having disconnected all producers/consumers, close the managed ledger
        ledger.asyncClose(new CloseCallback() {
            @Override
            public void closeComplete(Object ctx) {
                // Everything is now closed, remove the topic from map
                brokerService.removeTopicFromCache(topic);

                log.info("[{}] Topic closed", topic);
                closeFuture.complete(null);
            }

            @Override
            public void closeFailed(ManagedLedgerException exception, Object ctx) {
                log.error("[{}] Failed to close managed ledger, proceeding anyway.", topic, exception);
                brokerService.removeTopicFromCache(topic);
                closeFuture.complete(null);
            }
        }, null);

        if (dispatchRateLimiter.isPresent()) {
            dispatchRateLimiter.get().close();
        }
        if (subscribeRateLimiter.isPresent()) {
            subscribeRateLimiter.get().close();
        }

    }).exceptionally(exception -> {
        log.error("[{}] Error closing topic", topic, exception);
        isFenced = false;
        closeFuture.completeExceptionally(exception);
        return null;
    });

    return closeFuture;
}
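
The waitForAll call above plays the same role as the JDK's CompletableFuture.allOf: fan out the disconnects, then complete a manually created future once they have all finished. A JDK-only sketch of that shape, where the Resource type and its disconnect() method are stand-ins rather than Pulsar APIs:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;

class FanOutCloseSketch {

    interface Resource {
        CompletableFuture<Void> disconnect(); // assumed asynchronous close
    }

    CompletableFuture<Void> closeAll(List<Resource> resources) {
        CompletableFuture<Void> closeFuture = new CompletableFuture<>();

        List<CompletableFuture<Void>> futures = new ArrayList<>();
        resources.forEach(r -> futures.add(r.disconnect()));

        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenRun(() -> closeFuture.complete(null))
                .exceptionally(ex -> {
                    closeFuture.completeExceptionally(ex);
                    return null;
                });

        return closeFuture;
    }
}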

From source file:org.apache.distributedlog.lock.ZKSessionLock.java

/**
 * NOTE: unlockInternal should only be called after tryLock.
 */
private void unlockInternal(final CompletableFuture<Void> promise) {

    // already closed or expired, nothing to cleanup
    this.epochUpdater.incrementAndGet(this);
    if (null != watcher) {
        this.zkClient.unregister(watcher);
    }

    if (lockState.inState(State.CLOSED)) {
        promise.complete(null);
        return;
    }

    LOG.info("Lock {} for {} is closed from state {}.",
            new Object[] { lockId, lockPath, lockState.getState() });

    final boolean skipCleanup = lockState.inState(State.INIT) || lockState.inState(State.EXPIRED);

    lockState.transition(State.CLOSING);

    if (skipCleanup) {
        // Nothing to cleanup if INIT (never tried) or EXPIRED (ephemeral node
        // auto-removed)
        lockState.transition(State.CLOSED);
        promise.complete(null);
        return;
    }

    // In any other state, we should clean up the member node
    CompletableFuture<Void> deletePromise = new CompletableFuture<Void>();
    deleteLockNode(deletePromise);

    // Set the state to closed after we've cleaned up
    deletePromise.whenCompleteAsync(new FutureEventListener<Void>() {
        @Override
        public void onSuccess(Void complete) {
            lockState.transition(State.CLOSED);
            promise.complete(null);
        }

        @Override
        public void onFailure(Throwable cause) {
            // Delete failure is quite serious (causes lock leak) and should be
            // handled better
            LOG.error("lock node delete failed {} {}", lockId, lockPath);
            promise.complete(null);
        }
    }, lockStateExecutor.chooseThread(lockPath));
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

private CompletableFuture<Void> checkReplicationAndRetryOnFailure() {
    CompletableFuture<Void> result = new CompletableFuture<Void>();
    checkReplication().thenAccept(res -> {
        log.info("[{}] Policies updated successfully", topic);
        result.complete(null);
    }).exceptionally(th -> {
        log.error("[{}] Policies update failed {}, scheduled retry in {} seconds", topic, th.getMessage(),
                POLICY_UPDATE_FAILURE_RETRY_TIME_SECONDS, th);
        brokerService.executor().schedule(this::checkReplicationAndRetryOnFailure,
                POLICY_UPDATE_FAILURE_RETRY_TIME_SECONDS, TimeUnit.SECONDS);
        result.completeExceptionally(th);
        return null;
    });
    return result;
}
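
The method above completes the caller's future exceptionally right away but schedules another attempt in the background. A JDK-only sketch of that retry shape; checkSomething(), the scheduler, and the delay constant are placeholders:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class RetrySketch {

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private static final long RETRY_DELAY_SECONDS = 60; // placeholder delay

    CompletableFuture<Void> checkAndRetryOnFailure() {
        CompletableFuture<Void> result = new CompletableFuture<>();
        checkSomething().thenAccept(res -> result.complete(null)).exceptionally(th -> {
            // Report the failure to the current caller, but try again later.
            scheduler.schedule(this::checkAndRetryOnFailure, RETRY_DELAY_SECONDS, TimeUnit.SECONDS);
            result.completeExceptionally(th);
            return null;
        });
        return result;
    }

    // Placeholder for the asynchronous check being retried.
    CompletableFuture<Void> checkSomething() {
        return CompletableFuture.completedFuture(null);
    }
}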

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
    if (TableName.isMetaTableName(tableName)) {
        return CompletableFuture.completedFuture(true);
    }
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
        if (error != null) {
            future.completeExceptionally(error);
            return;
        }
        if (state.isPresent()) {
            future.complete(state.get().inStates(TableState.State.ENABLED));
        } else {
            future.completeExceptionally(new TableNotFoundException(tableName));
        }
    });
    return future;
}
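
addListener here is an HBase helper that attaches a (value, error) callback to an existing future; the same branching can be written directly against the JDK with whenComplete. The state lookup and the ENABLED comparison in this sketch are simplified stand-ins:

import java.util.Optional;
import java.util.concurrent.CompletableFuture;

class AdaptUpstreamSketch {

    // Turn an upstream Optional-producing lookup into a Boolean future,
    // mapping an absent value to an exceptional completion.
    CompletableFuture<Boolean> isEnabled(CompletableFuture<Optional<String>> stateLookup) {
        CompletableFuture<Boolean> future = new CompletableFuture<>();
        stateLookup.whenComplete((state, error) -> {
            if (error != null) {
                future.completeExceptionally(error);
                return;
            }
            if (state.isPresent()) {
                future.complete("ENABLED".equals(state.get()));
            } else {
                future.completeExceptionally(new IllegalStateException("state not found"));
            }
        });
        return future;
    }
}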

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

private CompletableFuture<Void> checkPersistencePolicies() {
    TopicName topicName = TopicName.get(topic);
    CompletableFuture<Void> future = new CompletableFuture<>();
    brokerService.getManagedLedgerConfig(topicName).thenAccept(config -> {
        // update managed-ledger config and managed-cursor.markDeleteRate
        this.ledger.setConfig(config);
        future.complete(null);
    }).exceptionally(ex -> {
        log.warn("[{}] Failed to update persistence-policies {}", topic, ex.getMessage());
        future.completeExceptionally(ex);
        return null;
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<Boolean> isTableDisabled(TableName tableName) {
    if (TableName.isMetaTableName(tableName)) {
        return CompletableFuture.completedFuture(false);
    }
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
        if (error != null) {
            future.completeExceptionally(error);
            return;
        }
        if (state.isPresent()) {
            future.complete(state.get().inStates(TableState.State.DISABLED));
        } else {
            future.completeExceptionally(new TableNotFoundException(tableName));
        }
    });
    return future;
}

From source file:org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private CompletableFuture<Boolean> isTableAvailable(TableName tableName, Optional<byte[][]> splitKeys) {
    if (TableName.isMetaTableName(tableName)) {
        return connection.registry.getMetaRegionLocation().thenApply(locs -> Stream
                .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null));
    }
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    addListener(isTableEnabled(tableName), (enabled, error) -> {
        if (error != null) {
            if (error instanceof TableNotFoundException) {
                future.complete(false);
            } else {
                future.completeExceptionally(error);
            }
            return;
        }
        if (!enabled) {
            future.complete(false);
        } else {
            addListener(AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName)),
                    (locations, error1) -> {
                        if (error1 != null) {
                            future.completeExceptionally(error1);
                            return;
                        }
                        List<HRegionLocation> notDeployedRegions = locations.stream()
                                .filter(loc -> loc.getServerName() == null).collect(Collectors.toList());
                        if (notDeployedRegions.size() > 0) {
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Table " + tableName + " has " + notDeployedRegions.size()
                                        + " regions");
                            }
                            future.complete(false);
                            return;
                        }

                        Optional<Boolean> available = splitKeys
                                .map(keys -> compareRegionsWithSplitKeys(locations, keys));
                        future.complete(available.orElse(true));
                    });
        }
    });
    return future;
}

From source file:org.apache.pulsar.broker.service.persistent.PersistentTopic.java

@Override
public CompletableFuture<Void> checkReplication() {
    TopicName name = TopicName.get(topic);
    if (!name.isGlobal()) {
        return CompletableFuture.completedFuture(null);
    }

    if (log.isDebugEnabled()) {
        log.debug("[{}] Checking replication status", name);
    }

    Policies policies = null;
    try {
        policies = brokerService.pulsar().getConfigurationCache().policiesCache()
                .get(AdminResource.path(POLICIES, name.getNamespace()))
                .orElseThrow(() -> new KeeperException.NoNodeException());
    } catch (Exception e) {
        CompletableFuture<Void> future = new CompletableFuture<>();
        future.completeExceptionally(new ServerMetadataException(e));
        return future;
    }

    final int newMessageTTLinSeconds = policies.message_ttl_in_seconds;

    Set<String> configuredClusters;
    if (policies.replication_clusters != null) {
        configuredClusters = Sets.newTreeSet(policies.replication_clusters);
    } else {
        configuredClusters = Collections.emptySet();
    }

    String localCluster = brokerService.pulsar().getConfiguration().getClusterName();

    // if local cluster is removed from global namespace cluster-list : then delete topic forcefully because pulsar
    // doesn't serve global topic without local repl-cluster configured.
    if (TopicName.get(topic).isGlobal() && !configuredClusters.contains(localCluster)) {
        log.info("Deleting topic [{}] because local cluster is not part of global namespace repl list {}",
                configuredClusters);
        return deleteForcefully();
    }

    List<CompletableFuture<Void>> futures = Lists.newArrayList();

    // Check for missing replicators
    for (String cluster : configuredClusters) {
        if (cluster.equals(localCluster)) {
            continue;
        }

        if (!replicators.containsKey(cluster)) {
            futures.add(startReplicator(cluster));
        }
    }

    // Check for replicators to be stopped
    replicators.forEach((cluster, replicator) -> {
        // Update message TTL
        ((PersistentReplicator) replicator).updateMessageTTL(newMessageTTLinSeconds);

        if (!cluster.equals(localCluster)) {
            if (!configuredClusters.contains(cluster)) {
                futures.add(removeReplicator(cluster));
            }
        }

    });

    return FutureUtil.waitForAll(futures);
}

From source file:org.apache.bookkeeper.client.LedgerHandle.java

/**
 * {@inheritDoc}
 */
@Override
public CompletableFuture<Void> force() {
    CompletableFuture<Void> result = new CompletableFuture<>();
    ForceLedgerOp op = new ForceLedgerOp(this, clientCtx.getBookieClient(), getCurrentEnsemble(), result);
    boolean wasClosed = false;
    synchronized (this) {
        // synchronized on this to ensure that
        // the ledger isn't closed between checking and
        // updating lastAddPushed
        if (!isHandleWritable()) {
            wasClosed = true;
        }
    }

    if (wasClosed) {
        // make sure the callback is triggered in main worker pool
        try {
            clientCtx.getMainWorkerPool().executeOrdered(ledgerId, new SafeRunnable() {
                @Override
                public void safeRun() {
                    LOG.warn("Force() attempted on a closed ledger: {}", ledgerId);
                    result.completeExceptionally(new BKException.BKLedgerClosedException());
                }

                @Override
                public String toString() {
                    return String.format("force(lid=%d)", ledgerId);
                }
            });
        } catch (RejectedExecutionException e) {
            result.completeExceptionally(new BKException.BKInterruptedException());
        }
        return result;
    }

    // early exit: no write has been issued yet
    if (pendingAddsSequenceHead == INVALID_ENTRY_ID) {
        clientCtx.getMainWorkerPool().executeOrdered(ledgerId, new SafeRunnable() {
            @Override
            public void safeRun() {
                FutureUtils.complete(result, null);
            }

            @Override
            public String toString() {
                return String.format("force(lid=%d)", ledgerId);
            }
        });
        return result;
    }

    try {
        clientCtx.getMainWorkerPool().executeOrdered(ledgerId, op);
    } catch (RejectedExecutionException e) {
        result.completeExceptionally(new BKException.BKInterruptedException());
    }
    return result;
}
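
force() hands completion work to an ordered executor and, if the submission is rejected, completes the future exceptionally so callers are never left waiting on a future nobody will finish. A stripped-down, JDK-only sketch of that guard; the executor and the no-op task are illustrative:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;

class RejectedExecutionGuardSketch {

    // Complete the result on the given executor, but never leave it hanging
    // if the executor refuses the task (e.g. because it is shutting down).
    CompletableFuture<Void> completeOn(ExecutorService executor) {
        CompletableFuture<Void> result = new CompletableFuture<>();
        try {
            executor.execute(() -> result.complete(null));
        } catch (RejectedExecutionException e) {
            result.completeExceptionally(e);
        }
        return result;
    }
}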