Example usage for java.util.concurrent CompletableFuture CompletableFuture

List of usage examples for java.util.concurrent CompletableFuture CompletableFuture

Introduction

On this page you can find example usage for the java.util.concurrent CompletableFuture constructor, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
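
For orientation before the project examples below, here is a minimal, self-contained sketch (not taken from any of the sources listed): one piece of code hands out the incomplete future, another thread completes it later, and the consumer chains on or blocks for the result.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class IncompleteFutureSketch {
    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // A new CompletableFuture starts out incomplete; nothing is computing it yet.
        CompletableFuture<String> future = new CompletableFuture<>();

        // Some other thread completes it later, normally or exceptionally.
        executor.submit(() -> future.complete("done"));

        // The consumer can chain stages or simply block for the value.
        System.out.println(future.join()); // prints "done"
        executor.shutdown();
    }
}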

Usage

From source file:org.apache.pulsar.client.impl.ClientCnx.java

public CompletableFuture<LookupDataResult> newLookup(ByteBuf request, long requestId) {
    CompletableFuture<LookupDataResult> future = new CompletableFuture<>();

    if (pendingLookupRequestSemaphore.tryAcquire()) {
        addPendingLookupRequests(requestId, future);
        ctx.writeAndFlush(request).addListener(writeFuture -> {
            if (!writeFuture.isSuccess()) {
                log.warn("{} Failed to send request {} to broker: {}", ctx.channel(), requestId,
                        writeFuture.cause().getMessage());
                getAndRemovePendingLookupRequest(requestId);
                future.completeExceptionally(writeFuture.cause());
            }
        });
    } else {
        if (log.isDebugEnabled()) {
            log.debug("{} Failed to add lookup-request into pending queue", requestId);
        }
        if (!waitingLookupRequests.offer(Pair.of(requestId, Pair.of(request, future)))) {
            if (log.isDebugEnabled()) {
                log.debug("{} Failed to add lookup-request into waiting queue", requestId);
            }
            future.completeExceptionally(new PulsarClientException.TooManyRequestsException(String.format(
                    "Requests number out of config: There are {%s} lookup requests outstanding and {%s} requests pending.",
                    pendingLookupRequests.size(), waitingLookupRequests.size())));
        }
    }
    return future;
}

From source file:org.apache.bookkeeper.client.BookKeeper.java

/**
 * Synchronous call to create ledger.
 * Creates a new ledger asynchronously and returns {@link LedgerHandleAdv} which can accept entryId.
 * Parameters must match those of
 * {@link #asyncCreateLedgerAdv(int, int, int, DigestType, byte[],
 *                           AsyncCallback.CreateCallback, Object)}
 *
 * @param ensSize
 * @param writeQuorumSize
 * @param ackQuorumSize
 * @param digestType
 * @param passwd
 * @param customMetadata
 * @return a handle to the newly created ledger
 * @throws InterruptedException
 * @throws BKException
 */
public LedgerHandle createLedgerAdv(int ensSize, int writeQuorumSize, int ackQuorumSize, DigestType digestType,
        byte passwd[], final Map<String, byte[]> customMetadata) throws InterruptedException, BKException {
    CompletableFuture<LedgerHandle> counter = new CompletableFuture<>();

    /*
     * Calls asynchronous version
     */
    asyncCreateLedgerAdv(ensSize, writeQuorumSize, ackQuorumSize, digestType, passwd, new SyncCreateCallback(),
            counter, customMetadata);

    LedgerHandle lh = SynchCallbackUtils.waitForResult(counter);
    if (lh == null) {
        LOG.error("Unexpected condition : no ledger handle returned for a success ledger creation");
        throw BKException.create(BKException.Code.UnexpectedConditionException);
    }
    return lh;
}
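
The essence of this snippet is bridging a callback-style asynchronous API into a blocking call. Below is a stripped-down sketch of that bridge, with a hypothetical asyncCreate method and a String result standing in for BookKeeper's asyncCreateLedgerAdv, SyncCreateCallback and SynchCallbackUtils; it is not BookKeeper's actual API.

// Hypothetical sketch of the callback-to-blocking bridge; names are illustrative only.
static void asyncCreate(java.util.function.BiConsumer<Throwable, String> callback) {
    // Some asynchronous API that eventually invokes the callback with an error or a result.
    CompletableFuture.runAsync(() -> callback.accept(null, "ledger-handle"));
}

static String createSync() throws Exception {
    CompletableFuture<String> result = new CompletableFuture<>();
    asyncCreate((error, handle) -> {
        if (error != null) {
            result.completeExceptionally(error);
        } else {
            result.complete(handle);
        }
    });
    // Block the calling thread until the callback has completed the future.
    return result.get();
}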

From source file:io.atomix.cluster.messaging.impl.NettyMessagingService.java

private CompletableFuture<Channel> openChannel(Address address) {
    Bootstrap bootstrap = bootstrapClient(address);
    CompletableFuture<Channel> retFuture = new CompletableFuture<>();
    ChannelFuture f = bootstrap.connect();

    f.addListener(future -> {
        if (future.isSuccess()) {
            retFuture.complete(f.channel());
        } else {
            retFuture.completeExceptionally(future.cause());
        }
    });
    log.debug("Established a new connection to {}", address);
    return retFuture;
}

From source file:org.apache.distributedlog.BKLogHandler.java

/**
 * Read the log segments from the store and register a listener.
 * @param comparator
 * @param segmentFilter
 * @param logSegmentNamesListener
 * @return future represents the result of log segments
 */
public CompletableFuture<Versioned<List<LogSegmentMetadata>>> readLogSegmentsFromStore(
        final Comparator<LogSegmentMetadata> comparator, final LogSegmentFilter segmentFilter,
        final LogSegmentNamesListener logSegmentNamesListener) {
    final CompletableFuture<Versioned<List<LogSegmentMetadata>>> readResult = new CompletableFuture<Versioned<List<LogSegmentMetadata>>>();
    metadataStore.getLogSegmentNames(logMetadata.getLogSegmentsPath(), logSegmentNamesListener)
            .whenComplete(new FutureEventListener<Versioned<List<String>>>() {
                @Override
                public void onFailure(Throwable cause) {
                    readResult.completeExceptionally(cause);
                }

                @Override
                public void onSuccess(Versioned<List<String>> logSegmentNames) {
                    readLogSegmentsFromStore(logSegmentNames, comparator, segmentFilter, readResult);
                }
            });
    return readResult;
}
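
Since getLogSegmentNames already returns a CompletableFuture, the FutureEventListener adapter above could equally be written as a plain whenComplete lambda. A sketch of that alternative, reusing the names from the snippet and assuming identical behavior:

    metadataStore.getLogSegmentNames(logMetadata.getLogSegmentsPath(), logSegmentNamesListener)
            .whenComplete((logSegmentNames, cause) -> {
                if (cause != null) {
                    readResult.completeExceptionally(cause);
                } else {
                    readLogSegmentsFromStore(logSegmentNames, comparator, segmentFilter, readResult);
                }
            });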

From source file:io.pravega.controller.store.stream.AbstractStreamMetadataStore.java

private <T> CompletableFuture<T> withCompletion(CompletableFuture<T> future, final Executor executor) {

    // Following makes sure that the result future given out to caller is actually completed on
    // caller's executor. So any chaining, if done without specifying an executor, will either happen on
    // caller's executor or fork join pool but never on someone else's executor.

    CompletableFuture<T> result = new CompletableFuture<>();

    future.whenCompleteAsync((r, e) -> {
        if (e != null) {
            result.completeExceptionally(e);
        } else {
            result.complete(r);
        }
    }, executor);

    return result;
}
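
A hedged usage sketch of this helper (the store call, the Config type and callerExecutor are hypothetical stand-ins): because the returned future is completed on the caller's executor, stages chained without an explicit executor never run on the store's internal threads.

    CompletableFuture<Config> result = withCompletion(store.getConfiguration(scope, stream), callerExecutor);

    // Executed by callerExecutor's thread (or the calling thread if already complete),
    // not by the thread that completed the store's own future.
    result.thenApply(Config::getName)
          .thenAccept(name -> System.out.println("config loaded: " + name));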

From source file:org.apache.bookkeeper.mledger.impl.OffloadPrefixTest.java

@Test
public void testOffloadDeleteIncomplete() throws Exception {
    Set<Pair<Long, UUID>> deleted = ConcurrentHashMap.newKeySet();
    CompletableFuture<Set<Long>> errorLedgers = new CompletableFuture<>();
    Set<Pair<Long, UUID>> failedOffloads = ConcurrentHashMap.newKeySet();

    MockLedgerOffloader offloader = new MockLedgerOffloader() {
        @Override
        public CompletableFuture<Void> offload(ReadHandle ledger, UUID uuid,
                Map<String, String> extraMetadata) {
            return super.offload(ledger, uuid, extraMetadata).thenCompose((res) -> {
                CompletableFuture<Void> f = new CompletableFuture<>();
                f.completeExceptionally(new Exception("Fail after offload occurred"));
                return f;
            });
        }
    };
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setMaxEntriesPerLedger(10);
    config.setMinimumRolloverTime(0, TimeUnit.SECONDS);
    config.setRetentionTime(0, TimeUnit.MINUTES);
    config.setLedgerOffloader(offloader);
    ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("my_test_ledger", config);
    ManagedCursor cursor = ledger.openCursor("foobar");
    for (int i = 0; i < 15; i++) {
        String content = "entry-" + i;
        ledger.addEntry(content.getBytes());
    }

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);
    try {
        ledger.offloadPrefix(ledger.getLastConfirmedEntry());
    } catch (ManagedLedgerException mle) {
        // expected
    }

    Assert.assertEquals(ledger.getLedgersInfoAsList().size(), 2);

    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().getComplete()).count(), 0);
    Assert.assertEquals(
            ledger.getLedgersInfoAsList().stream().filter(e -> e.getOffloadContext().hasUidMsb()).count(), 1);
    Assert.assertTrue(ledger.getLedgersInfoAsList().get(0).getOffloadContext().hasUidMsb());

    long firstLedger = ledger.getLedgersInfoAsList().get(0).getLedgerId();
    long secondLedger = ledger.getLedgersInfoAsList().get(1).getLedgerId();

    cursor.markDelete(ledger.getLastConfirmedEntry());
    assertEventuallyTrue(() -> ledger.getLedgersInfoAsList().size() == 1);
    Assert.assertEquals(ledger.getLedgersInfoAsList().get(0).getLedgerId(), secondLedger);

    assertEventuallyTrue(() -> offloader.deletedOffloads().contains(firstLedger));
}

From source file:com.hpe.application.automation.tools.srf.run.RunFromSrfBuilder.java

@Override
public boolean perform(final AbstractBuild<?, ?> build, final Launcher launcher, BuildListener _listener)
        throws InterruptedException, IOException {

    this.logger = _listener.getLogger();
    Dispatcher.TRACE = true;
    Dispatcher.TRACE_PER_REQUEST = true;

    this._token = null; // Important in order to get only this run's events
    this.build = build;
    this.sseEventListener = new SseEventListener(this.logger);
    this.sseEventListener.addObserver(this);
    this.srfExecutionFuture = new CompletableFuture<>();
    this.runningCount = new HashSet<>();

    JSONObject conData = getSrfConnectionData(build, logger);
    if (conData == null)
        return false;

    _app = conData.getString("app");
    _secret = conData.getString("secret");
    _ftaasServerAddress = conData.getString("server");
    _https = conData.getBoolean("https");
    _tenant = conData.getString("tenant");
    String srfProxy = conData.getString("proxy");

    try {
        SSLContext sslContext = SSLContext.getInstance("TLS");
        _trustMgr = new SrfTrustManager();
        sslContext.init(null, new SrfTrustManager[] { _trustMgr }, null);
        SSLContext.setDefault(sslContext);
        _factory = sslContext.getSocketFactory();
    } catch (NoSuchAlgorithmException | KeyManagementException e) {
        logger.print(e.getMessage());
        logger.print("\n\r");
    }

    if ((srfProxy != null) && (srfProxy.length() != 0)) {
        URL proxy = new URL(srfProxy);
        String proxyHost = proxy.getHost();
        String proxyPort = String.format("%d", proxy.getPort());
        Properties systemProperties = System.getProperties();
        systemProperties.setProperty("https.proxyHost", proxyHost);
        systemProperties.setProperty("http.proxyHost", proxyHost);
        systemProperties.setProperty("https.proxyPort", proxyPort);
        systemProperties.setProperty("http.proxyPort", proxyPort);
    }

    jobIds = null;
    try {
        initSrfEventListener();
        _secretApplied = false;

        try {
            jobIds = executeTestsSet();
            if (jobIds.size() > 0 && eventSrc == null)
                initSrfEventListener();

        } catch (AuthenticationException e) {
            initSrfEventListener();
            if (_token == null)
                _token = loginToSrf();
            _secretApplied = true;
        }

    } catch (UnknownHostException | ConnectException | SSLHandshakeException | IllegalArgumentException e) {
        cleanUp();
        logger.println(
                String.format("ERROR: Failed logging in to SRF server: %s %s", this._ftaasServerAddress, e));
        return false;
    } catch (IOException | SrfException e) {
        cleanUp();
        logger.println(String.format("ERROR: Failed executing test, %s", e));
        return false;
    }

    try {
        boolean buildResult = this.srfExecutionFuture.get();
        return buildResult;
    } catch (ExecutionException e) {
        e.printStackTrace();
        return false;
    } catch (InterruptedException e) {
        e.printStackTrace();
        build.setResult(Result.ABORTED);
        // postSrfJobCancellation();
        return false;
    } finally {
        cleanUp();
    }
}

From source file:org.apache.pulsar.client.impl.ClientCnx.java

public CompletableFuture<List<String>> newGetTopicsOfNamespace(ByteBuf request, long requestId) {
    CompletableFuture<List<String>> future = new CompletableFuture<>();

    pendingGetTopicsRequests.put(requestId, future);
    ctx.writeAndFlush(request).addListener(writeFuture -> {
        if (!writeFuture.isSuccess()) {
            log.warn("{} Failed to send request {} to broker: {}", ctx.channel(), requestId,
                    writeFuture.cause().getMessage());
            pendingGetTopicsRequests.remove(requestId);
            future.completeExceptionally(writeFuture.cause());
        }
    });

    return future;
}

From source file:org.apache.pulsar.broker.service.BrokerService.java

public CompletableFuture<ManagedLedgerConfig> getManagedLedgerConfig(DestinationName topicName) {
    CompletableFuture<ManagedLedgerConfig> future = new CompletableFuture<>();
    // Execute in background thread, since getting the policies might block if the z-node wasn't already cached
    pulsar.getOrderedExecutor().submitOrdered(topicName, safeRun(() -> {
        NamespaceName namespace = topicName.getNamespaceObject();
        ServiceConfiguration serviceConfig = pulsar.getConfiguration();

        // Get persistence policy for this destination
        Policies policies;
        try {
            policies = pulsar
                    .getConfigurationCache().policiesCache().get(AdminResource.path("policies",
                            namespace.getProperty(), namespace.getCluster(), namespace.getLocalName()))
                    .orElse(null);
        } catch (Throwable t) {
            // Ignoring since if we don't have policies, we fallback on the default
            log.warn("Got exception when reading persistence policy for {}: {}", topicName, t.getMessage(), t);
            future.completeExceptionally(t);
            return;
        }

        PersistencePolicies persistencePolicies = policies != null ? policies.persistence : null;
        RetentionPolicies retentionPolicies = policies != null ? policies.retention_policies : null;

        if (persistencePolicies == null) {
            // Apply default values
            persistencePolicies = new PersistencePolicies(serviceConfig.getManagedLedgerDefaultEnsembleSize(),
                    serviceConfig.getManagedLedgerDefaultWriteQuorum(),
                    serviceConfig.getManagedLedgerDefaultAckQuorum(),
                    serviceConfig.getManagedLedgerDefaultMarkDeleteRateLimit());
        }

        if (retentionPolicies == null) {
            retentionPolicies = new RetentionPolicies(serviceConfig.getDefaultRetentionTimeInMinutes(),
                    serviceConfig.getDefaultRetentionSizeInMB());
        }

        ManagedLedgerConfig config = new ManagedLedgerConfig();
        config.setEnsembleSize(persistencePolicies.getBookkeeperEnsemble());
        config.setWriteQuorumSize(persistencePolicies.getBookkeeperWriteQuorum());
        config.setAckQuorumSize(persistencePolicies.getBookkeeperAckQuorum());
        config.setThrottleMarkDelete(persistencePolicies.getManagedLedgerMaxMarkDeleteRate());
        config.setDigestType(DigestType.CRC32);

        config.setMaxUnackedRangesToPersist(serviceConfig.getManagedLedgerMaxUnackedRangesToPersist());
        config.setMaxEntriesPerLedger(serviceConfig.getManagedLedgerMaxEntriesPerLedger());
        config.setMinimumRolloverTime(serviceConfig.getManagedLedgerMinLedgerRolloverTimeMinutes(),
                TimeUnit.MINUTES);
        config.setMaximumRolloverTime(serviceConfig.getManagedLedgerMaxLedgerRolloverTimeMinutes(),
                TimeUnit.MINUTES);
        config.setMaxSizePerLedgerMb(2048);

        config.setMetadataEnsembleSize(serviceConfig.getManagedLedgerDefaultEnsembleSize());
        config.setMetadataWriteQuorumSize(serviceConfig.getManagedLedgerDefaultWriteQuorum());
        config.setMetadataAckQuorumSize(serviceConfig.getManagedLedgerDefaultAckQuorum());
        config.setMetadataMaxEntriesPerLedger(serviceConfig.getManagedLedgerCursorMaxEntriesPerLedger());

        config.setLedgerRolloverTimeout(serviceConfig.getManagedLedgerCursorRolloverTimeInSeconds());
        config.setRetentionTime(retentionPolicies.getRetentionTimeInMinutes(), TimeUnit.MINUTES);
        config.setRetentionSizeInMB(retentionPolicies.getRetentionSizeInMB());

        future.complete(config);
    }, (exception) -> future.completeExceptionally(exception)));

    return future;
}
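
Stripped of the Pulsar specifics, the pattern here is to hand out an incomplete future and complete it, normally or exceptionally, from a task submitted to another executor. A minimal sketch with hypothetical Config and readConfigFromStore names:

public CompletableFuture<Config> loadConfigAsync(String name, ExecutorService executor) {
    CompletableFuture<Config> future = new CompletableFuture<>();
    executor.submit(() -> {
        try {
            // Potentially blocking work, e.g. reading policies from a metadata store.
            Config config = readConfigFromStore(name);
            future.complete(config);
        } catch (Throwable t) {
            future.completeExceptionally(t);
        }
    });
    return future;
}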

From source file:org.apache.pulsar.client.impl.ConsumerImpl.java

@Override
public CompletableFuture<Void> closeAsync() {
    if (getState() == State.Closing || getState() == State.Closed) {
        unAckedMessageTracker.close();
        if (possibleSendToDeadLetterTopicMessages != null) {
            possibleSendToDeadLetterTopicMessages.clear();
        }
        return CompletableFuture.completedFuture(null);
    }

    if (!isConnected()) {
        log.info("[{}] [{}] Closed Consumer (not connected)", topic, subscription);
        setState(State.Closed);
        unAckedMessageTracker.close();
        if (possibleSendToDeadLetterTopicMessages != null) {
            possibleSendToDeadLetterTopicMessages.clear();
        }
        client.cleanupConsumer(this);
        return CompletableFuture.completedFuture(null);
    }

    stats.getStatTimeout().ifPresent(Timeout::cancel);

    setState(State.Closing);

    acknowledgmentsGroupingTracker.close();

    long requestId = client.newRequestId();

    CompletableFuture<Void> closeFuture = new CompletableFuture<>();
    ClientCnx cnx = cnx();
    if (null == cnx) {
        cleanupAtClose(closeFuture);
    } else {
        ByteBuf cmd = Commands.newCloseConsumer(consumerId, requestId);
        cnx.sendRequestWithId(cmd, requestId).handle((v, exception) -> {
            cnx.removeConsumer(consumerId);
            if (exception == null || !cnx.ctx().channel().isActive()) {
                cleanupAtClose(closeFuture);
            } else {
                closeFuture.completeExceptionally(exception);
            }
            return null;
        });
    }

    return closeFuture;
}