Example usage for java.util.concurrent CompletableFuture CompletableFuture

Introduction

On this page you can find example usage of the java.util.concurrent CompletableFuture() constructor, collected from open-source projects.

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
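
The examples below all follow the same pattern: create an incomplete future, hand it to a callback-based API, and complete it (normally or exceptionally) when the callback fires. A minimal, self-contained sketch of that manual completion round trip (names are illustrative):

import java.util.concurrent.CompletableFuture;

public class ManualCompletion {
    public static void main(String[] args) {
        CompletableFuture<String> future = new CompletableFuture<>();
        // Producer side: complete the future from another thread.
        new Thread(() -> future.complete("done")).start();
        // Consumer side: join() blocks until complete() or completeExceptionally() is called.
        System.out.println(future.join());
    }
}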

Usage

From source file:io.pravega.controller.store.stream.ZKStream.java

@Override
CompletableFuture<Data<Integer>> getMarkerData(int segmentNumber) {
    final CompletableFuture<Data<Integer>> result = new CompletableFuture<>();
    final String path = ZKPaths.makePath(markerPath, String.format("%d", segmentNumber));
    cache.getCachedData(path).whenComplete((res, ex) -> {
        if (ex != null) {
            Throwable cause = ExceptionHelpers.getRealException(ex);
            if (cause instanceof StoreException.DataNotFoundException) {
                result.complete(null);
            } else {
                result.completeExceptionally(cause);
            }
        } else {
            result.complete(res);
        }
    });

    return result;
}
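
This is the basic adapter idiom used throughout the examples that follow: the incomplete future is completed from inside a whenComplete callback, and here a DataNotFoundException is deliberately mapped to a null result rather than an exceptional completion.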

From source file:org.apache.pulsar.functions.runtime.ProcessRuntime.java

@Override
public CompletableFuture<Void> resetMetrics() {
    CompletableFuture<Void> retval = new CompletableFuture<>();
    if (stub == null) {
        retval.completeExceptionally(new RuntimeException("Not alive"));
        return retval;
    }
    ListenableFuture<Empty> response = stub.withDeadlineAfter(GRPC_TIMEOUT_SECS, TimeUnit.SECONDS)
            .resetMetrics(Empty.newBuilder().build());
    Futures.addCallback(response, new FutureCallback<Empty>() {
        @Override
        public void onFailure(Throwable throwable) {
            retval.completeExceptionally(throwable);
        }

        @Override
        public void onSuccess(Empty t) {
            retval.complete(null);
        }
    });
    return retval;
}
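
The callback above is the standard bridge from Guava's ListenableFuture to CompletableFuture. A generic sketch of the same bridge, assuming a recent Guava (which requires the three-argument addCallback with an explicit Executor):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.CompletableFuture;

public final class GuavaBridge {
    /** Adapts a Guava ListenableFuture into a CompletableFuture. */
    public static <T> CompletableFuture<T> toCompletable(ListenableFuture<T> listenable) {
        CompletableFuture<T> future = new CompletableFuture<>();
        Futures.addCallback(listenable, new FutureCallback<T>() {
            @Override
            public void onSuccess(T result) {
                future.complete(result);
            }

            @Override
            public void onFailure(Throwable t) {
                future.completeExceptionally(t);
            }
        }, MoreExecutors.directExecutor());
        return future;
    }
}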

From source file:org.openhab.binding.mqtt.generic.ChannelState.java

/**
 * Subscribes to the state topic on the given connection and informs about updates on the given listener.
 *
 * @param connection A broker connection
 * @param scheduler A scheduler to realize the timeout
 * @param timeout A timeout in milliseconds. Can be 0 to disable the timeout and let the future return earlier.
 * @return A future that completes normally if the subscription succeeds (or immediately if the
 *         stateTopic is not set) and exceptionally otherwise.
 */
public CompletableFuture<@Nullable Void> start(MqttBrokerConnection connection,
        ScheduledExecutorService scheduler, int timeout) {
    if (hasSubscribed) {
        return CompletableFuture.completedFuture(null);
    }

    this.connection = connection;

    if (StringUtils.isBlank(config.stateTopic)) {
        return CompletableFuture.completedFuture(null);
    }

    this.future = new CompletableFuture<>();
    connection.subscribe(config.stateTopic, this).thenRun(() -> {
        hasSubscribed = true;
        logger.debug("Subscribed channel {} to topic: {}", this.channelUID, config.stateTopic);
        if (timeout > 0 && !future.isDone()) {
            this.scheduledFuture = scheduler.schedule(this::receivedOrTimeout, timeout, TimeUnit.MILLISECONDS);
        } else {
            receivedOrTimeout();
        }
    }).exceptionally(this::subscribeFail);
    return future;
}
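
The scheduler call above implements a manual timeout on an incomplete future. A minimal sketch of that idiom on its own (Java 9+ offers the equivalent orTimeout built in); note that completeExceptionally is a no-op if the future has already completed:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class Timeouts {
    /** Fails the future with a TimeoutException if it is still pending after the delay. */
    public static <T> CompletableFuture<T> within(CompletableFuture<T> future,
            ScheduledExecutorService scheduler, long timeout, TimeUnit unit) {
        scheduler.schedule(() -> {
            future.completeExceptionally(new TimeoutException());
        }, timeout, unit);
        return future;
    }
}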

From source file:org.apache.hadoop.hbase.client.RawAsyncTableImpl.java

private static <RESP> CompletableFuture<RESP> mutateRow(HBaseRpcController controller, HRegionLocation loc,
        ClientService.Interface stub, RowMutations mutation,
        Converter<MultiRequest, byte[], RowMutations> reqConvert, Function<Result, RESP> respConverter) {
    CompletableFuture<RESP> future = new CompletableFuture<>();
    try {
        byte[] regionName = loc.getRegionInfo().getRegionName();
        MultiRequest req = reqConvert.convert(regionName, mutation);
        stub.multi(controller, req, new RpcCallback<MultiResponse>() {

            @Override
            public void run(MultiResponse resp) {
                if (controller.failed()) {
                    future.completeExceptionally(controller.getFailed());
                } else {
                    try {
                        org.apache.hadoop.hbase.client.MultiResponse multiResp = ResponseConverter
                                .getResults(req, resp, controller.cellScanner());
                        Throwable ex = multiResp.getException(regionName);
                        if (ex != null) {
                            future.completeExceptionally(ex instanceof IOException ? ex
                                    : new IOException(
                                            "Failed to mutate row: " + Bytes.toStringBinary(mutation.getRow()),
                                            ex));
                        } else {
                            future.complete(respConverter
                                    .apply((Result) multiResp.getResults().get(regionName).result.get(0)));
                        }
                    } catch (IOException e) {
                        future.completeExceptionally(e);
                    }
                }
            }
        });
    } catch (IOException e) {
        future.completeExceptionally(e);
    }
    return future;
}
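
Worth noting in this example: a single incomplete future collects four outcomes — a controller-level failure, a per-region exception, a response-conversion IOException, and the successful result — so the caller sees one uniform CompletableFuture regardless of where the RPC failed.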

From source file:org.apache.pulsar.broker.authorization.AuthorizationService.java

/**
 * Check whether the specified role can perform a lookup for the specified topic.
 *
 * For that the caller needs to have producer or consumer permission.
 *
 * @param topicName
 * @param role
 * @return
 */
public CompletableFuture<Boolean> canLookupAsync(TopicName topicName, String role,
        AuthenticationDataSource authenticationData) {
    CompletableFuture<Boolean> finalResult = new CompletableFuture<Boolean>();
    canProduceAsync(topicName, role, authenticationData).whenComplete((produceAuthorized, ex) -> {
        if (ex == null) {
            if (produceAuthorized) {
                finalResult.complete(produceAuthorized);
                return;
            }
        } else {
            if (log.isDebugEnabled()) {
                log.debug(
                        "Topic [{}] Role [{}] exception occured while trying to check Produce permissions. {}",
                        topicName.toString(), role, ex.getMessage());
            }
        }
        canConsumeAsync(topicName, role, null, null).whenComplete((consumeAuthorized, e) -> {
            if (e == null) {
                if (consumeAuthorized) {
                    finalResult.complete(consumeAuthorized);
                    return;
                }
            } else {
                if (log.isDebugEnabled()) {
                    log.debug(
                            "Topic [{}] Role [{}] exception occured while trying to check Consume permissions. {}",
                            topicName.toString(), role, e.getMessage());

                }
                finalResult.completeExceptionally(e);
                return;
            }
            finalResult.complete(false);
        });
    });
    return finalResult;
}
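
The nested whenComplete blocks implement a short-circuit OR over two asynchronous boolean checks. A distilled sketch of that combinator, assuming (as the example does) that a failure of the first check counts as "not authorized":

import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;

public final class AsyncChecks {
    /** Completes with true if the first check passes, otherwise defers to the second. */
    public static CompletableFuture<Boolean> either(CompletableFuture<Boolean> first,
            Supplier<CompletableFuture<Boolean>> second) {
        return first.exceptionally(ex -> false)
                .thenCompose(ok -> ok ? CompletableFuture.completedFuture(true) : second.get());
    }
}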

From source file:io.pravega.controller.server.eventProcessor.ScaleRequestHandler.java

/**
 * Scale task exceptions are absorbed.
 *
 * @param request   incoming request from request stream.
 * @param segments  segments to seal
 * @param newRanges new ranges for segments to create
 * @param context   operation context
 * @return CompletableFuture
 */
private CompletableFuture<Void> executeScaleTask(final ScaleEvent request, final ArrayList<Integer> segments,
        final ArrayList<AbstractMap.SimpleEntry<Double, Double>> newRanges, final OperationContext context) {
    CompletableFuture<Void> result = new CompletableFuture<>();

    streamMetadataTasks.scale(request.getScope(), request.getStream(), segments, newRanges,
            System.currentTimeMillis(), context).whenCompleteAsync((res, e) -> {
                if (e != null) {
                    log.warn("Scale failed for request {}/{}/{} with exception {}", request.getScope(),
                            request.getStream(), request.getSegmentNumber(), e);
                    Throwable cause = ExceptionHelpers.getRealException(e);
                    if (cause instanceof LockFailedException) {
                        result.completeExceptionally(cause);
                    } else {
                        result.completeExceptionally(e);
                    }
                } else {
                    // Completed, either successfully or with a precondition failure. Clear markers on all scaled segments.
                    log.info("scale done for {}/{}/{}", request.getScope(), request.getStream(),
                            request.getSegmentNumber());
                    result.complete(null);

                    clearMarkers(request.getScope(), request.getStream(), segments, context);
                }
            }, executor);

    return result;
}
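
Note the ordering in the success branch: result.complete(null) runs before clearMarkers(...), so marker cleanup is fire-and-forget and never delays the caller, consistent with the method comment that scale task exceptions are absorbed.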

From source file:com.ikanow.aleph2.distributed_services.services.CoreDistributedServices.java

/** Joins the Akka cluster
 */
protected void joinAkkaCluster() {
    if (!_akka_system.isSet()) {
        this.getAkkaSystem(); // (this will also join the cluster)
        return;
    }
    if (!_has_joined_akka_cluster) {
        _has_joined_akka_cluster = true;

        // WORKAROUND FOR BUG IN akka-cluster/akka-zookeeper-seed: if it grabs the old ephemeral connection info of master then bad things can happen
        // so wait until a ZK node that I create for this purpose is removed (so the others also should have been)
        final String application_name = _config_bean.application_name();
        final String hostname_application = DistributedServicesPropertyBean.ZOOKEEPER_APPLICATION_LOCK + "/"
                + ZookeeperUtils.getHostname() + ":" + application_name;
        if (null == application_name) {
            logger.info("(This is a transient application, cannot be the master)");
        } else {
            logger.info("Checking for old ZK artefacts from old instance of this application path="
                    + hostname_application);
            final int MAX_ZK_ATTEMPTS = 6;
            int i = 0;
            for (i = 0; i <= MAX_ZK_ATTEMPTS; ++i) {
                try {
                    this.getCuratorFramework().create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL)
                            .forPath(hostname_application);

                    Thread.sleep(2000L); // (Wait a little longer)
                    break;
                } catch (Exception e) {
                    logger.warn(
                            ErrorUtils.get("Waiting for old instance to be cleared out (err={0}), retrying={1}",
                                    e.getMessage(), i < MAX_ZK_ATTEMPTS));
                    try {
                        Thread.sleep(10000L);
                    } catch (Exception __) {
                    }
                }
            }
            if (i > MAX_ZK_ATTEMPTS) {
                throw new RuntimeException("Failed to clear out lock, not clear why - try removing by hand: "
                        + (DistributedServicesPropertyBean.ZOOKEEPER_APPLICATION_LOCK + "/"
                                + hostname_application));
            }
        }

        ZookeeperClusterSeed.get(_akka_system.get()).join();

        _shutdown_hook.set(Lambdas.wrap_runnable_u(() -> {
            try {
                final CompletableFuture<Unit> wait_for_member_to_leave = new CompletableFuture<>();
                Cluster.get(_akka_system.get())
                        .registerOnMemberRemoved(() -> wait_for_member_to_leave.complete(Unit.unit()));

                _joined_akka_cluster = new CompletableFuture<>(); //(mainly just for testing)
                Cluster.get(_akka_system.get()).leave(ZookeeperClusterSeed.get(_akka_system.get()).address());

                // If it's an application, not transient, then handle synchronization
                try {
                    System.out
                            .println(new java.util.Date() + ": Akka cluster management: Shutting down in ~10s");
                    logger.error("(Not really an error) Shutting down in ~10s");
                } catch (Throwable e) {
                } // logging might not still work at this point

                // (don't delete the ZK node - appear to still be able to run into race problems if you do, left here to remind me):
                //if (null != application_name) {
                //   this.getCuratorFramework().delete().deletingChildrenIfNeeded().forPath(hostname_application);
                //}
                try {
                    wait_for_member_to_leave.get(10L, TimeUnit.SECONDS);
                } catch (Throwable e) {
                    try {
                        System.out.println(new java.util.Date()
                                + ": Akka cluster management: Akka Cluster departure was not able to complete in time: "
                                + e.getMessage());
                        logger.error("Akka Cluster departure was not able to complete in time");
                    } catch (Throwable ee) {
                    } // logging might not still work at this point               
                }
                try {
                    Await.result(_akka_system.get().terminate(), Duration.create(10L, TimeUnit.SECONDS));
                } catch (Throwable e) {
                    try {
                        System.out.println(new java.util.Date()
                                + ": Akka cluster management: Akka System termination was not able to complete in time: "
                                + e.getMessage());
                        logger.error("Akka System termination was not able to complete in time");
                    } catch (Throwable ee) {
                    } // logging might not still work at this point                              
                }

                // All done

                try {
                    System.out.println(new java.util.Date()
                            + ": Akka cluster management:  Akka shut down complete, now exiting");
                    logger.error("(Not really an error) Akka shut down complete, now exiting");
                } catch (Throwable e) {
                } // logging might not still work at this point
            } catch (Throwable t) { // (unknown error, we'll print and log this)
                try {
                    t.printStackTrace();
                    logger.error(ErrorUtils.getLongForm("{0}", t));
                } catch (Throwable e) {
                } // logging might not still work at this point
            }
        }));
        Cluster.get(_akka_system.get()).registerOnMemberUp(() -> {
            logger.info("Joined cluster address=" + ZookeeperClusterSeed.get(_akka_system.get()).address()
                    + ", adding shutdown hook");
            synchronized (_joined_akka_cluster) { // (prevents a race condition vs runOnAkkaJoin)
                _joined_akka_cluster.complete(true);
            }
            // Now register a shutdown hook
            Runtime.getRuntime().addShutdownHook(new Thread(_shutdown_hook.get()));

            _post_join_task_list.stream().parallel().forEach(retval_task -> {
                try {
                    retval_task._2().run();
                    retval_task._1().complete(null);
                } catch (Throwable t) {
                    retval_task._1().completeExceptionally(t);
                }
            });
        });
    }
}
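
The wait_for_member_to_leave future above is an incomplete CompletableFuture used as a one-shot latch: a membership callback completes it, and the shutdown hook blocks on it with a bounded get. A minimal sketch of the idiom, with registerCallback standing in for Cluster.registerOnMemberRemoved (hypothetical name):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

public final class OneShotLatch {
    public static void awaitEvent(Consumer<Runnable> registerCallback) {
        CompletableFuture<Void> fired = new CompletableFuture<>();
        registerCallback.accept(() -> fired.complete(null));
        try {
            fired.get(10L, TimeUnit.SECONDS); // bounded wait, as in the shutdown hook above
        } catch (Exception e) {
            // The event never fired in time; proceed with shutdown regardless.
        }
    }
}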

From source file:io.sqp.client.impl.SqpConnectionImpl.java

private CompletableFuture<Void> finishTransaction(boolean commit) {
    String mode = commit ? "commit" : "rollback";
    CompletableFuture<Void> future = new CompletableFuture<>();
    if (_autocommit) {
        future.completeExceptionally(
                new InvalidOperationException("Cannot " + mode + " a transaction in autocommit mode"));
        return future;
    }
    if (!checkOpenAndNoErrors(future)) {
        return future;
    }
    SqpMessage finishMessage = commit ? new CommitTransactionMessage() : new RollbackTransactionMessage();
    send(finishMessage, new ConfirmationResponseHandler(future, MessageType.TransactionFinishedMessage,
            "waiting for a " + mode + " complete confirmation"));
    return future;
}
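
Returning an already-failed future instead of throwing keeps the method's error channel uniform. On Java 8 this takes three lines; a small helper in the spirit of CompletableFuture.failedFuture (which only arrived in Java 9):

import java.util.concurrent.CompletableFuture;

public final class Futures8 {
    /** Java 8 stand-in for CompletableFuture.failedFuture (added in Java 9). */
    public static <T> CompletableFuture<T> failed(Throwable cause) {
        CompletableFuture<T> future = new CompletableFuture<>();
        future.completeExceptionally(cause);
        return future;
    }
}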

From source file:org.onosproject.store.cluster.messaging.impl.NettyMessagingManager.java

@Override
public CompletableFuture<byte[]> sendAndReceive(Endpoint ep, String type, byte[] payload, Executor executor) {
    checkPermission(CLUSTER_WRITE);
    CompletableFuture<byte[]> response = new CompletableFuture<>();
    Callback callback = new Callback(response, executor);
    Long messageId = messageIdGenerator.incrementAndGet();
    callbacks.put(messageId, callback);
    InternalMessage message = new InternalMessage(preamble, clockService.timeNow(), messageId, localEp, type,
            payload);
    return sendAsync(ep, message).whenComplete((r, e) -> {
        if (e != null) {
            callbacks.invalidate(messageId);
        }
    }).thenComposeAsync(v -> response, executor);
}
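
Here the incomplete future is registered in a callbacks map keyed by message id before the request is sent, and the inbound handler (not shown) completes it when the matching reply arrives. A distilled sketch of that correlation idiom, with illustrative names:

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

public final class PendingRequests {
    private final AtomicLong ids = new AtomicLong();
    private final Map<Long, CompletableFuture<byte[]>> pending = new ConcurrentHashMap<>();

    /** Registers a pending response under a fresh id; call before sending the request. */
    public long register(CompletableFuture<byte[]> response) {
        long id = ids.incrementAndGet();
        pending.put(id, response);
        return id;
    }

    /** Called by the inbound handler when the reply with this id arrives. */
    public void onReply(long id, byte[] payload) {
        CompletableFuture<byte[]> response = pending.remove(id);
        if (response != null) {
            response.complete(payload);
        }
    }
}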

From source file:io.pravega.client.stream.mock.MockController.java

private CompletableFuture<Void> commitTxSegment(UUID txId, Segment segment) {
    CompletableFuture<Void> result = new CompletableFuture<>();
    FailingReplyProcessor replyProcessor = new FailingReplyProcessor() {

        @Override
        public void connectionDropped() {
            result.completeExceptionally(new ConnectionClosedException());
        }

        @Override
        public void wrongHost(WrongHost wrongHost) {
            result.completeExceptionally(new NotImplementedException());
        }

        @Override
        public void transactionCommitted(TransactionCommitted transactionCommitted) {
            result.complete(null);
        }

        @Override
        public void transactionAborted(TransactionAborted transactionAborted) {
            result.completeExceptionally(new TxnFailedException("Transaction already aborted."));
        }

        @Override
        public void processingFailure(Exception error) {
            result.completeExceptionally(error);
        }
    };
    sendRequestOverNewConnection(new CommitTransaction(idGenerator.get(), segment.getScopedName(), txId),
            replyProcessor, result);
    return result;
}