Example usage for java.util.concurrent CompletableFuture CompletableFuture()

List of usage examples for the java.util.concurrent CompletableFuture CompletableFuture() constructor

Introduction

On this page you can find example usage of the java.util.concurrent CompletableFuture CompletableFuture() constructor.

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
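
Most of the examples below follow the same pattern: create an incomplete CompletableFuture, hand it to a callback or reply processor that later calls complete() or completeExceptionally(), and let the caller block on or chain from the result. The following is a minimal, self-contained sketch of that pattern; it is not taken from any of the projects listed below, and the class name, executor, and simulated lookup are illustrative only.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CompletableFutureConstructorDemo {

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newSingleThreadExecutor();

        // Create an incomplete future; no task is running on its behalf yet.
        CompletableFuture<String> result = new CompletableFuture<>();

        // Some asynchronous work completes it later, either normally or exceptionally.
        executor.submit(() -> {
            try {
                String value = "hello"; // stand-in for a real lookup or I/O call
                result.complete(value);
            } catch (Exception e) {
                result.completeExceptionally(e);
            }
        });

        // The caller can block with a timeout, or chain further stages with thenApply/thenAccept.
        System.out.println(result.get(5, TimeUnit.SECONDS));
        executor.shutdown();
    }
}

Combining several such futures with CompletableFuture.allOf(...) followed by thenApply and join, as in the DeviceNotificationService example below, is the usual way to aggregate their results.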

Usage

From source file:com.devicehive.service.DeviceNotificationService.java

public Pair<String, CompletableFuture<List<DeviceNotification>>> subscribe(final Set<String> devices,
        final Set<String> names, final Date timestamp, final BiConsumer<DeviceNotification, String> callback) {

    final String subscriptionId = UUID.randomUUID().toString();
    Set<NotificationSubscribeRequest> subscribeRequests = devices.stream()
            .map(device -> new NotificationSubscribeRequest(subscriptionId, device, names, timestamp))
            .collect(Collectors.toSet());
    Collection<CompletableFuture<Collection<DeviceNotification>>> futures = new ArrayList<>();
    for (NotificationSubscribeRequest sr : subscribeRequests) {
        CompletableFuture<Collection<DeviceNotification>> future = new CompletableFuture<>();
        Consumer<Response> responseConsumer = response -> {
            String resAction = response.getBody().getAction();
            if (resAction.equals(Action.NOTIFICATION_SUBSCRIBE_RESPONSE.name())) {
                NotificationSubscribeResponse r = response.getBody().cast(NotificationSubscribeResponse.class);
                future.complete(r.getNotifications());
            } else if (resAction.equals(Action.NOTIFICATION_EVENT.name())) {
                NotificationEvent event = response.getBody().cast(NotificationEvent.class);
                callback.accept(event.getNotification(), subscriptionId);
            } else {
                logger.warn("Unknown action received from backend {}", resAction);
            }
        };
        futures.add(future);
        Request request = Request.newBuilder().withBody(sr).withPartitionKey(sr.getDevice())
                .withSingleReply(false).build();
        rpcClient.call(request, responseConsumer);
    }

    CompletableFuture<List<DeviceNotification>> future = CompletableFuture
            .allOf(futures.toArray(new CompletableFuture[futures.size()])).thenApply(v -> futures.stream()
                    .map(CompletableFuture::join).flatMap(Collection::stream).collect(Collectors.toList()));
    return Pair.of(subscriptionId, future);
}

From source file:com.ikanow.aleph2.distributed_services.services.CoreDistributedServices.java

/** Guice-invoked constructor
 * @throws Exception
 */
@Inject
public CoreDistributedServices(DistributedServicesPropertyBean config_bean) throws Exception {

    final String connection_string = Optional.ofNullable(config_bean.zookeeper_connection())
            .orElse(DistributedServicesPropertyBean.__DEFAULT_ZOOKEEPER_CONNECTION);

    _config_bean = BeanTemplateUtils.clone(config_bean)
            .with(DistributedServicesPropertyBean::zookeeper_connection, connection_string).done();

    logger.info("Zookeeper connection_string=" + _config_bean.zookeeper_connection());

    // Else join akka cluster lazily, because often it's not required at all
    if (null != config_bean.application_name()) {
        joinAkkaCluster();
    }

    if (null != config_bean.broker_list()) {
        final String broker_list_string = config_bean.broker_list();
        KafkaUtils.setStandardKafkaProperties(_config_bean.zookeeper_connection(), broker_list_string,
                Optional.ofNullable(_config_bean.cluster_name())
                        .orElse(DistributedServicesPropertyBean.__DEFAULT_CLUSTER_NAME),
                Optional.empty());
        _initialized_kafka = new CompletableFuture<>();
        _initialized_kafka.complete(null);
        _initializing_kafka = false;
    } else { // Launch an async process to initialize kafka
        logger.info("Fetching Kafka broker_list from Zookeeper");
        _initialized_kafka = CompletableFuture.runAsync(() -> {
            try {
                final String broker_list = KafkaUtils.getBrokerListFromZookeeper(this.getCuratorFramework(),
                        Optional.empty(), _mapper);
                KafkaUtils
                        .setStandardKafkaProperties(_config_bean.zookeeper_connection(), broker_list,
                                Optional.ofNullable(_config_bean.cluster_name())
                                        .orElse(DistributedServicesPropertyBean.__DEFAULT_CLUSTER_NAME),
                                Optional.empty());
                logger.info("Kafka broker_list=" + broker_list);

                _kafka_zk_framework.set(KafkaUtils.getNewZkClient());
            } catch (Exception e) { // just use the default and hope:
                KafkaUtils
                        .setStandardKafkaProperties(_config_bean.zookeeper_connection(),
                                DistributedServicesPropertyBean.__DEFAULT_BROKER_LIST,
                                Optional.ofNullable(_config_bean.cluster_name())
                                        .orElse(DistributedServicesPropertyBean.__DEFAULT_CLUSTER_NAME),
                                Optional.empty());
            }
            _initializing_kafka = false;
        });
    }
}

From source file:io.pravega.controller.server.SegmentHelper.java

/**
 * This method sends segment sealed message for the specified segment.
 * It takes responsibility for retrying the operation on failure until it succeeds.
 *
 * @param scope               stream scope
 * @param stream              stream name
 * @param segmentNumber       number of segment to be sealed
 * @param hostControllerStore host controller store
 * @param clientCF            connection factory
 * @return a CompletableFuture that completes with true once the segment has been sealed
 */
public CompletableFuture<Boolean> sealSegment(final String scope, final String stream, final int segmentNumber,
        final HostControllerStore hostControllerStore, final ConnectionFactory clientCF) {
    final Controller.NodeUri uri = getSegmentUri(scope, stream, segmentNumber, hostControllerStore);
    final CompletableFuture<Boolean> result = new CompletableFuture<>();
    final WireCommandType type = WireCommandType.SEAL_SEGMENT;
    final FailingReplyProcessor replyProcessor = new FailingReplyProcessor() {

        @Override
        public void connectionDropped() {
            result.completeExceptionally(
                    new WireCommandFailedException(type, WireCommandFailedException.Reason.ConnectionDropped));
        }

        @Override
        public void wrongHost(WireCommands.WrongHost wrongHost) {
            result.completeExceptionally(
                    new WireCommandFailedException(type, WireCommandFailedException.Reason.UnknownHost));
        }

        @Override
        public void segmentSealed(WireCommands.SegmentSealed segmentSealed) {
            result.complete(true);
        }

        @Override
        public void segmentIsSealed(WireCommands.SegmentIsSealed segmentIsSealed) {
            result.complete(true);
        }

        @Override
        public void processingFailure(Exception error) {
            result.completeExceptionally(error);
        }
    };

    WireCommands.SealSegment request = new WireCommands.SealSegment(idGenerator.get(),
            Segment.getScopedName(scope, stream, segmentNumber));
    sendRequestAsync(request, replyProcessor, result, clientCF, ModelHelper.encode(uri));
    return result;
}

From source file:io.pravega.segmentstore.server.reading.StorageReaderTests.java

/**
 * Tests the ability to queue dependent reads (subsequent reads that only want to read a part of a previous read).
 * Test this both with successful and failed reads.
 */
@Test
public void testDependents() {
    final Duration waitTimeout = Duration.ofSeconds(5);
    TestStorage storage = new TestStorage();
    CompletableFuture<Integer> signal = new CompletableFuture<>();
    AtomicBoolean wasReadInvoked = new AtomicBoolean();
    storage.readImplementation = () -> {
        if (wasReadInvoked.getAndSet(true)) {
            Assert.fail(
                    "Read was invoked multiple times, which is a likely indicator that the requests were not chained.");
        }
        return signal;
    };

    @Cleanup
    StorageReader reader = new StorageReader(SEGMENT_METADATA, storage, executorService());

    // Create some reads.
    CompletableFuture<StorageReader.Result> c1 = new CompletableFuture<>();
    CompletableFuture<StorageReader.Result> c2 = new CompletableFuture<>();
    reader.execute(new StorageReader.Request(0, 100, c1::complete, c1::completeExceptionally, TIMEOUT));
    reader.execute(new StorageReader.Request(50, 100, c2::complete, c2::completeExceptionally, TIMEOUT));

    Assert.assertFalse("One or more of the reads has completed prematurely.", c1.isDone() || c2.isDone());

    signal.completeExceptionally(new IntentionalException());
    AssertExtensions.assertThrows("The first read was not failed with the correct exception.",
            () -> c1.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);

    AssertExtensions.assertThrows("The second read was not failed with the correct exception.",
            () -> c2.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);
}

From source file:com.microsoft.azure.servicebus.samples.timetolive.TimeToLive.java

CompletableFuture pickUpAndFixDeadLetters(String connectionString, String queueName,
        IMessageSender resubmitSender, ExecutorService executorService) throws Exception {
    CompletableFuture running = new CompletableFuture();
    QueueClient receiver = new QueueClient(
            new ConnectionStringBuilder(connectionString, "BasicQueue/$deadletterqueue"), ReceiveMode.PEEKLOCK);

    running.whenComplete((r, t) -> {
        try {
            receiver.close();
        } catch (ServiceBusException e) {
            System.out.printf(e.getMessage());
        }
    });

    // register the RegisterMessageHandler callback
    receiver.registerMessageHandler(new IMessageHandler() {
        // callback invoked when the message handler loop has obtained a message
        public CompletableFuture<Void> onMessageAsync(IMessage message) {
            try {
                IMessage resubmitMessage = new Message(message.getBody());
                System.out.printf(
                        "\n\t\tFixing: \n\t\t\tMessageId = %s, \n\t\t\tSequenceNumber = %s, \n\t\t\tLabel = %s\n",
                        message.getMessageId(), message.getSequenceNumber(), message.getLabel());
                resubmitMessage.setMessageId(message.getMessageId());
                resubmitMessage.setLabel(message.getLabel());
                resubmitMessage.setContentType(message.getContentType());
                resubmitMessage.setTimeToLive(Duration.ofMinutes(2));

                resubmitSender.send(resubmitMessage);
                return receiver.completeAsync(message.getLockToken());
            } catch (Exception e) {
                CompletableFuture failure = new CompletableFuture();
                failure.completeExceptionally(e);
                return failure;
            }
        }

        // callback invoked when the message handler has an exception to report
        public void notifyException(Throwable throwable, ExceptionPhase exceptionPhase) {
            System.out.printf(exceptionPhase + "-" + throwable.getMessage());
        }
    },
            // 1 concurrent call, messages are auto-completed, auto-renew duration
            new MessageHandlerOptions(1, false, Duration.ofMinutes(1)), executorService);

    return running;
}

From source file:org.apache.pulsar.io.kafka.connect.PulsarOffsetBackingStore.java

@Override
public Future<Map<ByteBuffer, ByteBuffer>> get(Collection<ByteBuffer> keys,
        Callback<Map<ByteBuffer, ByteBuffer>> callback) {
    CompletableFuture<Void> endFuture = new CompletableFuture<>();
    readToEnd(endFuture);
    return endFuture.thenApply(ignored -> {
        Map<ByteBuffer, ByteBuffer> values = new HashMap<>();
        for (ByteBuffer key : keys) {
            ByteBuffer value;
            synchronized (data) {
                value = data.get(key);
            }
            if (null != value) {
                values.put(key, value);
            }
        }
        if (null != callback) {
            callback.onCompletion(null, values);
        }
        return values;
    }).whenComplete((ignored, cause) -> {
        if (null != cause && null != callback) {
            callback.onCompletion(cause, null);
        }
    });
}

From source file:org.pentaho.di.ui.repo.controller.RepositoryConnectController.java

public String editDatabaseConnection(String database) {
    CompletableFuture<String> future = new CompletableFuture<>();
    spoonSupplier.get().getShell().getDisplay().asyncExec(() -> {
        DatabaseMeta databaseMeta = getDatabase(database);
        String originalName = databaseMeta.getName();
        DatabaseDialog databaseDialog = new DatabaseDialog(spoonSupplier.get().getShell(), databaseMeta);
        databaseDialog.open();
        if (!isDatabaseWithNameExist(databaseMeta, false)) {
            save();
            future.complete(databaseMeta.getName());
        } else {
            DatabaseDialog.showDatabaseExistsDialog(spoonSupplier.get().getShell(), databaseMeta);
            databaseMeta.setName(originalName);
            databaseMeta.setDisplayName(originalName);
            future.complete(originalName);
        }
    });
    JSONObject jsonObject = new JSONObject();
    try {
        jsonObject.put("name", future.get());
        return jsonObject.toJSONString();
    } catch (Exception e) {
        jsonObject.put("name", "None");
        return jsonObject.toJSONString();
    }
}

From source file:com.android.tools.idea.diagnostics.crash.GoogleCrash.java

@NotNull
@Override
public CompletableFuture<String> submit(@NotNull final HttpEntity requestEntity) {
    CompletableFuture<String> future = new CompletableFuture<>();

    try {
        ourExecutor.submit(() -> {
            try {
                HttpClient client = HttpClients.createDefault();

                HttpEntity entity = requestEntity;
                if (!UNIT_TEST_MODE) {
                    // The test server used in testing doesn't handle gzip compression (netty requires jcraft jzlib for gzip decompression)
                    entity = new GzipCompressingEntity(requestEntity);
                }

                HttpPost post = new HttpPost(myCrashUrl);
                post.setEntity(entity);

                HttpResponse response = client.execute(post);
                StatusLine statusLine = response.getStatusLine();
                if (statusLine.getStatusCode() >= 300) {
                    future.completeExceptionally(new HttpResponseException(statusLine.getStatusCode(),
                            statusLine.getReasonPhrase()));
                    return;
                }

                entity = response.getEntity();
                if (entity == null) {
                    future.completeExceptionally(new NullPointerException("Empty response entity"));
                    return;
                }

                String reportId = EntityUtils.toString(entity);
                if (DEBUG_BUILD) {
                    //noinspection UseOfSystemOutOrSystemErr
                    System.out.println("Report submitted: http://go/crash-staging/" + reportId);
                }
                future.complete(reportId);
            } catch (IOException e) {
                future.completeExceptionally(e);
            }
        });
    } catch (RejectedExecutionException ignore) {
        // handled by the rejected execution handler associated with ourExecutor
    }

    return future;
}

From source file:com.devicehive.rpcclient.RpcClientActionTest.java

@Test
public void testListDeviceClassAction() throws Exception {
    ListDeviceClassRequest deviceClassRequest = new ListDeviceClassRequest();
    deviceClassRequest.setName(UUID.randomUUID().toString()); // nonexistent name

    Request request = Request.newBuilder().withBody(deviceClassRequest).build();
    CompletableFuture<Response> future = new CompletableFuture<>();
    client.call(request, future::complete);

    Response response = future.get(10, TimeUnit.SECONDS);
    ListDeviceClassResponse responseBody = (ListDeviceClassResponse) response.getBody();
    assertTrue(responseBody.getDeviceClasses().isEmpty());
}

From source file:io.pravega.segmentstore.server.reading.StorageReadManagerTests.java

/**
 * Tests the ability to queue dependent reads (subsequent reads that only want to read a part of a previous read).
 * Test this both with successful and failed reads.
 */
@Test
public void testDependents() {
    final Duration waitTimeout = Duration.ofSeconds(5);
    TestStorage storage = new TestStorage();
    CompletableFuture<Integer> signal = new CompletableFuture<>();
    AtomicBoolean wasReadInvoked = new AtomicBoolean();
    storage.readImplementation = () -> {
        if (wasReadInvoked.getAndSet(true)) {
            Assert.fail(
                    "Read was invoked multiple times, which is a likely indicator that the requests were not chained.");
        }
        return signal;
    };

    @Cleanup
    StorageReadManager reader = new StorageReadManager(SEGMENT_METADATA, storage, executorService());

    // Create some reads.
    CompletableFuture<StorageReadManager.Result> c1 = new CompletableFuture<>();
    CompletableFuture<StorageReadManager.Result> c2 = new CompletableFuture<>();
    reader.execute(new StorageReadManager.Request(0, 100, c1::complete, c1::completeExceptionally, TIMEOUT));
    reader.execute(new StorageReadManager.Request(50, 100, c2::complete, c2::completeExceptionally, TIMEOUT));

    Assert.assertFalse("One or more of the reads has completed prematurely.", c1.isDone() || c2.isDone());

    signal.completeExceptionally(new IntentionalException());
    AssertExtensions.assertThrows("The first read was not failed with the correct exception.",
            () -> c1.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);

    AssertExtensions.assertThrows("The second read was not failed with the correct exception.",
            () -> c2.get(waitTimeout.toMillis(), TimeUnit.MILLISECONDS),
            ex -> ex instanceof IntentionalException);
}