Example usage for java.util.concurrent CompletableFuture CompletableFuture

Introduction

On this page you can find example usage for the java.util.concurrent.CompletableFuture() constructor.

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
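
The constructor takes no arguments and returns a future that no computation is yet producing a value for; some caller must later finish it with complete or completeExceptionally. A minimal, self-contained sketch (not taken from any of the projects listed below) illustrating that contract:

import java.util.concurrent.CompletableFuture;

public class IncompleteFutureExample {
    public static void main(String[] args) {
        // The future starts out incomplete; no computation is producing its value.
        CompletableFuture<String> future = new CompletableFuture<>();

        // Some other thread completes it later; join() blocks until that happens.
        new Thread(() -> future.complete("done")).start();
        System.out.println(future.join()); // prints "done"

        // Failures are signalled with completeExceptionally(...).
        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(new IllegalStateException("boom"));
        System.out.println(failed.isCompletedExceptionally()); // prints "true"
    }
}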

Usage

From source file:com.yahoo.pulsar.client.impl.PulsarClientImpl.java

public CompletableFuture<Producer> createProducerAsync(final String topic, final ProducerConfiguration conf,
        String producerName) {
    if (state.get() != State.Open) {
        return FutureUtil
                .failedFuture(new PulsarClientException.AlreadyClosedException("Client already closed"));
    }

    if (!DestinationName.isValid(topic)) {
        return FutureUtil
                .failedFuture(new PulsarClientException.InvalidTopicNameException("Invalid topic name"));
    }
    if (conf == null) {
        return FutureUtil.failedFuture(
                new PulsarClientException.InvalidConfigurationException("Producer configuration undefined"));
    }

    CompletableFuture<Producer> producerCreatedFuture = new CompletableFuture<>();

    getPartitionedTopicMetadata(topic).thenAccept(metadata -> {
        if (log.isDebugEnabled()) {
            log.debug("[{}] Received topic metadata. partitions: {}", topic, metadata.partitions);
        }

        ProducerBase producer;
        if (metadata.partitions > 1) {
            producer = new PartitionedProducerImpl(PulsarClientImpl.this, topic, conf, metadata.partitions,
                    producerCreatedFuture);
        } else {
            producer = new ProducerImpl(PulsarClientImpl.this, topic, producerName, conf, producerCreatedFuture,
                    -1);
        }

        synchronized (producers) {
            producers.put(producer, Boolean.TRUE);
        }
    }).exceptionally(ex -> {
        log.warn("[{}] Failed to get partitioned topic metadata: {}", topic, ex.getMessage());
        producerCreatedFuture.completeExceptionally(ex);
        return null;
    });

    return producerCreatedFuture;
}

From source file:org.apache.james.blob.cassandra.CassandraBlobsDAO.java

@Override
public CompletableFuture<byte[]> readBytes(BlobId blobId) {
    try {
        return readBlobParts(blobId).collectList().map(parts -> Bytes.concat(parts.toArray(new byte[0][])))
                .toFuture();
    } catch (ObjectStoreException e) {
        CompletableFuture<byte[]> error = new CompletableFuture<>();
        error.completeExceptionally(e);
        return error;
    }
}
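
The catch block above is the common Java 8 substitute for CompletableFuture.failedFuture(Throwable), which was only added in Java 9. A hypothetical helper (not part of CassandraBlobsDAO) capturing the same pattern:

import java.util.concurrent.CompletableFuture;

final class Futures {
    // Java 8 equivalent of Java 9's CompletableFuture.failedFuture(Throwable).
    static <T> CompletableFuture<T> failed(Throwable cause) {
        CompletableFuture<T> future = new CompletableFuture<>();
        future.completeExceptionally(cause);
        return future;
    }
}

With such a helper, the catch block reduces to return Futures.failed(e).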

From source file:com.devicehive.service.NetworkService.java

public CompletableFuture<List<NetworkVO>> list(String name, String namePattern, String sortField,
        boolean sortOrderAsc, Integer take, Integer skip, HivePrincipal principal) {
    Optional<HivePrincipal> principalOpt = ofNullable(principal);

    ListNetworkRequest request = new ListNetworkRequest();
    request.setName(name);
    request.setNamePattern(namePattern);
    request.setSortField(sortField);
    request.setSortOrderAsc(sortOrderAsc);
    request.setTake(take);
    request.setSkip(skip);
    request.setPrincipal(principalOpt);

    CompletableFuture<Response> future = new CompletableFuture<>();

    rpcClient.call(Request.newBuilder().withBody(request).build(), new ResponseConsumer(future));

    return future.thenApply(r -> ((ListNetworkResponse) r.getBody()).getNetworks());
}

From source file:org.apache.tinkerpop.gremlin.server.GremlinServer.java

/**
 * Start Gremlin Server with {@link Settings} provided to the constructor.
 */
public synchronized CompletableFuture<ServerGremlinExecutor<EventLoopGroup>> start() throws Exception {
    if (serverStarted != null) {
        // server already started - don't get it rolling again
        return serverStarted;
    }

    serverStarted = new CompletableFuture<>();
    final CompletableFuture<ServerGremlinExecutor<EventLoopGroup>> serverReadyFuture = serverStarted;
    try {
        final ServerBootstrap b = new ServerBootstrap();

        // when the high water mark is reached the channel becomes non-writable and stays that way
        // until the low water mark is reached again, so that there is time to recover
        b.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, settings.writeBufferLowWaterMark);
        b.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, settings.writeBufferHighWaterMark);
        b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

        // fire off any lifecycle scripts that were provided by the user. hooks get initialized during
        // ServerGremlinExecutor initialization
        serverGremlinExecutor.getHooks().forEach(hook -> {
            logger.info("Executing start up {}", LifeCycleHook.class.getSimpleName());
            try {
                hook.onStartUp(new LifeCycleHook.Context(logger));
            } catch (UnsupportedOperationException uoe) {
                // if the user doesn't implement onStartUp the scriptengine will throw
                // this exception.  it can safely be ignored.
            }
        });

        final Channelizer channelizer = createChannelizer(settings);
        channelizer.init(serverGremlinExecutor);
        b.group(bossGroup, workerGroup).childHandler(channelizer);
        if (isEpollEnabled) {
            b.channel(EpollServerSocketChannel.class);
        } else {
            b.channel(NioServerSocketChannel.class);
        }

        // bind to host/port and wait for channel to be ready
        b.bind(settings.host, settings.port).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(final ChannelFuture channelFuture) throws Exception {
                if (channelFuture.isSuccess()) {
                    ch = channelFuture.channel();

                    logger.info(
                            "Gremlin Server configured with worker thread pool of {}, gremlin pool of {} and boss thread pool of {}.",
                            settings.threadPoolWorker, settings.gremlinPool, settings.threadPoolBoss);
                    logger.info("Channel started at port {}.", settings.port);

                    serverReadyFuture.complete(serverGremlinExecutor);
                } else {
                    serverReadyFuture.completeExceptionally(new IOException(String.format(
                            "Could not bind to %s and %s - perhaps something else is bound to that address.",
                            settings.host, settings.port)));
                }
            }
        });
    } catch (Exception ex) {
        logger.error("Gremlin Server Error", ex);
        serverReadyFuture.completeExceptionally(ex);
    }

    return serverStarted;
}
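
The anonymous ChannelFutureListener above is what bridges Netty's callback-style ChannelFuture into serverReadyFuture. The same bridging can be factored into a small reusable adapter; the sketch below assumes Netty 4.x and is not part of the GremlinServer source:

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import java.util.concurrent.CompletableFuture;

final class NettyFutures {
    // Complete a CompletableFuture from a Netty ChannelFuture's listener callback.
    static CompletableFuture<Channel> toCompletableFuture(ChannelFuture channelFuture) {
        CompletableFuture<Channel> result = new CompletableFuture<>();
        channelFuture.addListener((ChannelFutureListener) f -> {
            if (f.isSuccess()) {
                result.complete(f.channel());
            } else {
                result.completeExceptionally(f.cause());
            }
        });
        return result;
    }
}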

From source file:org.apache.pulsar.client.impl.BinaryProtoLookupService.java

private CompletableFuture<PartitionedTopicMetadata> getPartitionedTopicMetadata(InetSocketAddress socketAddress,
        TopicName topicName) {

    CompletableFuture<PartitionedTopicMetadata> partitionFuture = new CompletableFuture<PartitionedTopicMetadata>();

    client.getCnxPool().getConnection(socketAddress).thenAccept(clientCnx -> {
        long requestId = client.newRequestId();
        ByteBuf request = Commands.newPartitionMetadataRequest(topicName.toString(), requestId);
        clientCnx.newLookup(request, requestId).thenAccept(lookupDataResult -> {
            try {
                partitionFuture.complete(new PartitionedTopicMetadata(lookupDataResult.partitions));
            } catch (Exception e) {
                partitionFuture.completeExceptionally(new PulsarClientException.LookupException(
                        format("Failed to parse partition-response: redirect=%s, partitions=%s, error=%s",
                                lookupDataResult.redirect, lookupDataResult.partitions, e.getMessage())));
            }
        }).exceptionally((e) -> {
            log.warn("[{}] failed to get Partitioned metadata : {}", topicName.toString(),
                    e.getCause().getMessage(), e);
            partitionFuture.completeExceptionally(e);
            return null;
        });
    }).exceptionally(connectionException -> {
        partitionFuture.completeExceptionally(connectionException);
        return null;
    });

    return partitionFuture;
}

From source file:io.sqp.client.impl.SqpConnectionImpl.java

@Override
public synchronized CompletableFuture<Void> setAutoCommit(boolean useAutoCommit) {
    if (useAutoCommit == _autocommit) {
        return CompletableFuture.completedFuture(null);
    }
    CompletableFuture<Void> future = new CompletableFuture<>();
    if (!checkOpenAndNoErrors(future)) {
        return future;
    }
    send(new SetFeatureMessage().setAutoCommit(useAutoCommit), new ConfirmationResponseHandler(future,
            MessageType.SetFeatureCompleteMessage, "waiting for a server settings complete message"));
    return future.thenApply(v -> {
        _autocommit = useAutoCommit;
        return null;
    });
}

From source file:com.devicehive.handler.command.CommandSubscribeIntegrationTest.java

@Test
@Ignore
public void shouldUnsubscribeFromCommands() throws Exception {
    String device1 = randomUUID().toString();

    String subscriber1 = randomUUID().toString();
    String subscriber2 = randomUUID().toString();

    CommandSubscribeRequest sr1 = new CommandSubscribeRequest(subscriber1, device1, null, null);
    Request r1 = Request.newBuilder().withBody(sr1).withSingleReply(false).build();
    TestCallback c1 = new TestCallback();
    client.call(r1, c1);

    CommandSubscribeRequest sr2 = new CommandSubscribeRequest(subscriber2, device1, null, null);
    Request r2 = Request.newBuilder().withBody(sr2).withSingleReply(false).build();
    TestCallback c2 = new TestCallback();
    client.call(r2, c2);

    Stream.of(c1.subscribeFuture, c2.subscribeFuture).forEach(CompletableFuture::join);

    DeviceCommand command = new DeviceCommand();
    command.setId(0);
    command.setCommand("increase_temperature");
    command.setDeviceGuid(device1);
    CommandInsertRequest event = new CommandInsertRequest(command);
    CompletableFuture<Response> f1 = new CompletableFuture<>();
    client.call(Request.newBuilder().withBody(event).build(), f1::complete);

    f1.join();

    assertThat(c1.commands, hasSize(1));
    assertThat(c2.commands, hasSize(1));

    CommandUnsubscribeRequest ur = new CommandUnsubscribeRequest(sr1.getSubscriptionId(), null);
    Request r3 = Request.newBuilder().withBody(ur).withSingleReply(false).build();
    client.call(r3, c1);

    c1.subscribeFuture.join();

    DeviceCommand command2 = new DeviceCommand();
    command2.setId(1);
    command2.setCommand("increase_temperature");
    command2.setDeviceGuid(device1);
    CommandInsertRequest event2 = new CommandInsertRequest(command2);
    CompletableFuture<Response> f2 = new CompletableFuture<>();
    client.call(Request.newBuilder().withBody(event2).build(), f2::complete);

    f2.join();

    assertThat(c1.commands, hasSize(1));
    assertThat(c2.commands, hasSize(2));
}

From source file:co.runrightfast.vertx.core.impl.VertxServiceImpl.java

private void initVertx() {
    if (this.vertxOptions.isClustered()) {
        final CompletableFuture<AsyncResult<Vertx>> clusteredVertxResult = new CompletableFuture<>();
        Vertx.clusteredVertx(vertxOptions, clusteredVertxResult::complete);
        while (true) {
            try {
                final AsyncResult<Vertx> result = clusteredVertxResult.get(10, TimeUnit.SECONDS);
                if (result.succeeded()) {
                    this.vertx = result.result();
                    LOG.logp(INFO, getClass().getName(), "initVertx",
                            "Vertx clustered instance has been created");
                } else {
                    throw new RuntimeException("Failed to start a clustered Vertx instance", result.cause());
                }
                break;
            } catch (final ExecutionException ex) {
                throw new RuntimeException("Failed to start a clustered Vertx instance", ex);
            } catch (final TimeoutException ex) {
                LOG.logp(INFO, getClass().getName(), "initVertx", "Waiting for Vertx to start");
            } catch (final InterruptedException ex) {
                throw new RuntimeException(ex);
            }
        }
    } else {
        this.vertx = Vertx.vertx(vertxOptions);
        LOG.logp(INFO, getClass().getName(), "initVertx", "Vertx instance has been created");
    }
}
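
The code above wraps the whole AsyncResult<Vertx> in the future and inspects it after get(). An alternative sketch (assuming the Vert.x 3.x core API; the helper is hypothetical and not part of VertxServiceImpl) completes the future with the Vertx instance on success and fails it on error, so the caller can rely on the future's own error handling:

import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import java.util.concurrent.CompletableFuture;

final class VertxFutures {
    // Bridge Vertx.clusteredVertx's callback into a CompletableFuture<Vertx>.
    static CompletableFuture<Vertx> clusteredVertx(VertxOptions options) {
        CompletableFuture<Vertx> future = new CompletableFuture<>();
        Vertx.clusteredVertx(options, result -> {
            if (result.succeeded()) {
                future.complete(result.result());
            } else {
                future.completeExceptionally(result.cause());
            }
        });
        return future;
    }
}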

From source file:io.ventu.rpc.amqp.AmqpInvokerimplTest.java

@Test
public void invoke_onOkRequest_onIOException_futureCompletesExceptionally()
        throws IOException, TimeoutException, ExecutionException, InterruptedException {
    String instanceId = "123456789";

    Req req = new Req();

    Channel channel = mock(Channel.class);
    doAnswer(invocation -> {
        throw new IOException("boom");
    }).when(channel).basicPublish(anyString(), any(), any(), any());

    CompletableFuture<Res> answer = new CompletableFuture<>();
    ResponseReceiver receiver = mock(ResponseReceiver.class);
    doReturn(answer).when(receiver).put(anyString(), any());

    ChannelProvider channelProvider = mock(ChannelProvider.class);
    doReturn(channel).when(channelProvider).provide(instanceId, receiver);
    doReturn(DEFAULT_RPC_EXCHANGE).when(channelProvider).rpcExchange();

    RemoteInvoker invoker = new AmqpInvokerImpl(instanceId, channelProvider, receiver);
    CompletableFuture<Res> actual = invoker.invoke(req, Res.class);

    assertSame(answer, actual);
    assertTrue(actual.isDone());
    assertTrue(actual.isCompletedExceptionally());

    exception.expect(ExecutionException.class);
    try {
        actual.get();
    } catch (ExecutionException ex) {
        assertTrue(ex.getCause() instanceof IOException);
        throw ex;
    }
}

From source file:org.onlab.netty.NettyMessaging.java

@Override
public CompletableFuture<byte[]> sendAndReceive(Endpoint ep, String type, byte[] payload) {
    CompletableFuture<byte[]> response = new CompletableFuture<>();
    Long messageId = messageIdGenerator.incrementAndGet();
    responseFutures.put(messageId, response);
    InternalMessage message = new InternalMessage(messageId, localEp, type, payload);
    try {
        sendAsync(ep, message);
    } catch (Exception e) {
        responseFutures.invalidate(messageId);
        response.completeExceptionally(e);
    }
    return response;
}