Example usage for java.util.concurrent CompletableFuture CompletableFuture

Introduction

On this page you can find example usage for the java.util.concurrent CompletableFuture no-argument constructor, CompletableFuture().

Prototype

public CompletableFuture() 

Document

Creates a new incomplete CompletableFuture.
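
As a minimal sketch of the pattern the examples below build on (the class name IncompleteFutureExample and the "result" value are illustrative, not taken from the sources), an incomplete future is created, completed from another thread, and then read:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class IncompleteFutureExample {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        // Create a new, not-yet-completed future.
        CompletableFuture<String> future = new CompletableFuture<>();

        // Complete it from another thread once the result is available.
        new Thread(() -> future.complete("result")).start();

        // get() blocks until complete(...) or completeExceptionally(...) is invoked.
        System.out.println(future.get()); // prints "result"
    }
}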

Usage

From source file:org.apache.samza.table.caching.TestCachingTable.java

/**
 * Testing caching in a more realistic scenario with Guava cache + remote table
 */
@Test
public void testGuavaCacheAndRemoteTable() throws Exception {
    String tableId = "testGuavaCacheAndRemoteTable";
    Cache<String, String> guavaCache = CacheBuilder.newBuilder().initialCapacity(100).build();
    final ReadWriteTable<String, String> guavaTable = new GuavaCacheTable<>(tableId + "-cache", guavaCache);

    // It is okay to share rateLimitHelper and async helper for read/write in test
    TableRateLimiter<String, String> rateLimitHelper = mock(TableRateLimiter.class);
    TableReadFunction<String, String> readFn = mock(TableReadFunction.class);
    TableWriteFunction<String, String> writeFn = mock(TableWriteFunction.class);
    final RemoteTable<String, String> remoteTable = new RemoteTable<>(tableId + "-remote", readFn, writeFn,
            rateLimitHelper, rateLimitHelper, Executors.newSingleThreadExecutor(), null, null, null,
            Executors.newSingleThreadExecutor());

    final CachingTable<String, String> cachingTable = new CachingTable<>(tableId, remoteTable, guavaTable,
            false);

    initTables(cachingTable, guavaTable, remoteTable);

    // 3 per readable table (9)
    // 5 per read/write table (15)
    verify(metricsRegistry, times(24)).newCounter(any(), anyString());

    // 2 per readable table (6)
    // 5 per read/write table (15)
    // 1 per remote readable table (1)
    // 1 per remote read/write table (1)
    verify(metricsRegistry, times(23)).newTimer(any(), anyString());

    // 1 per guava table (1)
    // 3 per caching table (3)
    verify(metricsRegistry, times(4)).newGauge(anyString(), any());

    // GET
    doReturn(CompletableFuture.completedFuture("bar")).when(readFn).getAsync(any());
    Assert.assertEquals(cachingTable.getAsync("foo").get(), "bar");
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo"), "bar");

    // PUT
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).putAsync(any(), any());
    cachingTable.putAsync("foo", "baz").get();
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo"), "baz");

    // DELETE
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).deleteAsync(any());
    cachingTable.deleteAsync("foo").get();
    // Ensure cache is updated
    Assert.assertNull(guavaCache.getIfPresent("foo"));

    // GET-ALL
    Map<String, String> records = new HashMap<>();
    records.put("foo1", "bar1");
    records.put("foo2", "bar2");
    doReturn(CompletableFuture.completedFuture(records)).when(readFn).getAllAsync(any());
    Assert.assertEquals(cachingTable.getAllAsync(Arrays.asList("foo1", "foo2")).get(), records);
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo1"), "bar1");
    Assert.assertEquals(guavaCache.getIfPresent("foo2"), "bar2");

    // GET-ALL with partial miss
    doReturn(CompletableFuture.completedFuture(Collections.singletonMap("foo3", "bar3"))).when(readFn)
            .getAllAsync(any());
    records = cachingTable.getAllAsync(Arrays.asList("foo1", "foo2", "foo3")).get();
    Assert.assertEquals(records.get("foo3"), "bar3");
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo3"), "bar3");

    // Calling again for the same keys should not trigger I/O, i.e. no exception is thrown
    CompletableFuture<String> exFuture = new CompletableFuture<>();
    exFuture.completeExceptionally(new RuntimeException("Test exception"));
    doReturn(exFuture).when(readFn).getAllAsync(any());
    cachingTable.getAllAsync(Arrays.asList("foo1", "foo2", "foo3")).get();

    // Partial results should throw
    try {
        cachingTable.getAllAsync(Arrays.asList("foo1", "foo2", "foo5")).get();
        Assert.fail();
    } catch (Exception e) {
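        // Expected: "foo5" is not cached, so the stubbed exceptional future from readFn propagates here.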
    }

    // PUT-ALL
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).putAllAsync(any());
    List<Entry<String, String>> entries = new ArrayList<>();
    entries.add(new Entry<>("foo1", "bar111"));
    entries.add(new Entry<>("foo2", "bar222"));
    cachingTable.putAllAsync(entries).get();
    // Ensure cache is updated
    Assert.assertEquals(guavaCache.getIfPresent("foo1"), "bar111");
    Assert.assertEquals(guavaCache.getIfPresent("foo2"), "bar222");

    // PUT-ALL with delete
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).putAllAsync(any());
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).deleteAllAsync(any());
    entries = new ArrayList<>();
    entries.add(new Entry<>("foo1", "bar111"));
    entries.add(new Entry<>("foo2", null));
    cachingTable.putAllAsync(entries).get();
    // Ensure cache is updated
    Assert.assertNull(guavaCache.getIfPresent("foo2"));

    // At this point, foo1 and foo3 should still exist
    Assert.assertNotNull(guavaCache.getIfPresent("foo1"));
    Assert.assertNotNull(guavaCache.getIfPresent("foo3"));

    // DELETE-ALL
    doReturn(CompletableFuture.completedFuture(null)).when(writeFn).deleteAllAsync(any());
    cachingTable.deleteAllAsync(Arrays.asList("foo1", "foo3")).get();
    // Ensure foo1 and foo3 are gone
    Assert.assertNull(guavaCache.getIfPresent("foo1"));
    Assert.assertNull(guavaCache.getIfPresent("foo3"));
}

From source file:com.ikanow.aleph2.analytics.storm.utils.StormControllerUtil.java

/**
 * Stops a storm job, uses the bucket.id to try and find the job to stop
 *
 * @param bucket
 * @return
 */
public static CompletableFuture<BasicMessageBean> stopJob(IStormController storm_controller,
        DataBucketBean bucket, final Optional<String> sub_job) {
    CompletableFuture<BasicMessageBean> stop_future = new CompletableFuture<BasicMessageBean>();
    try {
        storm_controller.stopJob(bucketPathToTopologyName(bucket, sub_job));
    } catch (Exception ex) {
        stop_future.complete(ErrorUtils.buildErrorMessage(StormControllerUtil.class, "stopJob",
                ErrorUtils.getLongForm("Error stopping storm job: {0}", ex)));
        return stop_future;
    }
    stop_future.complete(ErrorUtils.buildSuccessMessage(StormControllerUtil.class, "stopJob",
            "Stopped storm job succesfully"));
    return stop_future;
}

From source file:co.runrightfast.vertx.demo.testHarness.jmx.DemoMXBeanImpl.java

@Override
public String browseEventLogRecords(int skip, int limit) {
    final CompletableFuture<GetEvents.Response> future = new CompletableFuture<>();
    vertx.eventBus().send(EventBusAddress.eventBusAddress(EventLogRepository.VERTICLE_ID, GetEvents.class),
            GetEvents.Request.newBuilder().setSkip(skip).setLimit(limit).build(),
            new DeliveryOptions().setSendTimeout(2000L), responseHandler(future, GetEvents.Response.class));
    try {
        final GetEvents.Response response = future.get(2, TimeUnit.SECONDS);
        return JsonUtils.toVertxJsonObject(Json.createObjectBuilder().add("count", response.getEventsCount())
                .add("records", ProtobufUtils.protobuMessageToJson(response)).build()).encodePrettily();
    } catch (final InterruptedException | ExecutionException | TimeoutException ex) {
        log.logp(SEVERE, getClass().getName(), "createEventLogRecord", "failed", ex);
        throw new RuntimeException("Failed to create event log record : " + ex.getMessage());
    }
}

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Integer> createNewTransaction(UUID txId, long timestamp, long leaseExpiryTime,
        long maxExecutionExpiryTime, long scaleGracePeriod) {
    Preconditions.checkNotNull(txId);

    final CompletableFuture<Integer> result = new CompletableFuture<>();
    final Data<Integer> txnData = new Data<>(new ActiveTxnRecord(timestamp, leaseExpiryTime,
            maxExecutionExpiryTime, scaleGracePeriod, TxnStatus.OPEN).toByteArray(), 0);
    synchronized (txnsLock) {
        activeTxns.putIfAbsent(txId.toString(), txnData);
    }
    int epoch = activeEpoch.get();
    synchronized (txnsLock) {
        if (!epochTxnMap.containsKey(epoch)) {
            result.completeExceptionally(StoreException.create(StoreException.Type.DATA_NOT_FOUND,
                    "Stream: " + getName() + " Transaction: " + txId.toString() + " Epoch: " + epoch));
        } else {
            epochTxnMap.compute(epoch, (x, y) -> {
                y.add(txId.toString());
                return y;
            });
            result.complete(epoch);
        }
    }

    return result;
}

From source file:com.yahoo.pulsar.broker.service.BrokerService.java

private static <T> CompletableFuture<T> failedFuture(Throwable t) {
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(t);
    return future;
}

From source file:com.devicehive.service.DeviceService.java

public CompletableFuture<List<DeviceVO>> list(String name, String namePattern, Long networkId,
        String networkName, Long deviceClassId, String deviceClassName, String sortField,
        @NotNull Boolean sortOrderAsc, Integer take, Integer skip, HivePrincipal principal) {
    ListDeviceRequest request = new ListDeviceRequest();
    request.setName(name);
    request.setNamePattern(namePattern);
    request.setNetworkId(networkId);
    request.setNetworkName(networkName);
    request.setDeviceClassId(deviceClassId);
    request.setDeviceClassName(deviceClassName);
    request.setSortField(sortField);
    request.setSortOrderAsc(sortOrderAsc);
    request.setTake(take);
    request.setSkip(skip);
    request.setPrincipal(principal);

    CompletableFuture<Response> future = new CompletableFuture<>();

    rpcClient.call(Request.newBuilder().withBody(request).build(), new ResponseConsumer(future));
    return future.thenApply(r -> ((ListDeviceResponse) r.getBody()).getDevices());
}

From source file:com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java

@Override
public CompletableFuture<Boolean> deleteObjectById(Object id) {
    try {
        final Path path = new Path(output_directory + id.toString());
        _logger.debug("Trying to delete: " + path.toString());
        final boolean delete_success = fileContext.delete(path, false); //this is always returning false
        _logger.debug("success deleteing: " + delete_success);
        return CompletableFuture.completedFuture(!doesPathExist(path, fileContext)); //if file does not exist, delete was a success
    } catch (IllegalArgumentException | IOException e) {
        final CompletableFuture<Boolean> fut = new CompletableFuture<Boolean>();
        fut.completeExceptionally(e);
        return fut;
    }
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlobStoreManagedLedgerOffloader.java

@Override
public CompletableFuture<Void> offload(ReadHandle readHandle, UUID uuid, Map<String, String> extraMetadata) {
    CompletableFuture<Void> promise = new CompletableFuture<>();
    scheduler.chooseThread(readHandle.getId()).submit(() -> {
        if (readHandle.getLength() == 0 || !readHandle.isClosed() || readHandle.getLastAddConfirmed() < 0) {
            promise.completeExceptionally(
                    new IllegalArgumentException("An empty or open ledger should never be offloaded"));
            return;
        }
        OffloadIndexBlockBuilder indexBuilder = OffloadIndexBlockBuilder.create()
                .withLedgerMetadata(readHandle.getLedgerMetadata())
                .withDataBlockHeaderLength(BlockAwareSegmentInputStreamImpl.getHeaderSize());
        String dataBlockKey = dataBlockOffloadKey(readHandle.getId(), uuid);
        String indexBlockKey = indexBlockOffloadKey(readHandle.getId(), uuid);

        MultipartUpload mpu = null;
        List<MultipartPart> parts = Lists.newArrayList();

        // init multi part upload for data block.
        try {
            BlobBuilder blobBuilder = writeBlobStore.blobBuilder(dataBlockKey);
            addVersionInfo(blobBuilder, userMetadata);
            Blob blob = blobBuilder.build();
            mpu = writeBlobStore.initiateMultipartUpload(writeBucket, blob.getMetadata(), new PutOptions());
        } catch (Throwable t) {
            promise.completeExceptionally(t);
            return;
        }

        long dataObjectLength = 0;
        // start multi part upload for data block.
        try {
            long startEntry = 0;
            int partId = 1;
            long entryBytesWritten = 0;
            while (startEntry <= readHandle.getLastAddConfirmed()) {
                int blockSize = BlockAwareSegmentInputStreamImpl.calculateBlockSize(maxBlockSize, readHandle,
                        startEntry, entryBytesWritten);

                try (BlockAwareSegmentInputStream blockStream = new BlockAwareSegmentInputStreamImpl(readHandle,
                        startEntry, blockSize)) {

                    Payload partPayload = Payloads.newInputStreamPayload(blockStream);
                    partPayload.getContentMetadata().setContentLength((long) blockSize);
                    partPayload.getContentMetadata().setContentType("application/octet-stream");
                    parts.add(writeBlobStore.uploadMultipartPart(mpu, partId, partPayload));
                    log.debug("UploadMultipartPart. container: {}, blobName: {}, partId: {}, mpu: {}",
                            writeBucket, dataBlockKey, partId, mpu.id());

                    indexBuilder.addBlock(startEntry, partId, blockSize);

                    if (blockStream.getEndEntryId() != -1) {
                        startEntry = blockStream.getEndEntryId() + 1;
                    } else {
                        // could not read entry from ledger.
                        break;
                    }
                    entryBytesWritten += blockStream.getBlockEntryBytesCount();
                    partId++;
                }

                dataObjectLength += blockSize;
            }

            writeBlobStore.completeMultipartUpload(mpu, parts);
            mpu = null;
        } catch (Throwable t) {
            try {
                if (mpu != null) {
                    writeBlobStore.abortMultipartUpload(mpu);
                }
            } catch (Throwable throwable) {
                log.error("Failed abortMultipartUpload in bucket - {} with key - {}, uploadId - {}.",
                        writeBucket, dataBlockKey, mpu.id(), throwable);
            }
            promise.completeExceptionally(t);
            return;
        }

        // upload index block
        try (OffloadIndexBlock index = indexBuilder.withDataObjectLength(dataObjectLength).build();
                OffloadIndexBlock.IndexInputStream indexStream = index.toStream()) {
            // write the index block
            BlobBuilder blobBuilder = writeBlobStore.blobBuilder(indexBlockKey);
            addVersionInfo(blobBuilder, userMetadata);
            Payload indexPayload = Payloads.newInputStreamPayload(indexStream);
            indexPayload.getContentMetadata().setContentLength((long) indexStream.getStreamSize());
            indexPayload.getContentMetadata().setContentType("application/octet-stream");

            Blob blob = blobBuilder.payload(indexPayload).contentLength((long) indexStream.getStreamSize())
                    .build();

            writeBlobStore.putBlob(writeBucket, blob);
            promise.complete(null);
        } catch (Throwable t) {
            try {
                writeBlobStore.removeBlob(writeBucket, dataBlockKey);
            } catch (Throwable throwable) {
                log.error("Failed deleteObject in bucket - {} with key - {}.", writeBucket, dataBlockKey,
                        throwable);
            }
            promise.completeExceptionally(t);
            return;
        }
    });
    return promise;
}

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_TestBuckets.java

/**
 * Logic that runs when we come across an old test object
 * Check if source is done:
 * A. Has timed out
 * B. Has created enough results
 * If either are true, copy over what test results there are (if any), mark as done
 * 
 * @param data_bucket
 * @param old_test_source
 * @param source_test_db
 * @return
 */
private CompletableFuture<Boolean> handleExistingTestSource(final DataBucketBean data_bucket,
        final TestQueueBean old_test_source, final ICrudService<TestQueueBean> source_test_db) {

    // if null==started_processing_on, then source is still being started in a different thread, so just ignore it:
    if (null == old_test_source.started_processing_on()) {
        return CompletableFuture.completedFuture(true);
    }

    //ENTRY: is old      
    final ProcessingTestSpecBean test_spec = old_test_source.test_params();
    //get v1 bucket
    return getTestOutputCrudService(data_bucket).map(v2_output_db -> {
        //got the output crud, check if time is up or we have enough test results
        //1: time is up by checking started_on+test_spec vs now
        final long max_run_time_secs = Optional.ofNullable(test_spec.max_run_time_secs()).orElse(60L);
        final long time_expires_on = old_test_source.started_processing_on().getTime()
                + (max_run_time_secs * 1000L);
        if (new Date().getTime() > time_expires_on) {
            _logger.debug("Test job: " + data_bucket.full_name() + " expired, need to retire");
            return retireTestJob(data_bucket, old_test_source, source_test_db, v2_output_db);
        }
        //2: test results, if we've hit the requested num results
        return checkTestHitRequestedNumResults(v2_output_db, data_bucket, test_spec, old_test_source,
                source_test_db);
    }).orElseGet(() -> {
        //we couldn't get the output crud, need to exit out
        //complete exceptionally so sync will throw an error
        _logger.error("Error getting test output crud");
        CompletableFuture<Boolean> db_error_future = new CompletableFuture<Boolean>();
        db_error_future.completeExceptionally(
                new Exception("Error retrieving output db for test job: " + data_bucket._id()));
        return db_error_future;
    });
}

From source file:org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider.java

public CompletableFuture<Boolean> checkPermission(TopicName topicName, String role, AuthAction action) {
    CompletableFuture<Boolean> permissionFuture = new CompletableFuture<>();
    try {
        configCache.policiesCache().getAsync(POLICY_ROOT + topicName.getNamespace()).thenAccept(policies -> {
            if (!policies.isPresent()) {
                if (log.isDebugEnabled()) {
                    log.debug("Policies node couldn't be found for topic : {}", topicName);
                }
            } else {
                Map<String, Set<AuthAction>> namespaceRoles = policies.get().auth_policies.namespace_auth;
                Set<AuthAction> namespaceActions = namespaceRoles.get(role);
                if (namespaceActions != null && namespaceActions.contains(action)) {
                    // The role has namespace level permission
                    permissionFuture.complete(true);
                    return;
                }

                Map<String, Set<AuthAction>> topicRoles = policies.get().auth_policies.destination_auth
                        .get(topicName.toString());
                if (topicRoles != null) {
                    // Topic has custom policy
                    Set<AuthAction> topicActions = topicRoles.get(role);
                    if (topicActions != null && topicActions.contains(action)) {
                        // The role has topic level permission
                        permissionFuture.complete(true);
                        return;
                    }
                }

                // Using wildcard
                if (conf.isAuthorizationAllowWildcardsMatching()) {
                    if (checkWildcardPermission(role, action, namespaceRoles)) {
                        // The role has namespace level permission by wildcard match
                        permissionFuture.complete(true);
                        return;
                    }

                    if (topicRoles != null && checkWildcardPermission(role, action, topicRoles)) {
                        // The role has topic level permission by wildcard match
                        permissionFuture.complete(true);
                        return;
                    }
                }
            }
            permissionFuture.complete(false);
        }).exceptionally(ex -> {
            log.warn("Client  with Role - {} failed to get permissions for topic - {}. {}", role, topicName,
                    ex.getMessage());
            permissionFuture.completeExceptionally(ex);
            return null;
        });
    } catch (Exception e) {
        log.warn("Client  with Role - {} failed to get permissions for topic - {}. {}", role, topicName,
                e.getMessage());
        permissionFuture.completeExceptionally(e);
    }
    return permissionFuture;
}