Example usage for java.util.concurrent CompletableFuture completedFuture

Introduction

This page collects example usages of java.util.concurrent.CompletableFuture.completedFuture, drawn from real open-source projects.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Document

Returns a new CompletableFuture that is already completed with the given value.
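
A minimal, self-contained sketch of what this gives you: the returned future is already done, so join() never blocks and dependent stages run immediately on the calling thread.

import java.util.concurrent.CompletableFuture;

CompletableFuture<String> greeting = CompletableFuture.completedFuture("hello");

System.out.println(greeting.isDone());    // true - no waiting needed
System.out.println(greeting.join());      // "hello", returned immediately
greeting.thenApply(String::toUpperCase)
        .thenAccept(System.out::println); // prints "HELLO" right away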

Usage

From source file:com.ikanow.aleph2.data_model.interfaces.shared_services.MockManagementCrudService.java

@Override
public ManagementFuture<Long> updateObjectsBySpec(QueryComponent<T> spec, Optional<Boolean> upsert,
        UpdateComponent<T> update) {
    return FutureUtils.createManagementFuture(CompletableFuture.completedFuture((long) _mutable_values.size()));
}

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_Buckets.java

/** Top level logic for source synchronization
 * @param bucket_mgmt the bucket CRUD service
 * @param underlying_bucket_status_mgmt the bucket status CRUD service
 * @param source_db the V1 source collection
 */
protected CompletableFuture<Void> synchronizeSources(final IManagementCrudService<DataBucketBean> bucket_mgmt,
        final IManagementCrudService<DataBucketStatusBean> underlying_bucket_status_mgmt,
        final ICrudService<JsonNode> source_db) {
    return compareSourcesToBuckets_get(bucket_mgmt, source_db).thenApply(v1_v2 -> {
        return compareSourcesToBuckets_categorize(v1_v2);
    }).thenCompose(create_update_delete -> {
        if (create_update_delete._1().isEmpty() && create_update_delete._2().isEmpty()
                && create_update_delete._3().isEmpty()) {
            //(nothing to do)
            return CompletableFuture.completedFuture(null);
        }
        _logger.info(ErrorUtils.get("Found [create={0}, delete={1}, update={2}] sources",
                create_update_delete._1().size(), create_update_delete._2().size(),
                create_update_delete._3().size()));

        final List<CompletableFuture<Boolean>> l1 = create_update_delete._1().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(key -> Tuples._2T(key,
                        createNewBucket(key, bucket_mgmt, underlying_bucket_status_mgmt, source_db)))
                .<CompletableFuture<Boolean>>map(
                        key_fres -> updateV1SourceStatus_top(key_fres._1(), key_fres._2(), true, source_db))
                .collect(Collectors.toList());

        final List<CompletableFuture<Boolean>> l2 = create_update_delete._2().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(
                        key -> Tuples._2T(key, deleteBucket(key, bucket_mgmt)))
                .<CompletableFuture<Boolean>>map(key_fres -> CompletableFuture.completedFuture(true)) // (don't update source in delete case obviously)
                .collect(Collectors.toList());

        final List<CompletableFuture<Boolean>> l3 = create_update_delete._3().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(key -> Tuples._2T(key,
                        updateBucket(key, bucket_mgmt, underlying_bucket_status_mgmt, source_db)))
                .<CompletableFuture<Boolean>>map(
                        key_fres -> updateV1SourceStatus_top(key_fres._1(), key_fres._2(), false, source_db))
                .collect(Collectors.toList());

        List<CompletableFuture<?>> retval = Arrays.asList(l1, l2, l3).stream().flatMap(l -> l.stream())
                .collect(Collectors.toList());

        return CompletableFuture.allOf(retval.toArray(new CompletableFuture[0]));
    });
}
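
Two idioms in the example above are worth calling out: completedFuture(null) as an immediate no-op result for a CompletableFuture<Void>, and allOf to fan in a whole list of futures. A minimal sketch of the combination (the helper name is illustrative):

import java.util.List;
import java.util.concurrent.CompletableFuture;

static CompletableFuture<Void> runAll(List<CompletableFuture<?>> pending) {
    if (pending.isEmpty()) {
        // Nothing to do: hand back an already-completed Void future.
        return CompletableFuture.completedFuture(null);
    }
    // allOf completes only once every future in the array has completed.
    return CompletableFuture.allOf(pending.toArray(new CompletableFuture[0]));
}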

From source file:io.pravega.controller.store.stream.InMemoryStream.java

@Override
CompletableFuture<Data<Integer>> getIndexTable() {
    synchronized (lock) {
        if (this.indexTable == null) {
            return FutureHelpers
                    .failedFuture(StoreException.create(StoreException.Type.DATA_NOT_FOUND, getName()));
        }
        return CompletableFuture.completedFuture(copy(indexTable));
    }
}
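
Note the pairing: completedFuture for the success path, a helper-built failed future for the error path. Before Java 9 there was no CompletableFuture.failedFuture, which is why helpers like Pravega's FutureHelpers exist; a minimal sketch of such a helper:

import java.util.concurrent.CompletableFuture;

// Pre-Java-9 equivalent of CompletableFuture.failedFuture(t).
static <T> CompletableFuture<T> failedFuture(Throwable t) {
    CompletableFuture<T> future = new CompletableFuture<>();
    future.completeExceptionally(t);
    return future;
}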

From source file:com.ikanow.aleph2.harvest.logstash.services.LogstashHarvestService.java

@Override
public CompletableFuture<BasicMessageBean> onDecommission(DataBucketBean to_decommission,
        IHarvestContext context) {
    return CompletableFuture.completedFuture(
            ErrorUtils.buildMessage(true, this.getClass().getSimpleName(), "onDecommission", "NYI"));
}

From source file:com.yahoo.pulsar.common.naming.NamespaceBundlesTest.java

@SuppressWarnings("unchecked")
private Pair<NamespaceBundles, List<NamespaceBundle>> splitBundlesUtilFactory(
        NamespaceBundleFactory utilityFactory, NamespaceName nsname, NamespaceBundles bundles,
        NamespaceBundle targetBundle, int numBundles) throws Exception {
    Field bCacheField = NamespaceBundleFactory.class.getDeclaredField("bundlesCache");
    bCacheField.setAccessible(true);
    ((AsyncLoadingCache<NamespaceName, NamespaceBundles>) bCacheField.get(utilityFactory)).put(nsname,
            CompletableFuture.completedFuture(bundles));
    return utilityFactory.splitBundles(targetBundle, numBundles);
}
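
Here completedFuture seeds a Caffeine AsyncLoadingCache with a pre-computed value, so the async loader is bypassed for that key. A standalone sketch of the same trick (expensiveLookup is an illustrative loader, not part of the original test):

import java.util.concurrent.CompletableFuture;
import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
import com.github.benmanes.caffeine.cache.Caffeine;

AsyncLoadingCache<String, Integer> cache = Caffeine.newBuilder()
        .buildAsync(key -> expensiveLookup(key)); // loader runs on cache misses

// Pre-populate with an already-completed future; the loader never runs for this key.
cache.put("answer", CompletableFuture.completedFuture(42));
cache.get("answer").thenAccept(System.out::println); // prints 42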

From source file:io.pravega.controller.task.Stream.StreamMetadataTasks.java

private CompletableFuture<Void> checkGenerateStreamCut(String scope, String stream, OperationContext context,
        RetentionPolicy policy, StreamCutRecord latestCut, long recordingTime) {
    switch (policy.getRetentionType()) {
    case TIME:
        if (latestCut == null || recordingTime - latestCut.getRecordingTime() > Duration
                .ofMinutes(Config.MINIMUM_RETENTION_FREQUENCY_IN_MINUTES).toMillis()) {
            return generateStreamCut(scope, stream, context).thenCompose(newRecord -> streamMetadataStore
                    .addStreamCutToRetentionSet(scope, stream, newRecord, context, executor));
        } else {
            return CompletableFuture.completedFuture(null);
        }
    case SIZE:
    default:
        throw new NotImplementedException("Size based retention");
    }
}

From source file:com.yahoo.pulsar.broker.namespace.NamespaceServiceTest.java

@Test
public void testUnloadNamespaceBundleFailure() throws Exception {

    final String topicName = "persistent://my-property/use/my-ns/my-topic1";
    ConsumerConfiguration conf = new ConsumerConfiguration();
    Consumer consumer = pulsarClient.subscribe(topicName, "my-subscriber-name", conf);
    ConcurrentOpenHashMap<String, CompletableFuture<Topic>> topics = pulsar.getBrokerService().getTopics();
    Topic spyTopic = spy(topics.get(topicName).get());
    topics.clear();
    CompletableFuture<Topic> topicFuture = CompletableFuture.completedFuture(spyTopic);
    // add mock topic
    topics.put(topicName, topicFuture);
    doAnswer(new Answer<CompletableFuture<Void>>() {
        @Override
        public CompletableFuture<Void> answer(InvocationOnMock invocation) throws Throwable {
            CompletableFuture<Void> result = new CompletableFuture<>();
            result.completeExceptionally(new RuntimeException("first time failed"));
            return result;
        }
    }).when(spyTopic).close();
    NamespaceBundle bundle = pulsar.getNamespaceService().getBundle(DestinationName.get(topicName));
    try {
        pulsar.getNamespaceService().unloadNamespaceBundle(bundle);
    } catch (Exception e) {
        // fail
        fail(e.getMessage());
    }
    try {
        pulsar.getLocalZkCache().getZooKeeper().getData(ServiceUnitZkUtils.path(bundle), null, null);
        fail("it should fail as node is not present");
    } catch (org.apache.zookeeper.KeeperException.NoNodeException e) {
        // ok
    }
}

From source file:io.pravega.segmentstore.server.host.ZKSegmentContainerMonitorTest.java

@Test
public void testRetryOnExceptions() throws Exception {
    @Cleanup
    CuratorFramework zkClient = startClient();
    initializeHostContainerMapping(zkClient);

    SegmentContainerRegistry containerRegistry = createMockContainerRegistry();
    @Cleanup
    ZKSegmentContainerMonitor segMonitor = createContainerMonitor(containerRegistry, zkClient);
    segMonitor.initialize(Duration.ofSeconds(1));

    // Simulate a container that throws exception on start.
    when(containerRegistry.startContainer(eq(2), any())).thenThrow(new RuntimeException());

    // Use ZK to send that information to the Container Manager.
    HashMap<Host, Set<Integer>> currentData = deserialize(zkClient, PATH);
    currentData.put(PRAVEGA_SERVICE_ENDPOINT, Collections.singleton(2));
    zkClient.setData().forPath(PATH, SerializationUtils.serialize(currentData));

    // Verify that it does not start.
    verify(containerRegistry, timeout(1000).atLeastOnce()).startContainer(eq(2), any());
    assertEquals(0, segMonitor.getRegisteredContainers().size());

    // Now simulate success for the same container.
    ContainerHandle containerHandle = mock(ContainerHandle.class);
    when(containerHandle.getContainerId()).thenReturn(2);
    when(containerRegistry.startContainer(eq(2), any()))
            .thenReturn(CompletableFuture.completedFuture(containerHandle));

    // Verify that it retries and starts the same container again.
    verify(containerRegistry, timeout(1000).atLeastOnce()).startContainer(eq(2), any());

    // Using wait here to ensure the private data structure is updated.
    // TODO: Removing dependency on sleep here and other places in this class
    // - https://github.com/pravega/pravega/issues/1079
    Thread.sleep(2000);
    assertEquals(1, segMonitor.getRegisteredContainers().size());
}
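
completedFuture is also the natural way to stub asynchronous APIs with Mockito, as above: thenReturn needs a ready value, and a completed future resolves as soon as the test touches it. A minimal sketch of the pattern (the UserService interface is illustrative):

import static org.mockito.Mockito.*;
import java.util.concurrent.CompletableFuture;

interface UserService { // illustrative async API
    CompletableFuture<String> findName(long id);
}

UserService service = mock(UserService.class);
when(service.findName(42L))
        .thenReturn(CompletableFuture.completedFuture("alice")); // stub resolves immediately

service.findName(42L).thenAccept(System.out::println); // prints "alice"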

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

public ManagementFuture<Supplier<Object>> storeObject(final DataBucketBean new_object,
        final boolean replace_if_present) {
    try {
        // New bucket vs update - get the old bucket (we'll do this non-concurrently at least for now)

        final Optional<DataBucketBean> old_bucket = Lambdas.get(() -> {
            try {
                if (replace_if_present && (null != new_object._id())) {
                    return _underlying_data_bucket_db.get().getObjectById(new_object._id()).get();
                } else {
                    return Optional.<DataBucketBean>empty();
                }
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        });

        // Validation (also generates a clone of the bucket with the data_locations written in)

        final Tuple2<DataBucketBean, Collection<BasicMessageBean>> validation_info = validateBucket(new_object,
                old_bucket, true, false);

        if (!validation_info._2().isEmpty() && validation_info._2().stream().anyMatch(m -> !m.success())) {
            return FutureUtils.createManagementFuture(
                    FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                    CompletableFuture.completedFuture(validation_info._2()));
        }
        return storeValidatedObject(validation_info._1(), old_bucket, validation_info._2(), replace_if_present);
    } catch (Exception e) {
        // This is a serious enough exception that we'll just leave here
        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
}

From source file:com.ikanow.aleph2.aleph2_rest_utils.DataStoreCrudService.java

@Override
public CompletableFuture<Cursor<FileDescriptor>> getObjectsBySpec(QueryComponent<FileDescriptor> spec,
        List<String> field_list, boolean include) {
    try {
        return CompletableFuture
                .completedFuture(new DataStoreCursor(getFolderFilenames(output_directory, fileContext)));
    } catch (IllegalArgumentException | IOException e) {
        final CompletableFuture<Cursor<FileDescriptor>> fut = new CompletableFuture<Cursor<FileDescriptor>>();
        fut.completeExceptionally(e);
        return fut;
    }
}
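
This last example shows the usual dual of completedFuture: complete normally on success, exceptionally on failure, so callers always get a CompletableFuture either way. A generic sketch of the pattern (the helper name is illustrative):

import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;

// Wrap a synchronous computation in a future without running it asynchronously.
static <T> CompletableFuture<T> fromCallable(Callable<T> task) {
    try {
        return CompletableFuture.completedFuture(task.call());
    } catch (Exception e) {
        CompletableFuture<T> failed = new CompletableFuture<>();
        failed.completeExceptionally(e);
        return failed;
    }
}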