Example usage for java.util.concurrent CompletableFuture get

List of usage examples for java.util.concurrent CompletableFuture get

Introduction

On this page you can find example usage of java.util.concurrent CompletableFuture.get().

Prototype

@SuppressWarnings("unchecked")
public T get() throws InterruptedException, ExecutionException 

Document

Waits if necessary for this future to complete, and then returns its result.
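
Before the project examples below, a minimal self-contained sketch (the class name is illustrative, not taken from any of the source files listed) shows both outcomes of get(): the calling thread blocks until the future is completed, and an exceptional completion is rethrown wrapped in an ExecutionException.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class CompletableFutureGetExample {
    public static void main(String[] args) throws InterruptedException {
        // completed asynchronously on a common-pool thread; get() blocks the caller until then
        CompletableFuture<String> ok = CompletableFuture.supplyAsync(() -> "hello");

        // completed exceptionally; get() rethrows the cause wrapped in an ExecutionException
        CompletableFuture<String> failed = new CompletableFuture<>();
        failed.completeExceptionally(new IllegalStateException("boom"));

        try {
            System.out.println("result: " + ok.get());     // waits if necessary, then returns "hello"
            System.out.println("result: " + failed.get()); // never prints; get() throws instead
        } catch (ExecutionException e) {
            System.err.println("failed: " + e.getCause()); // the wrapped IllegalStateException
        }
    }
}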

Usage

From source file:org.apache.bookkeeper.client.BookieWriteLedgerTest.java

@Test
@SuppressWarnings("unchecked")
public void testLedgerCreateAdvByteBufRefCnt() throws Exception {
    long ledgerId = rng.nextLong();
    ledgerId &= Long.MAX_VALUE;
    if (!baseConf.getLedgerManagerFactoryClass().equals(LongHierarchicalLedgerManagerFactory.class)) {
        // since LongHierarchicalLedgerManager supports ledgerIds of
        // decimal length up to 19 digits but other
        // LedgerManagers only up to 10 decimals
        ledgerId %= 9999999999L;
    }

    final LedgerHandle lh = bkc.createLedgerAdv(ledgerId, 5, 3, 2, digestType, ledgerPassword, null);

    final List<AbstractByteBufAllocator> allocs = Lists.newArrayList(new PooledByteBufAllocator(true),
            new PooledByteBufAllocator(false), new UnpooledByteBufAllocator(true),
            new UnpooledByteBufAllocator(false));

    long entryId = 0;
    for (AbstractByteBufAllocator alloc : allocs) {
        final ByteBuf data = alloc.buffer(10);
        data.writeBytes(("fragment0" + entryId).getBytes());
        assertEquals("ref count on ByteBuf should be 1", 1, data.refCnt());

        CompletableFuture<Integer> cf = new CompletableFuture<>();
        lh.asyncAddEntry(entryId, data, (rc, handle, eId, qwcLatency, ctx) -> {
            CompletableFuture<Integer> future = (CompletableFuture<Integer>) ctx;
            future.complete(rc);
        }, cf);
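        // the callback completes cf with the result code; get() below blocks until that happens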

        int rc = cf.get();
        assertEquals("rc code is OK", BKException.Code.OK, rc);

        for (int i = 0; i < 10; i++) {
            if (data.refCnt() == 0) {
                break;
            }
            TimeUnit.MILLISECONDS.sleep(250); // recycler runs asynchronously
        }
        assertEquals("writing entry with id " + entryId + ", ref count on ByteBuf should be 0 ", 0,
                data.refCnt());

        org.apache.bookkeeper.client.api.LedgerEntry e = lh.read(entryId, entryId).getEntry(entryId);
        assertEquals("entry data is correct", "fragment0" + entryId, new String(e.getEntryBytes()));
        entryId++;
    }

    bkc.deleteLedger(lh.ledgerId);
}

From source file:org.apache.bookkeeper.client.BookieWriteLedgerTest.java

/**
 * Verify that LedgerHandleAdv cannot handle addEntry without the entryId.
 *
 * @throws Exception
 */
@Test
public void testNoAddEntryLedgerCreateAdv() throws Exception {

    ByteBuffer entry = ByteBuffer.allocate(4);
    entry.putInt(rng.nextInt(maxInt));
    entry.position(0);

    lh = bkc.createLedgerAdv(5, 3, 2, digestType, ledgerPassword);
    assertTrue(lh instanceof LedgerHandleAdv);

    try {
        lh.addEntry(entry.array());
        fail("using LedgerHandleAdv addEntry without entryId is forbidden");
    } catch (BKException e) {
        assertEquals(e.getCode(), BKException.Code.IllegalOpException);
    }

    try {
        lh.addEntry(entry.array(), 0, 4);
        fail("using LedgerHandleAdv addEntry without entryId is forbidden");
    } catch (BKException e) {
        assertEquals(e.getCode(), BKException.Code.IllegalOpException);
    }

    try {
        CompletableFuture<Object> done = new CompletableFuture<>();
        lh.asyncAddEntry(Unpooled.wrappedBuffer(entry.array()),
                (int rc, LedgerHandle lh1, long entryId, Object ctx) -> {
                    SyncCallbackUtils.finish(rc, null, done);
                }, null);
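        // the callback completes the future exceptionally for a non-OK rc, so get() throws ExecutionException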
        done.get();
    } catch (ExecutionException ee) {
        assertTrue(ee.getCause() instanceof BKException);
        BKException e = (BKException) ee.getCause();
        assertEquals(e.getCode(), BKException.Code.IllegalOpException);
    }

    try {
        CompletableFuture<Object> done = new CompletableFuture<>();
        lh.asyncAddEntry(entry.array(), (int rc, LedgerHandle lh1, long entryId, Object ctx) -> {
            SyncCallbackUtils.finish(rc, null, done);
        }, null);
        done.get();
    } catch (ExecutionException ee) {
        assertTrue(ee.getCause() instanceof BKException);
        BKException e = (BKException) ee.getCause();
        assertEquals(e.getCode(), BKException.Code.IllegalOpException);
    }

    try {
        CompletableFuture<Object> done = new CompletableFuture<>();
        lh.asyncAddEntry(entry.array(), 0, 4, (int rc, LedgerHandle lh1, long entryId, Object ctx) -> {
            SyncCallbackUtils.finish(rc, null, done);
        }, null);
        done.get();
    } catch (ExecutionException ee) {
        assertTrue(ee.getCause() instanceof BKException);
        BKException e = (BKException) ee.getCause();
        assertEquals(e.getCode(), BKException.Code.IllegalOpException);
    }
    lh.close();
}

From source file:org.apache.bookkeeper.client.MetadataUpdateLoopTest.java

/**
 * Test that if we have two conflicting updates, only one of the loops will complete.
 * The other will throw an exception.
 */
@Test
public void testNewestValueCannotBeUsedAfterReadBack() throws Exception {
    try (BlockableMockLedgerManager lm = spy(new BlockableMockLedgerManager())) {
        lm.blockWrites();

        long ledgerId = 1234L;
        BookieSocketAddress b0 = new BookieSocketAddress("0.0.0.0:3181");
        BookieSocketAddress b1 = new BookieSocketAddress("0.0.0.1:3181");

        LedgerMetadata initMeta = LedgerMetadataBuilder.create().withEnsembleSize(1)
                .withDigestType(DigestType.CRC32C).withPassword(new byte[0]).withWriteQuorumSize(1)
                .withAckQuorumSize(1).newEnsembleEntry(0L, Lists.newArrayList(b0)).build();
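        // createLedgerMetadata returns a future; get() blocks until the initial metadata is stored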
        Versioned<LedgerMetadata> writtenMetadata = lm.createLedgerMetadata(ledgerId, initMeta).get();

        AtomicReference<Versioned<LedgerMetadata>> reference = new AtomicReference<>(writtenMetadata);
        CompletableFuture<Versioned<LedgerMetadata>> loop1 = new MetadataUpdateLoop(lm, ledgerId,
                reference::get, (currentMetadata) -> !currentMetadata.isClosed(), (currentMetadata) -> {
                    return LedgerMetadataBuilder.from(currentMetadata).withClosedState().withLastEntryId(10L)
                            .withLength(100L).build();
                }, reference::compareAndSet).run();
        CompletableFuture<Versioned<LedgerMetadata>> loop2 = new MetadataUpdateLoop(lm, ledgerId,
                reference::get, (currentMetadata) -> {
                    if (currentMetadata.isClosed()) {
                        throw new BKException.BKLedgerClosedException();
                    } else {
                        return currentMetadata.getEnsembleAt(0L).contains(b0);
                    }
                }, (currentMetadata) -> {
                    List<BookieSocketAddress> ensemble = Lists.newArrayList(currentMetadata.getEnsembleAt(0L));
                    ensemble.set(0, b1);
                    return LedgerMetadataBuilder.from(currentMetadata).replaceEnsembleEntry(0L, ensemble)
                            .build();
                }, reference::compareAndSet).run();
        lm.releaseWrites();
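        // get() blocks for each loop's outcome; the second loop fails once it reads back the closed metadata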

        Versioned<LedgerMetadata> l1meta = loop1.get();
        try {
            loop2.get();
            Assert.fail("Update loop should have failed");
        } catch (ExecutionException ee) {
            Assert.assertEquals(ee.getCause().getClass(), BKException.BKLedgerClosedException.class);
        }
        Assert.assertEquals(l1meta, reference.get());
        Assert.assertEquals(l1meta.getValue().getEnsembleAt(0L).get(0), b0);
        Assert.assertTrue(l1meta.getValue().isClosed());

        verify(lm, times(2)).writeLedgerMetadata(anyLong(), any(), any());
    }
}

From source file:org.apache.bookkeeper.client.MetadataUpdateLoopTest.java

/**
 * Test that when 2 update loops conflict when making different updates to the metadata,
 * both will eventually succeed, and both updates will be reflected in the final metadata.
 */
@Test
public void testConflictOnWrite() throws Exception {
    try (BlockableMockLedgerManager lm = spy(new BlockableMockLedgerManager())) {
        lm.blockWrites();

        long ledgerId = 1234L;
        BookieSocketAddress b0 = new BookieSocketAddress("0.0.0.0:3181");
        BookieSocketAddress b1 = new BookieSocketAddress("0.0.0.1:3181");
        BookieSocketAddress b2 = new BookieSocketAddress("0.0.0.2:3181");
        BookieSocketAddress b3 = new BookieSocketAddress("0.0.0.3:3181");

        LedgerMetadata initMeta = LedgerMetadataBuilder.create().withEnsembleSize(2)
                .withDigestType(DigestType.CRC32C).withPassword(new byte[0]).withWriteQuorumSize(2)
                .newEnsembleEntry(0L, Lists.newArrayList(b0, b1)).build();
        Versioned<LedgerMetadata> writtenMetadata = lm.createLedgerMetadata(ledgerId, initMeta).get();

        AtomicReference<Versioned<LedgerMetadata>> reference1 = new AtomicReference<>(writtenMetadata);
        CompletableFuture<Versioned<LedgerMetadata>> loop1 = new MetadataUpdateLoop(lm, ledgerId,
                reference1::get, (currentMetadata) -> currentMetadata.getEnsembleAt(0L).contains(b0),
                (currentMetadata) -> {
                    List<BookieSocketAddress> ensemble = Lists.newArrayList(currentMetadata.getEnsembleAt(0L));
                    ensemble.set(0, b2);
                    return LedgerMetadataBuilder.from(currentMetadata).replaceEnsembleEntry(0L, ensemble)
                            .build();
                }, reference1::compareAndSet).run();

        AtomicReference<Versioned<LedgerMetadata>> reference2 = new AtomicReference<>(writtenMetadata);
        CompletableFuture<Versioned<LedgerMetadata>> loop2 = new MetadataUpdateLoop(lm, ledgerId,
                reference2::get, (currentMetadata) -> currentMetadata.getEnsembleAt(0L).contains(b1),
                (currentMetadata) -> {
                    List<BookieSocketAddress> ensemble = Lists.newArrayList(currentMetadata.getEnsembleAt(0L));
                    ensemble.set(1, b3);
                    return LedgerMetadataBuilder.from(currentMetadata).replaceEnsembleEntry(0L, ensemble)
                            .build();
                }, reference2::compareAndSet).run();

        lm.releaseWrites();
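        // both loops retry on version conflict, so both get() calls return successfully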

        Versioned<LedgerMetadata> l1meta = loop1.get();
        Versioned<LedgerMetadata> l2meta = loop2.get();

        Assert.assertEquals(l1meta, reference1.get());
        Assert.assertEquals(l2meta, reference2.get());

        Assert.assertEquals(l1meta.getVersion().compare(l2meta.getVersion()), Version.Occurred.BEFORE);

        Assert.assertEquals(l1meta.getValue().getEnsembleAt(0L).get(0), b2);
        Assert.assertEquals(l1meta.getValue().getEnsembleAt(0L).get(1), b1);

        Assert.assertEquals(l2meta.getValue().getEnsembleAt(0L).get(0), b2);
        Assert.assertEquals(l2meta.getValue().getEnsembleAt(0L).get(1), b3);

        verify(lm, times(3)).writeLedgerMetadata(anyLong(), any(), any());
    }
}

From source file:com.ikanow.aleph2.data_import_manager.harvest.modules.LocalHarvestTestModule.java

/** Actually perform harvester command
 * @param source_key
 * @param harvest_tech_jar_path
 * @param command
 * @throws ExecutionException 
 * @throws InterruptedException 
 * @throws IOException 
 * @throws JsonMappingException 
 * @throws JsonParseException 
 */
private void run_command(String source_key, String harvest_tech_jar_path, String command) throws Exception {

    @SuppressWarnings("unchecked")
    final ICrudService<JsonNode> v1_config_db = _underlying_management_db
            .getUnderlyingPlatformDriver(ICrudService.class, Optional.of("ingest.source")).get();

    final SingleQueryComponent<JsonNode> query = CrudUtils.allOf().when("key", source_key);
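    // getObjectBySpec returns a CompletableFuture; get() blocks until the v1 source lookup returns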
    final Optional<JsonNode> result = v1_config_db.getObjectBySpec(query).get();

    if (!result.isPresent()) {
        System.out.println("Must specify valid source.key: " + source_key);
        return;
    }

    // Create a bucket out of the source

    DataBucketBean bucket = createBucketFromSource(result.get());

    // OK now we simply create an instance of the harvester and invoke it

    final Validation<BasicMessageBean, IHarvestTechnologyModule> ret_val = ClassloaderUtils
            .getFromCustomClasspath(IHarvestTechnologyModule.class,
                    "com.ikanow.aleph2.test.example.ExampleHarvestTechnology",
                    Optional.of(new File(harvest_tech_jar_path).getAbsoluteFile().toURI().toString()),
                    Collections.emptyList(), "test1", command);

    final IHarvestContext context = _injector.getInstance(HarvestContext.class);

    if (ret_val.isFail()) {
        System.out.println("Failed to instantiate harvester: " + ret_val.fail().message());
    } else {
        final IHarvestTechnologyModule harvester = ret_val.success();
        if (command.equals("canRunOnThisNode")) {
            System.out.println(command + ": " + harvester.canRunOnThisNode(bucket, context));
        } else {
            CompletableFuture<BasicMessageBean> harvest_result = null; // (this is most of them)
            if (command.equals("onDelete")) {
                harvest_result = harvester.onDelete(bucket, context);
            } else if (command.equals("onHarvestComplete")) {
                harvest_result = harvester.onHarvestComplete(bucket, context);
            } else if (command.equals("onNewSource")) {
                harvest_result = harvester.onNewSource(bucket, context, true);
            } else if (command.equals("onPeriodicPoll")) {
                harvest_result = harvester.onPeriodicPoll(bucket, context);
            } else if (command.equals("onPurge")) {
                harvest_result = harvester.onPurge(bucket, context);
            } else if (command.equals("onTestSource")) {
                harvest_result = harvester.onTestSource(bucket, null, context);
            } else if (command.equals("onUpdatedSource")) {
                harvest_result = harvester.onUpdatedSource(bucket, bucket, true, Optional.empty(), context);
            } else {
                if (command.equals("help")) {
                    System.out.println("Allowed commands: ");
                } else {
                    System.out.println("Command not recognized, allowed commands: ");
                }
                System.out.println(Arrays.asList(harvester.getClass().getMethods()).stream()
                        .map(m -> m.getName()).collect(Collectors.joining(",")));
            }
            if (null != harvest_result) {
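                // get() blocks until the harvester command completes, then the result bean is printed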
                System.out.println(command + ": success: " + harvest_result.get().success());
                System.out.println(command + ": source: " + harvest_result.get().source());
                System.out.println(command + ": message: " + harvest_result.get().message());
            } else {
                System.out.println("(no return value)");
            }
        }
    }
}

From source file:org.apache.pulsar.functions.worker.rest.api.ComponentImpl.java

private void updateRequest(final FunctionMetaData functionMetaData) {

    // Submit to FMT
    FunctionMetaDataManager functionMetaDataManager = worker().getFunctionMetaDataManager();

    CompletableFuture<RequestResult> completableFuture = functionMetaDataManager
            .updateFunction(functionMetaData);

    RequestResult requestResult = null;
    try {
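        // blocks until the function metadata update completes; failures are mapped to RestExceptions below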
        requestResult = completableFuture.get();
        if (!requestResult.isSuccess()) {
            throw new RestException(Status.BAD_REQUEST, requestResult.getMessage());
        }
    } catch (ExecutionException e) {
        throw new RestException(Status.INTERNAL_SERVER_ERROR, e.getMessage());
    } catch (InterruptedException e) {
        throw new RestException(Status.REQUEST_TIMEOUT, e.getMessage());
    }

}

From source file:org.apache.distributedlog.auditor.DLAuditor.java

/**
 * Find leak ledgers phase 1: collect ledgers set.
 */
private Set<Long> collectLedgersFromBK(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());

    final Set<Long> ledgers = new HashSet<Long>();
    final CompletableFuture<Void> doneFuture = FutureUtils.createFuture();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(Long lid, final AsyncCallback.VoidCallback cb) {
            synchronized (ledgers) {
                ledgers.add(lid);
                if (0 == ledgers.size() % 1000) {
                    logger.info("Collected {} ledgers", ledgers.size());
                }
            }
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    cb.processResult(BKException.Code.OK, null, null);
                }
            });

        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.complete(null);
            } else {
                doneFuture.completeExceptionally(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
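        // wait for asyncProcessLedgers to invoke the final callback and complete doneFuture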
        doneFuture.get();
        logger.info("Collected total {} ledgers", ledgers.size());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on collecting ledgers : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to collect ledgers : ", e.getCause());
        }
    }
    return ledgers;
}

From source file:io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testKeepAlive() throws IOException, ExecutionException, InterruptedException {

    // Verify that keep-alive timeout less than permissible by the server results in a failure.
    ControllerImpl controller = new ControllerImpl(NettyChannelBuilder.forAddress("localhost", serverPort)
            .keepAliveTime(10, TimeUnit.SECONDS).usePlaintext(true));
    CompletableFuture<Boolean> createStreamStatus = controller.createStream(StreamConfiguration.builder()
            .streamName("streamdelayed").scope("scope1").scalingPolicy(ScalingPolicy.fixed(1)).build());
    AssertExtensions.assertThrows("Should throw Exception", createStreamStatus,
            throwable -> throwable instanceof StatusRuntimeException);

    // Verify that the same RPC with permissible keepalive time succeeds.
    int serverPort2 = TestUtils.getAvailableListenPort();
    ServerImpl testServer = NettyServerBuilder.forPort(serverPort2).addService(testServerImpl)
            .permitKeepAliveTime(5, TimeUnit.SECONDS).build().start();
    controller = new ControllerImpl(NettyChannelBuilder.forAddress("localhost", serverPort2)
            .keepAliveTime(10, TimeUnit.SECONDS).usePlaintext(true));
    createStreamStatus = controller.createStream(StreamConfiguration.builder().streamName("streamdelayed")
            .scope("scope1").scalingPolicy(ScalingPolicy.fixed(1)).build());
    assertTrue(createStreamStatus.get());
    testServer.shutdownNow();
}

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - update mode
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);
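    // corresponding_status.get() below blocks until the status lookup completes; creation is rejected without a status object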

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields like multi-node, you can only change if the bucket status is set to suspended, to make
    // the control logic easy
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}

From source file:com.ikanow.aleph2.analytics.storm.utils.StormControllerUtil.java

/**
 * Starts up a storm job.
 *
 * 1. Gets the storm instance from the yarn config
 * 2. Makes a mega jar consisting of:
 *    A. Underlying artefacts (system libs)
 *    B. User supplied libraries
 * 3. Submits the megajar to storm with a jobname of the bucket id
 * 
 * @param bucket
 * @param underlying_artefacts
 * @param yarn_config_dir
 * @param user_lib_paths
 * @param topology
 * @return
 */
public static CompletableFuture<BasicMessageBean> startJob(final IStormController storm_controller,
        final DataBucketBean bucket, final Optional<String> sub_job,
        final Collection<Object> underlying_artefacts, final Collection<String> user_lib_paths,
        final StormTopology topology, final Map<String, String> config, final String cached_jar_dir) {
    if (null == topology) {
        return CompletableFuture.completedFuture(ErrorUtils.buildErrorMessage(StormControllerUtil.class,
                "startJob", ErrorUtils.TOPOLOGY_NULL_ERROR, bucket.full_name()));
    }

    _logger.info("Retrieved user Storm config topology: spouts=" + topology.get_spouts_size() + " bolts="
            + topology.get_bolts_size() + " configs=" + config.toString());

    final Set<String> jars_to_merge = new TreeSet<String>();

    final CompletableFuture<String> jar_future = Lambdas.get(() -> {
        if (RemoteStormController.class.isAssignableFrom(storm_controller.getClass())) {
            // (This is only necessary in the remote case)

            jars_to_merge.addAll(underlying_artefacts.stream()
                    .map(artefact -> LiveInjector.findPathJar(artefact.getClass(), ""))
                    .filter(f -> !f.equals("")).collect(Collectors.toSet()));

            if (jars_to_merge.isEmpty()) { // special case: no aleph2 libs found, this is almost certainly because this is being run from eclipse...
                final GlobalPropertiesBean globals = ModuleUtils.getGlobalProperties();
                _logger.warn(
                        "WARNING: no library files found, probably because this is running from an IDE - instead taking all JARs from: "
                                + (globals.local_root_dir() + "/lib/"));
                try {
                    //... and LiveInjector doesn't work on classes ... as a backup just copy everything from "<LOCAL_ALEPH2_HOME>/lib" into there
                    jars_to_merge
                            .addAll(FileUtils
                                    .listFiles(new File(globals.local_root_dir() + "/lib/"),
                                            new String[] { "jar" }, false)
                                    .stream().map(File::toString).collect(Collectors.toList()));
                } catch (Exception e) {
                    throw new RuntimeException("In eclipse/IDE mode, directory not found: "
                            + (globals.local_root_dir() + "/lib/"));
                }
            }
            //add in the user libs
            jars_to_merge.addAll(user_lib_paths);

            //create jar
            return buildOrReturnCachedStormTopologyJar(jars_to_merge, cached_jar_dir);
        } else {
            return CompletableFuture.completedFuture("/unused/dummy.jar");
        }
    });

    //submit to storm
    @SuppressWarnings("unchecked")
    final CompletableFuture<BasicMessageBean> submit_future = Lambdas.get(() -> {
        long retries = 0;
        while (retries < MAX_RETRIES) {
            try {
                _logger.debug("Trying to submit job, try: " + retries + " of " + MAX_RETRIES);
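                // wait for the (possibly cached) topology jar to finish building before submitting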
                final String jar_file_location = jar_future.get();
                return storm_controller.submitJob(bucketPathToTopologyName(bucket, sub_job), jar_file_location,
                        topology, (Map<String, Object>) (Map<String, ?>) config);
            } catch (Exception ex) {
                if (ex instanceof AlreadyAliveException) {
                    retries++;
                    //sleep 1s, was seeing about 2s of sleep required before job successfully submitted on restart
                    try {
                        Thread.sleep(1000);
                    } catch (Exception e) {
                        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                        error_future.completeExceptionally(e);
                        return error_future;
                    }
                } else {
                    retries = MAX_RETRIES; //we threw some other exception, bail out
                    final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
                    error_future.completeExceptionally(ex);
                    return error_future;
                }
            }
        }
        //we maxed out our retries, throw failure
        final CompletableFuture<BasicMessageBean> error_future = new CompletableFuture<BasicMessageBean>();
        error_future.completeExceptionally(new Exception(
                "Error submitting job, ran out of retries (previous (same name) job is probably still alive)"));
        return error_future;
    });
    return submit_future;
}