Example usage for java.util.concurrent CompletableFuture join

Introduction

On this page you can find example usages of java.util.concurrent CompletableFuture join, collected from open-source projects.

Prototype

@SuppressWarnings("unchecked")
public T join() 

Document

Returns the result value when complete, or throws an (unchecked) exception if completed exceptionally.
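
A minimal, self-contained sketch of that contract (class and values here are illustrative): on normal completion join() returns the result, and on exceptional completion it throws an unchecked CompletionException wrapping the original cause, so the compiler forces no try/catch (unlike get(), which throws a checked ExecutionException).

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;

public class JoinContractDemo {
    public static void main(String[] args) {
        // Normal completion: join() blocks if necessary, then returns the result.
        CompletableFuture<String> ok = CompletableFuture.supplyAsync(() -> "hello");
        System.out.println(ok.join()); // prints "hello"

        // Exceptional completion: join() throws an unchecked CompletionException
        // whose getCause() is the original exception.
        CompletableFuture<String> failed = CompletableFuture.supplyAsync(() -> {
            throw new IllegalStateException("boom");
        });
        try {
            failed.join();
        } catch (CompletionException e) {
            System.out.println("cause: " + e.getCause());
        }
    }
}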

Usage

From source file:com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_TestBuckets.java

/**
 * Returns all test sources that are not marked as completed or errored
 * @param source_test_db
 * @return
 */
protected CompletableFuture<List<TestQueueBean>> getAllTestSources(
        final ICrudService<TestQueueBean> source_test_db) {
    final QueryComponent<TestQueueBean> get_query = CrudUtils.allOf(TestQueueBean.class)
            .whenNot(TestQueueBean::status, TestStatus.completed)
            .whenNot(TestQueueBean::status, TestStatus.error); //can be complete | error | in_progress | submitted | {unset/anything else}

    final QueryComponent<TestQueueBean> update_query = CrudUtils.allOf(TestQueueBean.class)
            .whenNot(TestQueueBean::status, TestStatus.in_progress)
            .whenNot(TestQueueBean::status, TestStatus.completed)
            .whenNot(TestQueueBean::status, TestStatus.error); //can be complete | error | in_progress | submitted | {unset/anything else}

    final UpdateComponent<TestQueueBean> update_command = CrudUtils.update(TestQueueBean.class)
            .set(TestQueueBean::status, TestStatus.in_progress)
    // (don't set started_processing_on - only set that once the job has been launched)
    ;

    final CompletableFuture<List<TestQueueBean>> get_command = source_test_db.getObjectsBySpec(get_query)
            .thenApply(c -> StreamSupport.stream(c.spliterator(), false).collect(Collectors.toList()));

    return get_command.thenCompose(__ -> {
        return source_test_db.updateObjectsBySpec(update_query, Optional.of(false), update_command);
    }).thenApply(__ -> get_command.join()); // (ie return the original command but only once the update has completed)
}
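
Note the final thenApply: get_command.join() cannot block there, because that stage only runs after get_command has completed. A stripped-down sketch of the same sequencing pattern, with hypothetical fetch/update futures standing in for the CRUD calls:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

public class SequencedJoinSketch {
    // Hypothetical stand-ins for the CRUD service calls above.
    static CompletableFuture<List<String>> fetchItems() {
        return CompletableFuture.supplyAsync(() -> Arrays.asList("a", "b"));
    }

    static CompletableFuture<Boolean> markInProgress(List<String> items) {
        return CompletableFuture.supplyAsync(() -> true);
    }

    public static void main(String[] args) {
        final CompletableFuture<List<String>> fetched = fetchItems();
        final CompletableFuture<List<String>> result = fetched
                .thenCompose(SequencedJoinSketch::markInProgress)
                // 'fetched' is necessarily complete once this stage runs,
                // so join() returns immediately instead of blocking.
                .thenApply(ignored -> fetched.join());
        System.out.println(result.join()); // [a, b]
    }
}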

From source file:io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testDeleteStream() {
    CompletableFuture<Boolean> deleteStreamStatus;
    deleteStreamStatus = controllerClient.deleteStream("scope1", "stream1");
    assertTrue(deleteStreamStatus.join());

    deleteStreamStatus = controllerClient.deleteStream("scope1", "stream2");
    AssertExtensions.assertThrows("Should throw Exception", deleteStreamStatus, throwable -> true);

    deleteStreamStatus = controllerClient.deleteStream("scope1", "stream3");
    assertFalse(deleteStreamStatus.join());

    deleteStreamStatus = controllerClient.deleteStream("scope1", "stream4");
    AssertExtensions.assertThrows("Should throw Exception", deleteStreamStatus, throwable -> true);

    deleteStreamStatus = controllerClient.deleteStream("scope1", "stream5");
    AssertExtensions.assertThrows("Should throw Exception", deleteStreamStatus, throwable -> true);
}
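
Because join() declares no checked exceptions, it can be called inline in an assertion such as assertTrue(deleteStreamStatus.join()) without a throws clause on the test method. A minimal illustration of the same point, with no test framework assumed:

import java.util.concurrent.CompletableFuture;

public class InlineJoin {
    public static void main(String[] args) {
        CompletableFuture<Boolean> status = CompletableFuture.completedFuture(true);
        // Unlike get(), join() needs no try/catch (or throws clause) here.
        if (!status.join()) {
            throw new AssertionError("expected true");
        }
        System.out.println("ok");
    }
}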

From source file:io.pravega.client.stream.impl.ControllerImplTest.java

@Test
public void testDeleteScope() {
    CompletableFuture<Boolean> deleteStatus;
    String scope1 = "scope1";
    String scope2 = "scope2";
    String scope3 = "scope3";
    String scope4 = "scope4";
    String scope5 = "scope5";

    deleteStatus = controllerClient.deleteScope(scope1);
    assertTrue(deleteStatus.join());

    deleteStatus = controllerClient.deleteScope(scope2);
    AssertExtensions.assertThrows("Server should throw exception", deleteStatus, Throwable -> true);

    deleteStatus = controllerClient.deleteScope(scope3);
    AssertExtensions.assertThrows("Server should throw exception", deleteStatus, Throwable -> true);

    deleteStatus = controllerClient.deleteScope(scope4);
    assertFalse(deleteStatus.join());

    deleteStatus = controllerClient.deleteScope(scope5);
    AssertExtensions.assertThrows("Server should throw exception", deleteStatus, Throwable -> true);
}

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Validates whether the new or updated bucket is valid: both in terms of authorization and in terms of format
 * @param bucket
 * @return
 * @throws ExecutionException 
 * @throws InterruptedException 
 */
protected Tuple2<DataBucketBean, Collection<BasicMessageBean>> validateBucket(final DataBucketBean bucket,
        final Optional<DataBucketBean> old_version, boolean do_full_checks, final boolean allow_system_names)
        throws InterruptedException, ExecutionException {

    // (will live with this being mutable)
    final LinkedList<BasicMessageBean> errors = new LinkedList<BasicMessageBean>();

    final JsonNode bucket_json = BeanTemplateUtils.toJson(bucket);

    /////////////////

    // PHASE 1

    // Check for missing fields

    ManagementDbErrorUtils.NEW_BUCKET_ERROR_MAP.keySet().stream()
            .filter(s -> !bucket_json.has(s)
                    || (bucket_json.get(s).isTextual() && bucket_json.get(s).asText().isEmpty()))
            .forEach(s -> errors.add(MgmtCrudUtils
                    .createValidationError(ErrorUtils.get(ManagementDbErrorUtils.NEW_BUCKET_ERROR_MAP.get(s),
                            Optional.ofNullable(bucket.full_name()).orElse("(unknown)")))));

    // We have a full name if we're here, so no check for uniqueness

    // Check for some bucket path restrictions
    if (null != bucket.full_name()) {
        if (!BucketValidationUtils.bucketPathFormatValidationCheck(bucket.full_name())) {
            errors.add(MgmtCrudUtils
                    .createValidationError(ErrorUtils.get(ManagementDbErrorUtils.BUCKET_FULL_NAME_FORMAT_ERROR,
                            Optional.ofNullable(bucket.full_name()).orElse("(unknown)"))));

            return Tuples._2T(bucket, errors); // (this is catastrophic obviously)         
        }

        if (!old_version.isPresent()) { // (create not update)
            if (do_full_checks) {
                if (this._underlying_data_bucket_db.get().countObjectsBySpec(CrudUtils
                        .allOf(DataBucketBean.class).when(DataBucketBean::full_name, bucket.full_name()))
                        .get() > 0) {
                    errors.add(MgmtCrudUtils.createValidationError(
                            ErrorUtils.get(ManagementDbErrorUtils.BUCKET_FULL_NAME_UNIQUENESS,
                                    Optional.ofNullable(bucket.full_name()).orElse("(unknown)"))));

                    return Tuples._2T(bucket, errors); // (this is catastrophic obviously)
                }
            }
        }
    } else
        return Tuples._2T(bucket, errors); // (this is catastrophic obviously)

    // Some static validation moved into a separate function for testability

    errors.addAll(BucketValidationUtils.staticValidation(bucket, allow_system_names));

    // OK before I do any more stateful checking, going to stop if we have logic errors first 

    if (!errors.isEmpty()) {
        return Tuples._2T(bucket, errors);
    }

    /////////////////

    // PHASE 2

    //TODO (ALEPH-19): multi buckets - authorization; other - authorization

    if (do_full_checks) {

        final CompletableFuture<Collection<BasicMessageBean>> bucket_path_errors_future = validateOtherBucketsInPathChain(
                bucket);

        errors.addAll(bucket_path_errors_future.join());

        // OK before I do any more stateful checking, going to stop if we have logic errors first 

        if (!errors.isEmpty()) {
            return Tuples._2T(bucket, errors);
        }
    }

    /////////////////

    // PHASE 3

    // Finally Check whether I am allowed to update the various fields if old_version.isPresent()

    if (old_version.isPresent()) {
        final DataBucketBean old_bucket = old_version.get();
        if (!bucket.full_name().equals(old_bucket.full_name())) {
            errors.add(MgmtCrudUtils
                    .createValidationError(ErrorUtils.get(ManagementDbErrorUtils.BUCKET_UPDATE_FULLNAME_CHANGED,
                            bucket.full_name(), old_bucket.full_name())));
        }
        if (!bucket.owner_id().equals(old_bucket.owner_id())) {
            errors.add(MgmtCrudUtils
                    .createValidationError(ErrorUtils.get(ManagementDbErrorUtils.BUCKET_UPDATE_OWNERID_CHANGED,
                            bucket.full_name(), old_bucket.owner_id())));
        }
    }

    /////////////////

    // PHASE 4 - DATA SCHEMA - NOTE: MESSAGES AT THIS POINT CAN BE INFO, YOU NEED TO CHECK THE SUCCESS()

    Tuple2<Map<String, String>, List<BasicMessageBean>> schema_validation = BucketValidationUtils
            .validateSchema(bucket, _service_context);

    errors.addAll(schema_validation._2());

    return Tuples._2T(
            BeanTemplateUtils.clone(bucket).with(DataBucketBean::data_locations, schema_validation._1()).done(),
            errors);
}
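
In PHASE 2 above, validateOtherBucketsInPathChain returns a future while the surrounding method is synchronous, so bucket_path_errors_future.join() bridges the async call back into blocking code. A tiny sketch of that async-to-sync bridge, with a hypothetical validation call:

import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;

public class AsyncToSyncBridge {
    // Hypothetical async check standing in for validateOtherBucketsInPathChain.
    static CompletableFuture<Collection<String>> validateAsync() {
        return CompletableFuture.supplyAsync(() -> Arrays.asList("example message"));
    }

    // A synchronous caller bridges to the async API with join():
    static Collection<String> validateBlocking() {
        return validateAsync().join(); // blocks until the async check completes
    }

    public static void main(String[] args) {
        System.out.println(validateBlocking());
    }
}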

From source file:io.pravega.controller.task.Stream.StreamTransactionMetadataTasks.java

/**
 * Creates txn on the specified stream.
 *
 * Post-condition:
 * 1. If txn creation succeeds, then
 *     (a) txn node is created in the store,
 *     (b) txn segments are successfully created on respective segment stores,
 *     (c) txn is present in the host-txn index of current host,
 *     (d) txn's timeout is being tracked in timeout service.
 *
 * 2. If the process fails after creating the txn node but before responding to the client, then, since the
 * txn is present in the host-txn index, some other controller process shall abort the txn after maxLeaseValue.
 *
 * 3. If timeout service tracks timeout of specified txn,
 * then txn is also present in the host-txn index of current process.
 *
 * Invariant:
 * The following invariants are maintained throughout the execution of createTxn, pingTxn and sealTxn methods.
 * 1. If timeout service tracks timeout of a txn, then txn is also present in the host-txn index of current process.
 * 2. If txn znode is updated, then txn is also present in the host-txn index of current process.
 *
 * @param scope               scope name.
 * @param stream              stream name.
 * @param lease               txn lease.
 * @param maxExecutionPeriod  maximum amount of time for which txn may remain open.
 * @param scaleGracePeriod    amount of time for which txn may remain open after scale operation is initiated.
 * @param ctx                 context.
 * @return                    identifier of the created txn.
 */
CompletableFuture<Pair<VersionedTransactionData, List<Segment>>> createTxnBody(final String scope,
        final String stream, final long lease, final long maxExecutionPeriod, final long scaleGracePeriod,
        final OperationContext ctx) {
    // Step 1. Validate parameters.
    CompletableFuture<Void> validate = validate(lease, maxExecutionPeriod, scaleGracePeriod);

    UUID txnId = UUID.randomUUID();
    TxnResource resource = new TxnResource(scope, stream, txnId);

    // Step 2. Add txn to host-transaction index.
    CompletableFuture<Void> addIndex = validate
            .thenComposeAsync(ignore -> streamMetadataStore.addTxnToIndex(hostId, resource, 0), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
                } else {
                    log.debug("Txn={}, added txn to host-txn index of host={}", txnId, hostId);
                }
            });

    // Step 3. Create txn node in the store.
    CompletableFuture<VersionedTransactionData> txnFuture = addIndex
            .thenComposeAsync(ignore -> streamMetadataStore.createTransaction(scope, stream, txnId, lease,
                    maxExecutionPeriod, scaleGracePeriod, ctx, executor), executor)
            .whenComplete((v, e) -> {
                if (e != null) {
                    log.debug("Txn={}, failed creating txn in store", txnId);
                } else {
                    log.debug("Txn={}, created in store", txnId);
                }
            });

    // Step 4. Notify segment stores about new txn.
    CompletableFuture<List<Segment>> segmentsFuture = txnFuture.thenComposeAsync(
            txnData -> streamMetadataStore.getActiveSegments(scope, stream, txnData.getEpoch(), ctx, executor),
            executor);

    CompletableFuture<Void> notify = segmentsFuture
            .thenComposeAsync(activeSegments -> notifyTxnCreation(scope, stream, activeSegments, txnId),
                    executor)
            .whenComplete((v, e) ->
    // Method notifyTxnCreation ensures that notification completes
    // even in the presence of n/w or segment store failures.
    log.debug("Txn={}, notified segments stores", txnId));

    // Step 5. Start tracking txn in timeout service
    return notify.thenApplyAsync(y -> {
        int version = txnFuture.join().getVersion();
        long executionExpiryTime = txnFuture.join().getMaxExecutionExpiryTime();
        timeoutService.addTxn(scope, stream, txnId, version, lease, executionExpiryTime, scaleGracePeriod);
        log.debug("Txn={}, added to timeout service on host={}", txnId, hostId);
        return null;
    }, executor).thenApplyAsync(v -> new ImmutablePair<>(txnFuture.join(), segmentsFuture.join()), executor);
}
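
In Step 5 the method reads txnFuture.join() and segmentsFuture.join() inside stages chained after both futures, so those joins are non-blocking reads of already-available values. A reduced sketch of combining two dependent stages this way (the values are hypothetical):

import java.util.AbstractMap.SimpleEntry;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

public class CombineByJoin {
    public static void main(String[] args) {
        CompletableFuture<Integer> txn = CompletableFuture.supplyAsync(() -> 1);
        CompletableFuture<String> segments = txn.thenApply(v -> "segments-for-" + v);

        // 'segments' completes after 'txn', so txn.join() inside this stage
        // is a non-blocking read of a value that is already present.
        Map.Entry<Integer, String> pair = segments
                .thenApply(s -> new SimpleEntry<>(txn.join(), s))
                .join();
        System.out.println(pair); // 1=segments-for-1
    }
}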

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - update mode
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields (like multi-node) can only be changed if the bucket status is set to suspended,
    // to keep the control logic simple
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}
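
The allOf(...) plus per-future join() combination above is the standard way to gather results from an array of futures: allOf only signals completion (it yields Void), so each element is re-read with a now-non-blocking join(). A condensed sketch of the idiom:

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class AllOfThenJoin {
    public static void main(String[] args) {
        @SuppressWarnings("unchecked")
        CompletableFuture<String>[] futures = Stream.of("a", "b", "c")
                .map(s -> CompletableFuture.supplyAsync(() -> s.toUpperCase()))
                .toArray(CompletableFuture[]::new);

        // Once allOf completes, every join() below returns immediately.
        List<String> results = CompletableFuture.allOf(futures)
                .thenApply(v -> Arrays.stream(futures)
                        .map(CompletableFuture::join)
                        .collect(Collectors.toList()))
                .join();
        System.out.println(results); // [A, B, C]
    }
}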

From source file:com.ikanow.aleph2.management_db.services.TestDataBucketCrudService_Create.java

@Test
public void test_UpdateValidation() throws Exception {

    // Insert a bucket:
    test_SuccessfulBucketCreation_multiNode();

    // (add analytics bucket to handle batch enrichment changes)
    insertAnalyticsActor(TestActor_Accepter.class);

    // Try to update it

    final DataBucketBean bucket = _bucket_crud.getObjectById("id1").get().get();

    //(while we're here, just check that the data schema map is written in)
    assertEquals(Collections.emptyMap(), bucket.data_locations());

    final DataBucketBean mod_bucket1 = BeanTemplateUtils.clone(bucket)
            .with(DataBucketBean::full_name, "/Something/else").with(DataBucketBean::owner_id, "Someone else")
            .done();

    // First attempt: will fail because not trying to overwrite:
    {
        final ManagementFuture<Supplier<Object>> update_future = _bucket_crud.storeObject(mod_bucket1);

        try {
            update_future.get();
            fail("Should have thrown exception");
        } catch (Exception e) {
            assertTrue("Dup key error", e.getCause() instanceof MongoException);
        }

        // Second attempt: fail on validation
        final ManagementFuture<Supplier<Object>> update_future2 = _bucket_crud.storeObject(mod_bucket1, true);

        try {
            assertEquals(2, update_future2.getManagementResults().get().size()); // (2 errors)
            update_future.get();
            fail("Should have thrown exception");
        } catch (Exception e) {
            assertTrue("Validation error", e.getCause() instanceof RuntimeException);
        }
    }

    // Third attempt, succeed with different update
    final DataBucketBean mod_bucket3 = BeanTemplateUtils.clone(bucket)
            .with(DataBucketBean::master_enrichment_type, MasterEnrichmentType.batch)
            .with(DataBucketBean::batch_enrichment_configs, Arrays.asList())
            .with(DataBucketBean::display_name, "Something else").done();

    // Actually, the first time this fails because the enrichment type is changing while the bucket is active
    {
        final ManagementFuture<Supplier<Object>> update_future2 = _bucket_crud.storeObject(mod_bucket3, true);

        try {
            assertEquals(
                    "Should get 1 error: " + update_future2.getManagementResults().get().stream()
                            .map(m -> m.message()).collect(Collectors.joining()),
                    1, update_future2.getManagementResults().get().size()); // (1 error)
            update_future2.get();
            fail("Should have thrown exception");
        } catch (Exception e) {
            assertTrue("Validation error: " + e + " / " + e.getCause(),
                    e.getCause() instanceof RuntimeException);
            assertTrue(
                    "compains about master_enrichment_type: " + e.getMessage() + ": "
                            + update_future2.getManagementResults().get().stream().map(b -> b.message())
                                    .collect(Collectors.joining(";")),
                    update_future2.getManagementResults().get().iterator().next().message()
                            .contains("master_enrichment_type"));
        }
    }

    // Then suspend and succeed
    {
        assertEquals(true,
                _underlying_bucket_status_crud
                        .updateObjectById("id1", CrudUtils.update(DataBucketStatusBean.class)
                                .set("suspended", true).set("confirmed_suspended", true))
                        .get());

        final ManagementFuture<Supplier<Object>> update_future3 = _bucket_crud.storeObject(mod_bucket3, true);

        try {
            assertEquals("id1", update_future3.get().get());

            final DataBucketBean bucket3 = _bucket_crud.getObjectById("id1").get().get();
            assertEquals("Something else", bucket3.display_name());

            //(wait for completion)
            update_future3.getManagementResults().get();

            //(just quickly check node affinity didn't change)
            final DataBucketStatusBean status_after = _bucket_status_crud.getObjectById("id1").get().get();
            assertEquals(0, Optionals.ofNullable(status_after.node_affinity()).size());

            // Check the "Confirmed" bucket fields match the bucket now (only confirmed_suspended is set)
            assertEquals(true, status_after.confirmed_suspended());
            assertEquals(true, status_after.confirmed_multi_node_enabled());
            assertEquals(MasterEnrichmentType.batch, status_after.confirmed_master_enrichment_type());
        } catch (Exception e) {
            update_future3.getManagementResults().get().stream().map(msg -> msg.command())
                    .collect(Collectors.joining());
            throw e; // error out
        }
    }
    // Check that it will set the affinity if it's null, though:
    {
        // (manually remove)
        assertTrue("Updated",
                _underlying_bucket_status_crud.updateObjectById("id1",
                        CrudUtils.update(DataBucketStatusBean.class).set("node_affinity", Arrays.asList()))
                        .get());
        final DataBucketStatusBean status_after2 = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals("Really updated!", 0, status_after2.node_affinity().size());

        final ManagementFuture<Supplier<Object>> update_future4 = _bucket_crud.storeObject(mod_bucket3, true);

        assertEquals("id1", update_future4.get().get());

        final DataBucketBean bucket4 = _bucket_crud.getObjectById("id1").get().get();
        assertEquals("Something else", bucket4.display_name());

        //(wait for completion)
        update_future4.getManagementResults().get();

        //(Check that node affinity was set)
        update_future4.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
        final DataBucketStatusBean status_after3 = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(0, Optionals.ofNullable(status_after3.node_affinity()).size());

        // Check the "Confirmed" bucket fields match the bucket now (only confirmed_suspended is set)
        assertEquals(true, status_after3.confirmed_suspended());
        assertEquals(true, status_after3.confirmed_multi_node_enabled());
        assertEquals(MasterEnrichmentType.batch, status_after3.confirmed_master_enrichment_type());
    }

    // OK, check that moving to single node resets the affinity
    {
        final DataBucketBean mod_bucket4 = BeanTemplateUtils.clone(bucket)
                .with(DataBucketBean::display_name, "Something else")
                .with(DataBucketBean::master_enrichment_type, MasterEnrichmentType.batch)
                .with(DataBucketBean::batch_enrichment_configs, Arrays.asList())
                .with(DataBucketBean::multi_node_enabled, false).done();

        //ACTIVE: FAIL
        {
            assertEquals(true,
                    _underlying_bucket_status_crud
                            .updateObjectById("id1", CrudUtils.update(DataBucketStatusBean.class)
                                    .set("suspended", false).set("confirmed_suspended", false))
                            .get());

            final ManagementFuture<Supplier<Object>> update_future2 = _bucket_crud.storeObject(mod_bucket4,
                    true);

            try {
                assertEquals(
                        "Should get 1 error: " + update_future2.getManagementResults().get().stream()
                                .map(m -> m.message()).collect(Collectors.joining()),
                        1, update_future2.getManagementResults().get().size()); // (1 error)
                update_future2.get();
                fail("Should have thrown exception");
            } catch (Exception e) {
                assertTrue("Validation error", e.getCause() instanceof RuntimeException);
                assertTrue("compains about multi_node_enabled: " + e.getMessage(),
                        update_future2.getManagementResults().get().iterator().next().message()
                                .contains("multi_node_enabled"));
            }
        }
        //SUSPENDED: SUCCESS
        {
            assertEquals(true,
                    _underlying_bucket_status_crud
                            .updateObjectById("id1", CrudUtils.update(DataBucketStatusBean.class)
                                    .set("suspended", true).set("confirmed_suspended", true))
                            .get());

            final ManagementFuture<Supplier<Object>> update_future5 = _bucket_crud.storeObject(mod_bucket4,
                    true);

            assertEquals("id1", update_future5.get().get());

            final DataBucketBean bucket5 = _bucket_crud.getObjectById("id1").get().get();
            assertEquals("Something else", bucket5.display_name());

            //(wait for completion)
            update_future5.getManagementResults().get();

            //(Check that node affinity was not set)
            update_future5.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
            final DataBucketStatusBean status_after4 = _bucket_status_crud.getObjectById("id1").get().get();
            assertEquals(0, Optionals.ofNullable(status_after4.node_affinity()).size());

            // Check the "Confirmed" bucket fields match the bucket now (only confirmed_suspended is set)
            assertEquals(true, status_after4.confirmed_suspended());
            assertEquals(false, status_after4.confirmed_multi_node_enabled());
            assertEquals(MasterEnrichmentType.batch, status_after4.confirmed_master_enrichment_type());
        }
        // NOW UNSUSPEND: SUCCESS AND ADDS NODE AFFINITY
        {
            assertEquals(true,
                    _underlying_bucket_status_crud
                            .updateObjectById("id1", CrudUtils.update(DataBucketStatusBean.class)
                                    .set("suspended", false).set("confirmed_suspended", false))
                            .get());

            final ManagementFuture<Supplier<Object>> update_future5 = _bucket_crud.storeObject(mod_bucket4,
                    true);

            assertEquals("id1", update_future5.get().get());

            final DataBucketBean bucket5 = _bucket_crud.getObjectById("id1").get().get();
            assertEquals("Something else", bucket5.display_name());

            //(wait for completion)
            update_future5.getManagementResults().get();

            //(Check that node affinity was set to 1)
            update_future5.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
            final DataBucketStatusBean status_after4 = _bucket_status_crud.getObjectById("id1").get().get();
            assertEquals(1, Optionals.ofNullable(status_after4.node_affinity()).size());

            // Check the "Confirmed" bucket fields match the bucket now (only confirmed_suspended is set)
            assertEquals(false, status_after4.confirmed_suspended());
            assertEquals(false, status_after4.confirmed_multi_node_enabled());
            assertEquals(MasterEnrichmentType.batch, status_after4.confirmed_master_enrichment_type());
        }
    }
    // And check that it moves back to 2 when set back to multi-node
    final DataBucketBean mod_bucket4 = BeanTemplateUtils.clone(bucket)
            .with(DataBucketBean::display_name, "Something else").with(DataBucketBean::multi_node_enabled, true)
            .done();

    {
        assertEquals(true,
                _underlying_bucket_status_crud
                        .updateObjectById("id1", CrudUtils.update(DataBucketStatusBean.class)
                                .set("suspended", true).set("confirmed_suspended", true))
                        .get());

        final ManagementFuture<Supplier<Object>> update_future5 = _bucket_crud.storeObject(mod_bucket4, true);

        assertEquals("id1", update_future5.get().get());

        final DataBucketBean bucket5 = _bucket_crud.getObjectById("id1").get().get();
        assertEquals("Something else", bucket5.display_name());

    //(Check that node affinity was unset)
        update_future5.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
        final DataBucketStatusBean status_after4 = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(0, Optionals.ofNullable(status_after4.node_affinity()).size());
    }
    //UNSUSPEND AND CHECK THAT NODE AFFINITY IS SET
    {
        assertEquals(true,
                _underlying_bucket_status_crud
                        .updateObjectById("id1", CrudUtils.update(DataBucketStatusBean.class)
                                .set("suspended", false).set("confirmed_suspended", false))
                        .get());

        final ManagementFuture<Supplier<Object>> update_future5 = _bucket_crud.storeObject(mod_bucket4, true);

        assertEquals("id1", update_future5.get().get());

        final DataBucketBean bucket5 = _bucket_crud.getObjectById("id1").get().get();
        assertEquals("Something else", bucket5.display_name());

    //(Check that node affinity was set)
        update_future5.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
        final DataBucketStatusBean status_after4 = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(2, Optionals.ofNullable(status_after4.node_affinity()).size());
    }
    // Convert to lock_to_nodes: false and check that the node affinity is not updated
    {
        CompletableFuture<Boolean> updated = _underlying_bucket_status_crud.updateObjectById("id1",
                CrudUtils.update(DataBucketStatusBean.class).set(DataBucketStatusBean::suspended, false)
                        .set(DataBucketStatusBean::confirmed_suspended, false));
        assertTrue(updated.join());
        final DataBucketStatusBean status_after4a = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(false, status_after4a.suspended());
        assertEquals(false, status_after4a.confirmed_suspended());

        final DataBucketBean mod_bucket5 = BeanTemplateUtils.clone(mod_bucket4)
                .with(DataBucketBean::lock_to_nodes, false).done();

        final ManagementFuture<Supplier<Object>> update_future5 = _bucket_crud.storeObject(mod_bucket5, true);

        assertEquals("id1", update_future5.get().get());

        final DataBucketBean bucket5 = _bucket_crud.getObjectById("id1").get().get();
        assertEquals("Something else", bucket5.display_name());

        //(Check that node affinity was not set)
        update_future5.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
        final DataBucketStatusBean status_after4 = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(2, status_after4.node_affinity().size());
    }
    // Now suspend the bucket and then rerun, check removes the node affinity
    // (note there is logic in the DIM that prevents you from doing this unless the harvest tech allows you to)
    {
        CompletableFuture<Boolean> updated = _underlying_bucket_status_crud.updateObjectById("id1",
                CrudUtils.update(DataBucketStatusBean.class).set(DataBucketStatusBean::suspended, true)
                        .set(DataBucketStatusBean::confirmed_suspended, true));
        assertTrue(updated.join());
        final DataBucketStatusBean status_after4a = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(true, status_after4a.suspended());
        assertEquals(true, status_after4a.confirmed_suspended());

        final DataBucketBean mod_bucket5 = BeanTemplateUtils.clone(mod_bucket4)
                .with(DataBucketBean::lock_to_nodes, false).done();

        final ManagementFuture<Supplier<Object>> update_future5 = _bucket_crud.storeObject(mod_bucket5, true);

        assertEquals("id1", update_future5.get().get());

        final DataBucketBean bucket5 = _bucket_crud.getObjectById("id1").get().get();
        assertEquals("Something else", bucket5.display_name());

        //(Check that node affinity was not set)
        update_future5.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
        final DataBucketStatusBean status_after4 = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(null, status_after4.node_affinity());
    }
    // Check again (code coverage)
    {
        final DataBucketBean mod_bucket5 = BeanTemplateUtils.clone(mod_bucket4)
                .with(DataBucketBean::lock_to_nodes, false).done();

        final ManagementFuture<Supplier<Object>> update_future5 = _bucket_crud.storeObject(mod_bucket5, true);

        assertEquals("id1", update_future5.get().get());

        final DataBucketBean bucket5 = _bucket_crud.getObjectById("id1").get().get();
        assertEquals("Something else", bucket5.display_name());

        //(Check that node affinity was not set)
        update_future5.getManagementResults().get(); // (wait for management results - until then node affinity may not be set)
        final DataBucketStatusBean status_after4 = _bucket_status_crud.getObjectById("id1").get().get();
        assertEquals(null, status_after4.node_affinity());
    }
    // Some testing of data service registration
    {
        final DataBucketBean mod_bucket6 = BeanTemplateUtils.clone(mod_bucket4)
                .with(DataBucketBean::data_schema, BeanTemplateUtils.build(DataSchemaBean.class)
                        .with(DataSchemaBean::data_warehouse_schema, BeanTemplateUtils
                                .build(DataSchemaBean.DataWarehouseSchemaBean.class).done().get())
                        .done().get())
                .done();

        {
            final ManagementFuture<Supplier<Object>> update_future6 = _bucket_crud.storeObject(mod_bucket6,
                    true);

            assertEquals("id1", update_future6.get().get());

            final Collection<BasicMessageBean> res = update_future6.getManagementResults().join();

            assertEquals("Wrong size: "
                    + res.stream().map(b -> b.success() + ": " + b.message()).collect(Collectors.joining(";")),
                    3, res.size());
            assertEquals(1, res.stream().filter(b -> !b.success()).count());
        }

        // OK, now store again minus the data warehouse bean ... will still get the error because of the old bucket
        {
            final ManagementFuture<Supplier<Object>> update_future6 = _bucket_crud.storeObject(mod_bucket4,
                    true);

            assertEquals("id1", update_future6.get().get());

            final Collection<BasicMessageBean> res = update_future6.getManagementResults().join();

            assertEquals("Wrong size: "
                    + res.stream().map(b -> b.success() + ": " + b.message()).collect(Collectors.joining(";")),
                    3, res.size());
            assertEquals(1, res.stream().filter(b -> !b.success()).count());
        }

        // Store one more time - this time won't get an error because data warehouse is in neither
        {
            final ManagementFuture<Supplier<Object>> update_future6 = _bucket_crud.storeObject(mod_bucket4,
                    true);

            assertEquals("id1", update_future6.get().get());

            final Collection<BasicMessageBean> res = update_future6.getManagementResults().join();

            assertEquals("Wrong size: "
                    + res.stream().map(b -> b.success() + ": " + b.message()).collect(Collectors.joining(";")),
                    2, res.size());
            assertEquals(0, res.stream().filter(b -> !b.success()).count());

        }
    }
}

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public void onObjectBatch(final Stream<Tuple2<Long, IBatchRecord>> batch, final Optional<Integer> batch_size,
        final Optional<JsonNode> grouping_key) {
    if (_deduplication_is_disabled.get()) {
        // no deduplication, generally shouldn't be here...
        //.. but if we are, do the best we can
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), Optional.empty()));
        return;
    }

    // Create big query

    final Tuple3<QueryComponent<JsonNode>, List<Tuple2<JsonNode, Tuple2<Long, IBatchRecord>>>, Either<String, List<String>>> fieldinfo_dedupquery_keyfields = getDedupQuery(
            batch, _dedup_fields.get(), _db_mapper.get());

    // Get duplicate results

    final Tuple2<List<String>, Boolean> fields_include = getIncludeFields(_policy.get(), _dedup_fields.get(),
            _timestamp_field.get());

    final CompletableFuture<Iterator<JsonNode>> dedup_res = fieldinfo_dedupquery_keyfields._2().isEmpty()
            ? CompletableFuture.completedFuture(Collections.<JsonNode>emptyList().iterator())
            : _dedup_context.get().getObjectsBySpec(fieldinfo_dedupquery_keyfields._1(), fields_include._1(),
                    fields_include._2()).thenApply(cursor -> cursor.iterator());

    // Wait for it to finish

    //(create handy results structure if so)
    final LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = fieldinfo_dedupquery_keyfields
            ._2().stream()
            .collect(Collector.of(
                    () -> new LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>>(),
                    (acc, t2) -> {
                        // (ie only the first element is added, duplicate elements are removed)
                        final Tuple3<Long, IBatchRecord, ObjectNode> t3 = Tuples._3T(t2._2()._1(), t2._2()._2(),
                                _mapper.createObjectNode());
                        acc.compute(t2._1(), (k, v) -> {
                            final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_list = (null == v)
                                    ? new LinkedList<>()
                                    : v;
                            new_list.add(t3);
                            return new_list;
                        });
                    }, (map1, map2) -> {
                        map1.putAll(map2);
                        return map1;
                    }));

    //TODO (ALEPH-20): add timestamps to annotation
    //TODO (ALEPH-20): support different timestamp fields for the different buckets
    //TODO (ALEPH-20): really need to support >1 current enrichment job 
    //                 ^^(Really really longer term you should be able to decide what objects you want and what you don't  <- NOTE: don't remember what i meant here)

    final Iterator<JsonNode> cursor = dedup_res.join();

    // Handle the results

    final Stream<JsonNode> records_to_delete = Lambdas.get(() -> {
        if (isCustom(_doc_schema.get().deduplication_policy())
                || _doc_schema.get().delete_unhandled_duplicates()) {
            return Optionals.streamOf(cursor, true)
                    .collect(Collectors.groupingBy(
                            ret_obj -> getKeyFieldsAgain(ret_obj, fieldinfo_dedupquery_keyfields._3())))
                    .entrySet().stream().<JsonNode>flatMap(kv -> {

                        final Optional<JsonNode> maybe_key = kv.getKey();
                        final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                                .map(key -> mutable_obj_map.get(key));

                        // Stats:
                        _mutable_stats.duplicate_keys++;
                        _mutable_stats.duplicates_existing += kv.getValue().size();
                        _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                        //DEBUG
                        //System.out.println("?? " + kv.getValue().size() + " vs " + maybe_key + " vs " + matching_records.map(x -> Integer.toString(x.size())).orElse("(no match)"));

                        return matching_records
                                .<Stream<JsonNode>>map(records -> handleDuplicateRecord(_doc_schema.get(),
                                        _custom_handler.optional().map(
                                                handler -> Tuples._2T(handler, this._custom_context.get())),
                                        _timestamp_field.get(), records, kv.getValue(), maybe_key.get(),
                                        mutable_obj_map))
                                .orElse(Stream.empty());
                    });
        } else {
            Optionals.streamOf(cursor, true).forEach(ret_obj -> {
                final Optional<JsonNode> maybe_key = getKeyFieldsAgain(ret_obj,
                        fieldinfo_dedupquery_keyfields._3());
                final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                        .map(key -> mutable_obj_map.get(key));

                //DEBUG
                //System.out.println("?? " + ret_obj + " vs " + maybe_key + " vs " + matching_record.map(x -> x._2().getJson().toString()).orElse("(no match)"));

                // Stats:
                _mutable_stats.duplicate_keys++;
                _mutable_stats.duplicates_existing++;
                _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                matching_records.ifPresent(records -> handleDuplicateRecord(_doc_schema.get(),
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        _timestamp_field.get(), records, Arrays.asList(ret_obj), maybe_key.get(),
                        mutable_obj_map));
            });
            return Stream.<JsonNode>empty();
        }
    });

    final List<Object> ids = records_to_delete.map(j -> jsonToObject(j)).filter(j -> null != j)
            .collect(Collectors.toList());

    if (!ids.isEmpty()) { // fire a bulk deletion request
        mutable_uncompleted_deletes.add(
                _dedup_context.get().deleteObjectsBySpec(CrudUtils.allOf().withAny(AnnotationBean._ID, ids)));

        _mutable_stats.deleted += ids.size();

        //(quickly see if we can reduce the number of outstanding requests)
        final Iterator<CompletableFuture<Long>> it = mutable_uncompleted_deletes.iterator();
        while (it.hasNext()) {
            final CompletableFuture<Long> cf = it.next();
            if (cf.isDone()) {
                it.remove();
            } else
                break; // (ie stop as soon as we hit one that isn't complete)
        }
    }

    _mutable_stats.nonduplicate_keys += mutable_obj_map.size();

    if (Optional.ofNullable(_doc_schema.get().custom_finalize_all_objects()).orElse(false)) {
        mutable_obj_map.entrySet().stream()
                .forEach(kv -> handleCustomDeduplication(
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        kv.getValue(), Collections.emptyList(), kv.getKey()));
    } else { // Just emit the last element of each grouped object set
        mutable_obj_map.values().stream().map(t -> t.peekLast())
                .forEach(t -> _context.get().emitImmutableObject(t._1(), t._2().getJson(), Optional.of(t._3()),
                        Optional.empty(), Optional.empty()));
    }
}
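
Note the shape of this method: the dedup query is issued first, the in-memory map is built while the query is in flight, and only then does dedup_res.join() block for whatever latency remains. A small sketch of that overlap pattern (the lookup and the local work are hypothetical):

import java.util.concurrent.CompletableFuture;

public class OverlapThenJoin {
    public static void main(String[] args) {
        // Issue the (simulated) remote lookup first...
        CompletableFuture<Integer> lookup = CompletableFuture.supplyAsync(() -> {
            try {
                Thread.sleep(100); // simulated I/O latency
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            return 42;
        });

        // ...do local work while the lookup is in flight...
        long localSum = 0;
        for (int i = 0; i < 1_000_000; i++) {
            localSum += i;
        }

        // ...and block only at the point the remote result is needed.
        System.out.println(localSum + lookup.join());
    }
}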

From source file:org.apache.bookkeeper.client.BookieWriteLedgerTest.java

/**
 * In a loop, create/write/delete a ledger with the same ledgerId through
 * the advanced-ledger functionality, which accepts the ledgerId as input.
 *
 * @throws Exception
 */
@Test
public void testLedgerCreateAdvWithLedgerIdInLoop() throws Exception {
    int ledgerCount = 40;

    long maxId = 9999999999L;
    if (baseConf.getLedgerManagerFactoryClass().equals(LongHierarchicalLedgerManagerFactory.class)) {
        // since LongHierarchicalLedgerManager supports ledgerIds of up to 19 decimal digits, but other
        // LedgerManagers only up to 10
        maxId = Long.MAX_VALUE;
    }

    rng.longs(ledgerCount, 0, maxId) // generate a stream of ledger ids
            .mapToObj(ledgerId -> { // create a ledger for each ledger id
                LOG.info("Creating adv ledger with id {}", ledgerId);
                return bkc.newCreateLedgerOp().withEnsembleSize(1).withWriteQuorumSize(1).withAckQuorumSize(1)
                        .withDigestType(org.apache.bookkeeper.client.api.DigestType.CRC32)
                        .withPassword(ledgerPassword).makeAdv().withLedgerId(ledgerId).execute()
                        .thenApply(writer -> { // Add entries to ledger when created
                            LOG.info("Writing stream of {} entries to {}", numEntriesToWrite, ledgerId);
                            List<ByteBuf> entries = rng.ints(numEntriesToWrite, 0, maxInt).mapToObj(i -> {
                                ByteBuf entry = Unpooled.buffer(4);
                                entry.retain();
                                entry.writeInt(i);
                                return entry;
                            }).collect(Collectors.toList());
                            CompletableFuture<?> lastRequest = null;
                            int i = 0;
                            for (ByteBuf entry : entries) {
                                long entryId = i++;
                                LOG.info("Writing {}:{} as {}", ledgerId, entryId, entry.slice().readInt());
                                lastRequest = writer.writeAsync(entryId, entry);
                            }
                            lastRequest.join();
                            return Pair.of(writer, entries);
                        });
            }).parallel().map(CompletableFuture::join) // wait for all creations and adds in parallel
            .forEach(e -> { // check that each set of adds succeeded
                try {
                    WriteAdvHandle handle = e.getLeft();
                    List<ByteBuf> entries = e.getRight();
                    // Read and verify
                    LOG.info("Read entries for ledger: {}", handle.getId());
                    readEntries(handle, entries);
                    entries.forEach(ByteBuf::release);
                    handle.close();
                    bkc.deleteLedger(handle.getId());
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    Assert.fail("Test interrupted");
                } catch (Exception ex) {
                    LOG.info("Readback failed with exception", ex);
                    Assert.fail("Readback failed " + ex.getMessage());
                }
            });
}
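
The .parallel().map(CompletableFuture::join) step acts as a fan-out/fan-in: each ledger's creation and writes proceed asynchronously, and join() then collects each result. A compact sketch of the same stream idiom with trivial tasks:

import java.util.concurrent.CompletableFuture;
import java.util.stream.LongStream;

public class FanOutFanIn {
    public static void main(String[] args) {
        long total = LongStream.range(0, 8)
                .mapToObj(id -> CompletableFuture.supplyAsync(() -> id * id))
                .parallel()
                .map(CompletableFuture::join) // wait for each task's result
                .mapToLong(Long::longValue)
                .sum();
        System.out.println(total); // 140 = sum of squares 0..7
    }
}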

From source file:org.apache.bookkeeper.metadata.etcd.EtcdRegistrationTest.java

private void testConcurrentRegistration(boolean readonly) throws Exception {
    final String bookieId;
    if (readonly) {
        bookieId = runtime.getMethodName() + "-readonly:3181";
    } else {
        bookieId = runtime.getMethodName() + ":3181";
    }
    final int numBookies = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numBookies);
    final CyclicBarrier startBarrier = new CyclicBarrier(numBookies);
    final CyclicBarrier completeBarrier = new CyclicBarrier(numBookies);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    final AtomicInteger numSuccesses = new AtomicInteger(0);
    final AtomicInteger numFailures = new AtomicInteger(0);
    for (int i = 0; i < numBookies; i++) {
        executor.submit(() -> {
            try (EtcdRegistrationManager regMgr = new EtcdRegistrationManager(newEtcdClient(), scope, 1)) {
                try {
                    startBarrier.await();
                    regMgr.registerBookie(bookieId, readonly);
                    numSuccesses.incrementAndGet();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to start", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Start barrier is broken", e);
                } catch (BookieException e) {
                    numFailures.incrementAndGet();
                }
                try {
                    completeBarrier.await();
                } catch (InterruptedException e) {
                    log.warn("Interrupted at waiting for the other threads to complete", e);
                } catch (BrokenBarrierException e) {
                    log.warn("Complete barrier is broken", e);
                }
                FutureUtils.complete(doneFuture, null);
            }
        });
    }
    doneFuture.join();
    assertEquals(1, numSuccesses.get());
    assertEquals(numBookies - 1, numFailures.get());
}
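
Here doneFuture is completed manually by the worker tasks, and join() in the test thread acts as a completion latch. A minimal version of that signalling pattern:

import java.util.concurrent.CompletableFuture;

public class DoneSignal {
    public static void main(String[] args) throws InterruptedException {
        final CompletableFuture<Void> done = new CompletableFuture<>();

        Thread worker = new Thread(() -> {
            // ... do the work ...
            done.complete(null); // signal the waiter
        });
        worker.start();

        done.join(); // blocks until complete(null) is called
        System.out.println("worker finished");
        worker.join();
    }
}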