Example usage for java.util.concurrent CompletableFuture completedFuture

List of usage examples for java.util.concurrent CompletableFuture completedFuture

Introduction

On this page you can find usage examples for java.util.concurrent CompletableFuture completedFuture.

Prototype

public static <U> CompletableFuture<U> completedFuture(U value) 

Document

Returns a new CompletableFuture that is already completed with the given value.
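
As a quick, self-contained illustration (not drawn from the projects below), this sketch shows the two most common uses: returning a value that is already known, and short-circuiting inside a composition chain.

import java.util.concurrent.CompletableFuture;

public class CompletedFutureExample {
    public static void main(String[] args) {
        // join() on an already-completed future returns immediately; nothing blocks.
        CompletableFuture<String> ready = CompletableFuture.completedFuture("hello");
        System.out.println(ready.join()); // hello

        // Short-circuit inside a composition: when the answer is already known,
        // return a completed future instead of starting new asynchronous work.
        CompletableFuture<Integer> length = ready
                .thenCompose(s -> CompletableFuture.completedFuture(s.length()));
        System.out.println(length.join()); // 5
    }
}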

Usage

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

/**
 * update history table if not already updated:
 * fetch last record from history table.
 * if eventTime is >= scale.scaleTimeStamp do nothing, else create record
 *
 * @return future that completes once the history table update (if needed) is done
 */
private CompletableFuture<Void> addPartialHistoryRecord(final List<Integer> sealedSegments,
        final List<Integer> createdSegments, final int epoch) {
    return getHistoryTable().thenCompose(historyTable -> {
        final Optional<HistoryRecord> lastRecordOpt = HistoryRecord.readLatestRecord(historyTable.getData(),
                false);

        // scale task is not allowed unless create is done which means at least one
        // record in history table.
        assert lastRecordOpt.isPresent();

        final HistoryRecord lastRecord = lastRecordOpt.get();

        // idempotent check
        if (lastRecord.getEpoch() > epoch) {
            boolean idempotent = lastRecord.isPartial()
                    && lastRecord.getSegments().containsAll(createdSegments);
            if (idempotent) {
                HistoryRecord previous = HistoryRecord.fetchPrevious(lastRecord, historyTable.getData()).get();

                idempotent = previous.getSegments().stream().noneMatch(createdSegments::contains);
            }

            if (idempotent) {
                return CompletableFuture.completedFuture(null);
            } else {
                throw new ScaleOperationExceptions.ScaleConditionInvalidException();
            }
        }

        final List<Integer> newActiveSegments = getNewActiveSegments(createdSegments, sealedSegments,
                lastRecord);

        byte[] updatedTable = TableHelper.addPartialRecordToHistoryTable(historyTable.getData(),
                newActiveSegments);
        final Data<T> updated = new Data<>(updatedTable, historyTable.getVersion());
        int latestEpoch = TableHelper.getLatestEpoch(updatedTable).getKey();
        return createNewEpoch(latestEpoch).thenCompose(v -> updateHistoryTable(updated));
    });
}
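
The snippet above uses completedFuture(null) as an idempotent short-circuit: once the update is detected as already applied, the chain completes immediately without scheduling further work. A stripped-down sketch of that pattern follows; loadEpoch and applyUpdate are hypothetical stand-ins, not Pravega API.

import java.util.concurrent.CompletableFuture;

class IdempotentUpdater {
    CompletableFuture<Void> updateIfNeeded(int requestedEpoch) {
        return loadEpoch().thenCompose(current -> {
            if (current >= requestedEpoch) {
                // Already applied: finish immediately, schedule nothing further.
                return CompletableFuture.completedFuture(null);
            }
            return applyUpdate(requestedEpoch);
        });
    }

    CompletableFuture<Integer> loadEpoch() {
        return CompletableFuture.completedFuture(0); // stub
    }

    CompletableFuture<Void> applyUpdate(int epoch) {
        return CompletableFuture.completedFuture(null); // stub
    }
}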

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

private CompletableFuture<Void> completeScale(final long scaleTimestamp, final List<Integer> sealedSegments,
        final int activeEpoch, final List<Integer> newSegments) {
    return getHistoryTable().thenCompose(historyTable -> {
        final Optional<HistoryRecord> lastRecordOpt = HistoryRecord.readLatestRecord(historyTable.getData(),
                false);

        assert lastRecordOpt.isPresent();

        final HistoryRecord lastRecord = lastRecordOpt.get();

        // idempotent check
        if (!lastRecord.isPartial()) {
            if (lastRecord.getSegments().stream().noneMatch(sealedSegments::contains)
                    && newSegments.stream().allMatch(x -> lastRecord.getSegments().contains(x))) {
                return CompletableFuture.completedFuture(null);
            } else {
                throw new ScaleOperationExceptions.ScaleConditionInvalidException();
            }
        }

        long scaleEventTime = Math.max(System.currentTimeMillis(), scaleTimestamp);
        final Optional<HistoryRecord> previousOpt = HistoryRecord.fetchPrevious(lastRecord,
                historyTable.getData());
        if (previousOpt.isPresent()) {
            // To ensure that we always have ascending time in history records irrespective of controller clock mismatches.
            scaleEventTime = Math.max(scaleEventTime, previousOpt.get().getScaleTime() + 1);

            if (previousOpt.get().getEpoch() > activeEpoch) {
                throw new ScaleOperationExceptions.ScaleConditionInvalidException();
            }
        }

        byte[] updatedTable = TableHelper.completePartialRecordInHistoryTable(historyTable.getData(),
                lastRecord, scaleEventTime);
        final Data<T> updated = new Data<>(updatedTable, historyTable.getVersion());

        final HistoryRecord newRecord = HistoryRecord.readLatestRecord(updatedTable, false).get();
        return addIndexRecord(newRecord).thenCompose(x -> updateHistoryTable(updated))
                .thenCompose(x -> FutureHelpers.toVoid(updateState(State.ACTIVE)));
    });
}

From source file:io.pravega.controller.store.stream.PersistentStreamBase.java

private CompletableFuture<Void> addIndexRecord(final HistoryRecord historyRecord) {
    return getIndexTable().thenCompose(indexTable -> {
        final Optional<IndexRecord> lastRecord = IndexRecord.readLatestRecord(indexTable.getData());
        // check idempotent
        if (lastRecord.isPresent() && lastRecord.get().getHistoryOffset() == historyRecord.getOffset()) {
            return CompletableFuture.completedFuture(null);
        }

        final byte[] updatedTable = TableHelper.updateIndexTable(indexTable.getData(),
                historyRecord.getScaleTime(), historyRecord.getOffset());
        final Data<T> updated = new Data<>(updatedTable, indexTable.getVersion());
        return updateIndexTable(updated);
    });
}

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Make various requests of the analytics module based on the message type
 * @param bucket
 * @param tech_module
 * @param m
 * @return - a future containing the reply or an error (they're the same type at this point, hence the Validation can be discarded at the end)
 */
protected static CompletableFuture<BucketActionReplyMessage> talkToAnalytics(final DataBucketBean bucket,
        final BucketActionMessage m, final String source, final AnalyticsContext context,
        final DataImportActorContext dim_context, final Tuple2<ActorRef, ActorSelection> me_sibling,
        final Map<String, Tuple2<SharedLibraryBean, String>> libs, // (if we're here then must be valid)
        final Validation<BasicMessageBean, Tuple2<IAnalyticsTechnologyModule, ClassLoader>> err_or_tech_module, // "pipeline element"
        final ILoggingService _logging_service) {
    final List<AnalyticThreadJobBean> jobs = bucket.analytic_thread().jobs();

    final BiFunction<Stream<AnalyticThreadJobBean>, Tuple2<Boolean, Boolean>, Stream<AnalyticThreadJobBean>> perJobSetup = (
            job_stream, existingbucket_bucketactive) -> {
        return job_stream.filter(
                job -> existingbucket_bucketactive._1() || Optional.ofNullable(job.enabled()).orElse(true))
                .filter(job -> !isBatchJobWithDependencies(bucket, job, existingbucket_bucketactive))
                .peek(job -> setPerJobContextParams(job, context, libs)); //(WARNING: mutates context)
    };

    final ClassLoader saved_current_classloader = Thread.currentThread().getContextClassLoader();
    try {
        return err_or_tech_module.<CompletableFuture<BucketActionReplyMessage>>validation(
                //Error:
                error -> CompletableFuture.completedFuture(new BucketActionHandlerMessage(source, error)),
                // Normal
                techmodule_classloader -> {
                    final IAnalyticsTechnologyModule tech_module = techmodule_classloader._1();

                    if (shouldLog(m))
                        _logging_service.getSystemLogger(bucket).log(Level.INFO, ErrorUtils.lazyBuildMessage(
                                false, () -> DataBucketAnalyticsChangeActor.class.getSimpleName(),
                                () -> "talkToAnalytics", () -> null,
                                () -> "Set active classloader=" + techmodule_classloader._2() + " class="
                                        + tech_module.getClass() + " message=" + m.getClass().getSimpleName()
                                        + " bucket=" + bucket.full_name(),
                                () -> Collections.emptyMap()));
                    Thread.currentThread().setContextClassLoader(techmodule_classloader._2());

                    tech_module.onInit(context);

                    // One final check before we do anything: if we're trying to run multi-node, are we allowed to?
                    // By construction, all the jobs have the same setting, so:
                    final boolean multi_node_enabled = jobs.stream().findFirst()
                            .map(j -> j.multi_node_enabled()).orElse(false);
                    if (multi_node_enabled) {
                        if (!tech_module.supportsMultiNode(bucket, jobs, context)) {
                            return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                                    SharedErrorUtils.buildErrorMessage(source, m, ErrorUtils.get(
                                            AnalyticsErrorUtils.TRIED_TO_RUN_MULTI_NODE_ON_UNSUPPORTED_TECH,
                                            bucket.full_name(), tech_module.getClass().getSimpleName()))));
                        }
                    }

                    return Patterns.match(m).<CompletableFuture<BucketActionReplyMessage>>andReturn()
                            .when(BucketActionMessage.BucketActionOfferMessage.class, msg -> {
                                final boolean accept_or_ignore = NodeRuleUtils.canRunOnThisNode(
                                        jobs.stream().map(j -> Optional.ofNullable(j.node_list_rules())),
                                        dim_context) && tech_module.canRunOnThisNode(bucket, jobs, context);

                                return CompletableFuture.completedFuture(accept_or_ignore
                                        ? new BucketActionReplyMessage.BucketActionWillAcceptMessage(source)
                                        : new BucketActionReplyMessage.BucketActionIgnoredMessage(source));
                            }).when(BucketActionMessage.DeleteBucketActionMessage.class, msg -> {
                                //(note have already told the sibling about this)

                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onDeleteThread(bucket, jobs, context);
                                final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                        .apply(jobs.stream(), Tuples._2T(true, false))
                                        .map(job -> Tuples._2T(job,
                                                (CompletableFuture<BasicMessageBean>) tech_module
                                                        .stopAnalyticJob(bucket, jobs, job, context)))
                                        .collect(Collectors.toList());

                                //(no need to call the context.completeJobOutput since we're deleting the bucket)
                                sendOnTriggerEventMessages(job_results, msg.bucket(),
                                        __ -> Optional.of(JobMessageType.stopping), me_sibling,
                                        _logging_service);

                                return combineResults(top_level_result,
                                        job_results.stream().map(jf -> jf._2()).collect(Collectors.toList()),
                                        source);
                            }).when(BucketActionMessage.NewBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onNewThread(bucket, jobs, context, !msg.is_suspended());

                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final boolean starting_thread = msg.is_suspended() ? false
                                                : perJobSetup.apply(jobs.stream(), Tuples._2T(false, true))
                                                        .anyMatch(job -> _batch_types
                                                                .contains(job.analytic_type()));

                                        if (starting_thread) {
                                            BasicMessageBean thread_start_result = tech_module.onThreadExecute(
                                                    bucket, jobs, Collections.emptyList(), context).join(); // (wait for completion before doing anything else)
                                            _logging_service.getSystemLogger(bucket).log(
                                                    thread_start_result.success() ? Level.INFO : Level.WARN,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Executing thread for bucket {0}, success={1} (error={2})",
                                                                    bucket.full_name(),
                                                                    thread_start_result.success(),
                                                                    thread_start_result.success() ? "none"
                                                                            : thread_start_result.message()),
                                                            () -> Collections.emptyMap()));
                                        }

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .is_suspended()
                                                        ? Collections.emptyList()
                                                        : perJobSetup
                                                                .apply(jobs.stream(), Tuples._2T(false, true))
                                                                .map(job -> Tuples._2T(job,
                                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                                .startAnalyticJob(bucket, jobs,
                                                                                        job, context)))
                                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(
                                                    j_r._2().success() ? Level.INFO : Level.WARN,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));

                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.UpdateBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onUpdatedThread(msg.old_bucket(), bucket, jobs, msg.is_enabled(),
                                                Optional.empty(), context);

                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final boolean starting_thread = !msg.is_enabled() ? false
                                                : perJobSetup.apply(jobs.stream(), Tuples._2T(true, true))
                                                        .filter(job -> Optional.ofNullable(job.enabled())
                                                                .orElse(true))
                                                        .anyMatch(job -> _batch_types
                                                                .contains(job.analytic_type()));

                                        if (starting_thread) {
                                            BasicMessageBean thread_start_result = tech_module.onThreadExecute(
                                                    bucket, jobs, Collections.emptyList(), context).join(); // (wait for completion before doing anything else)
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Executing thread for bucket {0}, success={1} (error={2})",
                                                                    bucket.full_name(),
                                                                    thread_start_result.success(),
                                                                    thread_start_result.success() ? "none"
                                                                            : thread_start_result.message()),
                                                            () -> Collections.emptyMap()));
                                        }
                                        //(don't need the analog for stopping because the trigger will give me the notification once all jobs are completed)

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                                .apply(jobs.stream(), Tuples._2T(true, msg.is_enabled()))
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) ((msg.is_enabled()
                                                                && Optional.ofNullable(job.enabled())
                                                                        .orElse(true))
                                                                                ? tech_module.resumeAnalyticJob(
                                                                                        bucket, jobs, job,
                                                                                        context)
                                                                                : tech_module
                                                                                        .suspendAnalyticJob(
                                                                                                bucket, jobs,
                                                                                                job, context))))
                                                .collect(Collectors.toList());

                                        // Send all stop messages, and start messages for jobs that succeeded
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            if (msg.is_enabled()
                                                    && Optional.ofNullable(j_r._1().enabled()).orElse(true)) {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Starting bucket:job {0}:{1} success={2}{3}",
                                                                        bucket.full_name(), j_r._1().name(),
                                                                        j_r._2().success(),
                                                                        j_r._2().success() ? ""
                                                                                : (" error = "
                                                                                        + j_r._2().message())),
                                                                () -> Collections.emptyMap()));
                                                return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                        : Optional.empty();
                                            } else { // either stopping all, or have disabled certain jobs
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Stopping bucket:job {0}:{1}",
                                                                        bucket.full_name(), j_r._1().name()),
                                                                () -> Collections.emptyMap()));
                                                if (msg.is_enabled()) { //(else stopping the entire bucket)
                                                    context.completeJobOutput(msg.bucket(), j_r._1());
                                                }
                                                return Optional.of(JobMessageType.stopping);
                                            }
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.PurgeBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onPurge(bucket, jobs, context);
                                // (don't need to tell the sibling about this)

                                return combineResults(top_level_result, Collections.emptyList(), source);
                            }).when(BucketActionMessage.TestBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onTestThread(bucket, jobs, msg.test_spec(), context);
                                return top_level_result.thenCompose(ret_val -> {
                                    if (!ret_val.success()) {
                                        return combineResults(top_level_result, Arrays.asList(), source);
                                    } else { // success, carry on
                                        // Firstly, tell the sibling
                                        if (null != me_sibling)
                                            me_sibling._2().tell(msg, me_sibling._1());

                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = perJobSetup
                                                .apply(jobs.stream(), Tuples._2T(false, true))
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJobTest(bucket, jobs, job,
                                                                        msg.test_spec(), context)))
                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting test bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));
                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        return combineResults(top_level_result, job_results.stream()
                                                .map(jf -> jf._2()).collect(Collectors.toList()), source);
                                    }
                                });
                            }).when(BucketActionMessage.PollFreqBucketActionMessage.class, msg -> {
                                final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                        .onPeriodicPoll(bucket, jobs, context);

                                //(don't need to tell trigger sibling about this)

                                return combineResults(top_level_result, Collections.emptyList(), source);
                            })
                            // Finally, a bunch of analytic messages (don't tell trigger sibling about any of these)
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.check_completion == msg.type()), msg -> {
                                        // Check whether these jobs are complete, send message back to sibling asynchronously

                                        //(note: don't use perJobSetup for these explicit analytic event messages)
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<Boolean>>> job_results = Optionals
                                                .ofNullable(msg.jobs()).stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<Boolean>) tech_module
                                                                .checkAnalyticJobProgress(msg.bucket(),
                                                                        msg.jobs(), job, context)))
                                                .collect(Collectors.toList());

                                        // In addition (for now) just log the management results
                                        job_results.stream().forEach(jr -> {
                                            if (jr._2() instanceof ManagementFuture) {
                                                ManagementFuture<Boolean> jr2 = (ManagementFuture<Boolean>) jr
                                                        ._2();
                                                jr2.thenAccept(result -> {
                                                    if (result) {
                                                        jr2.getManagementResults().thenAccept(mgmt_results -> {
                                                            List<String> errs = mgmt_results.stream()
                                                                    .filter(res -> !res.success())
                                                                    .map(res -> res.message())
                                                                    .collect(Collectors.toList());
                                                            if (!errs.isEmpty()) {
                                                                _logging_service.getSystemLogger(bucket).log(
                                                                        Level.INFO,
                                                                        ErrorUtils.lazyBuildMessage(false,
                                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                                        .getSimpleName(),
                                                                                () -> "talkToAnalytics",
                                                                                () -> null,
                                                                                () -> ErrorUtils.get(
                                                                                        "Completed bucket:job {0}:{1} had errors: {2}",
                                                                                        bucket.full_name(),
                                                                                        jr._1().name(),
                                                                                        errs.stream().collect(
                                                                                                Collectors
                                                                                                        .joining(
                                                                                                                ";"))),
                                                                                () -> Collections.emptyMap()));
                                                            }
                                                        });
                                                    }
                                                });
                                            }
                                            //(in practice it always will be a ManagementFuture)
                                        });

                                        sendOnTriggerEventMessages(job_results, msg.bucket(), t2 -> {
                                            if (t2._2()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Completed: bucket:job {0}:{1}",
                                                                        bucket.full_name(), t2._1().name()),
                                                                () -> Collections.emptyMap()));
                                                context.completeJobOutput(msg.bucket(), t2._1());
                                            }
                                            return t2._2() ? Optional.of(JobMessageType.stopping)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.starting == msg.type()) && (null == msg.jobs()),
                                    msg -> {
                                        // Received a start notification for the bucket

                                        //TODO (ALEPH-12): get the matching triggers into the message
                                        final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                                .onThreadExecute(msg.bucket(), jobs, Collections.emptyList(),
                                                        context);

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        top_level_result.thenAccept(reply -> {
                                            if (!reply.success()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Error starting analytic thread {0}: message={1}",
                                                                        bucket.full_name(), reply.message()),
                                                                () -> Collections.emptyMap()));
                                            } else {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(true,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Started analytic thread {0}",
                                                                        bucket.full_name()),
                                                                () -> Collections.emptyMap()));
                                            }
                                        });

                                        // Now start any enabled jobs that have no dependencies
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = jobs
                                                .stream()
                                                .filter(job -> Optional.ofNullable(job.enabled()).orElse(true))
                                                .filter(job -> Optionals.ofNullable(job.dependencies())
                                                        .isEmpty())
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        // Only send on trigger events for messages that started
                                        sendOnTriggerEventMessages(job_results, msg.bucket(), j_r -> {
                                            _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                    ErrorUtils.lazyBuildMessage(false,
                                                            () -> DataBucketAnalyticsChangeActor.class
                                                                    .getSimpleName(),
                                                            () -> "talkToAnalytics", () -> null,
                                                            () -> ErrorUtils.get(
                                                                    "Starting bucket:job {0}:{1} success={2}{3}",
                                                                    bucket.full_name(), j_r._1().name(),
                                                                    j_r._2().success(),
                                                                    j_r._2().success() ? ""
                                                                            : (" error = "
                                                                                    + j_r._2().message())),
                                                            () -> Collections.emptyMap()));
                                            return j_r._2().success() ? Optional.of(JobMessageType.starting)
                                                    : Optional.empty();
                                        }, me_sibling, _logging_service);

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.starting == msg.type()) && (null != msg.jobs()),
                                    msg -> {
                                        // Received a start notification for 1+ of the jobs

                                        //(note: don't use perJobSetup for these explicit analytic event messages)
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .jobs().stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .startAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        job_results.forEach(job_res -> {
                                            job_res._2().thenAccept(res -> {
                                                if (!res.success()) {
                                                    _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                            ErrorUtils.lazyBuildMessage(false,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Error starting analytic job {0}:{1}: message={2}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name(), res.message()),
                                                                    () -> Collections.emptyMap()));
                                                } else {
                                                    _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                            ErrorUtils.lazyBuildMessage(true,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Started analytic job {0}:{1}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name()),
                                                                    () -> Collections.emptyMap()));
                                                }
                                            });
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.stopping == msg.type()) && (null == msg.jobs()),
                                    msg -> {
                                        // Received a stop notification for the bucket

                                        // Complete the job output
                                        context.completeBucketOutput(msg.bucket());

                                        final CompletableFuture<BasicMessageBean> top_level_result = tech_module
                                                .onThreadComplete(msg.bucket(), jobs, context);

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        top_level_result.thenAccept(reply -> {
                                            if (!reply.success()) {
                                                _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                        ErrorUtils.lazyBuildMessage(false,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Error stopping analytic thread {0}: message={1}",
                                                                        bucket.full_name(), reply.message()),
                                                                () -> Collections.emptyMap()));
                                            } else {
                                                _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                        ErrorUtils.lazyBuildMessage(true,
                                                                () -> DataBucketAnalyticsChangeActor.class
                                                                        .getSimpleName(),
                                                                () -> "talkToAnalytics", () -> null,
                                                                () -> ErrorUtils.get(
                                                                        "Stopping analytic thread {0}",
                                                                        bucket.full_name()),
                                                                () -> Collections.emptyMap()));
                                            }
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.stopping == msg.type()) && (null != msg.jobs()),
                                    msg -> {
                                        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<BasicMessageBean>>> job_results = msg
                                                .jobs().stream()
                                                .peek(job -> setPerJobContextParams(job, context, libs)) //(WARNING: mutates context)
                                                .map(job -> Tuples._2T(job,
                                                        (CompletableFuture<BasicMessageBean>) tech_module
                                                                .suspendAnalyticJob(msg.bucket(), jobs, job,
                                                                        context)))
                                                .collect(Collectors.toList());

                                        //(ignore the reply apart from logging - failures will be identified by triggers)
                                        job_results.forEach(job_res -> {
                                            job_res._2().thenAccept(res -> {
                                                if (!res.success()) {
                                                    _logging_service.getSystemLogger(bucket).log(Level.WARN,
                                                            ErrorUtils.lazyBuildMessage(false,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Error stopping analytic job {0}:{1}: message={2}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name(), res.message()),
                                                                    () -> Collections.emptyMap()));
                                                } else {
                                                    _logging_service.getSystemLogger(bucket).log(Level.INFO,
                                                            ErrorUtils.lazyBuildMessage(true,
                                                                    () -> DataBucketAnalyticsChangeActor.class
                                                                            .getSimpleName(),
                                                                    () -> "talkToAnalytics", () -> null,
                                                                    () -> ErrorUtils.get(
                                                                            "Stopping analytic job {0}:{1}",
                                                                            bucket.full_name(),
                                                                            job_res._1().name()),
                                                                    () -> Collections.emptyMap()));
                                                }
                                            });
                                        });

                                        // Send a status message (which will be ignored)

                                        return CompletableFuture.completedFuture(
                                                new BucketActionReplyMessage.BucketActionNullReplyMessage());
                                    })
                            .when(BucketActionMessage.BucketActionAnalyticJobMessage.class,
                                    msg -> (JobMessageType.deleting == msg.type()), msg -> {
                                        // This is different because it happens as part of a user action related to buckets, whereas stopping occurs based on trigger related actions

                                        final CompletableFuture<BasicMessageBean> top_level_result = CompletableFuture
                                                .completedFuture(ErrorUtils.buildSuccessMessage(
                                                        DataBucketAnalyticsChangeActor.class.getSimpleName(),
                                                        "BucketActionAnalyticJobMessage:deleting", ""));

                                        final List<CompletableFuture<BasicMessageBean>> job_results = Optionals
                                                .ofNullable(msg.jobs()).stream().map(job -> tech_module
                                                        .suspendAnalyticJob(bucket, jobs, job, context))
                                                .collect(Collectors.toList());

                                        // Hence do return a legit reply message here

                                        return combineResults(top_level_result, job_results, source);
                                    })
                            .otherwise(msg -> { // return "command not recognized" error
                                return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                                        SharedErrorUtils.buildErrorMessage(source, m,
                                                AnalyticsErrorUtils.MESSAGE_NOT_RECOGNIZED, bucket.full_name(),
                                                m.getClass().getSimpleName())));
                            });
                });
    } catch (Throwable e) { // (trying to use Validation to avoid this, but just in case...)
        return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                SharedErrorUtils.buildErrorMessage(source, m,
                        ErrorUtils.getLongForm(SharedErrorUtils.ERROR_LOADING_CLASS, e,
                                err_or_tech_module.success()._1().getClass()))));
    } finally {
        Thread.currentThread().setContextClassLoader(saved_current_classloader);
    }
}
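
The actor above answers every message with an already-completed future: a null reply for the stop path, a handler message for unrecognized commands, and a completed error reply even from the catch-all. A minimal sketch of that convention, using hypothetical Reply types in place of the Aleph2 message beans:

import java.util.concurrent.CompletableFuture;

public class ReplySketch {
    interface Reply {}
    static class NullReply implements Reply {}
    static class ErrorReply implements Reply {
        final String message;
        ErrorReply(String message) { this.message = message; }
    }

    // Handles a command; unrecognized or failing commands become completed error replies
    static CompletableFuture<Reply> handle(String command) {
        try {
            if ("stop".equals(command)) {
                // The work finished synchronously, so the reply is already available
                return CompletableFuture.completedFuture(new NullReply());
            }
            return CompletableFuture.completedFuture(
                    new ErrorReply("Command not recognized: " + command));
        } catch (Throwable t) {
            // Defensive catch, mirroring the actor above: errors become replies, not exceptions
            return CompletableFuture.completedFuture(new ErrorReply(t.getMessage()));
        }
    }
}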

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Boolean> deleteDatastore() {
    try {
        final ReadWriteContext rw_context = getRwContextOrThrow(_state.es_context, "deleteDatastore");

        final String[] index_list = rw_context.indexContext().getReadableIndexArray(Optional.empty());
        final boolean involves_wildcards = Arrays.stream(index_list).anyMatch(s -> s.contains("*"));
        DeleteIndexRequestBuilder dir = _state.client.admin().indices().prepareDelete(index_list);

        // First check if the indexes even exist, so we can return false if they don't
        // (we can bypass this if there are no wildcards; we'll get an exception instead)
        final CompletableFuture<Boolean> intermed = Lambdas.get(() -> {
            if (involves_wildcards) {
                final IndicesStatsRequestBuilder irb = _state.client.admin().indices().prepareStats(index_list);
                final CompletableFuture<Boolean> check_indexes = ElasticsearchFutureUtils.wrap(irb.execute(),
                        ir -> {
                            return !ir.getIndices().isEmpty();
                        }, (err, future) -> {
                            future.completeExceptionally(err);
                        });
                return check_indexes;
            } else
                return CompletableFuture.completedFuture(true);
        });
        // Now try deleting the indexes
        return intermed.thenCompose(b -> {
            if (b) {
                return ElasticsearchFutureUtils.wrap(dir.execute(), dr -> {
                    return true;
                }, (err, future) -> {
                    if ((err instanceof IndexMissingException)
                            || (err instanceof SearchPhaseExecutionException)) //(this one can come up on a read of a newly created index)
                    {
                        future.complete(false);
                    } else {
                        future.completeExceptionally(err);
                    }
                });
            } else
                return CompletableFuture.completedFuture(false);
        });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}
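
The Lambdas.get block above demonstrates a useful short-circuit: when no wildcard expansion is involved, the existence check is skipped and completedFuture(true) keeps the thenCompose chain uniform. A minimal sketch of the same shape, with a hypothetical existsAsync check standing in for the stats request:

import java.util.concurrent.CompletableFuture;

public class ShortCircuitSketch {
    // Hypothetical async existence check, standing in for the stats request above
    static CompletableFuture<Boolean> existsAsync(String index) {
        return CompletableFuture.supplyAsync(() -> index.startsWith("known_"));
    }

    static CompletableFuture<Boolean> deleteIfExists(String index, boolean needsCheck) {
        // Only pay for the async check when it's needed; otherwise substitute a completed value
        final CompletableFuture<Boolean> intermed = needsCheck
                ? existsAsync(index)
                : CompletableFuture.completedFuture(true);
        return intermed.thenCompose(exists -> exists
                ? CompletableFuture.supplyAsync(() -> true) // stand-in for the real delete call
                : CompletableFuture.completedFuture(false)); // nothing to delete
    }
}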

From source file:com.ikanow.aleph2.search_service.elasticsearch.services.ElasticsearchIndexService.java

@Override
public CompletableFuture<Collection<BasicMessageBean>> onPublishOrUpdate(DataBucketBean bucket,
        Optional<DataBucketBean> old_bucket, boolean suspended, Set<String> data_services,
        Set<String> previous_data_services) {
    try {

        final LinkedList<BasicMessageBean> mutable_errors = new LinkedList<>();

        // If search_index_service or document_service is enabled then update mapping

        if ((data_services.contains(DataSchemaBean.SearchIndexSchemaBean.name))
                || data_services.contains(DataSchemaBean.DocumentSchemaBean.name)) {

            final Tuple3<ElasticsearchIndexServiceConfigBean, String, Optional<String>> schema_index_type = getSchemaConfigAndIndexAndType(
                    bucket, _config);

            handlePotentiallyNewIndex(bucket, Optional.empty(), true, schema_index_type._1(),
                    schema_index_type._2());
        }

        // If data_warehouse_service is enabled then update Hive table (remove and reinsert super quick)
        // If data_warehouse_service _was_ enabled then remove Hive table

        final boolean old_data_service_matches_dw = previous_data_services
                .contains(DataSchemaBean.DataWarehouseSchemaBean.name);
        if ((data_services.contains(DataSchemaBean.DataWarehouseSchemaBean.name))
                || old_data_service_matches_dw) {
            final Configuration hive_config = ElasticsearchHiveUtils
                    .getHiveConfiguration(_service_context.getGlobalProperties());

            final DataBucketBean delete_bucket = old_bucket.filter(__ -> old_data_service_matches_dw)
                    .orElse(bucket);
            final String delete_string = ElasticsearchHiveUtils.deleteHiveSchema(delete_bucket,
                    delete_bucket.data_schema().data_warehouse_schema());

            final Validation<String, String> maybe_recreate_string = data_services
                    .contains(DataSchemaBean.DataWarehouseSchemaBean.name)
                            ? ElasticsearchHiveUtils.generateFullHiveSchema(Optional.empty(), bucket,
                                    bucket.data_schema().data_warehouse_schema(),
                                    Optional.of(_crud_factory.getClient()), _config)
                            : Validation.success(null);

            final Validation<String, Boolean> ret_val = maybe_recreate_string
                    .bind(recreate_string -> ElasticsearchHiveUtils.registerHiveTable(Optional.empty(),
                            hive_config, Optional.of(delete_string), Optional.ofNullable(recreate_string)));

            if (ret_val.isFail()) {
                mutable_errors.add(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                        "onPublishOrUpdate", ret_val.fail()));
            } else {
                _logger.info(ErrorUtils.get("Register/update/delete hive ({2}) table for bucket {0}: {1}",
                        bucket.full_name(), delete_string + "/" + maybe_recreate_string.success(),
                        ElasticsearchHiveUtils.getParamsFromHiveConfig(hive_config)));
            }
        }
        return CompletableFuture.completedFuture(mutable_errors);
    } catch (Throwable t) {
        return CompletableFuture
                .completedFuture(Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                        "onPublishOrUpdate", ErrorUtils.getLongForm("{0}", t))));
    }
}
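
Note that onPublishOrUpdate never returns a failed future: validation problems are accumulated into a list of message beans, and even the catch block hands back a normally-completed one-element list. A minimal sketch of that errors-as-data convention, using plain strings in place of BasicMessageBean and a hypothetical service-name check:

import java.util.Collection;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

public class ErrorListSketch {
    static CompletableFuture<Collection<String>> onUpdate(Set<String> enabledServices) {
        try {
            final List<String> mutableErrors = new LinkedList<>();
            if (!enabledServices.contains("search_index_service")) { // hypothetical check
                mutableErrors.add("search_index_service is not enabled");
            }
            // Success and partial failure look the same to the caller: a completed list
            return CompletableFuture.completedFuture(mutableErrors);
        } catch (Throwable t) {
            // Even unexpected errors arrive as a normally-completed one-element list
            return CompletableFuture.completedFuture(Collections.singletonList(t.toString()));
        }
    }
}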

From source file:com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public CompletableFuture<?> flushBatchOutput(Optional<DataBucketBean> bucket, AnalyticThreadJobBean job) {
    _mutable_state.has_unflushed_data = false; // (just means that the shutdown hook will do nothing)

    // (this can safely be run for multiple jobs since it only applies to the outputter we're using in this process anyway, plus multiple
    // flushes don't have any functional side effects)

    final DataBucketBean my_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());

    _logger.info(ErrorUtils.get("Flushing output for bucket:job {0}:{1}", my_bucket.full_name(), job.name()));

    // First flush the loggers
    final Stream<CompletableFuture<?>> flush_loggers = _mutable_state.bucket_loggers.values().stream()
            .map(l -> l.flush());

    // Flush external and sub-buckets:
    final Stream<CompletableFuture<?>> flush_external = _mutable_state.external_buckets.values().stream()
            .map(e -> {
                return e.<CompletableFuture<?>>either(ee -> ee.<CompletableFuture<?>>either(batch -> {
                    return batch.flushOutput(); // flush external output
                }, slow -> {
                    //(nothing to do)
                    return (CompletableFuture<?>) CompletableFuture.completedFuture(Unit.unit());

                }), topic -> {
                    //(nothing to do)
                    return (CompletableFuture<?>) CompletableFuture.completedFuture(Unit.unit());
                });
            });
    final Stream<CompletableFuture<?>> flush_sub = _mutable_state.sub_buckets.values().stream()
            .map(sub_context -> sub_context.flushBatchOutput(bucket, job));

    final Stream<CompletableFuture<?>> flush_writer = Stream
            .of(_multi_writer.optional().<CompletableFuture<?>>map(writer -> writer.flushBatchOutput())
                    .orElseGet(() -> (CompletableFuture<?>) CompletableFuture.completedFuture(Unit.unit())));

    // Important: this is the line that actually executes all the flushes, so we need to ensure each of the streams above is included here:
    return CompletableFuture.allOf(Stream.of(flush_loggers, flush_external, flush_sub, flush_writer)
            .flatMap(__ -> __).toArray(CompletableFuture[]::new));
}
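
Throughout flushBatchOutput, completedFuture(Unit.unit()) serves as a neutral element: branches with nothing to flush still contribute a future, so the final allOf can treat every branch uniformly. A minimal sketch of the same idea, with runAsync placeholders standing in for the real outputters:

import java.util.concurrent.CompletableFuture;
import java.util.stream.Stream;

public class AllOfSketch {
    static CompletableFuture<Void> flushAll(boolean writerActive) {
        final Stream<CompletableFuture<?>> loggers = Stream.of(
                CompletableFuture.runAsync(() -> { /* flush a logger */ }));
        final Stream<CompletableFuture<?>> writer = Stream.of(writerActive
                ? CompletableFuture.runAsync(() -> { /* flush the writer */ })
                : CompletableFuture.<Void>completedFuture(null)); // no writer: already-done no-op
        // allOf sees the pre-completed placeholder exactly like a real flush
        return CompletableFuture.allOf(
                Stream.concat(loggers, writer).toArray(CompletableFuture[]::new));
    }
}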

From source file:com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Wraps the communications with the tech module so that calls to completeExceptionally are handled
 * @param bucket
 * @param m
 * @param source
 * @param err_or_tech_module - the tech module (ignored unless the user code actually got called, i.e. err_or_tech_module.isRight() holds)
 * @param return_value - either the user return value or a wrap of the exception
 * @return
 */
public static final CompletableFuture<BucketActionReplyMessage> handleTechnologyErrors(
        final DataBucketBean bucket, final BucketActionMessage m, final String source,
        final Validation<BasicMessageBean, Tuple2<IAnalyticsTechnologyModule, ClassLoader>> err_or_tech_module,
        final CompletableFuture<BucketActionReplyMessage> return_value // "pipeline element"
) {
    if (return_value.isCompletedExceptionally()) { // The technology developer called completeExceptionally
        try {
            return_value.get(); // (causes an exception)
        } catch (Throwable t) { // t.getCause() is the exception we want
            // Note if we're here then err_or_tech_module must be "right"
            return CompletableFuture.completedFuture(new BucketActionHandlerMessage(source,
                    SharedErrorUtils.buildErrorMessage(source, m,
                            ErrorUtils.getLongForm(AnalyticsErrorUtils.NO_TECHNOLOGY_NAME_OR_ID, t.getCause(),
                                    m.bucket().full_name(), err_or_tech_module.success()._1().getClass()))));
        }
    }
    //(else fall through to...)
    return return_value;
}
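
Probing with isCompletedExceptionally() and then calling get() purely to surface the cause, as above, is the standard way to turn a failed future into a normally-completed error reply. A minimal sketch, assuming a simple String reply type:

import java.util.concurrent.CompletableFuture;

public class UnwrapSketch {
    static CompletableFuture<String> normalize(CompletableFuture<String> returnValue) {
        if (returnValue.isCompletedExceptionally()) {
            try {
                returnValue.get(); // always throws here; we only call it to surface the cause
            } catch (Throwable t) {
                // Re-wrap the failure as a normally-completed error reply
                return CompletableFuture.completedFuture("ERROR: " + t.getCause());
            }
        }
        return returnValue; // completed normally (or still pending): pass straight through
    }
}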

From source file:org.apache.bookkeeper.meta.MockLedgerManager.java

@Override
public CompletableFuture<Void> removeLedgerMetadata(long ledgerId, Version version) {
    return CompletableFuture.completedFuture(null);
}
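
For mocks like this one, completedFuture(null) is the canonical way to satisfy a CompletableFuture<Void> contract without doing any work. A sketch of the same trick applied to a hypothetical async interface:

import java.util.concurrent.CompletableFuture;

public class MockSketch {
    // Hypothetical async interface, standing in for LedgerManager
    interface AsyncStore {
        CompletableFuture<Void> remove(long id);
    }

    // Test stub: reports immediate success without touching any real storage
    static final AsyncStore NO_OP_STORE = id -> CompletableFuture.completedFuture(null);
}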

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerTest.java

@Test
public void testDeletionAfterRetention() throws Exception {
    ManagedLedgerFactory factory = new ManagedLedgerFactoryImpl(bkc, bkc.getZkHandle());
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    config.setRetentionSizeInMB(0);
    config.setMaxEntriesPerLedger(1);
    config.setRetentionTime(1, TimeUnit.SECONDS);

    ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open("deletion_after_retention_test_ledger", config);
    ManagedCursor c1 = ml.openCursor("c1noretention");
    ml.addEntry("iamaverylongmessagethatshouldnotberetained".getBytes());
    c1.skipEntries(1, IndividualDeletedEntries.Exclude);
    ml.close();

    // reopen ml
    ml = (ManagedLedgerImpl) factory.open("deletion_after_retention_test_ledger", config);
    c1 = ml.openCursor("c1noretention");
    ml.addEntry("shortmessage".getBytes());
    c1.skipEntries(1, IndividualDeletedEntries.Exclude);
    // let retention expire
    Thread.sleep(1000);
    ml.internalTrimConsumedLedgers(CompletableFuture.completedFuture(null));

    assertTrue(ml.getLedgersInfoAsList().size() <= 1);
    assertTrue(ml.getTotalSize() <= "shortmessage".getBytes().length);
    ml.close();
}
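
Here completedFuture(null) is handed to internalTrimConsumedLedgers as a promise the test never inspects: complete() and completeExceptionally() are no-ops on an already-completed future, so the trim simply runs without anyone waiting on it. A minimal sketch of the idiom, with a hypothetical trim method:

import java.util.concurrent.CompletableFuture;

public class PromiseSketch {
    // Hypothetical method in the style of internalTrimConsumedLedgers: it signals
    // completion through the promise it is handed
    static void trim(CompletableFuture<Void> promise) {
        try {
            // ... do the cleanup work ...
            promise.complete(null);
        } catch (Throwable t) {
            promise.completeExceptionally(t);
        }
    }

    public static void main(String[] args) {
        // Caller doesn't care about the outcome: an already-completed future silently
        // swallows the complete()/completeExceptionally() calls
        trim(CompletableFuture.completedFuture(null));
    }
}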