Example usage for java.util.concurrent CompletableFuture allOf

List of usage examples for java.util.concurrent CompletableFuture allOf

Introduction

This page collects usage examples for java.util.concurrent.CompletableFuture.allOf, drawn from open-source projects.

Prototype

public static CompletableFuture<Void> allOf(CompletableFuture<?>... cfs) 

Document

Returns a new CompletableFuture that is completed when all of the given CompletableFutures complete. If any of the given CompletableFutures complete exceptionally, then the returned CompletableFuture also does so, with a CompletionException holding this exception as its cause. Otherwise, the results, if any, of the given CompletableFutures are not reflected in the returned CompletableFuture, but may be obtained by inspecting them individually. If no CompletableFutures are provided, it returns a CompletableFuture completed with the value null.
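
Before the project examples, here is a minimal, self-contained sketch of the basic call pattern (the tasks and values are hypothetical, purely for illustration). Since allOf yields a CompletableFuture<Void>, the individual results are read back from the input futures once it completes:

import java.util.concurrent.CompletableFuture;

public class AllOfBasics {
    public static void main(String[] args) {
        // Two independent asynchronous tasks (illustrative only)
        CompletableFuture<String> f1 = CompletableFuture.supplyAsync(() -> "first");
        CompletableFuture<Integer> f2 = CompletableFuture.supplyAsync(() -> 42);

        // Completes when both f1 and f2 complete; the Void result carries no data,
        // so each input future is join()ed afterwards to read its value.
        CompletableFuture<Void> all = CompletableFuture.allOf(f1, f2);
        all.join();

        System.out.println(f1.join() + " / " + f2.join());
    }
}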

Usage

From source file: com.ikanow.aleph2.management_db.mongodb.services.IkanowV1SyncService_LibraryJars.java

/** Top level logic for source synchronization
 * @param library_mgmt
 * @param share_db
 */
protected CompletableFuture<Void> synchronizeLibraryJars(
        final IManagementCrudService<SharedLibraryBean> library_mgmt, final IStorageService aleph2_fs,
        final ICrudService<JsonNode> share_db, final GridFS share_fs) {
    return compareJarsToLibaryBeans_get(library_mgmt, share_db).thenApply(v1_v2 -> {
        return compareJarsToLibraryBeans_categorize(v1_v2);
    }).thenCompose(create_update_delete -> {
        if (create_update_delete._1().isEmpty() && create_update_delete._2().isEmpty()
                && create_update_delete._3().isEmpty()) {
            //(nothing to do)
            return CompletableFuture.completedFuture(null);
        }
        _logger.info(ErrorUtils.get("Found [create={0}, delete={1}, update={2}] sources",
                create_update_delete._1().size(), create_update_delete._2().size(),
                create_update_delete._3().size()));

        final List<CompletableFuture<Boolean>> l1 = create_update_delete._1().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(id -> Tuples._2T(id,
                        createLibraryBean(id, library_mgmt, aleph2_fs, true, share_db, share_fs, _context)))
                .<CompletableFuture<Boolean>>map(id_fres -> updateV1ShareErrorStatus_top(id_fres._1(),
                        id_fres._2(), library_mgmt, share_db, true))
                .collect(Collectors.toList());

        final List<CompletableFuture<Boolean>> l2 = create_update_delete._2().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(
                        id -> Tuples._2T(id, deleteLibraryBean(id, library_mgmt, aleph2_fs)))
                .<CompletableFuture<Boolean>>map(id_fres -> CompletableFuture.completedFuture(true))
                .collect(Collectors.toList());

        final List<CompletableFuture<Boolean>> l3 = create_update_delete._3().stream().parallel()
                .<Tuple2<String, ManagementFuture<?>>>map(id -> Tuples._2T(id,
                        createLibraryBean(id, library_mgmt, aleph2_fs, false, share_db, share_fs, _context)))
                .<CompletableFuture<Boolean>>map(id_fres -> updateV1ShareErrorStatus_top(id_fres._1(),
                        id_fres._2(), library_mgmt, share_db, false))
                .collect(Collectors.toList());

        List<CompletableFuture<?>> retval = Arrays.asList(l1, l2, l3).stream().flatMap(l -> l.stream())
                .collect(Collectors.toList());

        return CompletableFuture.allOf(retval.toArray(new CompletableFuture[0]));
    });
}

From source file: com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if this is an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - if true, overwrite an existing version of the bucket (update mode)
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields like multi-node, you can only change if the bucket status is set to suspended, to make
    // the control logic easy
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}
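
The allOf/join sequence in storeValidatedObject is an instance of a common idiom: because allOf yields Void, the results are gathered by joining each input future, which is guaranteed to have completed by then. A generic sketch of that idiom, with a hypothetical helper name:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

public final class FutureGather {
    /** Turns a list of futures into a future of the list of their results by
     *  waiting on allOf and then joining each (already completed) element. */
    public static <T> CompletableFuture<List<T>> sequence(List<CompletableFuture<T>> futures) {
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenApply(ignored -> futures.stream()
                        .map(CompletableFuture::join) // safe: allOf guarantees completion
                        .collect(Collectors.toList()));
    }
}

A helper along these lines would let the service-registration futures above be collapsed into a single future of their message collections before being combined with the harvest results.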

From source file: io.atomix.protocols.gossip.map.AntiEntropyMapDelegate.java

/**
 * Handles a bootstrap request from a peer.
 * <p>
 * When handling a bootstrap request from a peer, the node sends batches of entries back to the peer and completes the
 * bootstrap request once all batches have been received and processed.
 *
 * @param peer the peer that sent the bootstrap request
 * @return a future to be completed once updates have been sent to the peer
 */
private CompletableFuture<Void> handleBootstrap(MemberId peer) {
    log.trace("Received bootstrap request from {} for {}", peer, bootstrapMessageSubject);

    Function<List<UpdateEntry>, CompletableFuture<Void>> sendUpdates = updates -> {
        log.trace("Initializing {} with {} entries", peer, updates.size());
        return clusterCommunicator.<List<UpdateEntry>, Void>send(initializeMessageSubject,
                ImmutableList.copyOf(updates), serializer::encode, serializer::decode, peer)
                .whenComplete((result, error) -> {
                    if (error != null) {
                        log.debug("Failed to initialize {}", peer, error);
                    }
                });
    };

    List<CompletableFuture<Void>> futures = Lists.newArrayList();
    List<UpdateEntry> updates = Lists.newArrayList();
    for (Map.Entry<String, MapValue> entry : items.entrySet()) {
        String key = entry.getKey();
        MapValue value = entry.getValue();
        if (value.isAlive()) {
            updates.add(new UpdateEntry(key, value));
            if (updates.size() == DEFAULT_MAX_EVENTS) {
                futures.add(sendUpdates.apply(updates));
                updates = new ArrayList<>();
            }
        }
    }

    if (!updates.isEmpty()) {
        futures.add(sendUpdates.apply(updates));
    }
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
}
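
Stripped of the map-specific details, the batching pattern in handleBootstrap reduces to the sketch below; the sendBatch function, the batch size, and the class name are hypothetical stand-ins:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;

public final class BatchedSend {
    /** Splits items into fixed-size batches, fires one asynchronous send per batch,
     *  and returns a future that completes once every batch has been acknowledged. */
    public static <T> CompletableFuture<Void> sendInBatches(
            List<T> items, int batchSize, Function<List<T>, CompletableFuture<Void>> sendBatch) {
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        List<T> batch = new ArrayList<>();
        for (T item : items) {
            batch.add(item);
            if (batch.size() == batchSize) {
                futures.add(sendBatch.apply(batch));
                batch = new ArrayList<>(); // start a fresh batch; the sent list is no longer touched
            }
        }
        if (!batch.isEmpty()) {
            futures.add(sendBatch.apply(batch)); // flush the final partial batch
        }
        return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
    }
}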

From source file: com.ikanow.aleph2.data_import.services.HarvestContext.java

@Override
public CompletableFuture<?> flushBatchOutput(Optional<DataBucketBean> bucket) {
    // Flush data and logger

    final Stream<CompletableFuture<?>> flush_writer = Stream.of(_multi_writer.get().flushBatchOutput());
    final Stream<CompletableFuture<?>> flush_logger = _mutable_state.bucket_loggers.values().stream()
            .map(l -> l.flush());

    return CompletableFuture.allOf(Stream.concat(flush_writer, flush_logger).toArray(CompletableFuture[]::new));
}

From source file: com.ikanow.aleph2.analytics.services.DeduplicationService.java

@SuppressWarnings("unchecked")
@Override
public void onStageComplete(final boolean is_original) {
    _custom_handler.optional().ifPresent(handler -> handler.onStageComplete(true));

    final Supplier<String> subsystem_builder = () -> (_is_system_dedup_stage.get() ? ""
            : ("." + Optional.ofNullable(_control.get().name()).orElse("no_name")));
    final Supplier<String> command_builder = () -> (_is_system_dedup_stage.get() ? "system"
            : Optional.ofNullable(_control.get().name()).orElse("no_name"));

    _logger.optional().ifPresent(l -> l.log(Level.DEBUG, ErrorUtils.lazyBuildMessage(true,
            () -> "DeduplicationService" + subsystem_builder.get(),
            () -> command_builder.get() + ".onStageComplete", () -> null,
            () -> ErrorUtils.get(
                    "Job {0} completed deduplication: nondup_keys={1}, dup_keys={2}, dups_inc={3}, dups_db={4}, del={5}",
                    command_builder.get(), Integer.toString(_mutable_stats.nonduplicate_keys),
                    Integer.toString(_mutable_stats.duplicate_keys),
                    Integer.toString(_mutable_stats.duplicates_incoming),
                    Integer.toString(_mutable_stats.duplicates_existing),
                    Integer.toString(_mutable_stats.deleted)),
            () -> (Map<String, Object>) _mapper.convertValue(_mutable_stats, Map.class))));

    if (!mutable_uncompleted_deletes.isEmpty()) {
        try {
            CompletableFuture.allOf(mutable_uncompleted_deletes.stream().toArray(CompletableFuture[]::new))
                    .get(60, TimeUnit.SECONDS);
        } catch (Exception e) {
            _logger.optional().ifPresent(l -> l.log(Level.ERROR,
                    ErrorUtils.lazyBuildMessage(false, () -> "DeduplicationService" + subsystem_builder.get(),
                            () -> command_builder.get() + ".onStageComplete", () -> null,
                            () -> ErrorUtils.get("Job {0}: error completing deleted ids: {1}",
                                    command_builder.get(), e.getMessage()),
                            () -> null)));
        }
    }
    _logger.optional().ifPresent(Lambdas.wrap_consumer_u(l -> l.flush().get(60, TimeUnit.SECONDS)));
}
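
The bounded wait on the outstanding deletes can be isolated into a small sketch; the 60-second timeout mirrors the example, everything else (names, logging) is hypothetical. get(timeout, unit) blocks the calling thread, so this style suits stage-complete or shutdown paths rather than hot loops:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public final class BoundedWait {
    /** Waits up to the given number of seconds for all futures to complete,
     *  logging and carrying on rather than propagating any failure. */
    public static void awaitAllQuietly(List<CompletableFuture<?>> futures, long seconds) {
        try {
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                    .get(seconds, TimeUnit.SECONDS);
        } catch (Exception e) {
            // Timeout, interruption or an upstream failure: swallowed here,
            // mirroring the "best effort" handling in the example above.
            System.err.println("Did not complete cleanly: " + e.getMessage());
        }
    }
}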

From source file: com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Long> deleteObjectsBySpec(final QueryComponent<O> spec) {
    try {
        Tuple2<FilterBuilder, UnaryOperator<SearchRequestBuilder>> query = ElasticsearchUtils
                .convertToElasticsearchFilter(spec, _state.id_ranges_ok);

        final Optional<Long> maybe_size = Optional.ofNullable(spec.getLimit()).filter(x -> x > 0);
        // (don't scroll if a limit is set and we're sorting - note sorting is ignored otherwise)
        final boolean scroll = !(maybe_size.isPresent() && !Optionals.ofNullable(spec.getOrderBy()).isEmpty());
        final long max_size = maybe_size.orElse((long) Integer.MAX_VALUE).intValue();

        final SearchRequestBuilder srb = Optional.of(_state.client.prepareSearch()
                .setIndices(_state.es_context.indexContext().getReadableIndexArray(Optional.empty()))
                .setTypes(_state.es_context.typeContext().getReadableTypeArray())
                .setQuery(QueryBuilders.constantScoreQuery(query._1())).setSize(1000).setFetchSource(false)
                .setNoFields()).map(
                        s -> (!scroll && (null != spec.getOrderBy()))
                                ? spec.getOrderBy().stream().reduce(s,
                                        (ss, sort) -> ss.addSort(sort._1(),
                                                sort._2() > 0 ? SortOrder.ASC : SortOrder.DESC),
                                        (s1, s2) -> s1)
                                : s)
                .map(s -> scroll ? s.setSearchType(SearchType.SCAN).setScroll(new TimeValue(60000)) : s).get();

        return ElasticsearchFutureUtils.wrap(srb.execute(), sr -> {
            long mutable_count = 0L;
            final int batch_size = 50;
            PingPongList<CompletableFuture<?>> mutable_future_batches = new PingPongList<>(batch_size);

            if (scroll && ((sr.getHits().totalHits() > 0) && (0 == sr.getHits().getHits().length))) {
                //(odd workaround, if number of hits < scroll size, then the reply contains no hits, need to scroll an extra time to get it)
                sr = _state.client.prepareSearchScroll(sr.getScrollId()).setScroll(new TimeValue(60000))
                        .execute().actionGet();
            }
            while ((sr.getHits().getHits().length > 0) && (mutable_count < max_size)) {
                BulkRequestBuilder bulk_request = _state.client.prepareBulk();
                for (SearchHit sh : sr.getHits().getHits()) {
                    bulk_request.add(_state.client.prepareDelete().setIndex(sh.index()).setId(sh.id())
                            .setType(sh.type()));

                    mutable_count++; // (for now we'll just report on the _ids we found)
                    if (mutable_count >= max_size)
                        break;
                }
                // We're full, so wait for the first half of the data to complete
                if (mutable_future_batches
                        .add(ElasticsearchFutureUtils.wrap(bulk_request.execute(), __ -> null))) {
                    try {
                        CompletableFuture.allOf(mutable_future_batches.getAboutToBeOverwrittenList().stream()
                                .toArray(CompletableFuture[]::new)).join();
                    } catch (Exception e) {
                    } // just carry on if fails, probably more important to keep trying to delete

                    mutable_future_batches.getAboutToBeOverwrittenList().clear();
                }
                if (scroll && (mutable_count < max_size))
                    sr = _state.client.prepareSearchScroll(sr.getScrollId()).setScroll(new TimeValue(60000))
                            .execute().actionGet();
                else
                    break;
            }
            if (scroll)
                _state.client.prepareClearScroll().addScrollId(sr.getScrollId());

            //(wait for any remaining batches - this one we'll allow to error out since we've completed all our operations)
            CompletableFuture
                    .allOf(mutable_future_batches.getCompleteStream().toArray(CompletableFuture[]::new)).join();

            return mutable_count; //(just return an estimate)
        }, (err, future) -> {
            if ((err instanceof IndexMissingException) || (err instanceof SearchPhaseExecutionException)) //(this one can come up as on a read on a newly created index)
            {
                // just treat this like an "object not found"
                future.complete(0L);
            } else {
                future.completeExceptionally(err);
            }
        });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}

From source file: com.ikanow.aleph2.analytics.services.AnalyticsContext.java

@Override
public CompletableFuture<?> flushBatchOutput(Optional<DataBucketBean> bucket, AnalyticThreadJobBean job) {
    _mutable_state.has_unflushed_data = false; // (just means that the shutdown hook will do nothing)

    // (this can safely be run for multiple jobs since it only applies to the outputter we're using in this process anyway, plus multiple
    // flushes don't have any functional side effects)

    final DataBucketBean my_bucket = bucket.orElseGet(() -> _mutable_state.bucket.get());

    _logger.info(ErrorUtils.get("Flushing output for bucket:job {0}:{1}", my_bucket.full_name(), job.name()));

    //first flush loggers
    final Stream<CompletableFuture<?>> flush_loggers = _mutable_state.bucket_loggers.values().stream()
            .map(l -> l.flush());

    // Flush external and sub-buckets:
    final Stream<CompletableFuture<?>> flush_external = _mutable_state.external_buckets.values().stream()
            .map(e -> {
                return e.<CompletableFuture<?>>either(ee -> ee.<CompletableFuture<?>>either(batch -> {
                    return batch.flushOutput(); // flush external output
                }, slow -> {
                    //(nothing to do)
                    return (CompletableFuture<?>) CompletableFuture.completedFuture(Unit.unit());

                }), topic -> {
                    //(nothing to do)
                    return (CompletableFuture<?>) CompletableFuture.completedFuture(Unit.unit());
                });
            });
    final Stream<CompletableFuture<?>> flush_sub = _mutable_state.sub_buckets.values().stream()
            .map(sub_context -> sub_context.flushBatchOutput(bucket, job));

    final Stream<CompletableFuture<?>> flush_writer = Stream
            .of(_multi_writer.optional().<CompletableFuture<?>>map(writer -> writer.flushBatchOutput())
                    .orElseGet(() -> (CompletableFuture<?>) CompletableFuture.completedFuture(Unit.unit())));

    // Important: this is the line that actually executes all the flushes, so need to ensure each of the above is added here:
    return CompletableFuture.allOf(Stream.of(flush_loggers, flush_external, flush_sub, flush_writer)
            .flatMap(__ -> __).toArray(CompletableFuture[]::new));
}
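
The final statement, which concatenates several Streams of futures and hands them to allOf, can be factored into a small utility; the method name is hypothetical, and an empty input simply yields an already-completed future, per allOf's contract:

import java.util.concurrent.CompletableFuture;
import java.util.stream.Stream;

public final class FlushAll {
    /** Completes once every future produced by the given streams has completed. */
    @SafeVarargs
    public static CompletableFuture<Void> awaitAll(Stream<CompletableFuture<?>>... groups) {
        return CompletableFuture.allOf(
                Stream.of(groups).flatMap(s -> s).toArray(CompletableFuture[]::new));
    }
}

With such a utility, the return statement above would read awaitAll(flush_loggers, flush_external, flush_sub, flush_writer).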

From source file: com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Combine the analytic thread level results and the per-job results into a single reply
 * @param top_level
 * @param per_job
 * @param source
 * @return
 */
protected final static CompletableFuture<BucketActionReplyMessage> combineResults(
        final CompletableFuture<BasicMessageBean> top_level,
        final List<CompletableFuture<BasicMessageBean>> per_job, final String source) {
    if (per_job.isEmpty()) {
        return top_level.thenApply(reply -> new BucketActionHandlerMessage(source, reply));
    } else { // slightly more complex:

        // First off wait for them all to complete:
        final CompletableFuture<?>[] futures = per_job.toArray(new CompletableFuture<?>[0]);

        return top_level.thenCombine(CompletableFuture.allOf(futures), (thread, __) -> {
            List<BasicMessageBean> replies = Stream.concat(Lambdas.get(() -> {
                if (thread.success() && ((null == thread.message()) || thread.message().isEmpty())) {
                    // Ignore top level, it's not very interesting
                    return Stream.empty();
                } else
                    return Stream.of(thread);
            }), per_job.stream().map(cf -> cf.join())

            ).collect(Collectors.toList());

            return (BucketActionReplyMessage) new BucketActionCollectedRepliesMessage(source, replies,
                    Collections.emptySet(), Collections.emptySet());
        }).exceptionally(t -> {
            return (BucketActionReplyMessage) new BucketActionHandlerMessage(source,
                    ErrorUtils.buildErrorMessage(DataBucketAnalyticsChangeActor.class.getSimpleName(), source,
                            ErrorUtils.getLongForm("{0}", t)));
        });
    }
}

From source file: com.ikanow.aleph2.data_import_manager.analytics.actors.DataBucketAnalyticsChangeActor.java

/** Inefficient but safe utility for sending update events to the trigger sibling
 * @param job_results
 * @param bucket
 * @param grouping_lambda - returns the job type based on the job and return value
 * @param me_sibling
 */
protected static <T> void sendOnTriggerEventMessages(
        final List<Tuple2<AnalyticThreadJobBean, CompletableFuture<T>>> job_results,
        final DataBucketBean bucket,
        final Function<Tuple2<AnalyticThreadJobBean, T>, Optional<JobMessageType>> grouping_lambda,
        final Tuple2<ActorRef, ActorSelection> me_sibling, final ILoggingService _logging_service) {
    if (null == me_sibling)
        return; // (just keeps bw compatibility with the various test cases we currently have - won't get encountered in practice)

    // Perform the processing even if 1+ of the jobs fails - that job will just be flat-mapped out
    CompletableFuture.allOf(job_results.stream().map(j_f -> j_f._2()).toArray(CompletableFuture<?>[]::new))
            .thenAccept(__ -> {
                sendOnTriggerEventMessages_phase2(job_results, bucket, grouping_lambda, me_sibling,
                        _logging_service);
            }).exceptionally(__ -> {
                sendOnTriggerEventMessages_phase2(job_results, bucket, grouping_lambda, me_sibling,
                        _logging_service);
                return null;
            });
}
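
The thenAccept/exceptionally pairing above is how a follow-up step is run whether or not any of the awaited futures failed, since allOf completes exceptionally if any input did. A generic sketch of that pattern, with hypothetical names:

import java.util.List;
import java.util.concurrent.CompletableFuture;

public final class AlwaysAfter {
    /** Runs the given action once every future has finished, even if some of
     *  them completed exceptionally. */
    public static void runWhenAllSettled(List<CompletableFuture<?>> futures, Runnable action) {
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenAccept(ignored -> action.run())
                .exceptionally(t -> {
                    action.run(); // still run the follow-up when something failed
                    return null;
                });
    }
}

The same effect can also be written with a single whenComplete((result, error) -> action.run()) stage, as the HBase example further down this page does.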

From source file: org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller.java

private void groupAndSend(Stream<Action> actions, int tries) {
    long locateTimeoutNs;
    if (operationTimeoutNs > 0) {
        locateTimeoutNs = remainingTimeNs();
        if (locateTimeoutNs <= 0) {
            failAll(actions, tries);
            return;
        }
    } else {
        locateTimeoutNs = -1L;
    }
    ConcurrentMap<ServerName, ServerRequest> actionsByServer = new ConcurrentHashMap<>();
    ConcurrentLinkedQueue<Action> locateFailed = new ConcurrentLinkedQueue<>();
    CompletableFuture.allOf(
            actions.map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(),
                    RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> {
                        if (error != null) {
                            error = translateException(error);
                            if (error instanceof DoNotRetryIOException) {
                                failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), "");
                                return;
                            }
                            addError(action, error, null);
                            locateFailed.add(action);
                        } else {
                            computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new)
                                    .addAction(loc, action);
                        }
                    })).toArray(CompletableFuture[]::new))
            .whenComplete((v, r) -> {
                if (!actionsByServer.isEmpty()) {
                    send(actionsByServer, tries);
                }
                if (!locateFailed.isEmpty()) {
                    tryResubmit(locateFailed.stream(), tries);
                }
            });
}