Example usage for java.util Collection stream

List of usage examples for java.util Collection stream

Introduction

On this page you can find usage examples for java.util.Collection.stream().

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source.
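
As a warm-up before the full examples below, here is a minimal, self-contained sketch of the method (the class name and list contents are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class CollectionStreamDemo {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("alpha", "beta", "gamma");

        // stream() returns a sequential Stream backed by the collection
        List<String> upper = names.stream()
                .filter(n -> n.length() > 4) // keep elements matching a predicate
                .map(String::toUpperCase) // transform each element
                .collect(Collectors.toList()); // materialize into a new list

        System.out.println(upper); // prints [ALPHA, GAMMA]
    }
}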

Usage

From source file:com.ctc.fulfilmentprocess.actions.order.SubprocessesCompletedAction.java

@Override
public Transition executeAction(final OrderProcessModel process) {
    LOG.info(PROCESS_MSG + process.getCode() + " in step " + getClass());
    LOG.info(PROCESS_MSG + process.getCode() + " is checking for " + process.getConsignmentProcesses().size()
            + " subprocess results");

    final OrderModel order = process.getOrder();
    final Collection<ConsignmentProcessModel> consignmentProcesses = process.getConsignmentProcesses();

    if (CollectionUtils.isNotEmpty(consignmentProcesses)) {
        final Optional<ConsignmentProcessModel> atLeastOneConsProcessNotDone = consignmentProcesses.stream()
                .filter(consProcess -> !consProcess.isDone()).findFirst();
        final boolean allConsProcessNotDone = consignmentProcesses.stream()
                .allMatch(consProcess -> !consProcess.isDone());

        if (allConsProcessNotDone) {
            LOG.info(PROCESS_MSG + process.getCode() + " found all subprocesses incomplete");
            order.setDeliveryStatus(DeliveryStatus.NOTSHIPPED);
            save(order);
            return Transition.NOK;
        } else if (atLeastOneConsProcessNotDone.isPresent()) {
            LOG.info(PROCESS_MSG + process.getCode() + " found subprocess "
                    + atLeastOneConsProcessNotDone.get().getCode() + " incomplete -> wait again!");
            order.setDeliveryStatus(DeliveryStatus.PARTSHIPPED);
            save(order);
            return Transition.NOK;
        }
    }

    LOG.info(PROCESS_MSG + process.getCode() + " found all subprocesses complete");
    order.setDeliveryStatus(DeliveryStatus.SHIPPED);
    save(order);
    return Transition.OK;
}
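
The filter(...).findFirst().isPresent() check above can also be expressed with the short-circuiting anyMatch; a minimal sketch of both match operations, using a hypothetical Subprocess record in place of ConsignmentProcessModel:

import java.util.Arrays;
import java.util.Collection;

public class MatchDemo {
    // hypothetical stand-in for ConsignmentProcessModel (requires Java 16+)
    record Subprocess(String code, boolean done) {}

    public static void main(String[] args) {
        Collection<Subprocess> processes = Arrays.asList(
                new Subprocess("cons-1", true),
                new Subprocess("cons-2", false));

        // equivalent to filter(p -> !p.done()).findFirst().isPresent()
        boolean atLeastOneNotDone = processes.stream().anyMatch(p -> !p.done());
        // true only when every subprocess is still incomplete
        boolean allNotDone = processes.stream().allMatch(p -> !p.done());

        System.out.println(atLeastOneNotDone + " " + allNotDone); // prints true false
    }
}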

From source file:org.obiba.mica.micaConfig.service.PluginsService.java

/**
 * Initialize plugin resources.
 */
private void initPlugins() {
    Collection<PluginResources> plugins = getPlugins(true);
    // ensure there is a mica-search plugin installed
    if (plugins.stream().noneMatch(p -> "mica-search".equals(p.getType()))) {
        installPlugin("mica-search-es", null);
        // rescan plugins
        plugins = getPlugins(true);
    }
    boolean micaSearchFound = false; // mica-search plugin is a singleton
    for (PluginResources plugin : plugins) {
        if ("mica-search".equals(plugin.getType()) && !micaSearchFound) {
            initSearchEngineServicePlugin(plugin);
            micaSearchFound = true;
        }
    }
}

From source file:co.runrightfast.core.security.cert.X509V3CertRequest.java

private void checkConstraints(final Collection<X509CertExtension> extensions) {
    if (CollectionUtils.isEmpty(extensions)) {
        return;
    }

    final Extensions exts = new Extensions(
            extensions.stream().map(X509CertExtension::toExtension).toArray(Extension[]::new));
    checkArgument(BasicConstraints.fromExtensions(exts) == null,
            "BasicConstraints must not be specified as an extension - it is added automatically");
    checkArgument(SubjectKeyIdentifier.fromExtensions(exts) == null,
            "SubjectKeyIdentifier must not be specified as an extension - it is added automatically");
}
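
The mapped extensions above are collected into an array with toArray(Extension[]::new); a minimal sketch of that terminal operation with plain strings:

import java.util.Arrays;
import java.util.Collection;

public class ToArrayDemo {
    public static void main(String[] args) {
        Collection<String> words = Arrays.asList("one", "two", "three");

        // the array-constructor reference lets the stream allocate a correctly typed array
        String[] upper = words.stream()
                .map(String::toUpperCase)
                .toArray(String[]::new);

        System.out.println(Arrays.toString(upper)); // prints [ONE, TWO, THREE]
    }
}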

From source file:com.netflix.spinnaker.orca.clouddriver.tasks.providers.aws.AmazonImageTagger.java

/**
 * Return true iff the tags on the current machine images match the desired tags.
 */
@Override
public boolean areImagesTagged(Collection<Image> targetImages, Stage stage) {
    Collection<MatchedImage> matchedImages = findImages(
            targetImages.stream().map(targetImage -> targetImage.imageName).collect(Collectors.toList()),
            stage);

    AtomicBoolean isUpserted = new AtomicBoolean(true);
    for (Image targetImage : targetImages) {
        targetImage.regions.forEach(region -> {
            MatchedImage matchedImage = matchedImages.stream()
                    .filter(m -> m.imageName.equals(targetImage.imageName)).findFirst().orElse(null);

            if (matchedImage == null) {
                isUpserted.set(false);
                return;
            }

            List<String> imagesForRegion = matchedImage.amis.get(region);
            imagesForRegion.forEach(image -> {
                Map<String, String> allImageTags = matchedImage.tagsByImageId.get(image);
                targetImage.tags.entrySet().forEach(entry -> {
                    // assert tag equality
                    isUpserted
                            .set(isUpserted.get() && entry.getValue().equals(allImageTags.get(entry.getKey())));
                });
            });
        });
    }

    return isUpserted.get();
}

From source file:com.ikanow.aleph2.analytics.storm.assets.PassthroughTopology.java

@Override
public Tuple2<Object, Map<String, String>> getTopologyAndConfiguration(final DataBucketBean bucket,
        final IEnrichmentModuleContext context) {
    final TopologyBuilder builder = new TopologyBuilder();

    final Collection<Tuple2<BaseRichSpout, String>> entry_points = context
            .getTopologyEntryPoints(BaseRichSpout.class, Optional.of(bucket));

    //DEBUG
    _logger.debug("Passthrough topology: loaded: "
            + entry_points.stream().map(x -> x.toString()).collect(Collectors.joining(":")));

    entry_points.forEach(spout_name -> builder.setSpout(spout_name._2(), spout_name._1()));
    entry_points.stream()
            .reduce(builder.setBolt(BOLT_NAME,
                    context.getTopologyStorageEndpoint(BaseRichBolt.class, Optional.of(bucket))),
                    (acc, v) -> acc.localOrShuffleGrouping(v._2()), (acc1, acc2) -> acc1 // (not possible in practice)
    );

    return Tuples._2T(builder.createTopology(), Collections.emptyMap());
}
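
The reduce call above folds every entry point into a single bolt declaration by chaining one grouping per spout; the combiner argument only matters for parallel streams and is unreachable here. A minimal sketch of the same fold pattern, with a hypothetical Declarer standing in for Storm's BoltDeclarer:

import java.util.Arrays;
import java.util.Collection;

public class ReduceFoldDemo {
    // hypothetical stand-in for Storm's fluent BoltDeclarer
    static class Declarer {
        Declarer localOrShuffleGrouping(String source) {
            System.out.println("grouping on " + source);
            return this; // fluent API: every call returns the same declarer
        }
    }

    public static void main(String[] args) {
        Collection<String> spoutNames = Arrays.asList("spout_a", "spout_b");

        // identity = the declarer, accumulator chains one grouping per element,
        // the combiner is a no-op because a sequential stream never invokes it
        spoutNames.stream().reduce(new Declarer(),
                (acc, name) -> acc.localOrShuffleGrouping(name),
                (acc1, acc2) -> acc1);
    }
}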

From source file:com.ikanow.aleph2.storm.samples.topology.SampleStormStreamTopology1.java

@Override
public Tuple2<Object, Map<String, String>> getTopologyAndConfiguration(final DataBucketBean bucket,
        final IEnrichmentModuleContext context) {
    final TopologyBuilder builder = new TopologyBuilder();

    final Collection<Tuple2<BaseRichSpout, String>> entry_points = context
            .getTopologyEntryPoints(BaseRichSpout.class, Optional.of(bucket));
    entry_points.forEach(spout_name -> builder.setSpout(spout_name._2(), spout_name._1()));
    entry_points.stream().reduce(builder.setBolt("sample_conversion_bolt", new SampleConversionBolt()),
            (acc, v) -> acc.localOrShuffleGrouping(v._2()), (acc1, acc2) -> acc1 // (not possible in practice)
    );

    builder.setBolt("sample_enrichment_bolt", new SampleEnrichmentBolt())
            .localOrShuffleGrouping("sample_conversion_bolt");
    builder.setBolt("default_aleph2_output_spout",
            context.getTopologyStorageEndpoint(BaseRichBolt.class, Optional.of(bucket)))
            .localOrShuffleGrouping("sample_enrichment_bolt");
    return Tuples._2T(builder.createTopology(), Collections.emptyMap());
}

From source file:mtsar.processors.meta.ZenCrowd.java

@Nonnull
@Override
public Map<Integer, AnswerAggregation> aggregate(@Nonnull Collection<Task> tasks) {
    requireNonNull(stage, "the stage provider should not provide null");
    if (tasks.isEmpty())
        return Collections.emptyMap();
    final Map<Integer, Task> taskIds = tasks.stream()
            .collect(Collectors.toMap(Task::getId, Function.identity()));
    final Models.ZenModel<Integer, Integer, String> zenModel = compute(stage, answerDAO, getTaskMap())
            .getZenModel();
    final ZenCrowdEM<Integer, Integer, String> zenCrowd = new ZenCrowdEM<>(zenModel);
    zenCrowd.computeLabelEstimates();
    final Map<Integer, AnswerAggregation> aggregations = zenCrowd.getCurrentModel().getCombinedEstLabels()
            .entrySet().stream().filter(entry -> taskIds.containsKey(entry.getKey()))
            .collect(Collectors.toMap(Map.Entry::getKey, entry -> new AnswerAggregation.Builder()
                    .setTask(taskIds.get(entry.getKey())).addAnswers(entry.getValue().getFirst()).build()));
    return aggregations;
}
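
The taskIds map above indexes the tasks by id using Collectors.toMap with Function.identity(); a minimal sketch with a hypothetical Task record:

import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ToMapDemo {
    // hypothetical stand-in for mtsar's Task (requires Java 16+)
    record Task(int id, String text) {}

    public static void main(String[] args) {
        Collection<Task> tasks = Arrays.asList(new Task(1, "a"), new Task(2, "b"));

        // key = the task id, value = the task itself; duplicate ids would throw
        Map<Integer, Task> byId = tasks.stream()
                .collect(Collectors.toMap(Task::id, Function.identity()));

        System.out.println(byId.get(2).text()); // prints b
    }
}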

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

/** Logic to perform the custom deduplication with the current and new versions
 * @param maybe_custom_handler
 * @param new_records
 * @param old_records
 * @param key
 * @return the stream of JSON objects to delete
 */
protected static Stream<JsonNode> handleCustomDeduplication(
        Optional<Tuple2<IEnrichmentBatchModule, DeduplicationEnrichmentContext>> maybe_custom_handler,
        final List<Tuple3<Long, IBatchRecord, ObjectNode>> new_records, final Collection<JsonNode> old_records,
        final JsonNode key) {
    return maybe_custom_handler.map(handler_context -> {
        handler_context._2().resetMutableState(old_records, key);

        final Consumer<IEnrichmentBatchModule> handler = new_module -> {
            final Stream<Tuple2<Long, IBatchRecord>> dedup_stream = Stream.concat(
                    new_records.stream().map(t3 -> Tuples._2T(t3._1(), t3._2())),
                    old_records.stream().map(old_record -> Tuples._2T(-1L,
                            (IBatchRecord) (new BatchRecordUtils.InjectedJsonBatchRecord(old_record)))));

            final int batch_size = new_records.size();

            new_module.onObjectBatch(dedup_stream, Optional.of(batch_size).filter(__ -> !old_records.isEmpty()), // (i.e. leave batch size blank if there's no dedup)
                    Optional.of(key));

            new_module.onStageComplete(false);
        };

        handler.accept(handler_context._1());

        return handler_context._2().getObjectIdsToDelete();
    }).orElse(Stream.empty());
}
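
The dedup_stream above merges the new and old records into one stream with Stream.concat; a minimal sketch of that merge with plain strings:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ConcatDemo {
    public static void main(String[] args) {
        List<String> newRecords = Arrays.asList("new-1", "new-2");
        Collection<String> oldRecords = Arrays.asList("old-1");

        // lazily concatenates both sources into a single sequential stream
        List<String> merged = Stream.concat(newRecords.stream(), oldRecords.stream())
                .collect(Collectors.toList());

        System.out.println(merged); // prints [new-1, new-2, old-1]
    }
}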

From source file:com.ikanow.aleph2.storm.samples.topology.JavaScriptTopology.java

@Override
public Tuple2<Object, Map<String, String>> getTopologyAndConfiguration(DataBucketBean bucket,
        IEnrichmentModuleContext context) {
    TopologyBuilder builder = new TopologyBuilder();
    String contextSignature = context.getEnrichmentContextSignature(Optional.of(bucket), Optional.empty());

    final Collection<Tuple2<BaseRichSpout, String>> entry_points = context
            .getTopologyEntryPoints(BaseRichSpout.class, Optional.of(bucket));
    entry_points.forEach(spout_name -> builder.setSpout(spout_name._2(), spout_name._1()));
    entry_points.stream().reduce(
            builder.setBolt("scriptBolt",
                    new JavaScriptBolt(contextSignature,
                            "/com/ikanow/aleph2/storm/samples/script/js/scripts.properties")),
            (acc, v) -> acc.shuffleGrouping(v._2()), (acc1, acc2) -> acc1 // (not possible in practice)
    );
    builder.setBolt("reducerCounter", new ReducerCounterBolt()).shuffleGrouping("scriptBolt");
    builder.setBolt("out", context.getTopologyStorageEndpoint(BaseRichBolt.class, Optional.of(bucket)))
            .localOrShuffleGrouping("reducerCounter");
    return new Tuple2<Object, Map<String, String>>(builder.createTopology(), new HashMap<String, String>());
}

From source file:com.thoughtworks.go.apiv2.environments.EnvironmentsControllerV2.java

private String calculateEtag(Collection<EnvironmentConfig> environmentConfigs) {
    final String environmentConfigSegment = environmentConfigs.stream().map(this::etagFor)
            .collect(Collectors.joining(SEP_CHAR));

    return DigestUtils.sha256Hex(environmentConfigSegment);
}