Example usage for java.util Collection stream

List of usage examples for java.util Collection stream

Introduction

On this page you can find example usages of java.util Collection stream.

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source.
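
For reference, a minimal self-contained sketch of the typical filter/map/collect pipeline over a Collection (the class name and sample data below are illustrative only, not taken from the examples that follow):

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class StreamBasicsSketch {
    public static void main(String[] args) {
        // Any Collection exposes a sequential Stream via stream()
        Collection<String> words = Arrays.asList("alpha", "beta", "gamma", "delta");

        // Filter, transform and collect without mutating the source collection
        List<String> longUpper = words.stream()
                .filter(w -> w.length() > 4)        // keep the longer words
                .map(String::toUpperCase)           // transform each element
                .collect(Collectors.toList());      // materialize the result

        System.out.println(longUpper);              // [ALPHA, GAMMA, DELTA]
    }
}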

Usage

From source file:de.tudarmstadt.ukp.dkpro.argumentation.io.annotations.SpanAnnotationMatrices.java

public static <T extends SpanTextLabel> Sparse3DObjectMatrix<String, T> createMatrix(
        final Collection<T> spanAnnotationVector, final int estimatedSpanBeginToEndMapMaxCapacity,
        final int estimatedAnnotationMapMaxCapacity) {
    final Sparse3DObjectMatrix<String, T> result = new Sparse3DObjectMatrix<>(
            new Int2ObjectOpenHashMap<>(spanAnnotationVector.size() + 1), estimatedSpanBeginToEndMapMaxCapacity,
            estimatedAnnotationMapMaxCapacity);
    putAnnotations(result, spanAnnotationVector.stream());
    return result;
}

From source file:delfos.dataset.util.DatasetPrinter.java

public static String printCompactRatingTableSortedByNumRatings(DatasetLoader<? extends Rating> datasetLoader,
        Collection<User> _users) {

    final List<User> users = _users.stream().sorted(User.BY_ID).collect(Collectors.toList());
    final List<Item> itemsAllUsersRated = datasetLoader.getContentDataset().stream().sorted(Item.BY_ID)
            .map(item -> {
                List<User> listOfUsersThatRated = datasetLoader.getRatingsDataset()
                        .getItemRatingsRated(item.getId()).values().stream().filter(rating -> {
                            return _users.contains(rating.getUser());
                        }).map(rating -> rating.getUser()).collect(Collectors.toList());
                GroupOfUsers groupOfUsersThatRated = new GroupOfUsers(listOfUsersThatRated);
                return new Pair<GroupOfUsers, Item>(groupOfUsersThatRated, item);
            }).filter(pair -> !pair.getKey().isEmpty())
            .sorted((pair1, pair2) -> -pair1.getKey().compareTo(pair2.getKey())).map(pair -> pair.getValue())
            .collect(Collectors.toList());

    return actuallyDoTheTable(itemsAllUsersRated, users, datasetLoader);
}
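
The pipeline above chains sorted, map and filter over two datasets. As a smaller, stand-alone sketch of sorting through a stream with a Comparator (the sample data is illustrative only):

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class SortedStreamSketch {
    public static void main(String[] args) {
        List<String> users = Arrays.asList("carol", "al", "bob");

        // sorted() accepts any Comparator; reversed() flips it to descending order
        List<String> byLengthDesc = users.stream()
                .sorted(Comparator.comparingInt(String::length).reversed())
                .collect(Collectors.toList());

        System.out.println(byLengthDesc); // [carol, bob, al]
    }
}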

From source file:com.thinkbiganalytics.nifi.rest.support.NifiProcessUtil.java

/**
 * Finds the first process group with the specified name.
 *
 * @param processGroups the list of process groups to filter
 * @param name          the feed system name to match, case-insensitive
 * @return the matching process group, or {@code null} if not found
 */
@Nullable
public static ProcessGroupDTO findFirstProcessGroupByName(
        @Nonnull final Collection<ProcessGroupDTO> processGroups, @Nonnull final String name) {
    return processGroups.stream().filter(processGroup -> processGroup.getName().equalsIgnoreCase(name))
            .findAny().orElse(null);
}
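
A reduced, stand-alone sketch of the same filter/findAny/orElse(null) lookup, with plain strings standing in for ProcessGroupDTO (the names below are invented):

import java.util.Arrays;
import java.util.Collection;

public class FindByNameSketch {
    public static void main(String[] args) {
        Collection<String> groupNames = Arrays.asList("reusable_templates", "ingest", "Archive");

        // findAny() returns an Optional; orElse(null) unwraps it, with null meaning "not found"
        String match = groupNames.stream()
                .filter(name -> name.equalsIgnoreCase("ARCHIVE"))
                .findAny()
                .orElse(null);

        System.out.println(match); // Archive
    }
}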

From source file:com.github.drbookings.ui.beans.StatisticsTableBean.java

public static StatisticsTableBean buildSum(final Collection<StatisticsTableBean> data) {
    final StatisticsTableBean result = new StatisticsTableBean(false);
    result.setOrigin("sum");
    result.setNumberOfPayedNights(data.stream().mapToInt(StatisticsTableBean::getNumberOfPayedNights).sum());
    result.setNumberOfPayedBookings(
            data.stream().mapToInt(StatisticsTableBean::getNumberOfPayedBookings).sum());
    result.setCleaningCount(data.stream().mapToInt(StatisticsTableBean::getCleaningCount).sum());
    result.setCleaningCosts((float) data.stream().mapToDouble(StatisticsTableBean::getCleaningCosts).sum());
    result.setCleaningFees((float) data.stream().mapToDouble(StatisticsTableBean::getCleaningFees).sum());
    result.setGrossIncome((float) data.stream().mapToDouble(StatisticsTableBean::getGrossEarnings).sum());
    result.setNetIncome((float) data.stream().mapToDouble(StatisticsTableBean::getNetIncome).sum());
    result.setServiceFees((float) data.stream().mapToDouble(StatisticsTableBean::getServiceFees).sum());
    result.setEarnings((float) data.stream().mapToDouble(StatisticsTableBean::getEarnings).sum());
    result.setEarningsPayout((float) data.stream().mapToDouble(StatisticsTableBean::getEarningsPayout).sum());
    result.setNetEarnings((float) data.stream().mapToDouble(StatisticsTableBean::getNetEarnings).sum());
    return result;
}
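
The summing calls above all follow the same shape. A minimal sketch of mapToInt/mapToDouble followed by sum(), with made-up numbers in place of the statistics beans:

import java.util.Arrays;
import java.util.Collection;

public class StreamSumSketch {
    public static void main(String[] args) {
        Collection<Double> fees = Arrays.asList(12.5, 7.25, 3.0);

        // mapToDouble() switches to a DoubleStream, which offers sum(), average(), etc.
        double totalFees = fees.stream().mapToDouble(Double::doubleValue).sum();
        System.out.println(totalFees); // 22.75

        // mapToInt() does the same for int-valued properties
        int totalNights = Arrays.asList(2, 5, 1).stream().mapToInt(Integer::intValue).sum();
        System.out.println(totalNights); // 8
    }
}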

From source file:ee.ria.xroad.proxy.util.MetaserviceTestUtil.java

/**
 * Extract the {@link BindingOperation} names from the given WSDL definition.
 *
 * @param definition the WSDL definition to parse
 * @return list of the names of the {@link BindingOperation}s in the given definition
 */
public static List<String> parseOperationNamesFromWSDLDefinition(Definition definition) {
    @SuppressWarnings("unchecked")
    Collection<Service> services = definition.getServices().values();

    // note that these return raw type collections
    return services.stream().map(service -> service.getPorts().values()).flatMap(Collection::stream)
            .map(port -> ((Port) port).getBinding().getBindingOperations()).flatMap(List::stream)
            .map(bindingOperation -> ((BindingOperation) bindingOperation).getName())
            .collect(Collectors.toList());
}
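
A self-contained sketch of the flatMap(Collection::stream) idiom used above, with nested string lists standing in for the WSDL services, ports and operations (the operation names are invented):

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class FlatMapSketch {
    public static void main(String[] args) {
        // A collection of collections, i.e. one level of nesting
        Collection<List<String>> operationsPerService = Arrays.asList(
                Arrays.asList("getRandom", "helloService"),
                Arrays.asList("listMethods"));

        // flatMap(Collection::stream) flattens the nesting into a single Stream of elements
        List<String> allOperations = operationsPerService.stream()
                .flatMap(Collection::stream)
                .collect(Collectors.toList());

        System.out.println(allOperations); // [getRandom, helloService, listMethods]
    }
}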

From source file:com.vmware.admiral.compute.container.volume.VolumeUtil.java

private static Set<String> filterByVolume(String volumeName, Collection<ContainerDescription> descs) {

    Predicate<ContainerDescription> hasVolume = cd -> {
        if (cd.volumes != null) {
            return Arrays.stream(cd.volumes).anyMatch(v -> v.startsWith(volumeName));
        }
        return false;
    };

    return descs.stream().filter(hasVolume).map(cd -> cd.name).collect(Collectors.toSet());
}

From source file:com.vmware.admiral.compute.container.volume.VolumeUtil.java

private static List<ContainerVolumeDescription> filterVolumes(ContainerDescription cd,
        Collection<ContainerVolumeDescription> volumes) {

    if (cd.volumes == null) {
        return Collections.emptyList();
    }

    Predicate<ContainerVolumeDescription> hasVolume = vd -> Arrays.stream(cd.volumes)
            .anyMatch(v -> v.startsWith(vd.name));

    return volumes.stream().filter(hasVolume).collect(Collectors.toList());
}

From source file:com.schnobosoft.semeval.cortical.Util.java

/**
 * Scale a collection of double values to a new scale as defined by min and max.
 *
 * @param values  the values to scale
 * @param measure the {@link Measure} to use for scaling (some have predefined min/max boundaries)
 * @return a list of doubles in the range between {@link #MIN_OUT} and {@link #MAX_OUT}
 */
public static List<Double> scale(Collection<Double> values, Measure measure) {
    double maxIn;
    double minIn;

    switch (measure) {
    case COSINE_SIM:
        maxIn = 1.0;
        minIn = 0.0;
        break;
    case JACCARD_DIST:
        maxIn = 0.0;
        minIn = -1.0;
        break;
    case EUCLIDIAN_DIST:
        maxIn = 0;
        minIn = values.stream().min(Double::compare).get();
        break;
    default:
        maxIn = values.stream().max(Double::compare).get();
        minIn = values.stream().min(Double::compare).get();
    }
    return values.stream().map(value -> scaleValue(MIN_OUT, MAX_OUT, maxIn, minIn, value))
            .collect(Collectors.toList());
}
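
As in the method above, min() and max() return an Optional, so calling get() is only safe when the collection is known to be non-empty. A minimal sketch with illustrative values:

import java.util.Arrays;
import java.util.Collection;

public class MinMaxSketch {
    public static void main(String[] args) {
        Collection<Double> values = Arrays.asList(-0.4, 0.9, 0.1);

        // min()/max() with Double::compare return Optional<Double>;
        // get() is safe here only because the collection is non-empty
        double minIn = values.stream().min(Double::compare).get();
        double maxIn = values.stream().max(Double::compare).get();

        System.out.println(minIn + " .. " + maxIn); // -0.4 .. 0.9
    }
}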

From source file:com.android.tools.idea.templates.RepositoryUrlManager.java

private static String findExistingExplicitVersion(@NotNull Collection<GradleCoordinate> dependencies) {
    Optional<GradleCoordinate> highest = dependencies.stream()
            .filter(coordinate -> ImportModule.SUPPORT_GROUP_ID.equals(coordinate.getGroupId()))
            .max(COMPARE_PLUS_LOWER);
    if (!highest.isPresent()) {
        return null;
    }
    String version = highest.get().getRevision();
    if (version.endsWith("+")) {
        return version.length() > 1 ? version.substring(0, version.length() - 1) : null;
    }
    return version;
}

From source file:com.ikanow.aleph2.example.flume_harvester.utils.FlumeUtils.java

/** Auto-generates the Flume config from an input block.
 *  In test mode it also deletes the trackerDir (so this can be used for purging).
 * @param bucket the bucket whose spool and tracking directories are being configured
 * @param bucket_config the Flume bucket configuration to clone
 * @param test_mode whether to apply the test-mode spool/tracker directory settings
 * @return the cloned config with the generated Flume settings, or the original config if there are no spool directories
 */
public static FlumeBucketConfigBean createAutoFlumeConfig(final DataBucketBean bucket,
        final FlumeBucketConfigBean bucket_config, final boolean test_mode) {
    //TODO (ALEPH-10): eventually add support for additional shortcuts here
    //TODO (ALEPH-10): security

    final Collection<SpoolDirConfig> dirs = getSpoolDirs(bucket_config);
    final AtomicInteger counter = new AtomicInteger(0);

    if (!dirs.isEmpty()) {
        final ImmutableMap<String, String> new_flume_builder = dirs.stream()
                .reduce(ImmutableMap.<String, String>builder()
                        // defaults
                        .put("channels", "mem").put("channels:mem:capacity", "1000")
                        .put("channels:mem:transactionCapacity", "100").put("channels:mem:type", "memory"),
                        (acc, v) -> {
                            final int count = counter.incrementAndGet();

                            // (some tidy up that occurs in test mode)
                            return Optional.<ImmutableMap.Builder<String, String>>of(acc
                                    .put("sources:file_in_" + count + ":type", "spooldir")
                                    .put("sources:file_in_" + count + ":channels", "mem")
                                    .put("sources:file_in_" + count + ":trackerDir",
                                            getTrackingDirSuffix(bucket))
                                    .put("sources:file_in_" + count + ":deletePolicy",
                                            (v.delete_on_ingest() ? "immediate" : "never"))
                                    .put("sources:file_in_" + count + ":spoolDir",
                                            test_mode ? v.path() + "/" + getTestDirSuffix(bucket) : v.path())
                                    .put("sources:file_in_" + count + ":ignorePattern",
                                            Optional.ofNullable(v.ignore_pattern()).orElse("^$")))
                                    // Some optional fields
                                    .map(acc2 -> {
                                        return Optional.ofNullable(v.append_basename_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":basenameHeader", "true")
                                                .put("sources:file_in_" + count + ":basenameHeaderKey", field))
                                                .orElse(acc);
                                    }).map(acc2 -> {
                                        return Optional.ofNullable(v.append_path_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":fileHeader", "true")
                                                .put("sources:file_in_" + count + ":fileHeaderKey", field))
                                                .orElse(acc);
                                    }).get();
                        }, (acc1, acc2) -> acc1 // (can't happen in practice)   
                ).put("sources", StreamUtils.zipWithIndex(dirs.stream())
                        .map(i -> ("file_in_" + (1 + i.getIndex()))).collect(Collectors.joining(" ")))
                .build();

        // Clone the config with the new flume config
        return BeanTemplateUtils.clone(bucket_config)
                .with(FlumeBucketConfigBean::flume_config, new_flume_builder).done();
    } else { // Leave unchanged
        return bucket_config;
    }
}
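
The example above folds the spool directories into a single builder with the three-argument reduce(identity, accumulator, combiner), where the combiner is only invoked for parallel streams. A simplified, stand-alone sketch of that pattern, using plain strings instead of the Guava builder and the FlumeBucketConfigBean API (the config keys and paths are invented):

import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.atomic.AtomicInteger;

public class ReduceConfigSketch {
    public static void main(String[] args) {
        Collection<String> spoolDirs = Arrays.asList("/data/in_a", "/data/in_b");
        AtomicInteger counter = new AtomicInteger(0);

        // reduce(identity, accumulator, combiner): the external counter only behaves
        // predictably because the stream is sequential, as in the example above
        String config = spoolDirs.stream().reduce(
                "channels=mem",
                (acc, dir) -> acc + "\nsources:file_in_" + counter.incrementAndGet() + ":spoolDir=" + dir,
                (acc1, acc2) -> acc1 + acc2);

        System.out.println(config);
        // channels=mem
        // sources:file_in_1:spoolDir=/data/in_a
        // sources:file_in_2:spoolDir=/data/in_b
    }
}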