Example usage for java.util Collection stream

Introduction

On this page you can find example usage of java.util.Collection.stream().

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source.
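
To make the prototype concrete, here is a minimal, self-contained sketch (names invented for illustration): calling stream() on a collection, applying an intermediate operation, and finishing with a terminal collect.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class CollectionStreamDemo {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("alpha", "beta", "gamma");

        // stream() returns a sequential Stream backed by the collection
        List<Integer> lengths = words.stream()
                .map(String::length)            // intermediate operation
                .collect(Collectors.toList());  // terminal operation

        System.out.println(lengths); // [5, 4, 5]
    }
}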

Usage

From source file:com.bekioui.jaxrs.server.core.NettyServer.java

private void resources(ResteasyDeployment deployment) {
    Collection<Object> resources = applicationContext.getBeansWithAnnotation(Path.class).values();

    if (swaggerEnabled) {
        List<? extends ResourceDescriptor> resourceDescriptors = swaggerResourceDescriptors != null
                ? swaggerResourceDescriptors
                : deploymentResourceDescriptors;
        Set<Class<?>> classes = resources.stream().filter(resourceFilter.apply(resourceDescriptors))
                .map(Object::getClass).collect(Collectors.toSet());
        SwaggerResource swaggerResource = new SwaggerResource(swaggerSupplier.get(), classes);
        deployment.getResources().add(swaggerResource);
        deployment.getProviders().add(new SwaggerSerializers());
    }

    List<Object> deploymentResources = resources.stream()
            .filter(resourceFilter.apply(deploymentResourceDescriptors)).collect(Collectors.toList());
    deployment.getResources().addAll(deploymentResources);
}
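
The method above filters Spring-managed beans and collects their classes into a set. The same filter/map/collect(toSet()) shape, distilled into a standalone sketch with made-up data:

import java.util.Arrays;
import java.util.Collection;
import java.util.Set;
import java.util.stream.Collectors;

public class FilterMapToSetDemo {
    public static void main(String[] args) {
        Collection<Object> resources = Arrays.asList("a", 1, "bb", 2L);

        // keep only the String elements, then collect their runtime classes
        Set<Class<?>> classes = resources.stream()
                .filter(r -> r instanceof String)
                .map(Object::getClass)
                .collect(Collectors.toSet());

        System.out.println(classes); // [class java.lang.String]
    }
}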

From source file:com.ikanow.aleph2.analytics.spark.utils.SparkTechnologyUtils.java

/** Creates a command line call to launch Spark
 * @param job_name
 * @param spark_home
 * @param yarn_home
 * @param spark_master
 * @param maybe_main_clazz
 * @param context_signature
 * @param test_signature
 * @param main_jar_or_py
 * @param other_jars
 * @param other_files
 * @param other_lang_files
 * @param external_jars
 * @param external_files
 * @param external_lang_files
 * @param spark_generic_options
 * @param spark_job_options
 * @param spark_system_options
 * @return a ProcessBuilder for the assembled submit command
 */
public static ProcessBuilder createSparkJob(final String job_name, final String spark_home,
        final String yarn_home, final String spark_master, final Optional<String> maybe_main_clazz,
        final String context_signature, final Optional<String> test_signature, final String main_jar_or_py,
        final Collection<String> other_jars, final Collection<String> other_files,
        final Collection<String> other_lang_files, final List<String> external_jars,
        final List<String> external_files, final List<String> external_lang_files,
        final Optional<Map<String, Object>> spark_generic_options, final Map<String, String> spark_job_options,
        final Map<String, String> spark_system_options) {
    //https://spark.apache.org/docs/1.2.0/submitting-applications.html

    final List<String> command_line = ImmutableList.<String>builder().add(SBT_SUBMIT_BINARY).add("--name")
            .add(job_name)
            .addAll(maybe_main_clazz.map(main_clazz -> Arrays.asList("--class", main_clazz))
                    .orElse(Collections.emptyList()))
            .add("--master").add(spark_master).add("--jars")
            .add(Stream.concat(other_jars.stream(), external_jars.stream()).collect(Collectors.joining(",")))
            .addAll(Optional
                    .of(Stream.concat(other_files.stream(), external_files.stream())
                            .collect(Collectors.joining(",")))
                    .filter(s -> !s.isEmpty()).map(s -> Arrays.asList("--files", s))
                    .orElse(Collections.emptyList()))
            //TODO (ALEPH-63): handle R in the example below
            .addAll(Optional
                    .of(Stream.concat(other_lang_files.stream(), external_lang_files.stream())
                            .collect(Collectors.joining(",")))
                    .filter(s -> !s.isEmpty()).map(s -> Arrays.asList("--py-files", s))
                    .orElse(Collections.emptyList()))
            .addAll(Optional.ofNullable(System.getProperty("hdp.version")).map(hdp_version -> { // Set HDP version from whatever I'm set to
                return (List<String>) ImmutableList.<String>of("--conf",
                        "spark.executor.extraJavaOptions=-Dhdp.version=" + hdp_version, "--conf",
                        "spark.driver.extraJavaOptions=-Dhdp.version=" + hdp_version, "--conf",
                        "spark.yarn.am.extraJavaOption=-Dhdp.version=" + hdp_version);
            }).orElse(Collections.emptyList()))
            .addAll(spark_job_options.isEmpty() ? Collections.emptyList()
                    : spark_job_options.entrySet().stream()
                            .flatMap(kv -> Stream.of("--conf", kv.getKey() + "=" + kv.getValue()))
                            .collect(Collectors.toList()))
            .addAll(spark_system_options.entrySet().stream()
                    .flatMap(kv -> Stream.of(kv.getKey(), kv.getValue())).collect(Collectors.toList()))
            .addAll(spark_generic_options.map(opts -> Arrays.asList("--conf",
                    SparkTopologyConfigBean.JOB_CONFIG_KEY + "="
                            + BeanTemplateUtils.configureMapper(Optional.empty()).convertValue(opts,
                                    JsonNode.class)))
                    .orElse(Collections.emptyList()))
            .add(main_jar_or_py).add(context_signature)
            .addAll(test_signature.map(ts -> Arrays.asList(ts)).orElse(Collections.emptyList())).build();

    final ProcessBuilder pb = new ProcessBuilder();

    final Map<String, String> mutable_env = pb.environment();
    mutable_env.put("HADOOP_CONF_DIR", yarn_home);

    return pb.directory(new File(spark_home)).command(command_line);
}
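
The core trick in the argument assembly is Stream.concat plus Collectors.joining(","), which merges two collections into the single comma-separated value that flags like --jars expect. A minimal sketch of just that step, with hypothetical jar names:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class JoinJarsDemo {
    public static void main(String[] args) {
        Collection<String> other_jars = Arrays.asList("core.jar", "utils.jar");
        List<String> external_jars = Arrays.asList("spark-extra.jar");

        // concatenate both sources and join into a single argument value
        String jars = Stream.concat(other_jars.stream(), external_jars.stream())
                .collect(Collectors.joining(","));

        System.out.println(jars); // core.jar,utils.jar,spark-extra.jar
    }
}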

From source file:delfos.dataset.generated.modifieddatasets.pseudouser.PseudoUserRatingsDataset.java

@Override
public Map<Integer, RatingType> getItemRatingsRated(Integer idItem) throws ItemNotFound {

    Map<Integer, RatingType> itemRatingsRated = originalDatasetLoader.getRatingsDataset()
            .getItemRatingsRated(idItem);

    Collection<RatingType> ratedByPseudoUsers = pseudoUsersRatings.values().parallelStream()
            .flatMap(pseudoUserRatings -> pseudoUserRatings.values().stream())
            .filter(rating -> rating.getIdItem().equals(idItem)) // equals(), not ==: boxed Integers compare by reference
            .collect(Collectors.toList());

    Map<Integer, RatingType> itemsRatingsRated_byPseudoUsers = ratedByPseudoUsers.stream()
            .collect(Collectors.toMap(rating -> rating.getIdItem(), rating -> rating));

    Map<Integer, RatingType> ret = new TreeMap<>();

    ret.putAll(itemRatingsRated);
    ret.putAll(itemsRatingsRated_byPseudoUsers); // later put wins, so pseudo-user ratings take precedence on duplicate keys

    return ret;
}
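
One caveat worth noting: Collectors.toMap without a merge function throws IllegalStateException on duplicate keys, so the collect above assumes at most one matching pseudo-user rating per item. A small sketch of the same flatMap-then-toMap shape with an explicit merge function, over invented data:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class FlatMapToMapDemo {
    public static void main(String[] args) {
        List<List<String>> nested = Arrays.asList(
                Arrays.asList("apple", "avocado"),
                Arrays.asList("banana", "apricot"));

        // flatten the nested lists, then index by first letter;
        // the merge function keeps the later value on key collisions
        Map<Character, String> byInitial = nested.stream()
                .flatMap(List::stream)
                .collect(Collectors.toMap(
                        s -> s.charAt(0),
                        s -> s,
                        (first, second) -> second));

        System.out.println(byInitial); // {a=apricot, b=banana} (iteration order may vary)
    }
}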

From source file:com.offbynull.voip.kademlia.model.NodeChangeSet.java

NodeChangeSet(Collection<Node> added, Collection<Node> removed, Collection<Node> updated) {
    Validate.notNull(removed);
    Validate.notNull(added);
    Validate.notNull(updated);
    Validate.noNullElements(removed);
    Validate.noNullElements(added);
    Validate.noNullElements(updated);

    // ensure that there aren't any duplicate ids
    Set<Id> tempSet = new HashSet<>();
    removed.stream().map(Node::getId).forEach(tempSet::add);
    added.stream().map(Node::getId).forEach(tempSet::add);
    updated.stream().map(Node::getId).forEach(tempSet::add);
    Validate.isTrue(tempSet.size() == added.size() + removed.size() + updated.size());

    this.removed = (UnmodifiableList<Node>) UnmodifiableList.unmodifiableList(new ArrayList<>(removed));
    this.added = (UnmodifiableList<Node>) UnmodifiableList.unmodifiableList(new ArrayList<>(added));
    this.updated = (UnmodifiableList<Node>) UnmodifiableList.unmodifiableList(new ArrayList<>(updated));
}
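
The size comparison at the end is a compact disjointness check: if any id appears in more than one of the three inputs, the set absorbs the duplicate and comes up smaller than the combined input size. The same idea distilled, with plain string ids standing in for Id:

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class DisjointCheckDemo {
    public static void main(String[] args) {
        List<String> added = Arrays.asList("id-1", "id-2");
        List<String> removed = Arrays.asList("id-3");
        List<String> updated = Arrays.asList("id-2"); // duplicates an added id

        Set<String> ids = Stream.of(added, removed, updated)
                .flatMap(List::stream)
                .collect(Collectors.toSet());

        // the duplicate collapses in the set, so the sizes no longer match
        boolean disjoint = ids.size() == added.size() + removed.size() + updated.size();
        System.out.println(disjoint); // false
    }
}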

From source file:ai.grakn.client.LoaderClient.java

/**
 * Transform queries into Json configuration needed by the Loader task
 * @param queries queries to include in configuration
 * @param batchNumber number of the current batch being sent
 * @return configuration for the loader task
 */
private String getConfiguration(Collection<InsertQuery> queries, int batchNumber) {
    return Json.object().set(KEYSPACE_PARAM, keyspace).set("batchNumber", batchNumber)
            .set(TASK_LOADER_INSERTS, queries.stream().map(InsertQuery::toString).collect(toList())).toString();
}
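
The stream here simply renders each query to its string form; the same map-and-collect shape works for any element with a useful toString, as in this sketch with invented data:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

public class MapToStringDemo {
    public static void main(String[] args) {
        Collection<Integer> batchNumbers = Arrays.asList(1, 2, 3);

        // render each element to its string form, as the loader does for queries
        List<String> rendered = batchNumbers.stream()
                .map(Object::toString)
                .collect(Collectors.toList());

        System.out.println(rendered); // [1, 2, 3]
    }
}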

From source file:com.spankingrpgs.scarletmoon.loader.CharacterLoader.java

@Override
public void load(Collection<String> data, GameState state) {
    data.stream().forEach(datum -> loadCharacter(datum, state));
}
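
Since no intermediate operations are involved, Collection.forEach would do the same work without creating a stream; the stream() form only pays off once filter or map steps are added. Both variants for comparison (file names invented):

import java.util.Arrays;
import java.util.List;

public class ForEachDemo {
    public static void main(String[] args) {
        List<String> data = Arrays.asList("hero.json", "villain.json");

        // via a stream, as in the example above
        data.stream().forEach(System.out::println);

        // equivalent without the intermediate stream
        data.forEach(System.out::println);
    }
}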

From source file:com.hortonworks.streamline.streams.service.NamespaceCatalogResource.java

private void assertNoTopologyRefersNamespace(Long namespaceId) {
    Collection<Topology> topologies = catalogService.listTopologies();
    boolean anyTopologyUseNamespace = topologies.stream()
            .anyMatch(t -> Objects.equals(t.getNamespaceId(), namespaceId));

    if (anyTopologyUseNamespace) {
        throw BadRequestException
                .message("A topology still refers to the namespace being removed - namespace id: " + namespaceId);
    }
}
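
anyMatch short-circuits, stopping as soon as one element satisfies the predicate, so the whole topology list is not necessarily traversed. The same existence check distilled over plain ids (values invented):

import java.util.Arrays;
import java.util.Collection;
import java.util.Objects;

public class AnyMatchDemo {
    public static void main(String[] args) {
        Collection<Long> referencedNamespaceIds = Arrays.asList(1L, 2L, 3L);
        Long namespaceId = 2L;

        // stops at the first matching element
        boolean inUse = referencedNamespaceIds.stream()
                .anyMatch(id -> Objects.equals(id, namespaceId));

        System.out.println(inUse); // true
    }
}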

From source file:com.offbynull.voip.kademlia.model.ActivityChangeSet.java

ActivityChangeSet(Collection<Activity> added, Collection<Activity> removed, Collection<Activity> updated) {
    Validate.notNull(removed);
    Validate.notNull(added);
    Validate.notNull(updated);
    Validate.noNullElements(removed);
    Validate.noNullElements(added);
    Validate.noNullElements(updated);

    // ensure that there aren't any duplicate ids
    Set<Id> tempSet = new HashSet<>();
    removed.stream().map(x -> x.getNode().getId()).forEach(tempSet::add);
    added.stream().map(x -> x.getNode().getId()).forEach(tempSet::add);
    updated.stream().map(x -> x.getNode().getId()).forEach(tempSet::add);
    Validate.isTrue(tempSet.size() == added.size() + removed.size() + updated.size());

    this.removed = (UnmodifiableList<Activity>) UnmodifiableList.unmodifiableList(new ArrayList<>(removed));
    this.added = (UnmodifiableList<Activity>) UnmodifiableList.unmodifiableList(new ArrayList<>(added));
    this.updated = (UnmodifiableList<Activity>) UnmodifiableList.unmodifiableList(new ArrayList<>(updated));
}