Example usage for java.util.stream Collectors mapping

Introduction

On this page you can find example usages of java.util.stream.Collectors.mapping.

Prototype

public static <T, U, A, R> Collector<T, ?, R> mapping(Function<? super T, ? extends U> mapper,
        Collector<? super U, A, R> downstream) 

Document

Adapts a Collector accepting elements of type U to one accepting elements of type T by applying a mapping function to each input element before accumulation.
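
Before the real-world usages below, here is a minimal self-contained sketch (the word list and class name are illustrative, not taken from any of the examples). mapping is typically used as the downstream collector of groupingBy, so that each group holds a projection of the elements rather than the elements themselves.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class MappingExample {
    public static void main(String[] args) {
        // Group words by length, but collect only each word's first character
        // instead of the word itself.
        List<String> words = Arrays.asList("apple", "banana", "avocado", "cherry");
        Map<Integer, List<Character>> firstCharsByLength = words.stream()
                .collect(Collectors.groupingBy(String::length,
                        Collectors.mapping(word -> word.charAt(0), Collectors.toList())));

        System.out.println(firstCharsByLength); // {5=[a], 6=[b, c], 7=[a]}
    }
}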

Usage

From source file:com.firewallid.util.FIUtils.java
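
In this helper, mapping is the downstream of groupingBy: tuples sharing a key are projected to their second element and collected into a list, which is then summed per key.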

public static <U, V extends Number> List<Tuple2<U, Double>> sumList(List<Tuple2<U, V>> l1,
        List<Tuple2<U, V>> l2) {
    l1.addAll(l2); // note: this mutates the caller's l1 in place
    List<Tuple2<U, Double>> sum = l1.parallelStream()
            .collect(Collectors
                    .groupingBy(data -> data._1(), Collectors.mapping(data -> data._2(), Collectors.toList())))
            .entrySet().parallelStream()
            .map(data -> new Tuple2<>(data.getKey(),
                    data.getValue().parallelStream().mapToDouble(value -> value.doubleValue()).sum()))
            .collect(Collectors.toList());

    return sum;
}

From source file:com.create.validation.PropertyValidationErrorsProvider.java
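
Groups validation errors by their nested path, with mapping extracting just the message from each PropertyValidationError.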

private Map<String, List<String>> getPropertyValidationErrorsGroupedByNestedPath(
        List<ObjectError> objectErrors) {
    return objectErrors.stream().map(this::toPropertyValidationError)
            .collect(Collectors.groupingBy(PropertyValidationError::getNestedPath,
                    Collectors.mapping(PropertyValidationError::getMessage, Collectors.toList())));
}

From source file:be.ordina.msdashboard.graph.GraphRetriever.java
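
Here mapping is used on its own rather than inside groupingBy: it projects each NodeAggregator to an Observable<Node> while collecting the results into a list.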

@Cacheable(value = Constants.GRAPH_CACHE_NAME, keyGenerator = "simpleKeyGenerator")
public Map<String, Object> retrieve() {
    List<Observable<Node>> observables = aggregators.stream()
            .collect(Collectors.mapping(NodeAggregator::aggregateNodes, Collectors.toList()));
    observables.add(redisService.getAllNodesAsObservable());

    Map<String, Object> graph = new HashMap<>();
    graph.put(DIRECTED, true);
    graph.put(MULTIGRAPH, false);
    graph.put(GRAPH, new String[0]);
    graph.put(LANES, constructLanes());
    graph.put(TYPES, constructTypes());

    Map<String, Object> nodesAndLinks = Observable.mergeDelayError(observables)
            .doOnError(throwable -> logger.error("An error occurred during merging aggregators:", throwable))
            .onErrorResumeNext(Observable.empty()).observeOn(Schedulers.io())
            .doOnNext(node -> logger.info("Merging node with id '{}'", node.getId()))
            .reduce(new ArrayList<>(), NodeMerger.merge())
            .doOnError(throwable -> logger.error("An error occurred during reducing:", throwable))
            .onErrorResumeNext(Observable.empty())
            .doOnNext(nodes -> logger.info("Merged all emitted nodes, converting to map"))
            .map(GraphMapper.toGraph())
            .doOnNext(nodesAndLinksMap -> logger.info("Converted to nodes and links map"))
            .doOnError(throwable -> logger.error("An error occurred during mapping:", throwable))
            .onErrorResumeNext(Observable.empty()).toBlocking().first();
    logger.info("Graph retrieved: {}", nodesAndLinks);
    graph.put(NODES, nodesAndLinks.get(NODES));
    graph.put(LINKS, nodesAndLinks.get(LINKS));

    return graph;
}

From source file:pl.edu.icm.comac.vis.server.service.AtomicGraphServiceImpl.java
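
mapping feeds a toSet() downstream here, building an adjacency map of linked node ids from each relation's subject/object pair in both directions.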

@Override
public Graph constructGraphs(String[] ids) throws OpenRDFException {
    List<NodeCacheEntry> favCacheNodes = fetchNodes(ids);
    //build link map
    Map<String, Set<String>> links = favCacheNodes.parallelStream().filter(x -> !x.isOverflow())
            .map(x -> x.getRelations()).flatMap(x -> x.stream())
            .flatMap(x -> Arrays.stream(
                    new String[][] { { x.getSubject(), x.getObject() }, { x.getObject(), x.getSubject() } }))
            .collect(Collectors.groupingBy(x -> x[0], Collectors.mapping(x -> x[1], Collectors.toSet())));
    Set<String> large = favCacheNodes.stream().filter(x -> x.isOverflow()).map(x -> x.getId())
            .collect(Collectors.toSet());
    Set<String> normal = favCacheNodes.stream().filter(x -> !x.isOverflow()).map(x -> x.getId())
            .collect(Collectors.toSet());
    Set<String> unfav = graphToolkit.calculateAdditions(normal, large, links, MAX_RETURNED_RELATIONS);
    //now fetch the unfavs:
    List<NodeCacheEntry> unfavCacheNodes = fetchNodes(unfav.toArray(new String[unfav.size()]));
    List<NodeCacheEntry> allNodes = new ArrayList<NodeCacheEntry>();
    allNodes.addAll(favCacheNodes);
    allNodes.addAll(unfavCacheNodes);
    List<NodeCacheEntry> largeNodes = allNodes.stream().filter(x -> x.isOverflow())
            .collect(Collectors.toList());
    List<RelationCacheEntry> largeRelations = calculateRelations(largeNodes);
    //now build the graph:

    List<Node> nodes = new ArrayList<>();

    List<Node> fnodes = favCacheNodes.stream().map(cached -> {
        Node res = new Node(cached.getId(), cached.getType(), cached.getName(), 1.0);
        res.setFavourite(true);
        return res;
    }).collect(Collectors.toList());
    nodes.addAll(fnodes);
    List<Node> ufnodes = unfavCacheNodes.stream().map(cached -> {
        Node res = new Node(cached.getId(), cached.getType(), cached.getName(), 1.0);
        res.setFavourite(false);
        return res;
    }).collect(Collectors.toList());
    nodes.addAll(ufnodes);
    Set<String> nodeIdSet = nodes.stream().map(x -> x.getId()).collect(Collectors.toSet());

    Set<Link> graphRelations = allNodes.parallelStream().filter(x -> !x.isOverflow())
            .flatMap(x -> x.getRelations().stream())
            .filter(x -> nodeIdSet.contains(x.subject) && nodeIdSet.contains(x.object))
            .map(x -> new Link(x.getPredicate(), x.getSubject(), x.getObject())).collect(Collectors.toSet());
    Graph res = new Graph();

    res.setNodes(nodes);
    res.setLinks(new ArrayList<Link>(graphRelations));
    return res;
}

From source file:com.firewallid.util.FIUtils.java
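
The same group-and-sum pattern as sumList above, applied to the index/value pairs of two sparse vectors.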

public static Vector sumVector(Vector v1, Vector v2) {
    if (v1.size() != v2.size()) {
        return null;
    }

    List<Tuple2<Integer, Double>> v1List = vectorToList(v1);
    List<Tuple2<Integer, Double>> v2List = vectorToList(v2);

    List<Tuple2<Integer, Double>> sumList = combineList(v1List, v2List);

    /* Sum value of same key-pair */
    List<Tuple2<Integer, Double>> collect = sumList.parallelStream()
            .collect(Collectors.groupingBy(t -> t._1,
                    Collectors.mapping((Tuple2<Integer, Double> t) -> t._2, Collectors.toList())))
            .entrySet().parallelStream()
            .map((Map.Entry<Integer, List<Double>> t) -> new Tuple2<Integer, Double>(t.getKey(),
                    t.getValue().parallelStream().mapToDouble(Double::doubleValue).sum()))
            .collect(Collectors.toList());

    return Vectors.sparse(v1.size(), collect);
}

From source file:de.whs.poodle.repositories.EvaluationWorksheetRepository.java
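
Groups evaluation statistics by question, with mapping reducing each statistic to its text.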

public Map<EvaluationQuestion, List<String>> getQuestionToTextsMapForEvaluation(int evaluationWorksheetId) {
    List<EvaluationStatistic> stats = em
            .createQuery("FROM EvaluationStatistic WHERE question.section.worksheet.id = :worksheetId "
                    + "AND text IS NOT NULL", EvaluationStatistic.class)
            .setParameter("worksheetId", evaluationWorksheetId).getResultList();

    return stats.stream().collect(
            // group the list...
            Collectors.groupingBy(
                    // by question
                    EvaluationStatistic::getQuestion,
                    // and map each question to the list of texts
                    Collectors.mapping(EvaluationStatistic::getText, Collectors.toList())));
}

From source file:com.firewalld.sentimentanalysis.IDSentiWordNet.java
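
mapping collects the (rank, score) tuples of every synset a term occurs in; a rank-weighted average score is then computed per word.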

private Map<String, Double> createDictionary() throws IOException {
    Map<String, Double> dict = IOUtils
            .readLines(getClass().getClassLoader().getResourceAsStream(firewallConf.get(SWN_FILE)))
            .parallelStream()
            /* If it's a comment, skip this line */
            .filter(line -> !line.trim().startsWith("#")).flatMap(line -> {
                String[] data = line.split("\t");
                String wordTypeMarker = data[0];

                // Example line:
                // POS ID PosS NegS SynsetTerm#sensenumber Desc
                // a 00009618 0.5 0.25 spartan#4 austere#3 ascetical#2 ascetic#2 practicing great self-denial;...etc
                // Is it a valid line? Otherwise, throw an exception.
                if (data.length != 6) {
                    throw new IllegalArgumentException(
                            String.format("Incorrect tabulation format in file, line: %s", line));
                }

                // Calculate synset score as score = PosS - NegS
                Double synsetScore = Double.parseDouble(data[2]) - Double.parseDouble(data[3]);

                // Get all Synset terms
                String[] synTermsSplit = data[4].split(" ");

                // Go through all terms of current synset.
                Stream<Tuple2<String, Tuple2<Double, Double>>> synSets = Arrays.asList(synTermsSplit)
                        .parallelStream().map(synTermSplit -> {
                            // Get synterm and synterm rank
                            String[] synTermAndRank = synTermSplit.split("#");
                            String synTerm = synTermAndRank[0] + "#" + wordTypeMarker;

                            double synTermRank = Double.parseDouble(synTermAndRank[1]);
                            // What we get here is a (term, (rank, score))
                            return new Tuple2<>(synTerm, new Tuple2<>(synTermRank, synsetScore));
                        });

                return synSets;
            })
            // What we get here is a map of the type:
            // term -> {score of synset#1, score of synset#2...}
            .collect(Collectors.groupingBy(synSet -> synSet._1,
                    Collectors.mapping(synSet -> synSet._2, Collectors.toList())))
            .entrySet().parallelStream().map(synSet -> {
                String word = synSet.getKey();
                List<Tuple2<Double, Double>> synSetScoreList = synSet.getValue();

                // Calculate weighted average. Weigh the synsets according to
                // their rank.
                // Score= 1/2*first + 1/3*second + 1/4*third ..... etc.
                // Sum = 1/1 + 1/2 + 1/3 ...
                Tuple2<Double, Double> scoreSum = synSetScoreList.parallelStream()
                        .reduce(new Tuple2<>(0.0, 0.0), (s1, s2) -> new Tuple2<>(
                                ((s1._1 == 0.0) ? 0.0 : s1._2 / s1._1) + ((s2._1 == 0.0) ? 0.0 : s2._2 / s2._1),
                                ((s1._1 == 0.0) ? 0.0 : 1 / s1._1) + ((s2._1 == 0.0) ? 0.0 : 1 / s2._1)));

                double score = scoreSum._1 / scoreSum._2;

                return new Tuple2<>(word, score);
            }).collect(Collectors.toMap(synSet -> synSet._1, synSet -> synSet._2));

    return dict;
}

From source file:io.mandrel.metrics.impl.MongoMetricsRepository.java
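
Accumulator entries are split into (group, (key, value)) pairs; mapping gathers the inner pairs per group before the bulk update requests are built.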

@Override
public void sync(Map<String, Long> accumulators) {

    LocalDateTime now = LocalDateTime.now();
    LocalDateTime keytime = now.withMinute(0).withSecond(0).withNano(0);

    // {global.XXX=0, global.YYY=0, ...} to {global={XXX=0, YYY=0}, ...}
    Stream<Pair<String, Pair<String, Long>>> map = accumulators.entrySet().stream().map(e -> {
        Iterable<String> results = splitter.split(e.getKey());
        List<String> elts = Lists.newArrayList(results);
        return Pair.of(elts.get(0), Pair.of(elts.get(1), e.getValue()));
    });
    Map<String, List<Pair<String, Long>>> byKey = map.collect(Collectors.groupingBy(e -> e.getLeft(),
            Collectors.mapping(e -> e.getRight(), Collectors.toList())));

    List<? extends WriteModel<Document>> requests = byKey.entrySet().stream().map(e -> {
        Document updates = new Document();

        e.getValue().stream().forEach(i -> {
            Iterable<String> results = splitter.split(i.getKey());
            List<String> elts = Lists.newArrayList(results);
            if (elts.size() > 1) {
                updates.put(elts.get(0) + "." + JsonBsonCodec.toBson(elts.get(1)), i.getValue());
            } else {
                updates.put(i.getKey(), i.getValue());
            }
        });

        return new UpdateOneModel<Document>(Filters.eq("_id", e.getKey()), new Document("$inc", updates),
                new UpdateOptions().upsert(true));
    }).collect(Collectors.toList());

    counters.bulkWrite(requests);

    requests = byKey.entrySet().stream().map(e -> {
        List<UpdateOneModel<Document>> tsUpdates = Lists.newArrayList();

        e.getValue().stream().forEach(i -> {
            Iterable<String> results = splitter.split(i.getKey());
            List<String> elts = Lists.newArrayList(results);

            if (elts.size() == 1 && e.getKey().equalsIgnoreCase(MetricKeys.global())) {
                tsUpdates.add(new UpdateOneModel<Document>(
                        Filters.and(Filters.eq("type", e.getKey() + MetricKeys.METRIC_DELIM + i.getKey()),
                                Filters.eq("timestamp_hour", keytime)),
                        new Document("$inc",
                                new Document("values." + Integer.toString(now.getMinute()), i.getValue())),
                        new UpdateOptions().upsert(true)));
            }
        });

        return tsUpdates;
    }).flatMap(list -> list.stream()).collect(Collectors.toList());

    timeseries.bulkWrite(requests);

}

From source file:com.firewallid.termcloud.TermCloud.java
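
Inside the reduction step, mapping gathers the values of identical features so they can be summed into a single feature-value list.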

public void saveTermCloudAll(JavaPairRDD<String, List<Tuple2<String, Double>>> doc, String fileNamePrefix)
        throws IOException {
    List<Tuple2<String, List<Tuple2<String, Double>>>> collectDoc = doc.collect();

    if (collectDoc.isEmpty()) {
        return;
    }

    /* Reduced feature-value list */
    List<Tuple2<String, Double>> featureValueList = collectDoc.parallelStream()
            .map(titleFeatures -> titleFeatures._2).reduce((featureValueList1, featureValueList2) -> {
                List<Tuple2<String, Double>> combineList = FIUtils.combineList(featureValueList1,
                        featureValueList2);

                List<Tuple2<String, Double>> collect = combineList.parallelStream()
                        .collect(Collectors
                                .groupingBy(t -> t._1, Collectors.mapping(t -> t._2, Collectors.toList())))
                        .entrySet().parallelStream()
                        .map(t -> new Tuple2<String, Double>(t.getKey(),
                                t.getValue().parallelStream().mapToDouble(Double::doubleValue).sum()))
                        .collect(Collectors.toList());

                return collect;
            }).get();

    /* Sorting */
    List<Tuple2<String, Double>> featureValueListSorted = FIUtils.sortDescTupleListByValue(featureValueList);

    /* Top N */
    List<Tuple2<String, Double>> featureValueListTopN;
    if (featureValueListSorted.size() <= conf.getInt(TOPN, 100)) {
        featureValueListTopN = new ArrayList<>(featureValueListSorted);
    } else {
        featureValueListTopN = new ArrayList<>(featureValueListSorted.subList(0, conf.getInt(TOPN, 100)));
    }

    /* Text for file. One line, one feature-value pair */
    String featureValueText = featureValueListTopN.parallelStream()
            .map(feature -> feature._1 + StringEscapeUtils.unescapeJava(conf.get(LINE_DELIMITER)) + feature._2)
            .collect(Collectors.joining(System.lineSeparator()));

    /* Save to file */
    FIFile.writeStringToHDFSFile(FIFile.generateFullPath(conf.get(TERMCLOUD_FOLDER),
            createFileNameTermCloud(fileNamePrefix, conf.get(ALLNAME))), featureValueText);
}

From source file:io.tourniquet.junit.http.rules.ResponseStubbing.java
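
Query parameters are grouped by name, with mapping collecting the values of repeated parameters into a list.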

/**
 * Extracts the query parameters from the path. If no path is set or no query parameters are present, the map is
 * empty.
 *
 * @return a map of key-value pairs resembling the query parameters
 */
private Map<String, List<String>> getQueryParams() {

    return this.path.map(p -> {
        int idx = p.indexOf('?');
        if (idx != -1) {
            return (Map<String, List<String>>) Arrays.stream(p.substring(idx + 1).split("&"))
                    .map(kv -> kv.split("="))
                    .collect(groupingBy(s -> s[0], Collectors.mapping(s -> s[1], Collectors.toList())));
        }
        return null;
    }).orElse(Collections.emptyMap());
}