List of usage examples for java.util.stream.Collectors.groupingBy
public static <T, K, A, D> Collector<T, ?, Map<K, D>> groupingBy(Function<? super T, ? extends K> classifier, Collector<? super T, A, D> downstream)
From source file: io.syndesis.dao.DeploymentDescriptorTest.java
@Test public void thereShouldBeNoDuplicateMavenCoordinates() { final Map<String, Long> coordinatesWithCount = StreamSupport.stream(deployment.spliterator(), true) .filter(data -> "connector".equals(data.get("kind").asText())) .flatMap(//from w ww . j a v a 2s . c o m connector -> StreamSupport.stream(connector.get("data").get("actions").spliterator(), true)) .map(action -> action.get("camelConnectorGAV").asText()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); final Map<String, Long> multipleCoordinates = coordinatesWithCount.entrySet().stream() .filter(e -> e.getValue() > 1).collect(Collectors.toMap(Entry::getKey, Entry::getValue)); assertThat(multipleCoordinates).as("Expected connector GAV coordinates to be unique").isEmpty(); }
From source file: com.gs.collections.impl.jmh.AnagramSetTest.java
@Benchmark
public void serial_lazy_jdk() {
    // Bucket the word list by alphagram; words sharing a bucket are anagrams of each other.
    Map<Alphagram, Set<String>> anagramSets = this.jdkWords.stream()
            .collect(Collectors.groupingBy(Alphagram::new, Collectors.<String>toSet()));
    // Render buckets of at least SIZE_THRESHOLD words, largest bucket first.
    anagramSets.values().stream()
            .filter(words -> words.size() >= SIZE_THRESHOLD)
            .sorted(Comparator.<Set<String>>comparingInt(Set::size).reversed())
            .map(words -> words.size() + ": " + words)
            .forEach(line -> Assert.assertFalse(line.isEmpty()));
}
From source file: com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.KubernetesV2SearchProvider.java
/**
 * Builds, for every logical cache entry matching {@code matchQuery}, a mapping from each
 * related cache key to the ids of all matching entries that reference it.
 *
 * @param matchQuery pattern forwarded to the cache lookup for each logical type
 * @return map from related cache key to the ids of the matching entries pointing at it
 */
private Map<String, List<String>> getKeysRelatedToLogicalMatches(String matchQuery) {
    return logicalTypes.stream()
            .flatMap(type -> cacheUtils.getAllDataMatchingPattern(type, matchQuery).stream())
            .flatMap(entry -> entry.getRelationships().values().stream()
                    .flatMap(Collection::stream)
                    .filter(Objects::nonNull)
                    .map(relatedKey -> new ImmutablePair<>(relatedKey, entry.getId())))
            // mapping(...) + toList() accumulates each group directly; the previous
            // reducing(...) collector copied the accumulated list on every element (O(n^2)).
            .collect(Collectors.groupingBy(Pair::getLeft,
                    Collectors.mapping(Pair::getRight, Collectors.toList())));
}
From source file: com.blackducksoftware.integration.hub.detect.workflow.codelocation.BdioCodeLocationCreator.java
/**
 * Groups the given code locations by their BDIO name.
 *
 * @param bdioCodeLocations code locations to group; may be empty
 * @return map from BDIO name to all code locations sharing that name
 */
private Map<String, List<BdioCodeLocation>> groupByBdioNames(final List<BdioCodeLocation> bdioCodeLocations) {
    // Single-argument groupingBy collects each group into a List by default,
    // so the explicit Collectors.toList() downstream was redundant.
    return bdioCodeLocations.stream().collect(Collectors.groupingBy(it -> it.bdioName));
}
From source file: com.qcadoo.mes.cmmsMachineParts.states.MaintenanceEventStateValidationService.java
/**
 * Sums the labor time per worker over all staff work time entries attached to the event.
 *
 * @param event maintenance event whose staff work times are aggregated
 * @return map from worker entity to the total labor time recorded for that worker
 */
private Map<Entity, Integer> getGroupedStaffWorkTimes(Entity event) {
    List<Entity> staffWorkTimes = event.getHasManyField(MaintenanceEventFields.STAFF_WORK_TIMES);
    return staffWorkTimes.stream()
            .collect(Collectors.groupingBy(
                    workTime -> workTime.getBelongsToField(StaffWorkTimeFields.WORKER),
                    Collectors.summingInt(
                            workTime -> workTime.getIntegerField(StaffWorkTimeFields.LABOR_TIME))));
}
From source file: com.gs.collections.impl.jmh.AnagramSetTest.java
@Benchmark
public void parallel_lazy_jdk() {
    // Bucket the word list by alphagram using a parallel stream; words sharing
    // a bucket are anagrams of each other.
    Map<Alphagram, Set<String>> anagramSets = this.jdkWords.parallelStream()
            .collect(Collectors.groupingBy(Alphagram::new, Collectors.<String>toSet()));
    // Render buckets of at least SIZE_THRESHOLD words, largest first, still in parallel.
    anagramSets.values().parallelStream()
            .filter(words -> words.size() >= SIZE_THRESHOLD)
            .sorted(Comparator.<Set<String>>comparingInt(Set::size).reversed())
            .parallel()
            .map(words -> words.size() + ": " + words)
            .forEach(line -> Assert.assertFalse(line.isEmpty()));
}
From source file: com.blackducksoftware.integration.hub.detect.workflow.codelocation.BdioCodeLocationCreator.java
/**
 * Groups the given code locations by their code location name.
 *
 * @param bdioCodeLocations code locations to group; may be empty
 * @return map from code location name to all code locations sharing that name
 */
private Map<String, List<BdioCodeLocation>> groupByCodeLocationNames(
        final List<BdioCodeLocation> bdioCodeLocations) {
    // Single-argument groupingBy collects each group into a List by default,
    // so the explicit Collectors.toList() downstream was redundant.
    return bdioCodeLocations.stream().collect(Collectors.groupingBy(it -> it.codeLocationName));
}
From source file: com.diversityarrays.kdxplore.data.kdx.CurationDataCollector.java
/**
 * Loads and collates everything needed to curate one trial: device identifiers,
 * plots (including their media files), tag usage counts, trial and plot
 * attributes, trait instances and sample groups.
 *
 * @param kdxdb       database the trial data is read from
 * @param trial       trial whose data is being collected
 * @param fullDetails forwarded to {@code loadSampleGroups}; presumably controls how
 *                    much sample-group detail is loaded — TODO confirm against that method
 * @throws IOException             if a database read fails
 * @throws KdxploreConfigException if the KDXplore configuration is invalid
 */
public CurationDataCollector(KdxploreDatabase kdxdb, Trial trial, boolean fullDetails)
        throws IOException, KdxploreConfigException {
    this.trial = trial;
    int trialId = trial.getTrialId();

    // Rebuild the deviceIdentifierById lookup from the freshly loaded list.
    this.deviceIdentifiers = kdxdb.getDeviceIdentifiers();
    deviceIdentifierById.clear();
    for (DeviceIdentifier d : deviceIdentifiers) {
        deviceIdentifierById.put(d.getDeviceIdentifierId(), d);
    }

    //        ProgressUpdater progressUpdater;
    //        kdxdb.getKDXploreKSmartDatabase().collectPlotsFor(trial,
    //                SampleGroupChoice.ANY_SAMPLE_GROUP,
    //                KDSmartDatabase.WithPlotAttributesOption.WITH_PLOT_ATTRIBUTES,
    //                KDSmartDatabase.WithTraitOption.ALL_WITH_TRAITS,
    //                progressUpdater);
    //
    //        this.plots = kdxdb.getPlots(trial, SampleGroupChoice.ANY_SAMPLE_GROUP,
    //                KDSmartDatabase.WithPlotAttributesOption.WITH_PLOT_ATTRIBUTES);

    // Callback for media file records that cannot be resolved; currently a no-op.
    Closure<Pair<WhyMissing, MediaFileRecord>> reportMissing = new Closure<Pair<WhyMissing, MediaFileRecord>>() {
        @Override
        public void execute(Pair<WhyMissing, MediaFileRecord> arg0) {
            // TODO Auto-generated method stub
        }
    };
    Map<Integer, Plot> plotById = DatabaseUtil.collectPlotsIncludingMediaFiles(
            kdxdb.getKDXploreKSmartDatabase(), trialId, "CurationDataCollector",
            SampleGroupChoice.ANY_SAMPLE_GROUP, reportMissing);
    this.plots = new ArrayList<>(plotById.values());

    // Count how many tags carry each label across all plots and sample groups.
    // reducing(0, e -> 1, Integer::sum) tallies one per tag — an Integer-valued
    // equivalent of Collectors.counting().
    this.countByTagLabel = Collections.unmodifiableMap(plots.stream()
            .flatMap(plot -> plot.getTagsBySampleGroup().entrySet().stream())
            .flatMap(e -> e.getValue().stream())
            .collect(Collectors.groupingBy(Tag::getLabel, Collectors.reducing(0, e -> 1, Integer::sum))));

    collectPlotPositionIdentifiers(plots);

    trialAttributes = kdxdb.getTrialAttributes(trialId);

    KDSmartDatabase kdsdb = kdxdb.getKDXploreKSmartDatabase();
    plotAttributes = kdsdb.getAllPlotAttributesForTrial(trialId);
    traitInstances = kdsdb.getTraitInstances(trialId, KDSmartDatabase.WithTraitOption.ALL_WITH_TRAITS);

    //        tagById.clear();
    //        for (Tag tag : kdsdb.getAllTags()) {
    //            tagById.put(tag.getTagId(), tag);
    //        }
    //
    collectTagPlotUsage(trialId, kdsdb);

    //        collectMediaFileRecords(trialId, kdsdb);

    loadSampleGroups(kdxdb, trialId, fullDetails);
}
From source file: io.syndesis.dao.DeploymentDescriptorTest.java
@Test public void thereShouldBeNoDuplicateNames() { final Map<String, Long> namesWithCount = StreamSupport.stream(deployment.spliterator(), true) .filter(data -> "connector".equals(data.get("kind").asText())) .flatMap(// w ww . j a v a 2 s . c o m connector -> StreamSupport.stream(connector.get("data").get("actions").spliterator(), true)) .map(action -> action.get("name").asText()) .collect(Collectors.groupingBy(Function.identity(), Collectors.counting())); final Map<String, Long> multipleNames = namesWithCount.entrySet().stream().filter(e -> e.getValue() > 1) .collect(Collectors.toMap(Entry::getKey, Entry::getValue)); assertThat(multipleNames).as("Expected unique action names").isEmpty(); }
From source file: com.ggvaidya.scinames.ui.DatasetDiffController.java
/**
 * Builds a table of summary statistics for the given datasets: absolute figures
 * for the first dataset, then — for each subsequent dataset — the same figures
 * annotated with the difference (and percentage change) relative to the first.
 *
 * @param datasets datasets to compare; the first is the baseline
 * @return table keyed by (statistic name, dataset) holding the formatted value;
 *         empty if no datasets were supplied
 */
private Table<String, Dataset, String> getComparisonStats(Dataset... datasets) {
    Table<String, Dataset, String> precalc = HashBasedTable.create();

    // No datasets? Give up now.
    if (datasets.length == 0)
        return precalc;

    // For each row, we start with the actual stats for the first dataset and
    // then provide diffs to subsequent datasets.
    Dataset ds1 = datasets[0];
    Project project = datasetDiffView.getProjectView().getProject();

    precalc.put("Number of rows", ds1, String.valueOf(ds1.getRowCount()));

    Set<Name> namesInRows1 = ds1.getNamesInAllRows();
    precalc.put("Number of names in rows", ds1, String.valueOf(namesInRows1.size()));

    Set<Name> recognizedNames1 = project.getRecognizedNames(ds1);
    precalc.put("Number of names recognized", ds1, String.valueOf(recognizedNames1.size()));

    // "Binomial" variants: each name reduced to its binomial form(s) before counting.
    Set<Name> binomialNamesInRows1 = ds1.getNamesInAllRows().stream().flatMap(n -> n.asBinomial())
            .collect(Collectors.toSet());
    precalc.put("Number of binomial names in rows", ds1, String.valueOf(binomialNamesInRows1.size()));

    Set<Name> binomialRecognizedNames1 = project.getRecognizedNames(ds1).stream().flatMap(n -> n.asBinomial())
            .collect(Collectors.toSet());
    precalc.put("Number of binomial names recognized", ds1, String.valueOf(binomialRecognizedNames1.size()));

    Set<DatasetColumn> ds1_cols = new HashSet<>(ds1.getColumns());
    precalc.put("Number of columns", ds1, String.valueOf(ds1_cols.size()));

    List<Change> ds1_changes = ds1.getChanges(project).collect(Collectors.toList());
    precalc.put("Number of changes", ds1, String.valueOf(ds1_changes.size()));

    // One row per change type present in the baseline dataset.
    Map<ChangeType, Long> ds1_changes_by_type = ds1_changes.stream()
            .collect(Collectors.groupingBy(ch -> ch.getType(), Collectors.counting()));
    for (ChangeType ct : ds1_changes_by_type.keySet()) {
        precalc.put("Number of changes of type '" + ct.getType() + "'", ds1,
                String.valueOf(ds1_changes_by_type.get(ct)));
    }

    // Now do comparison stats for each subsequent dataset.
    for (Dataset ds : datasets) {
        if (ds == ds1)
            continue;

        precalc.put("Number of rows", ds,
                ds.getRowCount() + ": " + (ds.getRowCount() - ds1.getRowCount()) + " ("
                        + percentage(ds.getRowCount() - ds1.getRowCount(), ds1.getRowCount()) + ")");

        Set<Name> recognizedNames = project.getRecognizedNames(ds);
        precalc.put("Number of names recognized", ds,
                recognizedNames.size() + ": " + (recognizedNames.size() - recognizedNames1.size()) + " ("
                        + compareSets(recognizedNames1, recognizedNames) + ", "
                        + percentage(recognizedNames.size() - recognizedNames1.size(), recognizedNames1.size())
                        + ")");

        Set<Name> namesInRows = ds.getNamesInAllRows();
        precalc.put("Number of names in rows", ds,
                namesInRows.size() + ": " + (namesInRows.size() - namesInRows1.size()) + " ("
                        + compareSets(namesInRows1, namesInRows) + ", "
                        + percentage(namesInRows.size() - namesInRows1.size(), namesInRows1.size()) + ")");

        Set<Name> binomialRecognizedNames = project.getRecognizedNames(ds).stream().flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());
        precalc.put("Number of binomial names recognized", ds,
                binomialRecognizedNames.size() + ": "
                        + (binomialRecognizedNames.size() - binomialRecognizedNames1.size()) + " ("
                        + compareSets(binomialRecognizedNames1, binomialRecognizedNames) + ", "
                        + percentage(binomialRecognizedNames.size() - binomialRecognizedNames1.size(),
                                binomialRecognizedNames1.size())
                        + ")");

        Set<Name> binomialNamesInRows = ds.getNamesInAllRows().stream().flatMap(n -> n.asBinomial())
                .collect(Collectors.toSet());
        precalc.put("Number of binomial names in rows", ds,
                binomialNamesInRows.size() + ": " + (binomialNamesInRows.size() - binomialNamesInRows1.size())
                        + " (" + compareSets(binomialNamesInRows1, binomialNamesInRows) + ", "
                        + percentage(binomialNamesInRows.size() - binomialNamesInRows1.size(),
                                binomialNamesInRows1.size())
                        + ")");

        Set<DatasetColumn> ds_cols = new HashSet<>(ds.getColumns());
        precalc.put("Number of columns", ds,
                ds_cols.size() + ": " + (ds_cols.size() - ds1_cols.size()) + " ("
                        + compareSets(ds1.getColumns(), ds.getColumns()) + ", "
                        + percentage(ds.getColumns().size() - ds1.getColumns().size(), ds1.getColumns().size())
                        + ")");

        // What we want here is actually the number of changes SINCE ds1
        // So: walk the project's dataset sequence, collecting every dataset after
        // ds1 up to and including ds.
        List<Dataset> datasetsBetween1AndDs = new LinkedList<>();
        boolean ds1_found = false;
        for (Dataset dt : project.getDatasets()) {
            // Don't start until we see the first dataset.
            if (dt == ds1) {
                ds1_found = true;
                continue;
            }

            // Add every subsequent dataset
            if (ds1_found)
                datasetsBetween1AndDs.add(dt);

            // Until we find the current dataset.
            if (dt == ds)
                break;
        }
        List<Change> ds_changes = datasetsBetween1AndDs.stream().flatMap(dt -> dt.getChanges(project))
                .collect(Collectors.toList());
        precalc.put("Number of changes", ds, String.valueOf(ds_changes.size()));

        // One row per change type accumulated between ds1 (exclusive) and ds (inclusive).
        Map<ChangeType, Long> ds_changes_by_type = ds_changes.stream()
                .collect(Collectors.groupingBy(ch -> ch.getType(), Collectors.counting()));
        for (ChangeType ct : ds_changes_by_type.keySet()) {
            precalc.put("Number of changes of type '" + ct.getType() + "'", ds,
                    String.valueOf(ds_changes_by_type.get(ct)));
        }
    }

    return precalc;
}