Example usage for java.util.stream Collectors groupingBy

Introduction

On this page you can find example usages of java.util.stream.Collectors.groupingBy.

Prototype

public static <T, K> Collector<T, ?, Map<K, List<T>>> groupingBy(Function<? super T, ? extends K> classifier) 

Document

Returns a Collector implementing a "group by" operation on input elements of type T, grouping elements according to a classification function, and returning the results in a Map.
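
For orientation, here is a minimal, self-contained sketch of this one-argument overload; the word list and the String::length classifier are illustrative choices, not taken from any of the source files below.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByExample {

    public static void main(String[] args) {
        List<String> words = Arrays.asList("cat", "dog", "tree", "stone", "ox");

        // Classify each word by its length; elements with equal keys
        // are accumulated into the same List.
        Map<Integer, List<String>> byLength = words.stream()
                .collect(Collectors.groupingBy(String::length));

        // Typically prints {2=[ox], 3=[cat, dog], 4=[tree], 5=[stone]};
        // the returned Map makes no guarantee about type or iteration order.
        System.out.println(byLength);
    }
}

The two- and three-argument overloads additionally accept a downstream Collector (for example Collectors.counting()) and a Map factory, for cases where a plain Map<K, List<T>> is not the desired result shape.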

Usage

From source file: alfio.manager.support.CustomMessageManager.java

public void sendMessages(String eventName, Optional<Integer> categoryId, List<MessageModification> input,
        String username) {

    Event event = eventManager.getSingleEvent(eventName, username);
    preview(event, input, username); // dry run for checking the syntax
    Organization organization = eventManager.loadOrganizer(event, username);
    AtomicInteger counter = new AtomicInteger();
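    // Group the message templates by language code (Locale.getLanguage()); each ticket
    // below picks the template for its holder's language, falling back to the first one.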
    Map<String, List<MessageModification>> byLanguage = input.stream()
            .collect(Collectors.groupingBy(m -> m.getLocale().getLanguage()));

    sendMessagesExecutor.execute(() -> {
        categoryId.map(id -> ticketRepository.findConfirmedByCategoryId(event.getId(), id))
                .orElseGet(() -> ticketRepository.findAllConfirmed(event.getId())).stream()
                .filter(t -> isNotBlank(t.getFullName()) && isNotBlank(t.getEmail())).parallel().map(t -> {
                    Model model = new ExtendedModelMap();
                    model.addAttribute("eventName", eventName);
                    model.addAttribute("fullName", t.getFullName());
                    model.addAttribute("organizationName", organization.getName());
                    model.addAttribute("organizationEmail", organization.getEmail());
                    model.addAttribute("reservationURL",
                            ticketReservationManager.reservationUrl(t.getTicketsReservationId(), event));
                    model.addAttribute("reservationID",
                            ticketReservationManager.getShortReservationID(event, t.getTicketsReservationId()));
                    model.addAttribute("ticketURL",
                            ticketReservationManager.ticketUpdateUrl(event, t.getUuid()));
                    return Triple.of(t, t.getEmail(), model);
                }).forEach(triple -> {
                    Ticket ticket = triple.getLeft();
                    MessageModification m = Optional.ofNullable(byLanguage.get(ticket.getUserLanguage()))
                            .orElseGet(() -> byLanguage.get(byLanguage.keySet().stream().findFirst()
                                    .orElseThrow(IllegalStateException::new)))
                            .get(0);
                    Model model = triple.getRight();
                    String subject = renderResource(m.getSubject(), model, m.getLocale(), templateManager);
                    String text = renderResource(m.getText(), model, m.getLocale(), templateManager);
                    List<Mailer.Attachment> attachments = new ArrayList<>();
                    if (m.isAttachTicket()) {
                        ticketReservationManager.findById(ticket.getTicketsReservationId())
                                .ifPresent(reservation -> {
                                    ticketCategoryRepository.getByIdAndActive(ticket.getCategoryId())
                                            .ifPresent(ticketCategory -> {
                                                attachments.add(generateTicketAttachment(ticket, reservation,
                                                        ticketCategory, organization));
                                            });
                                });
                    }
                    counter.incrementAndGet();
                    notificationManager.sendSimpleEmail(event, triple.getMiddle(), subject, () -> text,
                            attachments);
                });
    });

}

From source file: ws.salient.session.Sessions.java

public CompletableFuture execute(List<Command> commands) {
    Instant now = Instant.now();
    CompletableFuture result;
    try {
        commands.stream().filter(command -> command instanceof ModifyProfile).forEach((command) -> {
            log.info(toJson(command));
            profiles.modified(command.getAccountId());
        });

        commands.stream().filter(command -> command.getKnowledgeBaseId() != null).forEach((command) -> {
            String knowledgeBaseId = command.getKnowledgeBaseId();
            Map<String, String> aliases = profiles.getAliases(command.getAccountId(), command.getProfiles());
            if (aliases.containsKey(knowledgeBaseId)) {
                knowledgeBaseId = aliases.get(knowledgeBaseId);
            }
            command.setKnowledgeBaseId(knowledgeBaseId);
        });

        commands.forEach((command) -> {
            command.setTimestamp(now);
        });

        // Load knowledge bases in parallel
        List<CompletableFuture<KnowledgeBase>> knowledgeBases = commands.stream()
                .filter(command -> command.getKnowledgeBaseId() != null)
                .collect(Collectors.groupingBy((command) -> {
                    // Group commands by knowledgeBaseId
                    return command.getKnowledgeBaseId();
                })).values().stream().map((kbaseCommands) -> {
                    return CompletableFuture.supplyAsync(() -> {
                        // Load each knowledge base
                        return repository.getKnowledgeBase(kbaseCommands.get(0).getKnowledgeBaseId());
                    });
                }).collect(Collectors.toList());
        CompletableFuture.allOf(knowledgeBases.toArray(new CompletableFuture[knowledgeBases.size()])).get();

        // Load sessions in parallel
        List<CompletableFuture<Session>> sessions = commands.stream()
                .filter(command -> command.getSessionId() != null).collect(Collectors.groupingBy((command) -> {
                    // Group commands by sessionId
                    return command.getSessionId();
                })).values().stream().map((sessionCommands) -> {
                    return CompletableFuture.supplyAsync(() -> {
                        // Load each session
                        return getSession(sessionCommands.get(0));
                    });
                }).collect(Collectors.toList());
        CompletableFuture.allOf(sessions.toArray(new CompletableFuture[sessions.size()])).get();

        result = CompletableFuture.runAsync(() -> {
            int requestIndex = 0;
            for (Command command : commands) {
                if (command.getSessionId() != null) {
                    command.setTimestamp(now);
                    Session session = getSession(command);
                    session.accept(command);
                    store.put(session, command, requestIndex);
                    requestIndex++;
                }
            }
        }, commandExecutor).thenRun(() -> {
            this.sessions.forEach((id, session) -> {
                if (session.expired(now)) {
                    if (session.getProcessCount() == 0) {
                        // Qualify with "this.": the local variable "sessions" (the list
                        // of futures above) shadows the session map field inside this lambda.
                        int oldCount = this.sessions.size();
                        this.sessions.remove(id);
                        session.dispose();
                        log.info("Session count was " + oldCount + " now " + this.sessions.size());
                    }
                }
            });
        });
    } catch (InterruptedException | ExecutionException ex) {
        ex.printStackTrace();
        throw new RuntimeException(ex);
    }
    return result;
}

From source file: com.gs.collections.impl.jmh.AnagramListTest.java

@Benchmark
public void serial_lazy_jdk() {
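    // Group the words by Alphagram, so that anagrams of each other land in the same list.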
    Map<Alphagram, List<String>> groupBy = this.jdkWords.stream()
            .collect(Collectors.groupingBy(Alphagram::new));
    groupBy.entrySet().stream().map(Map.Entry::getValue).filter(list -> list.size() >= SIZE_THRESHOLD)
            .sorted(Comparator.<List<String>>comparingInt(List::size).reversed())
            .map(list -> list.size() + ": " + list).forEach(e -> Assert.assertFalse(e.isEmpty()));
}

From source file: com.hack23.cia.web.impl.ui.application.views.common.chartfactory.impl.PartyChartDataManagerImpl.java

/**
 * Initializes the party map.
 */
private void initPartyMap() {
    if (partyMap == null) {
        final DataContainer<ViewRiksdagenVoteDataBallotPartySummaryDaily, RiksdagenVoteDataBallotPartyPeriodSummaryEmbeddedId> partyBallotSummaryDailyDataContainer = applicationManager
                .getDataContainer(ViewRiksdagenVoteDataBallotPartySummaryDaily.class);

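        // Group the daily ballot summaries by party id, skipping null entries.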
        partyMap = partyBallotSummaryDailyDataContainer.getAll().parallelStream().filter(t -> t != null)
                .collect(Collectors.groupingBy(t -> t.getEmbeddedId().getParty()));
    }
}

From source file: com.uber.hoodie.common.table.view.HoodieTableFileSystemView.java

/**
 * Adds the provided statuses into the file system view, and also caches it inside this object.
 *
 * @param statuses the file statuses to add to the view
 * @return the resulting list of file groups
 */
private List<HoodieFileGroup> addFilesToView(FileStatus[] statuses) {
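    // Group data files and log files by (partition path, file id); entries sharing
    // that pair belong to the same file group.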
    Map<Pair<String, String>, List<HoodieDataFile>> dataFiles = convertFileStatusesToDataFiles(statuses)
            .collect(Collectors.groupingBy((dataFile) -> {
                String partitionPathStr = FSUtils.getRelativePartitionPath(new Path(metaClient.getBasePath()),
                        dataFile.getFileStatus().getPath().getParent());
                return Pair.of(partitionPathStr, dataFile.getFileId());
            }));
    Map<Pair<String, String>, List<HoodieLogFile>> logFiles = convertFileStatusesToLogFiles(statuses)
            .collect(Collectors.groupingBy((logFile) -> {
                String partitionPathStr = FSUtils.getRelativePartitionPath(new Path(metaClient.getBasePath()),
                        logFile.getPath().getParent());
                return Pair.of(partitionPathStr, logFile.getFileId());
            }));

    Set<Pair<String, String>> fileIdSet = new HashSet<>(dataFiles.keySet());
    fileIdSet.addAll(logFiles.keySet());

    List<HoodieFileGroup> fileGroups = new ArrayList<>();
    fileIdSet.forEach(pair -> {
        HoodieFileGroup group = new HoodieFileGroup(pair.getKey(), pair.getValue(), visibleActiveTimeline);
        if (dataFiles.containsKey(pair)) {
            dataFiles.get(pair).forEach(dataFile -> group.addDataFile(dataFile));
        }
        if (logFiles.containsKey(pair)) {
            logFiles.get(pair).forEach(logFile -> group.addLogFile(logFile));
        }
        fileGroups.add(group);
    });

    // add to the cache.
    fileGroups.forEach(group -> {
        fileGroupMap.put(group.getId(), group);
        if (!partitionToFileGroupsMap.containsKey(group.getPartitionPath())) {
            partitionToFileGroupsMap.put(group.getPartitionPath(), new ArrayList<>());
        }
        partitionToFileGroupsMap.get(group.getPartitionPath()).add(group);
    });

    return fileGroups;
}

From source file: com.hurence.logisland.connect.opc.CommonOpcSourceTask.java

@Override
public void start(Map<String, String> props) {
    setConfigurationProperties(props);

    transferQueue = new LinkedTransferQueue<>();
    opcOperations = new SmartOpcOperations<>(createOpcOperations());
    ConnectionProfile connectionProfile = createConnectionProfile();
    host = connectionProfile.getConnectionUri().getHost();
    tagInfoMap = CommonUtils.parseTagsFromProperties(props).stream()
            .collect(Collectors.toMap(TagInfo::getTagId, Function.identity()));
    minWaitTime = Math.min(10, tagInfoMap.values().stream().map(TagInfo::getSamplingInterval)
            .mapToLong(Duration::toMillis).min().getAsLong());
    opcOperations.connect(connectionProfile);
    if (!opcOperations.awaitConnected()) {
        throw new ConnectException("Unable to connect");
    }

    // set up polling source emission
    pollingScheduler = Executors.newSingleThreadScheduledExecutor();
    streamingThread = Executors.newSingleThreadExecutor();
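    // Group the tags configured for polling by sampling interval; each distinct
    // interval gets one scheduled task that publishes the last known values.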
    Map<Duration, List<TagInfo>> pollingMap = tagInfoMap.values().stream()
            .filter(tagInfo -> StreamingMode.POLL.equals(tagInfo.getStreamingMode()))
            .collect(Collectors.groupingBy(TagInfo::getSamplingInterval));
    final Map<String, OpcData> lastValues = Collections.synchronizedMap(new HashMap<>());
    pollingMap.forEach((k, v) -> pollingScheduler.scheduleAtFixedRate(() -> {
        final Instant now = Instant.now();
        v.stream().map(TagInfo::getTagId).map(lastValues::get).filter(Functions.not(Objects::isNull))
                .map(data -> Pair.of(now, data)).forEach(transferQueue::add);

    }, 0, k.toNanos(), TimeUnit.NANOSECONDS));
    //then subscribe for all
    final SubscriptionConfiguration subscriptionConfiguration = new SubscriptionConfiguration()
            .withDefaultSamplingInterval(Duration.ofMillis(10_000));
    tagInfoMap.values().forEach(tagInfo -> subscriptionConfiguration
            .withTagSamplingIntervalForTag(tagInfo.getTagId(), tagInfo.getSamplingInterval()));
    running.set(true);
    streamingThread.submit(() -> {
        while (running.get()) {
            try {
                createSessionIfNeeded();
                if (session == null) {
                    return;
                }

                session.stream(subscriptionConfiguration,
                        tagInfoMap.keySet().toArray(new String[tagInfoMap.size()])).forEach(opcData -> {
                            if (tagInfoMap.get(opcData.getTag()).getStreamingMode()
                                    .equals(StreamingMode.SUBSCRIBE)) {
                                transferQueue.add(Pair.of(
                                        hasServerSideSampling() ? opcData.getTimestamp() : Instant.now(),
                                        opcData));
                            } else {
                                lastValues.put(opcData.getTag(), opcData);
                            }
                        });
            } catch (Exception e) {
                if (running.get()) {
                    logger.warn("Stream interrupted while reading from " + host, e);
                    safeCloseSession();
                    lastValues.clear();

                }
            }
        }
    });

}

From source file: org.obiba.mica.search.CoverageQueryExecutor.java

/**
 * Extract hits from aggregations and merge them into the taxonomy descriptions.
 */
private Iterable<MicaSearch.TaxonomyCoverageDto> getCoverages(
        List<MicaSearch.AggregationResultDto> aggregations) {
    Map<String, Map<String, MicaSearch.TermsAggregationResultDto>> aggTermsTitlesMap = aggregations.stream()
            .collect(Collectors.toMap(MicaSearch.AggregationResultDto::getAggregation,
                    a -> a.getExtension(MicaSearch.TermsAggregationResultDto.terms).stream()
                            .collect(Collectors.toMap(MicaSearch.TermsAggregationResultDto::getKey, t -> t))));

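    // Group the extracted bucket results by taxonomy name for the merge below.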
    Map<String, List<BucketResult>> bucketResultsByTaxonomy = extractBucketResults(aggregations).stream()
            .collect(Collectors.groupingBy(BucketResult::getTaxonomy));

    Map<String, Map<String, Integer>> aggsMap = Maps.newHashMap();
    aggregations.forEach(agg -> {
        String name = agg.getAggregation();
        List<MicaSearch.TermsAggregationResultDto> results = agg
                .getExtension(MicaSearch.TermsAggregationResultDto.terms);
        if (results != null && !results.isEmpty() && isAttributeField(name)) {
            String key = name.replaceAll("^attributes-", "").replaceAll("-und$", "");
            if (!aggsMap.containsKey(key))
                aggsMap.put(key, Maps.newHashMap());
            results.forEach(res -> aggsMap.get(key).put(res.getKey(), res.getCount()));
        }
    });

    List<MicaSearch.TaxonomyCoverageDto> coverages = Lists.newArrayList();
    getTaxonomies().stream().filter(taxonomy -> applyFilter(taxonomy))
            .forEach(taxonomy -> addTaxonomyCoverage(coverages, taxonomy, aggsMap,
                    bucketResultsByTaxonomy.get(taxonomy.getName()), aggTermsTitlesMap));

    return coverages;
}

From source file: com.gs.collections.impl.jmh.AnagramListTest.java

@Benchmark
public void parallel_lazy_jdk() {
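    // Same grouping as serial_lazy_jdk, but performed on a parallel stream.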
    Map<Alphagram, List<String>> groupBy = this.jdkWords.parallelStream()
            .collect(Collectors.groupingBy(Alphagram::new));
    groupBy.entrySet().parallelStream().map(Map.Entry::getValue).filter(list -> list.size() >= SIZE_THRESHOLD)
            .sorted(Comparator.<List<String>>comparingInt(List::size).reversed())
            .map(list -> list.size() + ": " + list).forEach(e -> Assert.assertFalse(e.isEmpty()));
}

From source file: com.epam.ta.reportportal.core.item.history.TestItemsHistoryHandlerImpl.java

@Override
public List<TestItemHistoryElement> getItemsHistory(String projectName, String[] startPointsIds,
        int historyDepth, boolean showBrokenLaunches) {

    Project project = projectRepository.findOne(projectName);
    BusinessRule.expect(project, Predicates.notNull()).verify(ErrorType.PROJECT_NOT_FOUND, projectName);

    Predicate<Integer> greaterThan = t -> t > MIN_HISTORY_DEPTH_BOUND;
    Predicate<Integer> lessThan = t -> t < MAX_HISTORY_DEPTH_BOUND;
    String historyDepthMessage = "Items history depth should be greater than '" + MIN_HISTORY_DEPTH_BOUND
            + "' and lower than '" + MAX_HISTORY_DEPTH_BOUND + "'";
    BusinessRule.expect(historyDepth, greaterThan.and(lessThan)).verify(UNABLE_LOAD_TEST_ITEM_HISTORY,
            historyDepthMessage);

    BusinessRule.expect(startPointsIds.length, t -> t < MAX_HISTORY_SIZE_BOUND).verify(
            UNABLE_LOAD_TEST_ITEM_HISTORY,
            "History size should be less than '" + MAX_HISTORY_SIZE_BOUND + "' test items.");

    List<String> listIds = Lists.newArrayList(startPointsIds);

    List<TestItem> itemsForHistory = historyServiceStrategy.loadItems(listIds);
    historyServiceStrategy.validateItems(itemsForHistory, listIds, projectName);

    List<Launch> launches = historyServiceStrategy.loadLaunches(historyDepth,
            itemsForHistory.get(0).getLaunchRef(), projectName, showBrokenLaunches);
    List<String> historyLaunchesIds = launches.stream().map(Launch::getId).collect(Collectors.toList());

    List<TestItem> history = testItemRepository.loadItemsHistory(itemsForHistory, historyLaunchesIds,
            loadParentIds(itemsForHistory.get(0), historyLaunchesIds));

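    // Group the loaded history items by launch id, so an element can be built per launch.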
    Map<String, List<TestItem>> groupedItems = history.stream()
            .collect(Collectors.groupingBy(TestItem::getLaunchRef));
    return launches.stream().map(launch -> buildHistoryElement(launch, groupedItems.get(launch.getId())))
            .collect(Collectors.toList());
}

From source file: com.hack23.cia.web.impl.ui.application.views.common.chartfactory.impl.DocumentChartDataManagerImpl.java

/**
 * Gets the document type map.
 *
 * @return the document type map
 */
private Map<String, List<ViewRiksdagenDocumentTypeDailySummary>> getDocumentTypeMap() {
    final DataContainer<ViewRiksdagenDocumentTypeDailySummary, RiksdagenDocumentTypeSummaryEmbeddedId> documentTypeSummaryDailyDataContainer = applicationManager
            .getDataContainer(ViewRiksdagenDocumentTypeDailySummary.class);

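    // Group the daily summaries by document type, skipping entries whose public date
    // starts with YEAR_PREFIX and keeping only the types listed in MOT_PROP_BET.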
    return documentTypeSummaryDailyDataContainer.getAll().parallelStream()
            .filter(t -> t != null && !t.getEmbeddedId().getPublicDate().startsWith(YEAR_PREFIX)
                    && MOT_PROP_BET.contains(t.getEmbeddedId().getDocumentType()))
            .collect(Collectors.groupingBy(t -> t.getEmbeddedId().getDocumentType()));
}