Example usage for java.util.stream Collectors groupingBy

Introduction

On this page you can find usage examples for java.util.stream Collectors groupingBy.

Prototype

public static <T, K> Collector<T, ?, Map<K, List<T>>> groupingBy(Function<? super T, ? extends K> classifier) 

Document

Returns a Collector implementing a "group by" operation on input elements of type T, grouping elements according to a classification function, and returning the results in a Map.
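
As a quick illustration of the prototype above, here is a minimal, self-contained sketch (the data and names are our own, not from any project below):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("apple", "banana", "avocado", "cherry");
        // Classify each word by its first character; each value list keeps encounter order.
        Map<Character, List<String>> byInitial = words.stream()
                .collect(Collectors.groupingBy(w -> w.charAt(0)));
        System.out.println(byInitial); // e.g. {a=[apple, avocado], b=[banana], c=[cherry]}
    }
}

Note that the Javadoc makes no guarantees about the type, mutability, serializability, or thread-safety of the returned Map.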

Usage

From source file:org.trustedanalytics.servicecatalog.service.rest.ServiceInstancesControllerHelpers.java

private Map<UUID, List<ServiceInstance>> createInstancesIndex(Collection<ServiceInstance> instances) {
    return instances.stream().collect(Collectors.groupingBy(i -> i.getServicePlan().getService().getGuid()));
}

From source file:alfio.manager.user.UserManager.java

public List<UserWithOrganizations> findAllUsers(String username) {
    List<Organization> organizations = findUserOrganizations(username);
    Predicate<Collection<?>> isNotEmpty = ks -> !ks.isEmpty();
    return Optional.of(organizations).filter(isNotEmpty).flatMap(org -> {
        Map<Integer, List<UserOrganization>> usersAndOrganizations = userOrganizationRepository
                .findByOrganizationIdsOrderByUserId(
                        organizations.stream().map(Organization::getId).collect(toList()))
                .stream().collect(Collectors.groupingBy(UserOrganization::getUserId));
        return Optional.of(usersAndOrganizations.keySet()).filter(isNotEmpty)
                .map(ks -> userRepository.findByIds(ks).stream().map(u -> {
                    List<UserOrganization> userOrganizations = usersAndOrganizations.get(u.getId());
                    return new UserWithOrganizations(u, organizations.stream().filter(
                            o -> userOrganizations.stream().anyMatch(uo -> uo.getOrganizationId() == o.getId()))
                            .collect(toList()));
                }).collect(toList()));
    }).orElseGet(Collections::emptyList);
}

From source file:com.epam.ta.reportportal.core.jasper.JasperDataProvider.java

public List<TestItemPojo> getReportSource(Launch launch) {
    List<TestItemPojo> result = Collections.emptyList();

    /* Get launch referred test items with SORT! */
    List<TestItem> ownedItems = testItemRepository.findByLaunch(launch);
    if (ownedItems.size() > 0) {

        /* Grouping test items by path field */
        Map<List<String>, List<TestItem>> grouped = ownedItems.stream().map(JasperDataProvider::adjustName)
                .collect(Collectors.groupingBy(TestItem::getPath));

        /* List of grouped test items by parent nodes */
        List<TestItem> prepared = this.processLaunchTree(grouped, Lists.newArrayList());

        result = prepared.stream().map(TestItemPojo::new).collect(Collectors.toList());
    }
    return result;
}
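
Note that the classifier above returns a List<String> (the item path): groupingBy accepts any key type with a consistent equals/hashCode. A minimal sketch of such a composite key, with made-up data:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupByCompositeKey {
    public static void main(String[] args) {
        List<String> files = Arrays.asList("a/b/x.txt", "a/b/y.txt", "a/c/z.txt");
        // Use the parent segments of each path (a List<String>) as the map key.
        Map<List<String>, List<String>> byParent = files.stream()
                .collect(Collectors.groupingBy(f -> {
                    String[] parts = f.split("/");
                    return Arrays.asList(parts).subList(0, parts.length - 1);
                }));
        System.out.println(byParent); // e.g. {[a, b]=[a/b/x.txt, a/b/y.txt], [a, c]=[a/c/z.txt]}
    }
}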

From source file:com.uber.hoodie.hadoop.realtime.HoodieRealtimeInputFormat.java

@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {

    Stream<FileSplit> fileSplits = Arrays.stream(super.getSplits(job, numSplits)).map(is -> (FileSplit) is);

    // obtain all unique parent folders for splits
    Map<Path, List<FileSplit>> partitionsToParquetSplits = fileSplits
            .collect(Collectors.groupingBy(split -> split.getPath().getParent()));
    // TODO(vc): Should we handle also non-hoodie splits here?
    Map<String, HoodieTableMetaClient> metaClientMap = new HashMap<>();
    Map<Path, HoodieTableMetaClient> partitionsToMetaClient = partitionsToParquetSplits.keySet().stream()
            .collect(Collectors.toMap(Function.identity(), p -> {
                // find if we have a metaclient already for this partition.
                Optional<String> matchingBasePath = metaClientMap.keySet().stream()
                        .filter(basePath -> p.toString().startsWith(basePath)).findFirst();
                if (matchingBasePath.isPresent()) {
                    return metaClientMap.get(matchingBasePath.get());
                }

                try {
                    HoodieTableMetaClient metaClient = getTableMetaClient(p.getFileSystem(conf), p);
                    metaClientMap.put(metaClient.getBasePath(), metaClient);
                    return metaClient;
                } catch (IOException e) {
                    throw new HoodieIOException("Error creating hoodie meta client against : " + p, e);
                }
            }));

    // for all unique split parents, obtain all delta files based on delta commit timeline, grouped on file id
    List<HoodieRealtimeFileSplit> rtSplits = new ArrayList<>();
    partitionsToParquetSplits.keySet().stream().forEach(partitionPath -> {
        // for each partition path obtain the data & log file groupings, then map back to inputsplits
        HoodieTableMetaClient metaClient = partitionsToMetaClient.get(partitionPath);
        HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(metaClient,
                metaClient.getActiveTimeline());
        String relPartitionPath = FSUtils.getRelativePartitionPath(new Path(metaClient.getBasePath()),
                partitionPath);

        try {
            Stream<FileSlice> latestFileSlices = fsView.getLatestFileSlices(relPartitionPath);

            // subgroup splits again by file id & match with log files.
            Map<String, List<FileSplit>> groupedInputSplits = partitionsToParquetSplits.get(partitionPath)
                    .stream()
                    .collect(Collectors.groupingBy(split -> FSUtils.getFileId(split.getPath().getName())));
            latestFileSlices.forEach(fileSlice -> {
                List<FileSplit> dataFileSplits = groupedInputSplits.get(fileSlice.getFileId());
                dataFileSplits.forEach(split -> {
                    try {
                        List<String> logFilePaths = fileSlice.getLogFiles()
                                .map(logFile -> logFile.getPath().toString()).collect(Collectors.toList());
                        // Get the maxCommit from the last delta or compaction or commit - when bootstrapped from COW table
                        String maxCommitTime = metaClient.getActiveTimeline()
                                .getTimelineOfActions(Sets.newHashSet(HoodieTimeline.COMMIT_ACTION,
                                        HoodieTimeline.COMPACTION_ACTION, HoodieTimeline.DELTA_COMMIT_ACTION))
                                .filterCompletedInstants().lastInstant().get().getTimestamp();
                        rtSplits.add(new HoodieRealtimeFileSplit(split, logFilePaths, maxCommitTime));
                    } catch (IOException e) {
                        throw new HoodieIOException("Error creating hoodie real time split ", e);
                    }
                });
            });
        } catch (Exception e) {
            throw new HoodieException("Error obtaining data file/log file grouping: " + partitionPath, e);
        }
    });
    LOG.info("Returning a total splits of " + rtSplits.size());
    return rtSplits.toArray(new InputSplit[rtSplits.size()]);
}

From source file:org.trustedanalytics.servicecatalog.service.rest.ServiceInstancesControllerHelpers.java

private Map<UUID, List<ServiceKey>> createServiceKeysIndex(Observable<ServiceKey> serviceKeys) {
    return serviceKeys.toList().toBlocking().single().stream()
            .collect(Collectors.groupingBy(ServiceKey::getServiceInstanceGuid));
}

From source file:defaultmethods.StandardDeck.java

public Map<Integer, Deck> deal(int players, int numberOfCards) throws IllegalArgumentException {
    int cardsDealt = players * numberOfCards;
    int sizeOfDeck = entireDeck.size();
    if (cardsDealt > sizeOfDeck) {
        throw new IllegalArgumentException(
                "Number of players (" + players + ") times number of cards to be dealt (" + numberOfCards
                        + ") is greater than the number of cards in the deck (" + sizeOfDeck + ").");
    }

    Map<Integer, List<Card>> dealtDeck = entireDeck.stream().collect(Collectors.groupingBy(card -> {
        int cardIndex = entireDeck.indexOf(card);
        if (cardIndex >= cardsDealt)
            return (players + 1);
        else
            return (cardIndex % players) + 1;
    }));

    // Convert Map<Integer, List<Card>> to Map<Integer, Deck>
    Map<Integer, Deck> mapToReturn = new HashMap<>();

    for (int i = 1; i <= (players + 1); i++) {
        Deck currentDeck = deckFactory();
        currentDeck.addCards(dealtDeck.get(i));
        mapToReturn.put(i, currentDeck);
    }
    return mapToReturn;
}
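
One detail worth noting: entireDeck.indexOf(card) in the classifier performs a linear scan for every card. When the element's index is what drives the grouping, an alternative (our own sketch, using the two-argument groupingBy overload together with Collectors.mapping) is to stream over indices instead:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class DealSketch {
    public static void main(String[] args) {
        List<String> deck = Arrays.asList("AS", "KS", "QS", "JS", "TS", "9S");
        int players = 2;
        int cardsDealt = 4;
        // Group card indices into hands 1..players, leftovers into hand players + 1.
        Map<Integer, List<String>> hands = IntStream.range(0, deck.size()).boxed()
                .collect(Collectors.groupingBy(
                        i -> i >= cardsDealt ? players + 1 : (i % players) + 1,
                        Collectors.mapping(deck::get, Collectors.toList())));
        System.out.println(hands); // e.g. {1=[AS, QS], 2=[KS, JS], 3=[TS, 9S]}
    }
}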

From source file:com.miko.s4netty.handler.WorkerProviderHandler.java

@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
    if (msg instanceof HttpRequest) {
        HttpRequest req = (HttpRequest) msg;

        if (is100ContinueExpected(req)) {
            ctx.write(new DefaultFullHttpResponse(HTTP_1_1, CONTINUE));
        }

        boolean keepAlive = isKeepAlive(req);

        logger.debug("channelRead keepAlive= " + keepAlive);

        Gson gson = new Gson();
        List<CarDTO> carList = simpleCarService.getAll();

        Map<String, List<CarDTO>> byMake = simpleCarService.getAll().stream()
                .collect(Collectors.groupingBy(CarDTO::getMake));
        System.out.println("byMake = " + byMake);
        System.out.println("byMake Skoda= " + byMake.get("Skoda").iterator().next().getModel());

        JsonElement element = gson.toJsonTree(carList, new TypeToken<List<CarDTO>>() {
        }.getType());
        JsonArray jsonArray = element.getAsJsonArray();

        FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1, OK,
                Unpooled.wrappedBuffer(jsonArray.toString().getBytes()));
        response.headers().set(CONTENT_TYPE, "application/json");
        response.headers().set(CONTENT_LENGTH, response.content().readableBytes());

        //ignoring KeepAlive
        ctx.write(response).addListener(ChannelFutureListener.CLOSE);

    }
}

From source file:com.godaddy.logging.messagebuilders.providers.FoldedLogData.java

private FoldedLogData buildLogData(final RunningLogContext<List<Map<String, Object>>> runningLogContext) {
    FoldedLogData data = new FoldedLogData();

    Map<String, List<String>> keyNames = runningLogContext.getData().stream().flatMap(i -> i.keySet().stream())
            .collect(Collectors.groupingBy(i -> i));

    Map<String, Integer> keyCollisionIdentifier = new HashMap<>();

    runningLogContext.getData().stream().forEach(withObject -> {

        Map<String, Object> unnamedContext = getUnnamedContext(withObject);

        Map<String, Object> namedContext = appendNamedContext(withObject, keyNames, keyCollisionIdentifier);

        if (unnamedContext.size() > 0) {
            data.unnamed.add(unnamedContext);
        }
        if (namedContext.size() > 0) {
            data.named.add(namedContext);
        }
    });

    return data;
}
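
In the method above, groupingBy(i -> i) groups each key name with itself, so the size of each value list is the number of times that key occurs. When only the count is needed, the two-argument groupingBy overload with Collectors.counting() is a common alternative; a minimal sketch with made-up data:

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByCounting {
    public static void main(String[] args) {
        List<String> keys = Arrays.asList("id", "name", "id", "time", "id", "time");
        // Count occurrences per key instead of collecting the duplicates into lists.
        Map<String, Long> counts = keys.stream()
                .collect(Collectors.groupingBy(k -> k, Collectors.counting()));
        System.out.println(counts); // e.g. {name=1, id=3, time=2}
    }
}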

From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2CachingAgent.java

protected Map<KubernetesKind, List<KubernetesManifest>> loadPrimaryResourceList() {
    return namespaces.stream().map(n -> {
        try {
            return credentials.list(primaryKinds(), n);
        } catch (KubectlException e) {
            log.warn("Failed to read kind {} from namespace {}: {}", primaryKinds(), n, e.getMessage());
            return null;
        }
    }).filter(Objects::nonNull).flatMap(Collection::stream)
            .collect(Collectors.groupingBy(KubernetesManifest::getKind));
}

From source file:com.github.horrorho.inflatabledonkey.cloud.AuthorizeAssetsClient.java

@Override
public List<FileGroups> apply(HttpClient httpClient, Collection<Asset> assets) throws IOException {
    logger.trace("<< apply() - assets: {}", assets.size());
    try {
        List<CloudKit.Asset> ckAssets = ckAssets(assets);
        // Only expecting one contentBaseUrl. 
        // Can probably simplify code to handle only one.
        List<FileGroups> fileGroups = ckAssets.stream()
                .collect(Collectors.groupingBy(CloudKit.Asset::getContentBaseURL)).entrySet().stream()
                .map(e -> fileGroups(httpClient, e.getKey(), e.getValue())).collect(toList());
        logger.trace(">> apply() - fileGroups: {}", fileGroups.size());
        return fileGroups;

    } catch (UncheckedIOException ex) {
        throw ex.getCause();
    }
}
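
The pattern here, grouping first and then streaming over the resulting entries to process each group, is a common companion to groupingBy. A generic sketch with made-up data:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class GroupThenMap {
    public static void main(String[] args) {
        List<String> urls = Arrays.asList("http://a/1", "http://a/2", "http://b/1");
        // Group by everything before the last '/', then summarize each group.
        List<String> summaries = urls.stream()
                .collect(Collectors.groupingBy(u -> u.substring(0, u.lastIndexOf('/'))))
                .entrySet().stream()
                .map(e -> e.getKey() + " -> " + e.getValue().size() + " asset(s)")
                .collect(Collectors.toList());
        summaries.forEach(System.out::println);
    }
}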