Example usage for java.util.stream Collectors groupingBy

List of usage examples for java.util.stream Collectors groupingBy

Introduction

On this page you can find example usage of java.util.stream.Collectors.groupingBy.

Prototype

public static <T, K, A, D> Collector<T, ?, Map<K, D>> groupingBy(Function<? super T, ? extends K> classifier,
        Collector<? super T, A, D> downstream) 

Document

Returns a Collector implementing a cascaded "group by" operation on input elements of type T, grouping elements according to a classification function, and then performing a reduction operation on the values associated with a given key using the specified downstream Collector.
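
A minimal, self-contained sketch of this two-argument overload (hypothetical data, not taken from any of the projects below) groups words by their length and counts how many fall into each bucket:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByCountExample {
    public static void main(String[] args) {
        List<String> words = List.of("stream", "map", "collect", "set", "reduce");

        // Classifier: the word length; downstream collector: count the words per length.
        Map<Integer, Long> countsByLength = words.stream()
                .collect(Collectors.groupingBy(String::length, Collectors.counting()));

        System.out.println(countsByLength); // prints something like {3=2, 6=2, 7=1}
    }
}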

Usage

From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

@Override
public CompletableFuture<CacheEvictionStats> clearBlockCache(TableName tableName) {
    CompletableFuture<CacheEvictionStats> future = new CompletableFuture<>();
    addListener(getTableHRegionLocations(tableName), (locations, err) -> {
        if (err != null) {
            future.completeExceptionally(err);
            return;
        }
        Map<ServerName, List<RegionInfo>> regionInfoByServerName = locations.stream()
                .filter(l -> l.getRegion() != null).filter(l -> !l.getRegion().isOffline())
                .filter(l -> l.getServerName() != null).collect(Collectors.groupingBy(l -> l.getServerName(),
                        Collectors.mapping(l -> l.getRegion(), Collectors.toList())));
        List<CompletableFuture<CacheEvictionStats>> futures = new ArrayList<>();
        CacheEvictionStatsAggregator aggregator = new CacheEvictionStatsAggregator();
        for (Map.Entry<ServerName, List<RegionInfo>> entry : regionInfoByServerName.entrySet()) {
            futures.add(clearBlockCache(entry.getKey(), entry.getValue()).whenComplete((stats, err2) -> {
                if (err2 != null) {
                    future.completeExceptionally(unwrapCompletionException(err2));
                } else {
                    aggregator.append(stats);
                }
            }));
        }
        addListener(CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()])),
                (ret, err3) -> {
                    if (err3 != null) {
                        future.completeExceptionally(unwrapCompletionException(err3));
                    } else {
                        future.complete(aggregator.sum());
                    }
                });
    });
    return future;
}
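
The grouping step above is the common groupingBy + mapping + toList combination. A stripped-down, self-contained sketch of just that step, using hypothetical host/region strings in place of HBase's ServerName and RegionInfo types:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingByMappingExample {
    // Hypothetical stand-in for an HBase region location: which host serves which region.
    record Location(String host, String region) {}

    public static void main(String[] args) {
        List<Location> locations = List.of(
                new Location("host-a", "region-1"),
                new Location("host-b", "region-2"),
                new Location("host-a", "region-3"));

        // Group the region names by host, mirroring the regionInfoByServerName map above.
        Map<String, List<String>> regionsByHost = locations.stream()
                .collect(Collectors.groupingBy(Location::host,
                        Collectors.mapping(Location::region, Collectors.toList())));

        System.out.println(regionsByHost); // e.g. {host-a=[region-1, region-3], host-b=[region-2]}
    }
}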

From source file: org.apache.hadoop.hbase.quotas.SnapshotQuotaObserverChore.java

/**
 * Sums the snapshot sizes for each namespace.
 */
Map<String, Long> groupSnapshotSizesByNamespace(Multimap<TableName, SnapshotWithSize> snapshotsWithSize) {
    return snapshotsWithSize.entries().stream().collect(Collectors.groupingBy(
            // Convert TableName into the namespace string
            (e) -> e.getKey().getNamespaceAsString(),
            // Sum the snapshot sizes for each namespace
            Collectors.mapping(Map.Entry::getValue, Collectors.summingLong((sws) -> sws.getSize()))));
}
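
Here the downstream collector sums a long value per group. A minimal, self-contained variant of the same idea (hypothetical Order data instead of the snapshot types above):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingBySumExample {
    // Hypothetical stand-in for the per-namespace snapshot sizes summed above.
    record Order(String customer, long amountCents) {}

    public static void main(String[] args) {
        List<Order> orders = List.of(
                new Order("alice", 1200),
                new Order("bob", 300),
                new Order("alice", 450));

        // Group by customer and sum the amounts; the method above inserts a
        // Collectors.mapping step first because its stream elements are Map.Entry values.
        Map<String, Long> totalByCustomer = orders.stream()
                .collect(Collectors.groupingBy(Order::customer,
                        Collectors.summingLong(Order::amountCents)));

        System.out.println(totalByCustomer); // e.g. {bob=300, alice=1650}
    }
}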

From source file: org.apache.nifi.cluster.coordination.http.replication.ThreadPoolRequestReplicator.java

/**
 * Replicates the request to all nodes in the given set of node identifiers
 *
 * @param nodeIds             the NodeIdentifiers that identify which nodes to send the request to
 * @param method              the HTTP method to use
 * @param uri                 the URI to send the request to
 * @param entity              the entity to use
 * @param headers             the HTTP Headers
 * @param performVerification whether or not to verify that all nodes in the cluster are connected and that all nodes can perform the request. Ignored if the request is not mutable.
 * @param response            the response to update with the results
 * @param executionPhase      <code>true</code> if this is the execution phase, <code>false</code> otherwise
 * @param monitor             a monitor that will be notified when the request completes (successfully or otherwise)
 * @return an AsyncClusterResponse that can be used to obtain the result
 */
AsyncClusterResponse replicate(final Set<NodeIdentifier> nodeIds, final String method, final URI uri,
        final Object entity, final Map<String, String> headers, final boolean performVerification,
        StandardAsyncClusterResponse response, final boolean executionPhase, final boolean merge,
        final Object monitor) {
    try {
        // state validation
        Objects.requireNonNull(nodeIds);
        Objects.requireNonNull(method);
        Objects.requireNonNull(uri);
        Objects.requireNonNull(entity);
        Objects.requireNonNull(headers);

        if (nodeIds.isEmpty()) {
            throw new IllegalArgumentException("Cannot replicate request to 0 nodes");
        }

        // verify all of the nodes exist and are in the proper state
        for (final NodeIdentifier nodeId : nodeIds) {
            final NodeConnectionStatus status = clusterCoordinator.getConnectionStatus(nodeId);
            if (status == null) {
                throw new UnknownNodeException("Node " + nodeId + " does not exist in this cluster");
            }

            if (status.getState() != NodeConnectionState.CONNECTED) {
                throw new IllegalClusterStateException(
                        "Cannot replicate request to Node " + nodeId + " because the node is not connected");
            }
        }

        logger.debug("Replicating request {} {} with entity {} to {}; response is {}", method, uri, entity,
                nodeIds, response);

        // Update headers to indicate the current revision so that we can
        // prevent multiple users changing the flow at the same time
        final Map<String, String> updatedHeaders = new HashMap<>(headers);
        final String requestId = updatedHeaders.computeIfAbsent(REQUEST_TRANSACTION_ID_HEADER,
                key -> UUID.randomUUID().toString());

        long verifyClusterStateNanos = -1;
        if (performVerification) {
            final long start = System.nanoTime();
            verifyClusterState(method, uri.getPath());
            verifyClusterStateNanos = System.nanoTime() - start;
        }

        int numRequests = responseMap.size();
        if (numRequests >= maxConcurrentRequests) {
            numRequests = purgeExpiredRequests();
        }

        if (numRequests >= maxConcurrentRequests) {
            final Map<String, Long> countsByUri = responseMap.values().stream().collect(
                    Collectors.groupingBy(StandardAsyncClusterResponse::getURIPath, Collectors.counting()));

            logger.error(
                    "Cannot replicate request {} {} because there are {} outstanding HTTP Requests already. Request Counts Per URI = {}",
                    method, uri.getPath(), numRequests, countsByUri);
            throw new IllegalStateException("There are too many outstanding HTTP requests with a total "
                    + numRequests + " outstanding requests");
        }

        // create a response object if one was not already passed to us
        if (response == null) {
            // create the request objects and replicate to all nodes.
            // When the request has completed, we need to ensure that we notify the monitor, if there is one.
            final CompletionCallback completionCallback = clusterResponse -> {
                try {
                    onCompletedResponse(requestId);
                } finally {
                    if (monitor != null) {
                        synchronized (monitor) {
                            monitor.notify();
                        }

                        logger.debug("Notified monitor {} because request {} {} has completed", monitor, method,
                                uri);
                    }
                }
            };

            final Runnable responseConsumedCallback = () -> onResponseConsumed(requestId);

            response = new StandardAsyncClusterResponse(requestId, uri, method, nodeIds, responseMapper,
                    completionCallback, responseConsumedCallback, merge);
            responseMap.put(requestId, response);
        }

        if (verifyClusterStateNanos > -1) {
            response.addTiming("Verify Cluster State", "All Nodes", verifyClusterStateNanos);
        }

        logger.debug("For Request ID {}, response object is {}", requestId, response);

        // if mutable request, we have to do a two-phase commit where we ask each node to verify
        // that the request can take place and then, if all nodes agree that it can, we can actually
        // issue the request. This is all handled by calling performVerification, which will replicate
        // the 'vote' request to all nodes and then if successful will call back into this method to
        // replicate the actual request.
        final boolean mutableRequest = isMutableRequest(method, uri.getPath());
        if (mutableRequest && performVerification) {
            logger.debug("Performing verification (first phase of two-phase commit) for Request ID {}",
                    requestId);
            performVerification(nodeIds, method, uri, entity, updatedHeaders, response, merge, monitor);
            return response;
        } else if (mutableRequest) {
            response.setPhase(StandardAsyncClusterResponse.COMMIT_PHASE);
        }

        // Callback function for generating a NodeHttpRequestCallable that can be used to perform the work
        final StandardAsyncClusterResponse finalResponse = response;
        NodeRequestCompletionCallback nodeCompletionCallback = nodeResponse -> {
            logger.debug("Received response from {} for {} {}", nodeResponse.getNodeId(), method,
                    uri.getPath());
            finalResponse.add(nodeResponse);
        };

        // instruct the node to actually perform the underlying action
        if (mutableRequest && executionPhase) {
            updatedHeaders.put(REQUEST_EXECUTION_HTTP_HEADER, "true");
        }

        // replicate the request to all nodes
        final Function<NodeIdentifier, NodeHttpRequest> requestFactory = nodeId -> new NodeHttpRequest(nodeId,
                method, createURI(uri, nodeId), entity, updatedHeaders, nodeCompletionCallback, finalResponse);
        submitAsyncRequest(nodeIds, uri.getScheme(), uri.getPath(), requestFactory, updatedHeaders);

        return response;
    } catch (final Throwable t) {
        if (monitor != null) {
            synchronized (monitor) {
                monitor.notify();
            }
            logger.debug("Notified monitor {} because request {} {} has failed with Throwable {}", monitor,
                    method, uri, t);
        }

        if (response != null) {
            final RuntimeException failure = (t instanceof RuntimeException) ? (RuntimeException) t
                    : new RuntimeException("Failed to submit Replication Request to background thread", t);
            response.setFailure(failure, new NodeIdentifier());
        }

        throw t;
    }
}

From source file: org.apache.nifi.remote.PeerDescriptionModifier.java

public PeerDescriptionModifier(final NiFiProperties properties) {
    final Map<Tuple<String, String>, List<Tuple<String, String>>> routeDefinitions = properties
            .getPropertyKeys().stream().filter(propertyKey -> propertyKey.startsWith(PROPERTY_PREFIX))
            .map(propertyKey -> {
                final Matcher matcher = PROPERTY_REGEX.matcher(propertyKey);
                if (!matcher.matches()) {
                    throw new IllegalArgumentException(format(
                            "Found an invalid Site-to-Site route definition property '%s'."
                                    + " Routing property keys should be formatted as 'nifi.remote.route.{protocol}.{name}.{routingConfigName}'."
                                    + " Where {protocol} is 'raw' or 'http', and {routingConfigName} is 'when', 'hostname', 'port' or 'secure'.",
                            propertyKey));
                }
                return matcher;
            })
            .collect(Collectors.groupingBy(matcher -> new Tuple<>(matcher.group(1), matcher.group(2)),
                    Collectors.mapping(matcher -> new Tuple<>(matcher.group(3), matcher.group(0)),
                            Collectors.toList())));

    routes = routeDefinitions.entrySet().stream().map(routeDefinition -> {
        final Route route = new Route();
        // E.g. [raw, example1], [http, example2]
        final Tuple<String, String> protocolAndRoutingName = routeDefinition.getKey();
        route.protocol = SiteToSiteTransportProtocol.valueOf(protocolAndRoutingName.getKey().toUpperCase());
        route.name = protocolAndRoutingName.getValue();
        routeDefinition.getValue().forEach(routingConfigNameAndPropertyKey -> {
            final String routingConfigName = routingConfigNameAndPropertyKey.getKey();
            final String propertyKey = routingConfigNameAndPropertyKey.getValue();
            final String routingConfigValue = properties.getProperty(propertyKey);
            try {
                switch (routingConfigName) {
                case "when":
                    route.predicate = Query.prepare(routingConfigValue);
                    break;
                case "hostname":
                    route.hostname = Query.prepare(routingConfigValue);
                    break;
                case "port":
                    route.port = Query.prepare(routingConfigValue);
                    break;
                case "secure":
                    route.secure = Query.prepare(routingConfigValue);
                    break;
                }
            } catch (AttributeExpressionLanguageParsingException e) {
                throw new IllegalArgumentException(format(
                        "Failed to parse NiFi expression language configured"
                                + " for Site-to-Site routing property at '%s' due to '%s'",
                        propertyKey, e.getMessage()), e);
            }
        });
        return route;
    }).map(Route::validate).collect(Collectors.groupingBy(r -> r.protocol));

}

From source file: org.codelibs.fess.app.web.admin.backup.AdminBackupAction.java

public static Consumer<Writer> getSearchLogNdjsonWriteCall() {
    return writer -> {
        final SearchLogBhv bhv = ComponentUtil.getComponent(SearchLogBhv.class);
        bhv.selectCursor(cb -> {
            cb.query().matchAll();
            cb.query().addOrderBy_RequestedAt_Asc();
        }, entity -> {
            final StringBuilder buf = new StringBuilder();
            buf.append('{');
            appendJson("id", entity.getId(), buf).append(',');
            appendJson("query-id", entity.getQueryId(), buf).append(',');
            appendJson("user-info-id", entity.getUserInfoId(), buf).append(',');
            appendJson("user-session-id", entity.getUserSessionId(), buf).append(',');
            appendJson("user", entity.getUser(), buf).append(',');
            appendJson("search-word", entity.getSearchWord(), buf).append(',');
            appendJson("hit-count", entity.getHitCount(), buf).append(',');
            appendJson("query-page-size", entity.getQueryPageSize(), buf).append(',');
            appendJson("query-offset", entity.getQueryOffset(), buf).append(',');
            appendJson("referer", entity.getReferer(), buf).append(',');
            appendJson("languages", entity.getLanguages(), buf).append(',');
            appendJson("roles", entity.getRoles(), buf).append(',');
            appendJson("user-agent", entity.getUserAgent(), buf).append(',');
            appendJson("client-ip", entity.getClientIp(), buf).append(',');
            appendJson("access-type", entity.getAccessType(), buf).append(',');
            appendJson("query-time", entity.getQueryTime(), buf).append(',');
            appendJson("response-time", entity.getResponseTime(), buf).append(',');
            appendJson("requested-at", entity.getRequestedAt(), buf).append(',');
            final Map<String, List<String>> searchFieldMap = entity.getSearchFieldLogList().stream()
                    .collect(Collectors.groupingBy(Pair::getFirst,
                            Collectors.mapping(Pair::getSecond, Collectors.toList())));
            appendJson("search-field", searchFieldMap, buf);
            buf.append('}');
            buf.append('\n');
            try {
                writer.write(buf.toString());
            } catch (final IOException e) {
                throw new IORuntimeException(e);
            }
        });
    };
}

From source file: org.codice.alliance.nsili.common.ResultDAGConverter.java

private static Map<String, List<String>> getAttrMap(List<String> attributes) {
    return attributes.stream().map(ATTRIBUTE_PATTERN::matcher).filter(Matcher::matches).collect(
            Collectors.groupingBy(m -> m.group(2), Collectors.mapping(m -> m.group(3), Collectors.toList())));
}
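
ATTRIBUTE_PATTERN is not shown here, so the self-contained sketch below assumes a hypothetical two-group pattern (prefix and name) to illustrate the same matcher-grouping technique; the original code uses capture groups 2 and 3 of its own pattern:

import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class MatcherGroupingExample {
    // Assumed pattern: "<prefix>.<name>", e.g. "NSIL_CARD.identifier".
    private static final Pattern ATTRIBUTE_PATTERN = Pattern.compile("(\\w+)\\.(\\w+)");

    public static void main(String[] args) {
        List<String> attributes = List.of("NSIL_CARD.identifier", "NSIL_CARD.title", "NSIL_FILE.format");

        // Keep only strings that match, then group the second capture group by the first.
        Map<String, List<String>> attrMap = attributes.stream()
                .map(ATTRIBUTE_PATTERN::matcher)
                .filter(Matcher::matches)
                .collect(Collectors.groupingBy(m -> m.group(1),
                        Collectors.mapping(m -> m.group(2), Collectors.toList())));

        System.out.println(attrMap); // e.g. {NSIL_FILE=[format], NSIL_CARD=[identifier, title]}
    }
}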

From source file: org.codice.ddf.admin.application.service.migratable.FeatureProcessor.java

/**
 * Updates the specified features requirements to mark them required or not.
 *
 * @param report the report where to record errors if unable to update the features
 * @param region the region where to update the features
 * @param jfeatures the features to update
 * @return <code>true</code> if the features were updated successfully; <code>false</code>
 *     otherwise
 */
public boolean updateFeaturesRequirements(ProfileMigrationReport report, String region,
        Set<JsonFeature> jfeatures) {
    return run(report, region, jfeatures.stream().map(JsonFeature::getId), Operation.UPDATE, jfeatures.stream()
            .collect(Collectors.groupingBy(JsonFeature::isRequired,
                    Collectors.mapping(JsonFeature::toRequirement, Collectors.toSet())))
            .entrySet().stream()
            .map(requirementsToUpdate -> updateFeaturesRequirements(region, requirementsToUpdate))
            .toArray(ThrowingRunnable[]::new));
}

From source file: org.eclipse.collections.impl.jmh.AggregateByTest.java

@Benchmark
public Map<Product, DoubleSummaryStatistics> aggregateByProduct_serial_lazy_jdk() {
    Map<Product, DoubleSummaryStatistics> result = this.jdkPositions.stream().collect(Collectors
            .groupingBy(Position::getProduct, Collectors.summarizingDouble(Position::getMarketValue)));
    Assert.assertNotNull(result);
    return result;
}

From source file: org.eclipse.collections.impl.jmh.AggregateByTest.java

@Benchmark
public Map<Product, DoubleSummaryStatistics> aggregateByProduct_serial_lazy_streams_ec() {
    Map<Product, DoubleSummaryStatistics> result = this.ecPositions.stream().collect(Collectors
            .groupingBy(Position::getProduct, Collectors.summarizingDouble(Position::getMarketValue)));
    Assert.assertNotNull(result);
    return result;
}

From source file: org.eclipse.collections.impl.jmh.AggregateByTest.java

@Benchmark
public Map<Account, DoubleSummaryStatistics> aggregateByAccount_serial_lazy_jdk() {
    Map<Account, DoubleSummaryStatistics> accountDoubleMap = this.jdkPositions.stream().collect(Collectors
            .groupingBy(Position::getAccount, Collectors.summarizingDouble(Position::getMarketValue)));
    Assert.assertNotNull(accountDoubleMap);
    return accountDoubleMap;
}
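
These benchmarks use Collectors.summarizingDouble as the downstream collector, producing one DoubleSummaryStatistics (count, sum, min, average, max) per key. A minimal, self-contained sketch of the same combination, with a hypothetical Position record instead of the benchmark's domain classes:

import java.util.DoubleSummaryStatistics;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class GroupingBySummarizingExample {
    // Hypothetical stand-in for the benchmark's Position/Product types.
    record Position(String product, double marketValue) {}

    public static void main(String[] args) {
        List<Position> positions = List.of(
                new Position("bond", 100.0),
                new Position("bond", 250.0),
                new Position("equity", 40.0));

        // Group by product and summarize the market values per product.
        Map<String, DoubleSummaryStatistics> statsByProduct = positions.stream()
                .collect(Collectors.groupingBy(Position::product,
                        Collectors.summarizingDouble(Position::marketValue)));

        // e.g. bond -> DoubleSummaryStatistics{count=2, sum=350.000000, min=100.000000, average=175.000000, max=250.000000}
        statsByProduct.forEach((product, stats) -> System.out.println(product + " -> " + stats));
    }
}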