Example usage for java.util Collection stream

List of usage examples for java.util Collection stream

Introduction

On this page you can find example usages of java.util.Collection#stream.

Prototype

default Stream<E> stream() 

Source Link

Document

Returns a sequential Stream with this collection as its source.

Usage

From source file:com.bemis.portal.customer.service.impl.CustomerProfileLocalServiceImpl.java

/**
 * Collects the Bemis customer ids of all customer locations under the given
 * parent location, skipping empty ids.
 *
 * @param parentLocation the location whose customer locations are inspected
 * @return a set of distinct, non-empty Bemis customer ids; never {@code null}
 */
protected Collection<String> getCustomerLocationBemisIds(CustomerProfile parentLocation) {

    Collection<CustomerProfile> customerLocations = getCustomerLocations(parentLocation);

    if ((customerLocations == null) || customerLocations.isEmpty()) {
        return Collections.emptySet();
    }

    return customerLocations.stream()
            .map(CustomerProfile::getBemisCustomerId)
            // fixed misspelled lambda parameter ("bemisCusomerId")
            .filter(bemisCustomerId -> !StringUtils.isEmpty(bemisCustomerId))
            .collect(Collectors.toSet());
}

From source file:io.neba.core.resourcemodels.registration.ModelRegistry.java

/**
 * Finds all {@link OsgiModelSource model sources} representing models for the given
 * {@link Resource} who's {@link OsgiModelSource#getModelName() model name}
 * matches the given model name./*from   w w w.j  a v  a 2s.c  o m*/
 *
 * @param resource  must not be <code>null</code>.
 * @param modelName can be <code>null</code>.
 * @return never <code>null</code> but rather an empty collection.
 */
private Collection<LookupResult> resolveMostSpecificModelSources(Resource resource, String modelName) {
    Collection<LookupResult> sources = new ArrayList<>();
    for (final String resourceType : mappableTypeHierarchyOf(resource)) {
        Collection<OsgiModelSource<?>> allSourcesForType = this.typeNameToModelSourcesMap.get(resourceType);
        Collection<OsgiModelSource<?>> sourcesWithMatchingModelName = filter(allSourcesForType, modelName);
        if (sourcesWithMatchingModelName != null && !sourcesWithMatchingModelName.isEmpty()) {
            sources.addAll(sourcesWithMatchingModelName.stream()
                    .map(source -> new LookupResult(source, resourceType)).collect(Collectors.toList()));
            break;
        }
    }
    return unmodifiableCollection(sources);
}

From source file:com.yahoo.bard.webservice.data.metric.TemplateDruidQuery.java

/**
 * Gather duplicate names across the collection of Aggregations and PostAggregations.
 *
 * @param aggregations  Set of Aggregations to inspect
 * @param postAggregations  Set of PostAggregations to inspect
 *
 * @return Set of collided names (if any)
 */
private Set<String> getNameCollisions(Collection<Aggregation> aggregations,
        Collection<PostAggregation> postAggregations) {
    // Count each name side-effect-free, then keep those seen more than once.
    // The previous version mutated a HashSet from inside filter(), which is an
    // anti-pattern and unsafe if the pipeline ever runs in parallel.
    return Stream.concat(aggregations.stream(), postAggregations.stream())
            .map(MetricField::getName)
            .collect(Collectors.groupingBy(name -> name, Collectors.counting()))
            .entrySet().stream()
            .filter(entry -> entry.getValue() > 1)
            .map(entry -> entry.getKey())
            .collect(Collectors.toSet());
}

From source file:com.hortonworks.streamline.streams.service.NamespaceCatalogResource.java

/**
 * Bulk-replaces the service-to-cluster mappings of a namespace: every existing
 * mapping is removed and the given mappings are stored in their place.
 *
 * <p>Requires the ROLE_ENVIRONMENT_SUPER_ADMIN role or WRITE permission on the
 * namespace. Responds with 201 (CREATED) carrying the stored mappings.
 * Throws EntityNotFoundException when the namespace does not exist.
 */
@POST
@Path("/namespaces/{id}/mapping/bulk")
@Timed
public Response setServicesToClusterInNamespace(@PathParam("id") Long namespaceId,
        List<NamespaceServiceClusterMapping> mappings, @Context SecurityContext securityContext) {
    SecurityUtil.checkRoleOrPermissions(authorizer, securityContext, Roles.ROLE_ENVIRONMENT_SUPER_ADMIN,
            Namespace.NAMESPACE, namespaceId, WRITE);
    Namespace namespace = environmentService.getNamespace(namespaceId);
    if (namespace == null) {
        throw EntityNotFoundException.byId(namespaceId.toString());
    }

    String streamingEngine = namespace.getStreamingEngine();
    String timeSeriesDB = namespace.getTimeSeriesDB();

    Collection<NamespaceServiceClusterMapping> existing = environmentService
            .listServiceClusterMapping(namespaceId);
    Optional<NamespaceServiceClusterMapping> existingStreamingEngine = existing.stream()
            .filter(m -> m.getServiceName().equals(streamingEngine))
            // this should be only one
            .findFirst();

    // indicates that mapping of streaming engine has been changed or removed
    if (existingStreamingEngine.isPresent() && !mappings.contains(existingStreamingEngine.get())) {
        // changing the streaming engine is only allowed while no topology that
        // refers to this namespace is running
        assertNoTopologyReferringNamespaceIsRunning(namespaceId,
                WSUtils.getUserFromSecurityContext(securityContext));
    }

    // we're OK to just check with new mappings since we will remove existing mappings
    assertServiceIsUnique(mappings, streamingEngine);
    assertServiceIsUnique(mappings, timeSeriesDB);

    // remove any existing mapping for (namespace, service name) pairs
    Collection<NamespaceServiceClusterMapping> existingMappings = environmentService
            .listServiceClusterMapping(namespaceId);
    if (existingMappings != null) {
        existingMappings.forEach(m -> environmentService.removeServiceClusterMapping(m.getNamespaceId(),
                m.getServiceName(), m.getClusterId()));
    }

    // persist the replacement mappings and echo them back to the client
    List<NamespaceServiceClusterMapping> newMappings = mappings.stream()
            .map(environmentService::addOrUpdateServiceClusterMapping).collect(toList());

    return WSUtils.respondEntities(newMappings, CREATED);
}

From source file:ddf.catalog.history.Historian.java

/**
 * Builds version metacards for the given metacards, keyed by the id of the
 * metacard each version was created from. Metacards that are themselves
 * versions or are deleted are skipped; duplicate keys are resolved with
 * {@code Historian::firstInWinsMerge}.
 */
private Map<String, Metacard> getVersionMetacards(Collection<Metacard> metacards,
        Function<String, Action> action, Subject subject) {
    return metacards.stream()
            .filter(m -> MetacardVersionImpl.isNotVersion(m))
            .filter(m -> DeletedMetacardImpl.isNotDeleted(m))
            .map(m -> new MetacardVersionImpl(uuidGenerator.generateUuid(), m,
                    action.apply(m.getId()), subject))
            .collect(Collectors.toMap(v -> v.getVersionOfId(), v -> v,
                    Historian::firstInWinsMerge));
}

From source file:com.modusoperandi.dragonfly.widgets.table.TableWidgetPage.java

/**
 * Repopulates the facet value list from the terms aggregation named "f" in the
 * given search response. Any previously displayed values are cleared first;
 * when the response carries no aggregations the list simply stays empty.
 *
 * @param response the search response to read bucket counts from
 */
private void populateFacet(final SearchResponse response) {

    facetValue = null;
    valueList.clear();

    if (response.getAggregations() != null) {
        Terms f = response.getAggregations().get("f");

        // Plain for-each: stream().forEach() used only for side effects is an
        // anti-pattern; string concatenation also renders the long count
        // without an explicit Long.toString call.
        for (Terms.Bucket bucket : f.getBuckets()) {
            valueList.add(bucket.getKeyAsString() + " (" + bucket.getDocCount() + ")");
        }
    }
}

From source file:com.github.erchu.beancp.commons.NameBasedMapConvention.java

/**
 * Checks whether any of the given predicates accepts the destination binding
 * side's name.
 *
 * @param predicates predicates to evaluate against the binding side's name
 * @param destinationBindingSide binding side whose name is tested
 * @return {@code true} if at least one predicate matches, {@code false} otherwise
 */
private boolean anyPredicateMatch(final Collection<Predicate<String>> predicates,
        final BindingSide destinationBindingSide) {
    // anyMatch already returns false for an empty stream, so the previous
    // explicit isEmpty() guard was redundant.
    return predicates.stream().anyMatch(predicate -> predicate.test(destinationBindingSide.getName()));
}

From source file:com.hortonworks.streamline.streams.service.TopologyTestRunResource.java

/**
 * Returns at most {@code limit} test run cases, newest (highest id) first.
 *
 * @param limit maximum number of entries to return; when {@code null},
 *              {@code DEFAULT_LIST_ENTITIES_COUNT} is used
 * @param cases the cases to sort and truncate
 * @return the cases ordered by id descending, truncated to the limit
 */
private List<TopologyTestRunCase> filterTestRunCases(Integer limit, Collection<TopologyTestRunCase> cases) {
    if (limit == null) {
        limit = DEFAULT_LIST_ENTITIES_COUNT;
    }

    return cases.stream()
            // reverse order by id; Long.compare avoids the overflow/truncation
            // of the previous (int) (h2.getId() - h1.getId()) comparator
            .sorted((h1, h2) -> Long.compare(h2.getId(), h1.getId()))
            .limit(limit)
            .collect(toList());
}

From source file:com.hortonworks.streamline.streams.service.TopologyTestRunResource.java

/**
 * Returns at most {@code limit} test run histories, newest (highest id) first.
 *
 * @param limit maximum number of entries to return; when {@code null},
 *              {@code DEFAULT_LIST_ENTITIES_COUNT} is used
 * @param histories the histories to sort and truncate
 * @return the histories ordered by id descending, truncated to the limit
 */
private List<TopologyTestRunHistory> filterHistories(Integer limit,
        Collection<TopologyTestRunHistory> histories) {
    if (limit == null) {
        limit = DEFAULT_LIST_ENTITIES_COUNT;
    }

    return histories.stream()
            // reverse order by id; Long.compare avoids the overflow/truncation
            // of the previous (int) (h2.getId() - h1.getId()) comparator
            .sorted((h1, h2) -> Long.compare(h2.getId(), h1.getId()))
            .limit(limit)
            .collect(toList());
}

From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.agent.KubernetesV2OnDemandCachingAgent.java

/**
 * Merges the cache results in {@code added} into {@code current}, group by
 * group. For every added entry, an existing entry with the same id (if any) is
 * merged via {@code KubernetesCacheDataConverter.mergeCacheData}; otherwise the
 * added entry is taken as-is. {@code current} is updated in place.
 *
 * @param current the cache results to merge into; groups may be missing or null
 * @param added the freshly-loaded cache results to merge in
 */
protected void mergeCacheResults(Map<String, Collection<CacheData>> current,
        Map<String, Collection<CacheData>> added) {
    // Iterate entries directly instead of keySet() followed by get() lookups.
    for (Map.Entry<String, Collection<CacheData>> entry : added.entrySet()) {
        String group = entry.getKey();

        Collection<CacheData> fromCurrent = current.get(group);
        // effectively final so it can be captured by the lambdas below
        final Collection<CacheData> currentByGroup = (fromCurrent == null) ? new ArrayList<>() : fromCurrent;

        Collection<CacheData> addedByGroup = (entry.getValue() == null) ? new ArrayList<>() : entry.getValue();

        for (CacheData addedCacheData : addedByGroup) {
            // Merge with an existing entry sharing the same id, or fall back to
            // the added entry unchanged. map() replaces the original's
            // convoluted flatMap(cd -> Optional.of(...)).
            CacheData mergedEntry = currentByGroup.stream()
                    .filter(cd -> cd.getId().equals(addedCacheData.getId()))
                    .findFirst()
                    .map(cd -> KubernetesCacheDataConverter.mergeCacheData(cd, addedCacheData))
                    .orElse(addedCacheData);

            currentByGroup.removeIf(cd -> cd.getId().equals(addedCacheData.getId()));
            currentByGroup.add(mergedEntry);
        }

        current.put(group, currentByGroup);
    }
}