Example usage for java.util Set stream

List of usage examples for java.util Set stream

Introduction

On this page you can find example usage for java.util Set.stream().

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source. Set inherits this default method from java.util.Collection.
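
Before the real-world examples below, here is a minimal, self-contained sketch of the method in action. The class name and set contents are illustrative only; note that Set.of requires Java 9+, while stream() itself has been available since Java 8.

import java.util.Set;
import java.util.stream.Collectors;

public class SetStreamExample {
    public static void main(String[] args) {
        // A small illustrative set.
        Set<String> names = Set.of("alpha", "beta", "gamma");

        // stream() returns a sequential Stream<String> with the set as its
        // source; here each element is upper-cased and collected into a new Set.
        Set<String> upper = names.stream()
                .map(String::toUpperCase)
                .collect(Collectors.toSet());

        System.out.println(upper); // e.g. [ALPHA, BETA, GAMMA] (iteration order not guaranteed)
    }
}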

Usage

From source file: com.ggvaidya.scinames.complexquery.ComplexQueryViewController.java

public void updateTableWithNameClusters(Project project, List<NameCluster> nameClusters,
        List<Dataset> datasets) {
    Table<NameCluster, String, Set<String>> precalc = HashBasedTable.create();

    if (nameClusters == null) {
        dataTableView.setItems(FXCollections.emptyObservableList());
        return;
    }
    boolean flag_nameClustersAreTaxonConcepts = false;

    if (nameClusters.size() > 0 && TaxonConcept.class.isAssignableFrom(nameClusters.get(0).getClass()))
        flag_nameClustersAreTaxonConcepts = true;
    dataTableView.setItems(FXCollections.observableList(nameClusters));

    // Precalculate.
    List<String> existingColNames = new ArrayList<>();
    existingColNames.add("id");
    existingColNames.add("name");
    existingColNames.add("names_in_dataset");
    existingColNames.add("all_names_in_cluster");

    // If these are taxon concepts, there are four more columns we want
    // to emit.
    if (flag_nameClustersAreTaxonConcepts) {
        existingColNames.add("name_cluster_id");
        existingColNames.add("starts_with");
        existingColNames.add("ends_with");
        existingColNames.add("is_ongoing");
    } else {
        existingColNames.add("taxon_concept_count");
        existingColNames.add("taxon_concepts");
    }

    // Set<Name> recognizedNamesInDataset = namesDataset.getRecognizedNames(project).collect(Collectors.toSet());

    for (NameCluster cluster : nameClusters) {
        precalc.put(cluster, "id", getOneElementSet(cluster.getId().toString()));

        // Okay, here's what we need to do:
        //   - If names is ALL, then we can't do better than cluster.getName().
        // if(namesDataset == ALL) {
        precalc.put(cluster, "names_in_dataset",
                cluster.getNames().stream().map(n -> n.getFullName()).collect(Collectors.toSet()));
        precalc.put(cluster, "name", getOneElementSet(cluster.getName().getFullName()));
        //} else {
        /*
           // hey, here's something cool we can do: figure out which name(s)
           // this dataset uses from this cluster!
           List<String> namesInDataset = cluster.getNames().stream()
              .filter(n -> recognizedNamesInDataset.contains(n))
              .map(n -> n.getFullName())
              .collect(Collectors.toList());
           String firstName = "";
           if(namesInDataset.size() > 0)
              firstName = namesInDataset.get(0);
                   
           precalc.put(cluster, "names_in_dataset", new HashSet<>(namesInDataset));
           precalc.put(cluster, "name", getOneElementSet(firstName));            
        }*/

        precalc.put(cluster, "all_names_in_cluster",
                cluster.getNames().stream().map(n -> n.getFullName()).collect(Collectors.toSet()));

        // If it's a taxon concept, precalculate a few more columns.
        if (flag_nameClustersAreTaxonConcepts) {
            TaxonConcept tc = (TaxonConcept) cluster;

            precalc.put(cluster, "name_cluster_id", getOneElementSet(tc.getNameCluster().getId().toString()));
            precalc.put(cluster, "starts_with",
                    tc.getStartsWith().stream().map(ch -> ch.toString()).collect(Collectors.toSet()));
            precalc.put(cluster, "ends_with",
                    tc.getEndsWith().stream().map(ch -> ch.toString()).collect(Collectors.toSet()));
            precalc.put(cluster, "is_ongoing", getOneElementSet(tc.isOngoing(project) ? "yes" : "no"));
        } else {
            // If it's a true name cluster, then perhaps people will want
            // to know what taxon concepts are in here? Maybe for some sort
            // of PhD?
            List<TaxonConcept> tcs = cluster.getTaxonConcepts(project);

            precalc.put(cluster, "taxon_concept_count", getOneElementSet(String.valueOf(tcs.size())));
            precalc.put(cluster, "taxon_concepts",
                    tcs.stream().map(tc -> tc.toString()).collect(Collectors.toSet()));
        }

        // Okay, here's where we reconcile!
        for (Name n : cluster.getNames()) {
            // TODO: there's probably an optimization here, in which we should
            // loop on the smaller set (either loop on 'datasets' and compare
            // to cluster, or loop on cluster.foundIn and compare to 'datasets').
            for (Dataset ds : datasets) {
                Map<Name, Set<DatasetRow>> rowsByName = ds.getRowsByName();

                // Are we included in this name cluster? If not, skip!
                if (!cluster.getFoundIn().contains(ds))
                    continue;

                // Check to see if we have any rows for this name; if not, skip.
                if (!rowsByName.containsKey(n))
                    continue;

                Set<DatasetRow> matched = rowsByName.get(n);
                LOGGER.log(Level.FINER, "Adding {0} rows under name ''{1}''",
                        new Object[] { matched.size(), n.getFullName() });

                Map<Set<DatasetColumn>, List<DatasetRow>> rowsByCols = matched.stream()
                        .collect(Collectors.groupingBy((DatasetRow row) -> row.getColumns()));

                for (Set<DatasetColumn> cols : rowsByCols.keySet()) {
                    for (DatasetColumn col : cols) {
                        String colName = col.getName();

                        if (existingColNames.contains(colName))
                            colName = "datasets." + colName;

                        if (!precalc.contains(cluster, colName))
                            precalc.put(cluster, colName, new HashSet<>());

                        for (DatasetRow row : rowsByCols.get(cols)) {
                            if (!row.hasColumn(col))
                                continue;

                            precalc.get(cluster, colName).add(row.get(col));
                        }

                        LOGGER.log(Level.FINER, "Added {0} rows under name ''{1}''",
                                new Object[] { rowsByCols.get(cols).size(), n.getFullName() });
                    }
                }
            }
        }
    }

    dataTableView.getColumns().clear();
    for (String colName : existingColNames) {
        dataTableView.getColumns().add(createColumnFromPrecalc(colName, precalc));
    }

    // Get distinct column names.
    Stream<String> colNames = precalc.cellSet().stream().map(cell -> cell.getColumnKey());

    // Eliminate columns that are in the existingColNames.
    colNames = colNames.filter(colName -> !existingColNames.contains(colName));

    // And add tablecolumns for the rest.
    List<TableColumn<NameCluster, String>> cols = colNames.distinct().sorted()
            .map(colName -> createColumnFromPrecalc(colName, precalc)).collect(Collectors.toList());
    dataTableView.getColumns().addAll(cols);
    dataTableView.refresh();

    // Fill in status text field.
    statusTextField
            .setText(dataTableView.getItems().size() + " rows across " + cols.size() + " reconciled columns");
}

From source file: com.ggvaidya.scinames.complexquery.ComplexQueryViewController.java

public void updateTableWithChangesUsingNameClusters(Project project, List<NameCluster> nameClusters,
        List<Dataset> datasets) {
    Set<Change> changesToDisplay = new HashSet<>();
    for (NameCluster cluster : nameClusters) {
        // Yes, we want to use getAllChanges() here, because we'd like to match eliminated changes too.
        changesToDisplay
                .addAll(datasets.stream().flatMap(ds -> ds.getAllChanges()).collect(Collectors.toSet()));
    }

    List<Change> changes = changesToDisplay.stream().sorted((a, b) -> a.getDataset().compareTo(b.getDataset()))
            .collect(Collectors.toList());

    NameClusterManager ncm = project.getNameClusterManager();

    // And add tablecolumns for the rest.
    dataTableView.getColumns().clear();
    dataTableView.getColumns().addAll(createTableColumnFromChange("id", ch -> ch.getId().toString()),
            createTableColumnFromChange("dataset", ch -> ch.getDataset().getName()),
            createTableColumnFromChange("type", ch -> ch.getType().getType()),
            createTableColumnFromChange("from", ch -> ch.getFromString()),
            createTableColumnFromChange("from_name_cluster_ids",
                    ch -> ncm.getClusters(ch.getFrom()).stream().map(cl -> cl.getId().toString())
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("from_name_clusters",
                    ch -> ncm.getClusters(ch.getFrom()).stream()
                            .map(cl -> cl.getNames().stream().map(n -> n.getFullName())
                                    .collect(Collectors.joining("; ")))
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("to", ch -> ch.getToString()),
            createTableColumnFromChange("to_name_cluster_ids",
                    ch -> ncm.getClusters(ch.getTo()).stream().map(cl -> cl.getId().toString())
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("to_name_clusters",
                    ch -> ncm.getClusters(ch.getTo()).stream()
                            .map(cl -> cl.getNames().stream().map(n -> n.getFullName())
                                    .collect(Collectors.joining("; ")))
                            .collect(Collectors.joining(" and "))),
            createTableColumnFromChange("filter_status",
                    ch -> project.getChangeFilter().test(ch) ? "retained" : "eliminated"),
            createTableColumnFromChange("citations", ch -> ch.getCitationStream().map(cit -> cit.getCitation())
                    .collect(Collectors.joining("; "))));

    dataTableView.getItems().clear();
    dataTableView.getItems().addAll(changes);

    dataTableView.refresh();

    // Fill in status text field.
    statusTextField.setText(dataTableView.getItems().size() + " changes across "
            + changes.stream().map(ch -> ch.getDataset()).distinct().count() + " distinct datasets");
}

From source file: ddf.catalog.source.solr.SolrMetacardClientImpl.java

private void filterAttributes(QueryRequest request, SolrQuery query) {
    if (skipFilteredAttributes(request)) {
        return;
    }

    Set<String> excludedAttributes = (Set<String>) request.getPropertyValue(EXCLUDE_ATTRIBUTES);

    Set<String> excludedFields = resolver.fieldsCache.stream().filter(Objects::nonNull)
            .filter(field -> excludedAttributes.stream().anyMatch(field::startsWith))
            .collect(Collectors.toSet());

    Set<String> wildcardFields = SchemaFields.FORMAT_TO_SUFFIX_MAP.values().stream()
            .filter(suffix -> excludedFields.stream().noneMatch(field -> field.endsWith(suffix)))
            .map(suffix -> "*" + suffix).collect(Collectors.toSet());

    Set<String> includedFields = SchemaFields.FORMAT_TO_SUFFIX_MAP.values().stream()
            .filter(suffix -> excludedFields.stream().anyMatch(field -> field.endsWith(suffix)))
            .flatMap(suffix -> resolver.fieldsCache.stream().filter(Objects::nonNull)
                    .filter(field -> field.endsWith(suffix))
                    .filter(field -> excludedAttributes.stream().noneMatch(field::startsWith)))
            .collect(Collectors.toSet());

    Set<String> fields = Sets.union(includedFields, wildcardFields);

    if (query.getFields() != null && query.getFields().length() > 2) {
        fields = Sets.union(fields, Sets.newHashSet(query.getFields().substring(2).split(",")));
    }

    if (!fields.isEmpty()) {
        query.setFields(fields.toArray(new String[fields.size()]));
    }
}

From source file: org.apache.nifi.remote.util.SiteToSiteRestApiClient.java

/**
 * Parse the comma-separated URLs string for the remote NiFi instances.
 * @return A set containing one or more URLs
 * @throws IllegalArgumentException if the URLs string cannot be parsed,
 * if it mixes protocols (http and https), or if no URL is specified.
 */
public static Set<String> parseClusterUrls(final String clusterUrlStr) {
    final Set<String> urls = new LinkedHashSet<>();
    if (clusterUrlStr != null && clusterUrlStr.length() > 0) {
        Arrays.stream(clusterUrlStr.split(",")).map(s -> s.trim()).filter(s -> s.length() > 0).forEach(s -> {
            validateUriString(s);
            urls.add(resolveBaseUrl(s).intern());
        });
    }

    if (urls.size() == 0) {
        throw new IllegalArgumentException("Cluster URL was not specified.");
    }

    final Predicate<String> isHttps = url -> url.toLowerCase().startsWith("https:");
    if (urls.stream().anyMatch(isHttps) && urls.stream().anyMatch(isHttps.negate())) {
        throw new IllegalArgumentException("Different protocols are used in the cluster URLs " + clusterUrlStr);
    }

    return Collections.unmodifiableSet(urls);
}
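
As a quick usage sketch (the host names below are invented; the exact normalization performed by the internal resolveBaseUrl() is not shown):

// Hypothetical call; whitespace around the comma is trimmed before parsing.
Set<String> urls = SiteToSiteRestApiClient.parseClusterUrls(
        "http://node1:8080/nifi, http://node2:8080/nifi");
// urls is an unmodifiable set of two normalized base URLs,
// preserved in input order (it is backed by a LinkedHashSet).

// Mixing http and https in one string fails fast:
// parseClusterUrls("http://node1:8080/nifi,https://node2:8443/nifi");
//   -> IllegalArgumentException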

From source file: org.fcrepo.apix.jena.impl.JenaServiceRegistry.java

@Override
public void update() {

    // For all resources in the registry, get the URIs of everything that calls itself a Service, or is explicitly
    // registered as a service

    final Set<URI> serviceURIs = Stream.concat(super.list().stream().map(this::get).map(Util::parse)
            .flatMap(m -> m.listSubjectsWithProperty(m.getProperty(RDF_TYPE), m.getResource(CLASS_SERVICE))
                    .mapWith(Resource::getURI).toSet().stream().map(URI::create)),
            objectResourcesOf(null, PROP_CONTAINS_SERVICE, parse(this.get(registryContainer))).stream())
            .collect(Collectors.toSet());

    // Map canonical URI to service resource. If multiple service resources
    // indicate the same canonical URI, pick one arbitrarily.
    final Map<URI, URI> canonical = serviceURIs.stream().flatMap(this::attemptLookupService)
            .collect(Collectors.toMap(s -> s.canonicalURI(), s -> s.uri(), (a, b) -> a));

    canonicalUriMap.putAll(canonical);

    canonicalUriMap.keySet().removeIf(k -> !canonical.containsKey(k));
}

From source file: com.thinkbiganalytics.feedmgr.rest.controller.TemplatesRestController.java

@GET
@Path("/registered/{templateId}/actions/change")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Constructs and returns a permission change request for a set of users/groups containing the actions that the requester may permit or revoke.")
@ApiResponses({
        @ApiResponse(code = 200, message = "Returns the change request that may be modified by the client and re-posted.", response = PermissionsChange.class),
        @ApiResponse(code = 400, message = "The type is not valid.", response = RestResponseStatus.class),
        @ApiResponse(code = 404, message = "No template exists with the specified ID.", response = RestResponseStatus.class) })
public Response getAllowedPermissionsChange(@PathParam("templateId") String templateIdStr,
        @QueryParam("type") String changeType, @QueryParam("user") Set<String> userNames,
        @QueryParam("group") Set<String> groupNames) {
    if (StringUtils.isBlank(changeType)) {
        throw new WebApplicationException("The query parameter \"type\" is required", Status.BAD_REQUEST);
    }

    Set<? extends Principal> users = Arrays.stream(this.securityTransform.asUserPrincipals(userNames))
            .collect(Collectors.toSet());
    Set<? extends Principal> groups = Arrays.stream(this.securityTransform.asGroupPrincipals(groupNames))
            .collect(Collectors.toSet());

    return this.securityService
            .createTemplatePermissionChange(templateIdStr, ChangeType.valueOf(changeType.toUpperCase()),
                    Stream.concat(users.stream(), groups.stream()).collect(Collectors.toSet()))
            .map(p -> Response.ok(p).build()).orElseThrow(() -> new WebApplicationException(
                    "A template with the given ID does not exist: " + templateIdStr, Status.NOT_FOUND));
}

From source file: com.thinkbiganalytics.feedmgr.rest.controller.DatasourceController.java

@GET
@Path("{id}/actions/change")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Constructs and returns a permission change request for a set of users/groups containing the actions that the requester may permit or revoke.")
@ApiResponses({
        @ApiResponse(code = 200, message = "Returns the change request that may be modified by the client and re-posted.", response = PermissionsChange.class),
        @ApiResponse(code = 400, message = "The type is not valid.", response = RestResponseStatus.class),
        @ApiResponse(code = 404, message = "No data source exists with the specified ID.", response = RestResponseStatus.class) })
public Response getAllowedPermissionsChange(@PathParam("id") final String datasourceIdStr,
        @QueryParam("type") final String changeType, @QueryParam("user") final Set<String> userNames,
        @QueryParam("group") final Set<String> groupNames) {
    if (StringUtils.isBlank(changeType)) {
        throw new WebApplicationException("The query parameter \"type\" is required",
                Response.Status.BAD_REQUEST);
    }

    Set<? extends Principal> users = Arrays.stream(this.securityTransform.asUserPrincipals(userNames))
            .collect(Collectors.toSet());
    Set<? extends Principal> groups = Arrays.stream(this.securityTransform.asGroupPrincipals(groupNames))
            .collect(Collectors.toSet());

    return this.securityService
            .createDatasourcePermissionChange(datasourceIdStr,
                    PermissionsChange.ChangeType.valueOf(changeType.toUpperCase()),
                    Stream.concat(users.stream(), groups.stream()).collect(Collectors.toSet()))
            .map(p -> Response.ok(p).build())
            .orElseThrow(() -> new WebApplicationException(
                    "A data source with the given ID does not exist: " + datasourceIdStr,
                    Response.Status.NOT_FOUND));
}

From source file: azkaban.flowtrigger.database.JdbcFlowTriggerInstanceLoaderImpl.java

@Override
public int deleteTriggerExecutionsFinishingOlderThan(final long timestamp) {
    try {
        final Collection<TriggerInstance> res = this.dbOperator.query(SELECT_EXECUTION_OLDER_THAN,
                new TriggerInstanceHandler(SORT_MODE.SORT_ON_START_TIME_DESC), timestamp);
        final Set<String> toBeDeleted = new HashSet<>();
        for (final TriggerInstance inst : res) {
            if ((inst.getStatus() == Status.CANCELLED
                    || (inst.getStatus() == Status.SUCCEEDED && inst.getFlowExecId() != -1))
                    && inst.getEndTime() <= timestamp) {
                toBeDeleted.add(inst.getId());
            }
        }
        int numDeleted = 0;
        if (!toBeDeleted.isEmpty()) {
            final String ids = toBeDeleted.stream().map(s -> "'" + s + "'").collect(Collectors.joining(", "));
            numDeleted = this.dbOperator.update(DELETE_EXECUTIONS.replace("?", ids));
        }
        logger.info("{} dependency instance record(s) deleted", numDeleted);
        return numDeleted;
    } catch (final SQLException ex) {
        handleSQLException(ex);
        return 0;
    }
}

From source file: com.haulmont.cuba.security.app.EntityLog.java

protected void internalRegisterModifyAttributeValue(CategoryAttributeValue entity,
        @Nullable EntityAttributeChanges changes, Set<String> attributes) {
    String propertyName = DynamicAttributesUtils.encodeAttributeCode(entity.getCode());
    if (!attributes.contains(propertyName)) {
        return;
    }

    Date ts = timeSource.currentTimestamp();
    EntityManager em = persistence.getEntityManager();

    Set<String> dirty;
    if (changes == null) {
        dirty = persistence.getTools().getDirtyFields(entity);
    } else {
        dirty = changes.getAttributes();
    }
    boolean registerDeleteOp = dirty.contains("deleteTs") && entity.isDeleted();
    boolean hasChanges = dirty.stream().anyMatch(s -> s.endsWith("Value"));
    if (hasChanges) {
        EntityLogItem item = metadata.create(EntityLogItem.class);
        item.setEventTs(ts);
        item.setUser(findUser(em));
        item.setType(EntityLogItem.Type.MODIFY);
        item.setEntity(getEntityName(entity));
        item.setObjectEntityId(entity.getObjectEntityId());
        item.setAttributes(createDynamicLogAttribute(entity, changes, registerDeleteOp));

        enqueueItem(item);
    }
}