Example usage for java.util LinkedHashSet iterator

List of usage examples for java.util LinkedHashSet iterator

Introduction

On this page you can find example usage of java.util.LinkedHashSet.iterator().

Prototype

Iterator<E> iterator();

Source Link

Document

Returns an iterator over the elements in this set.
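
Before the full examples below, here is a minimal, self-contained sketch (class name and element values are invented for illustration) showing the basic behaviour: the iterator of a LinkedHashSet visits elements in insertion order.

import java.util.Iterator;
import java.util.LinkedHashSet;

public class LinkedHashSetIteratorDemo {
    public static void main(String[] args) {
        // LinkedHashSet preserves insertion order, so the iterator
        // visits "b", "a", "c" in exactly the order they were added.
        LinkedHashSet<String> set = new LinkedHashSet<>();
        set.add("b");
        set.add("a");
        set.add("c");

        Iterator<String> it = set.iterator();
        while (it.hasNext()) {
            System.out.println(it.next()); // prints b, then a, then c
        }
    }
}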

Usage

From source file:com.ge.predix.acs.service.policy.evaluation.PolicyEvaluationServiceImpl.java

@Override
public PolicyEvaluationResult evalPolicy(final PolicyEvaluationRequestV1 request) {
    ZoneEntity zone = this.zoneResolver.getZoneEntityOrFail();
    String uri = request.getResourceIdentifier();
    String subjectIdentifier = request.getSubjectIdentifier();
    String action = request.getAction();
    LinkedHashSet<String> policySetsEvaluationOrder = request.getPolicySetsEvaluationOrder();

    if (uri == null || subjectIdentifier == null || action == null) {
        LOGGER.error(String.format(
                "Policy evaluation request is missing required input parameters: "
                        + "resourceURI='%s' subjectIdentifier='%s' action='%s'",
                uri, subjectIdentifier, action));

        throw new IllegalArgumentException("Policy evaluation request is missing required input parameters. "
                + "Please review and resubmit the request.");
    }

    List<PolicySet> allPolicySets = this.policyService.getAllPolicySets();

    if (allPolicySets.isEmpty()) {
        return new PolicyEvaluationResult(Effect.NOT_APPLICABLE);
    }

    LinkedHashSet<PolicySet> filteredPolicySets = filterPolicySetsByPriority(subjectIdentifier, uri,
            allPolicySets, policySetsEvaluationOrder);

    // At this point, an empty evaluation order means we have only one policy set.
    // Fix the policy evaluation order so that we can build a cache key.
    PolicyEvaluationRequestCacheKey key;
    if (policySetsEvaluationOrder.isEmpty()) {
        key = new Builder().zoneId(zone.getName())
                .policySetIds(Stream.of(filteredPolicySets.iterator().next().getName())
                        .collect(Collectors.toCollection(LinkedHashSet::new)))
                .request(request).build();
    } else {
        key = new Builder().zoneId(zone.getName()).request(request).build();
    }

    PolicyEvaluationResult result = this.cache.get(key);
    if (null == result) {
        result = new PolicyEvaluationResult(Effect.NOT_APPLICABLE);

        HashSet<Attribute> supplementalResourceAttributes;
        if (null == request.getResourceAttributes()) {
            supplementalResourceAttributes = new HashSet<>();
        } else {
            supplementalResourceAttributes = new HashSet<>(request.getResourceAttributes());
        }
        HashSet<Attribute> supplementalSubjectAttributes;
        if (null == request.getSubjectAttributes()) {
            supplementalSubjectAttributes = new HashSet<>();
        } else {
            supplementalSubjectAttributes = new HashSet<>(request.getSubjectAttributes());
        }

        for (PolicySet policySet : filteredPolicySets) {
            result = evalPolicySet(policySet, subjectIdentifier, uri, action, supplementalResourceAttributes,
                    supplementalSubjectAttributes);
            if (result.getEffect() == Effect.NOT_APPLICABLE) {
                continue;
            } else {
                break;
            }
        }

        LOGGER.info(
                String.format(
                        "Processed Policy Evaluation for: "
                                + "resourceUri='%s', subjectIdentifier='%s', action='%s'," + " result='%s'",
                        uri, subjectIdentifier, action, result.getEffect()));
        this.cache.set(key, result);
    }
    return result;
}
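
The call filteredPolicySets.iterator().next() above takes the first element of the LinkedHashSet, i.e. the first policy set that was inserted. A minimal sketch of that idiom in isolation (the class and the policy-set names are hypothetical):

import java.util.LinkedHashSet;

public class FirstElementOfLinkedHashSet {
    public static void main(String[] args) {
        LinkedHashSet<String> policySetIds = new LinkedHashSet<>();
        policySetIds.add("default-policy-set");
        policySetIds.add("secondary-policy-set");

        // iterator().next() returns the first inserted element; it throws
        // NoSuchElementException if the set is empty, so check isEmpty() first
        // unless, as in the code above, the set is known to be non-empty.
        String first = policySetIds.iterator().next(); // "default-policy-set"
        System.out.println(first);
    }
}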

From source file:ubic.gemma.core.loader.expression.geo.model.GeoValues.java

/**
 * This creates a new GeoValues that has data only for the selected samples. The quantitation type information will
 * be semi-deep copies. This is only needed when we are splitting a series apart, especially when it is not
 * along Platform lines.
 *
 * @param samples samples
 * @return geo values
 */
public GeoValues subset(Collection<GeoSample> samples) {

    GeoValues v = new GeoValues();

    /*
     * First, create new sampleDimensions and start setting up empty data.
     */
    for (GeoSample s : samples) {
        GeoPlatform p = s.getPlatforms().iterator().next();

        if (!v.sampleDimensions.containsKey(p)) {
            v.sampleDimensions.put(p, new HashMap<Integer, LinkedHashSet<GeoSample>>());

            // deep copy.
            for (Integer o : this.sampleDimensions.get(p).keySet()) {
                v.sampleDimensions.get(p).put(o, new LinkedHashSet<GeoSample>());
                for (GeoSample ss : this.sampleDimensions.get(p).get(o)) {
                    v.sampleDimensions.get(p).get(o).add(ss); // could use addAll
                }
            }

            v.data.put(p, new HashMap<Integer, Map<String, List<Object>>>());
            for (Integer o : this.data.get(p).keySet()) {
                v.data.get(p).put(o, new HashMap<String, List<Object>>());

                for (String probeId : this.data.get(p).get(o).keySet()) {
                    v.data.get(p).get(o).put(probeId, new ArrayList<>());
                }
            }
        }
    }

    /*
     * Then, subset the data.
     */
    for (GeoPlatform p : v.sampleDimensions.keySet()) {
        for (Integer o : v.sampleDimensions.get(p).keySet()) {
            LinkedHashSet<GeoSample> dimsamples = v.sampleDimensions.get(p).get(o);

            int i = 0;
            for (Iterator<GeoSample> it = dimsamples.iterator(); it.hasNext();) {
                GeoSample geoSample = it.next();

                if (samples.contains(geoSample)) {

                    Map<String, List<Object>> newmap = v.data.get(p).get(o);
                    for (String probeId : newmap.keySet()) {
                        newmap.get(probeId).add(this.data.get(p).get(o).get(probeId).get(i));
                    }

                } else {
                    // this is where we remove the unneeded samples from the sampleDimensions.
                    it.remove();
                }

                i++;
            }

        }
    }

    /*
     * The quantitation type maps can just be copied over; no deep copy is needed.
     */
    v.quantitationTypeIndexMap.putAll(this.quantitationTypeIndexMap);
    v.quantitationTypeNameMap.putAll(this.quantitationTypeNameMap);

    return v;
}
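
The subset method above relies on Iterator.remove() to drop samples while walking the LinkedHashSet. A minimal, hypothetical sketch of that removal pattern, independent of the Geo classes (sample names are invented):

import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Set;

public class RemoveDuringIteration {
    public static void main(String[] args) {
        LinkedHashSet<String> dimsamples = new LinkedHashSet<>(Arrays.asList("GSM1", "GSM2", "GSM3"));
        Set<String> wanted = new LinkedHashSet<>(Arrays.asList("GSM1", "GSM3"));

        // Iterator.remove() is the safe way to remove elements mid-iteration;
        // calling dimsamples.remove(...) directly inside the loop would risk a
        // ConcurrentModificationException.
        for (Iterator<String> it = dimsamples.iterator(); it.hasNext();) {
            if (!wanted.contains(it.next())) {
                it.remove();
            }
        }

        System.out.println(dimsamples); // [GSM1, GSM3], insertion order preserved
    }
}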

From source file:ubic.gemma.loader.expression.geo.model.GeoValues.java

/**
 * This creates a new GeoValues that has data only for the selected samples. The quantitation type information will
 * be semi-deep copies. This is only needed when we are splitting a series apart, especially when it is not
 * along Platform lines.
 *
 * @param samples samples
 * @return geo values
 */
public GeoValues subset(Collection<GeoSample> samples) {

    GeoValues v = new GeoValues();

    /*
     * First, create new sampleDimensions and start setting up empty data.
     */
    for (GeoSample s : samples) {
        GeoPlatform p = s.getPlatforms().iterator().next();

        if (!v.sampleDimensions.containsKey(p)) {
            v.sampleDimensions.put(p, new HashMap<Object, LinkedHashSet<GeoSample>>());

            // deep copy.
            for (Object o : this.sampleDimensions.get(p).keySet()) {
                v.sampleDimensions.get(p).put(o, new LinkedHashSet<GeoSample>());
                for (GeoSample ss : this.sampleDimensions.get(p).get(o)) {
                    v.sampleDimensions.get(p).get(o).add(ss); // could use addAll
                }
            }

            v.data.put(p, new HashMap<Object, Map<String, List<Object>>>());
            for (Object o : this.data.get(p).keySet()) {
                v.data.get(p).put(o, new HashMap<String, List<Object>>());

                for (String probeId : this.data.get(p).get(o).keySet()) {
                    v.data.get(p).get(o).put(probeId, new ArrayList<Object>());
                }
            }
        }
    }

    /*
     * Then, subset the data.
     */
    for (GeoPlatform p : v.sampleDimensions.keySet()) {
        for (Object o : v.sampleDimensions.get(p).keySet()) {
            LinkedHashSet<GeoSample> dimsamples = v.sampleDimensions.get(p).get(o);

            int i = 0;
            for (Iterator<GeoSample> it = dimsamples.iterator(); it.hasNext();) {
                GeoSample geoSample = it.next();

                if (samples.contains(geoSample)) {

                    Map<String, List<Object>> newmap = v.data.get(p).get(o);
                    for (String probeId : newmap.keySet()) {
                        newmap.get(probeId).add(this.data.get(p).get(o).get(probeId).get(i));
                    }

                } else {
                    // this is where we remove the unneeded samples from the sampleDimensions.
                    it.remove();
                }

                i++;
            }

        }
    }

    /*
     * The quantitation type maps can just be copied over; no deep copy is needed.
     */
    v.quantitationTypeIndexMap.putAll(this.quantitationTypeIndexMap);
    v.quantitationTypeNameMap.putAll(this.quantitationTypeNameMap);

    return v;
}

From source file:ColumnStorage.ColumnProject.java

public ArrayList<String> getFileNameByIndex(ArrayList<Short> idx) {
    if (idx == null) {
        return null;
    }

    if (idx.size() == 0) {
        return null;
    }

    if (infos.size() == 0) {
        return null;
    }

    LinkedHashSet<String> result = new LinkedHashSet<String>();
    short foundTimes = 0;
    int size = idx.size();
    int count = 0;
    for (int i = 0; i < size; i++) {
        count = 0;
        while (count < infos.size()) {
            ColumnInfo info = infos.get(count);

            if (!info.idxs.contains(idx.get(i))) {

                count++;
                continue;
            } else {
                foundTimes++;
                result.add(info.name);
                break;
            }
        }
    }

    if (foundTimes == size) {
        ArrayList<String> rrArrayList = new ArrayList<String>();
        Iterator<String> iterator = result.iterator();
        while (iterator.hasNext()) {
            rrArrayList.add(iterator.next());
        }

        return rrArrayList;
    } else {
        return null;
    }
}
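
The explicit iterate-and-copy loop at the end of getFileNameByIndex can also be written with the ArrayList copy constructor, which walks the set's iterator internally and therefore keeps insertion order. A brief sketch (file names are invented):

import java.util.ArrayList;
import java.util.LinkedHashSet;

public class CopyLinkedHashSetToList {
    public static void main(String[] args) {
        LinkedHashSet<String> result = new LinkedHashSet<>();
        result.add("column_a.dat");
        result.add("column_b.dat");

        // Equivalent to looping over result.iterator() and adding each element.
        ArrayList<String> fileNames = new ArrayList<>(result);
        System.out.println(fileNames); // [column_a.dat, column_b.dat]
    }
}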

From source file:com.alibaba.wasp.plan.parser.druid.DruidDMLParser.java

public Pair<List<Pair<String, byte[]>>, List<ColumnStruct>> buildFieldsPair(
        MetaEventOperation metaEventOperation, FTable table, LinkedHashSet<String> columns, ValuesClause values)
        throws IOException {
    // Convert each field of a row into a byte[] value
    List<SQLExpr> exprValues = values.getValues();
    if (exprValues.size() != columns.size()) {
        throw new IOException("Insert clause has " + columns.size() + " columns but "
                + exprValues.size() + " values");
    }
    Pair<String, byte[]>[] array = new Pair[table.getPrimaryKeys().size()];
    // Construct all ColumnAction
    List<ColumnStruct> cols = new ArrayList<ColumnStruct>(columns.size());
    assert (columns.size() == exprValues.size());
    Iterator<String> iter = columns.iterator();
    int i = 0;
    while (iter.hasNext()) {
        String columnName = iter.next();
        // Get the column's info
        Field column = metaEventOperation.getColumnInfo(table, columnName);
        byte[] value = convert(column, exprValues.get(i));
        Iterator<Entry<String, Field>> pkIter = table.getPrimaryKeys().entrySet().iterator();
        int j = 0;
        while (pkIter.hasNext()) {
            if (pkIter.next().getKey().equalsIgnoreCase(columnName)) {
                array[j] = new Pair<String, byte[]>(columnName, value);
                break;
            }
            j++;
        }
        // Check the input is the same as DataType
        checkType(column, exprValues.get(i));
        ColumnStruct columnAction = new ColumnStruct(table.getTableName(), column.getFamily(), columnName,
                column.getType(), value);
        cols.add(columnAction);
        i++;
    }

    return new Pair<List<Pair<String, byte[]>>, List<ColumnStruct>>(Arrays.asList(array), cols);
}
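
buildFieldsPair walks the LinkedHashSet of column names with an explicit iterator while indexing the value list by position; this only works because the set preserves insertion order, so the i-th value belongs to the i-th column. A simplified, hypothetical sketch of that pairing pattern (column names and values are invented):

import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;

public class PairColumnsWithValues {
    public static void main(String[] args) {
        LinkedHashSet<String> columns = new LinkedHashSet<>(Arrays.asList("id", "name", "price"));
        List<String> values = Arrays.asList("42", "widget", "9.99");

        // The iterator yields columns in insertion order, matching values by index.
        Iterator<String> iter = columns.iterator();
        int i = 0;
        while (iter.hasNext()) {
            System.out.println(iter.next() + " = " + values.get(i));
            i++;
        }
    }
}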