Example usage for java.util.LinkedHashSet LinkedHashSet()

List of usage examples for the java.util.LinkedHashSet LinkedHashSet() constructor

Introduction

On this page you can find example usages of the java.util.LinkedHashSet no-argument constructor, LinkedHashSet().

Prototype

public LinkedHashSet() 

Document

Constructs a new, empty linked hash set with the default initial capacity (16) and load factor (0.75).
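
The following snippet is a minimal, self-contained sketch (not taken from the source files listed below) illustrating what this constructor gives you: an empty set that rejects duplicates while preserving insertion order during iteration.

import java.util.LinkedHashSet;
import java.util.Set;

public class LinkedHashSetExample {
    public static void main(String[] args) {
        // Empty set with the default initial capacity (16) and load factor (0.75).
        Set<String> names = new LinkedHashSet<>();
        names.add("charlie");
        names.add("alice");
        names.add("bob");
        names.add("alice"); // duplicate; the set is unchanged

        // Prints charlie, alice, bob - iteration follows insertion order.
        for (String name : names) {
            System.out.println(name);
        }
    }
}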

Usage

From source file: com.pimp.companionforband.utils.jsontocsv.writer.CSVWriter.java

private LinkedHashSet<String> collectHeaders(List<LinkedHashMap<String, String>> flatJson) {
    LinkedHashSet<String> headers = new LinkedHashSet<>();
    for (LinkedHashMap<String, String> map : flatJson) {
        headers.addAll(map.keySet());
    }
    return headers;
}

From source file: facebook4j.TargetingParameter.java

public TargetingParameter regions(Collection<String> regions) {
    if (this.regions == null) {
        this.regions = new LinkedHashSet<String>();
    }
    this.regions.addAll(regions);
    return this;
}

From source file: net.audumla.climate.ClimateDataFactory.java

/**
 * Replaces the climate data with a writable version.
 *
 * @param cd the existing climate data bean
 * @return the climate data
 */
public static WritableClimateData convertToWritableClimateData(ClimateData cd) {
    if (cd == null) {
        return null;
    }
    Set<Class<?>> interfaces = new LinkedHashSet<Class<?>>();
    interfaces.addAll(ClassUtils.getAllInterfaces(cd.getClass()));
    return BeanUtils.convertBean(cd, WritableClimateData.class,
            interfaces.toArray(new Class<?>[interfaces.size()]));
}

From source file: com.brienwheeler.lib.db.MergingPersistenceUnitPostProcessor.java

/**
 * Post-process the persistence unit information.  If we have seen this persistence
 * unit name before, merge any newly-defined classes in.  Also, if any properties
 * are set on this post-processor, set them onto the target PersistenceUnitInfo.
 */
@Override
public synchronized void postProcessPersistenceUnitInfo(MutablePersistenceUnitInfo pui) {
    ValidationUtils.assertNotNull(pui, "persistenceUnitInfo cannot be null");
    Set<String> classes = puiClasses.get(pui.getPersistenceUnitName());
    if (classes == null) {
        classes = new LinkedHashSet<String>();
        puiClasses.put(pui.getPersistenceUnitName(), classes);
    }
    pui.getManagedClassNames().addAll(classes);
    classes.addAll(pui.getManagedClassNames());

    if (properties != null)
        pui.setProperties(properties);
}

From source file: edu.uci.ics.jung.graph.OrderedSparseMultigraph.java

@Override
public Collection<V> getSuccessors(V vertex) {
    if (!containsVertex(vertex))
        return null;

    Set<V> succs = new LinkedHashSet<V>();
    for (E edge : getOutgoing_internal(vertex)) {
        if (getEdgeType(edge) == EdgeType.DIRECTED) {
            succs.add(this.getDest(edge));
        } else {
            succs.add(getOpposite(vertex, edge));
        }
    }
    return Collections.unmodifiableCollection(succs);
}

From source file: com.streamsets.pipeline.stage.processor.fieldfilter.FieldFilterProcessor.java

@Override
protected void process(Record record, SingleLaneBatchMaker batchMaker) throws StageException {
    // use List to preserve the order of list fieldPaths - need to watch out for duplicates though
    List<String> allFieldPaths = record.getEscapedFieldPathsOrdered();
    // use LinkedHashSet to preserve order and dedupe as we go
    LinkedHashSet<String> fieldsToRemove;
    switch (filterOperation) {
    case REMOVE:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            fieldsToRemove.addAll(matchingFieldPaths);
        }
        break;
    case REMOVE_NULL:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() == null) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_EMPTY:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() != null
                        && record.get(fieldPath).getValue().equals("")) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_NULL_EMPTY:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && (record.get(fieldPath).getValue() == null
                        || record.get(fieldPath).getValue().equals(""))) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_CONSTANT:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() != null
                        && record.get(fieldPath).getValue().equals(constant)) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case KEEP:
        //Algorithm:
        // - Get all possible field paths in the record
        //
        // - Remove the argument fields that must be retained, their parent fields, and their child fields from the
        //   above set (accounting for wild card characters while doing so). The remaining set of fields is what must
        //   be removed from the record.
        //
        // - Keep fieldsToRemove in order - sorting is too costly
        //List all the possible field paths in this record
        fieldsToRemove = new LinkedHashSet<>(allFieldPaths);
        for (String field : fields) {
            //Keep parent fields
            //get the parent fieldPaths for each of the fields to keep
            List<String> parentFieldPaths = getParentFields(field);
            //remove parent paths from the fieldsToRemove set
            //Note that parent names could contain wild card characters
            for (String parentField : parentFieldPaths) {
                List<String> matchingFieldPaths = FieldRegexUtil.getMatchingFieldPaths(parentField,
                        allFieldPaths);
                fieldsToRemove.removeAll(matchingFieldPaths);
            }

            //Keep the field itself
            //remove the field path itself from the fieldsToRemove set
            //Consider wild card characters
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            fieldsToRemove.removeAll(matchingFieldPaths);

            //Keep the children of the field
            //For each of the fieldPaths that match the argument field path, remove all the child paths
            // Remove children at the end to avoid ConcurrentModificationException
            Set<String> childrenToKeep = new HashSet<>();
            for (String matchingFieldPath : matchingFieldPaths) {
                for (String fieldToRemove : fieldsToRemove) {
                    // for the old way, startsWith is appropriate when we have
                    // different path structures, or "nested" (multiple dimensioned) index structures.
                    //  eg: /USA[0]/SanFrancisco/folsom/streets[0] must still match:
                    //      /USA[0]/SanFrancisco/folsom/streets[0][0]   hence: startsWith.
                    if (StringUtils.countMatches(fieldToRemove, "/") == StringUtils
                            .countMatches(matchingFieldPath, "/")
                            && StringUtils.countMatches(fieldToRemove, "[") == StringUtils
                                    .countMatches(matchingFieldPath, "[")) {
                        if (fieldToRemove.equals(matchingFieldPath)) {
                            childrenToKeep.add(fieldToRemove);
                        }
                    } else {
                        if (fieldToRemove.startsWith(matchingFieldPath)) {
                            childrenToKeep.add(fieldToRemove);
                        }
                    }
                }
            }
            fieldsToRemove.removeAll(childrenToKeep);
        }
        break;
    default:
        throw new IllegalStateException(
                Utils.format("Unexpected Filter Operation '{}'", filterOperation.name()));
    }
    // We don't sort because we maintained list fields in ascending order (but not a full ordering)
    // Instead we just iterate in reverse to delete
    Iterator<String> itr = (new LinkedList<>(fieldsToRemove)).descendingIterator();
    while (itr.hasNext()) {
        record.delete(itr.next());
    }
    batchMaker.addRecord(record);
}

From source file: com.netflix.discovery.shared.Application.java

public Application() {
    instances = new LinkedHashSet<InstanceInfo>();
    instancesMap = new ConcurrentHashMap<String, InstanceInfo>();
}

From source file: fr.mby.utils.spring.beans.factory.annotation.ProxywiredAnnotationBeanPostProcessor.java

/**
 * Copy paste from AutowiredAnnotationBeanPostProcessor with Proxywired type added.
 */
@SuppressWarnings("unchecked")
protected ProxywiredAnnotationBeanPostProcessor() {
    super();

    final Set<Class<? extends Annotation>> autowiredAnnotationTypes = new LinkedHashSet<Class<? extends Annotation>>();
    autowiredAnnotationTypes.add(Autowired.class);
    autowiredAnnotationTypes.add(Value.class);
    autowiredAnnotationTypes.add(ProxywiredAnnotationBeanPostProcessor.PROXY_ANNOTATION);
    final ClassLoader cl = AutowiredAnnotationBeanPostProcessor.class.getClassLoader();
    try {
        autowiredAnnotationTypes.add((Class<? extends Annotation>) cl.loadClass("javax.inject.Inject"));
        this.logger.info("JSR-330 'javax.inject.Inject' annotation found and supported for autowiring");
    } catch (final ClassNotFoundException ex) {
        // JSR-330 API not available - simply skip.
    }

    super.setAutowiredAnnotationTypes(autowiredAnnotationTypes);
}

From source file: ws.salient.model.Module.java

public Module withInclude(String include) {
    if (includes == null) {
        includes = new LinkedHashSet();
    }
    includes.add(include);
    return this;
}