Example usage for java.util LinkedHashSet add

List of usage examples for java.util LinkedHashSet add

Introduction

On this page you can find example usage for java.util LinkedHashSet add.

Prototype

boolean add(E e);

Document

Adds the specified element to this set if it is not already present (optional operation).
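
A minimal, self-contained sketch of the add contract (the class name and values below are illustrative, not drawn from the projects that follow): add returns true when the element was newly inserted, false when an equal element is already present, and iteration follows insertion order.

import java.util.LinkedHashSet;

public class LinkedHashSetAddExample {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        System.out.println(set.add("beta"));  // true  - newly inserted
        System.out.println(set.add("alpha")); // true  - newly inserted
        System.out.println(set.add("beta"));  // false - already present; set is unchanged
        System.out.println(set);              // [beta, alpha] - insertion order preserved
    }
}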

Usage

From source file:gaffer.accumulostore.operation.spark.handler.AccumuloStoreRelation.java
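
In this example a LinkedHashSet&lt;StructField&gt; merges the per-group Spark SQL schemas into one StructType: fields are added in the order the groups were provided, and fields that were already added are skipped.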

private void buildSchema() {
    LOGGER.info("Building Spark SQL schema for groups {}", StringUtils.join(groups, ','));
    for (final String group : groups) {
        final SchemaElementDefinition elementDefn = store.getSchema().getElement(group);
        final List<StructField> structFieldList = new ArrayList<>();
        if (elementDefn instanceof SchemaEntityDefinition) {
            entityOrEdgeByGroup.put(group, EntityOrEdge.ENTITY);
            final SchemaEntityDefinition entityDefinition = (SchemaEntityDefinition) elementDefn;
            final String vertexClass = store.getSchema().getType(entityDefinition.getVertex()).getClassString();
            final DataType vertexType = getType(vertexClass);
            if (vertexType == null) {
                throw new RuntimeException("Vertex must be a recognised type: found " + vertexClass);
            }
            LOGGER.info("Group {} is an entity group - {} is of type {}", group, VERTEX_COL_NAME, vertexType);
            structFieldList.add(new StructField(VERTEX_COL_NAME, vertexType, true, Metadata.empty()));
        } else {
            entityOrEdgeByGroup.put(group, EntityOrEdge.EDGE);
            final SchemaEdgeDefinition edgeDefinition = (SchemaEdgeDefinition) elementDefn;
            final String srcClass = store.getSchema().getType(edgeDefinition.getSource()).getClassString();
            final String dstClass = store.getSchema().getType(edgeDefinition.getDestination()).getClassString();
            final DataType srcType = getType(srcClass);
            final DataType dstType = getType(dstClass);
            if (srcType == null || dstType == null) {
                throw new RuntimeException("Both source and destination must be recognised types: source was "
                        + srcClass + " destination was " + dstClass);
            }
            LOGGER.info("Group {} is an edge group - {} is of type {}, {} is of type {}", group, SRC_COL_NAME,
                    srcType, DST_COL_NAME, dstType);
            structFieldList.add(new StructField(SRC_COL_NAME, srcType, true, Metadata.empty()));
            structFieldList.add(new StructField(DST_COL_NAME, dstType, true, Metadata.empty()));
        }
        final Set<String> properties = elementDefn.getProperties();
        for (final String property : properties) {
            final String propertyClass = elementDefn.getPropertyClass(property).getCanonicalName();
            final DataType propertyType = getType(propertyClass);
            if (propertyType == null) {
                LOGGER.warn("Ignoring property {} as it is not a recognised type", property);
            } else {
                LOGGER.info("Property {} is of type {}", property, propertyType);
                structFieldList.add(new StructField(property, propertyType, true, Metadata.empty()));
            }
        }
        structTypeByGroup.put(group,
                new StructType(structFieldList.toArray(new StructField[structFieldList.size()])));
    }
    // Create reverse map of field name to StructField
    final Map<String, Set<StructField>> fieldToStructs = new HashMap<>();
    for (final String group : groups) {
        final StructType groupSchema = structTypeByGroup.get(group);
        for (final String field : groupSchema.fieldNames()) {
            if (fieldToStructs.get(field) == null) {
                fieldToStructs.put(field, new HashSet<StructField>());
            }
            fieldToStructs.get(field).add(groupSchema.apply(field));
        }
    }
    // Check consistency, i.e. if the same field appears in multiple groups then the types are consistent
    for (final Map.Entry<String, Set<StructField>> entry : fieldToStructs.entrySet()) {
        final Set<StructField> schemas = entry.getValue();
        if (schemas.size() > 1) {
            throw new IllegalArgumentException("Inconsistent fields: the field " + entry.getKey()
                    + " has more than one definition: " + StringUtils.join(schemas, ','));
        }
    }
    // Merge schemas for groups together - fields should appear in the order the groups were provided
    final LinkedHashSet<StructField> fields = new LinkedHashSet<>();
    fields.add(new StructField(GROUP, DataTypes.StringType, false, Metadata.empty()));
    usedProperties.add(GROUP);
    for (final String group : groups) {
        final StructType groupSchema = structTypeByGroup.get(group);
        for (final String field : groupSchema.fieldNames()) {
            final StructField struct = groupSchema.apply(field);
            // Add struct to fields unless it has already been added
            if (!fields.contains(struct)) {
                fields.add(struct);
                usedProperties.add(field);
            }
        }
    }
    structType = new StructType(fields.toArray(new StructField[fields.size()]));
    LOGGER.info("Schema is {}", structType);
}

From source file:org.alfresco.util.cache.AbstractAsynchronouslyRefreshedCache.java
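
When no transaction is in progress, the event's key is added to a fresh LinkedHashSet&lt;String&gt; and the refresh is queued immediately; inside a transaction the key is instead deferred to a post-commit list.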

@Override
public void onRefreshableCacheEvent(RefreshableCacheEvent refreshableCacheEvent) {
    // Ignore events not targeted for this cache
    if (!refreshableCacheEvent.getCacheId().equals(cacheId)) {
        return;
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Async cache onRefreshableCacheEvent " + refreshableCacheEvent + " on " + this);
    }

    // If in a transaction delay the refresh until after it commits

    if (TransactionSupportUtil.getTransactionId() != null) {
        if (logger.isDebugEnabled()) {
            logger.debug(
                    "Async cache adding" + refreshableCacheEvent.getKey() + " to post commit list: " + this);
        }
        TransactionData txData = getTransactionData();
        txData.keys.add(refreshableCacheEvent.getKey());
    } else {
        LinkedHashSet<String> keys = new LinkedHashSet<String>();
        keys.add(refreshableCacheEvent.getKey());
        queueRefreshAndSubmit(keys);
    }
}

From source file:org.kuali.rice.kns.util.properties.PropertyTree.java
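
The entry keys are accumulated in a LinkedHashSet so that the unmodifiable Set returned by keySet() iterates in the same order as the underlying entries.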

/**
 * Returns an unmodifiable Set containing the keys of all of the entries of this PropertyTree.
 *
 * @see java.util.Map#keySet()
 */
public Set keySet() {
    LinkedHashSet keys = new LinkedHashSet();

    Set entrySet = entrySet();
    for (Iterator i = entrySet.iterator(); i.hasNext();) {
        Map.Entry e = (Map.Entry) i.next();

        keys.add(e.getKey());
    }

    return Collections.unmodifiableSet(keys);
}

From source file:com.novartis.opensource.yada.format.Joiner.java
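
Converted header columns from every query result are added to a LinkedHashSet, producing a global header with first-seen column order and no duplicates.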

/**
 * Returns the set of unique headers across all requests
 * @return the {@link LinkedHashSet} containing all the requested columns across all headers
 */
private LinkedHashSet<String> getGlobalHeader() {
    LinkedHashSet<String> globalHeader = new LinkedHashSet<>();
    for (YADAQueryResult yqr : getYadaQueryResults()) {
        // iterate over results and stitch together in StringBuffer
        for (String hdr : yqr.getConvertedHeader()) {
            globalHeader.add(hdr);
        }
    }
    return globalHeader;
}

From source file:org.wrml.runtime.schema.SchemaBuilder.java
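
Each base schema interface is resolved to a type URI and added to a LinkedHashSet, deduplicating the URIs while preserving the order in which the interfaces were passed.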

public SchemaBuilder extend(final Class<?> baseSchemaInterface, final Class<?>... baseSchemaInterfaces) {

    final SchemaLoader schemaLoader = _Context.getSchemaLoader();
    extend(schemaLoader.getTypeUri(baseSchemaInterface));

    if (baseSchemaInterfaces != null) {
        final LinkedHashSet<URI> baseSchemaUris = new LinkedHashSet<>(baseSchemaInterfaces.length);
        for (final Class<?> schemaInterface : baseSchemaInterfaces) {
            baseSchemaUris.add(schemaLoader.getTypeUri(schemaInterface));
        }

        _Schema.getBaseSchemaUris().addAll(baseSchemaUris);
    }

    return this;
}

From source file:org.apache.ambari.server.state.UpgradeHelper.java
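
In the HDFS NAMENODE special case below, a LinkedHashSet encodes a processing order: the standby host is added before the active one, and the ordered set then replaces the original host collection.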

/**
 * Generates a list of UpgradeGroupHolder items that are used to execute either
 * an upgrade or a downgrade.
 *
 * @param upgradePack
 *          the upgrade pack
 * @param context
 *          the context that wraps key fields required to perform an upgrade
 * @return the list of holders
 */
public List<UpgradeGroupHolder> createSequence(UpgradePack upgradePack, UpgradeContext context)
        throws AmbariException {

    context.setAmbariMetaInfo(m_ambariMetaInfo.get());
    Cluster cluster = context.getCluster();
    MasterHostResolver mhr = context.getResolver();

    Map<String, Map<String, ProcessingComponent>> allTasks = upgradePack.getTasks();
    List<UpgradeGroupHolder> groups = new ArrayList<UpgradeGroupHolder>();

    for (Grouping group : upgradePack.getGroups(context.getDirection())) {

        UpgradeGroupHolder groupHolder = new UpgradeGroupHolder();
        groupHolder.name = group.name;
        groupHolder.title = group.title;
        groupHolder.skippable = group.skippable;
        groupHolder.allowRetry = group.allowRetry;

        // !!! all downgrades are skippable
        if (context.getDirection().isDowngrade()) {
            groupHolder.skippable = true;
        }

        StageWrapperBuilder builder = group.getBuilder();

        List<UpgradePack.OrderService> services = group.services;

        if (context.getDirection().isDowngrade() && !services.isEmpty()) {
            List<UpgradePack.OrderService> reverse = new ArrayList<UpgradePack.OrderService>(services);
            Collections.reverse(reverse);
            services = reverse;
        }

        // !!! cluster and service checks are empty here
        for (UpgradePack.OrderService service : services) {

            if (!allTasks.containsKey(service.serviceName)) {
                continue;
            }

            for (String component : service.components) {
                if (!allTasks.get(service.serviceName).containsKey(component)) {
                    continue;
                }

                HostsType hostsType = mhr.getMasterAndHosts(service.serviceName, component);
                if (null == hostsType) {
                    continue;
                }

                if (!hostsType.unhealthy.isEmpty()) {
                    context.addUnhealthy(hostsType.unhealthy);
                }

                Service svc = cluster.getService(service.serviceName);
                ProcessingComponent pc = allTasks.get(service.serviceName).get(component);

                setDisplayNames(context, service.serviceName, component);

                // Special case for NAMENODE
                if (service.serviceName.equalsIgnoreCase("HDFS") && component.equalsIgnoreCase("NAMENODE")) {
                    // !!! revisit if needed
                    if (!hostsType.hosts.isEmpty() && hostsType.master != null && hostsType.secondary != null) {
                        // The order is important, first do the standby, then the active namenode.
                        LinkedHashSet<String> order = new LinkedHashSet<String>();

                        order.add(hostsType.secondary);
                        order.add(hostsType.master);

                        // Override the hosts with the ordered collection
                        hostsType.hosts = order;
                    }

                    builder.add(context, hostsType, service.serviceName, svc.isClientOnlyService(), pc);

                } else {
                    builder.add(context, hostsType, service.serviceName, svc.isClientOnlyService(), pc);
                }
            }
        }

        List<StageWrapper> proxies = builder.build(context);

        if (!proxies.isEmpty()) {
            groupHolder.items = proxies;
            postProcess(context, groupHolder);
            groups.add(groupHolder);
        }
    }

    if (LOG.isDebugEnabled()) {
        for (UpgradeGroupHolder group : groups) {
            LOG.debug(group.name);

            int i = 0;
            for (StageWrapper proxy : group.items) {
                LOG.debug("  Stage {}", Integer.valueOf(i++));
                int j = 0;

                for (TaskWrapper task : proxy.getTasks()) {
                    LOG.debug("    Task {} {}", Integer.valueOf(j++), task);
                }
            }
        }
    }

    return groups;
}

From source file:com.datatorrent.stram.StramClient.java
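
Resolved jar paths are collected in a LinkedHashSet to avoid duplicate entries while keeping the local jar dependency list in a stable order.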

public static LinkedHashSet<String> findJars(LogicalPlan dag, Class<?>[] defaultClasses) {
    List<Class<?>> jarClasses = new ArrayList<Class<?>>();

    for (String className : dag.getClassNames()) {
        try {
            Class<?> clazz = Thread.currentThread().getContextClassLoader().loadClass(className);
            jarClasses.add(clazz);
        } catch (ClassNotFoundException e) {
            throw new IllegalArgumentException("Failed to load class " + className, e);
        }
    }

    for (Class<?> clazz : Lists.newArrayList(jarClasses)) {
        // process class and super classes (super does not require deploy annotation)
        for (Class<?> c = clazz; c != Object.class && c != null; c = c.getSuperclass()) {
            //LOG.debug("checking " + c);
            jarClasses.add(c);
            jarClasses.addAll(Arrays.asList(c.getInterfaces()));
        }
    }

    jarClasses.addAll(Arrays.asList(defaultClasses));

    if (dag.isDebug()) {
        LOG.debug("Deploy dependencies: {}", jarClasses);
    }

    LinkedHashSet<String> localJarFiles = new LinkedHashSet<String>(); // avoid duplicates
    HashMap<String, String> sourceToJar = new HashMap<String, String>();

    for (Class<?> jarClass : jarClasses) {
        if (jarClass.getProtectionDomain().getCodeSource() == null) {
            // system class
            continue;
        }
        String sourceLocation = jarClass.getProtectionDomain().getCodeSource().getLocation().toString();
        String jar = sourceToJar.get(sourceLocation);
        if (jar == null) {
            // don't create jar file from folders multiple times
            jar = JarFinder.getJar(jarClass);
            sourceToJar.put(sourceLocation, jar);
            LOG.debug("added sourceLocation {} as {}", sourceLocation, jar);
        }
        if (jar == null) {
            throw new AssertionError("Cannot resolve jar file for " + jarClass);
        }
        localJarFiles.add(jar);
    }

    String libJarsPath = dag.getValue(LogicalPlan.LIBRARY_JARS);
    if (!StringUtils.isEmpty(libJarsPath)) {
        String[] libJars = StringUtils.splitByWholeSeparator(libJarsPath, LIB_JARS_SEP);
        localJarFiles.addAll(Arrays.asList(libJars));
    }

    LOG.info("Local jar file dependencies: " + localJarFiles);

    return localJarFiles;
}

From source file:org.openflexo.foundation.sg.implmodel.TechnologyModuleDefinition.java
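
Required module definitions resolved by name are added to a LinkedHashSet, preserving the order of requiredModuleNames and silently dropping names that do not resolve.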

public LinkedHashSet<TechnologyModuleDefinition> getRequiredModules() {
    LinkedHashSet<TechnologyModuleDefinition> result = new LinkedHashSet<TechnologyModuleDefinition>();
    for (String name : requiredModuleNames) {
        TechnologyModuleDefinition technologyModuleDefinition = getTechnologyModuleDefinition(name);
        if (technologyModuleDefinition != null) {
            result.add(technologyModuleDefinition);
        }
    }

    return result;
}

From source file:org.openflexo.foundation.sg.implmodel.TechnologyModuleDefinition.java
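
The same pattern as above, applied to compatibleModuleNames: resolved definitions are added in declaration order.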

public LinkedHashSet<TechnologyModuleDefinition> getCompatibleModules() {
    LinkedHashSet<TechnologyModuleDefinition> result = new LinkedHashSet<TechnologyModuleDefinition>();
    for (String name : compatibleModuleNames) {
        TechnologyModuleDefinition technologyModuleDefinition = getTechnologyModuleDefinition(name);
        if (technologyModuleDefinition != null) {
            result.add(technologyModuleDefinition);
        }
    }

    return result;
}

From source file:org.openflexo.foundation.sg.implmodel.TechnologyModuleDefinition.java
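
And likewise for incompatibleModuleNames.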

public LinkedHashSet<TechnologyModuleDefinition> getIncompatibleModules() {
    LinkedHashSet<TechnologyModuleDefinition> result = new LinkedHashSet<TechnologyModuleDefinition>();
    for (String name : incompatibleModuleNames) {
        TechnologyModuleDefinition technologyModuleDefinition = getTechnologyModuleDefinition(name);
        if (technologyModuleDefinition != null) {
            result.add(technologyModuleDefinition);
        }
    }

    return result;
}