Example usage for java.util LinkedHashSet addAll

List of usage examples for java.util LinkedHashSet addAll

Introduction

On this page you can find example usage for java.util LinkedHashSet addAll.

Prototype

boolean addAll(Collection<? extends E> c);

Document

Adds all of the elements in the specified collection to this set if they're not already present (optional operation).
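
Before the real-world examples, here is a minimal, self-contained sketch of the behavior described above: elements keep their first-insertion order, and elements already present are silently ignored.

import java.util.Arrays;
import java.util.LinkedHashSet;

public class LinkedHashSetAddAllDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>(Arrays.asList("a", "b"));
        // "b" is already present and is ignored; "c" and "d" are appended in order.
        boolean changed = set.addAll(Arrays.asList("b", "c", "d"));
        System.out.println(changed); // true - the set gained elements
        System.out.println(set);     // [a, b, c, d]
    }
}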

Usage

From source file:net.famzangl.minecraft.minebot.ai.command.CommandRegistry.java

public String[] fillTabComplete(MinebotNetHandler minebotNetHandler, String[] serverResponse,
        String lastSendTabComplete) {
    String command = getCommandId(lastSendTabComplete);
    LinkedHashSet<String> res = new LinkedHashSet<String>();
    for (String c : commandTable.keySet()) {
        if (c.startsWith(command)) {
            res.add("/" + c);
        }
    }
    if (res.isEmpty())
        return serverResponse;
    res.addAll(Arrays.asList(serverResponse));

    return res.toArray(new String[0]);
}
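
Because the local command matches are inserted before serverResponse, addAll silently drops any server suggestion that duplicates a local one, and the local completions stay at the front of the returned array.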

From source file:com.gs.obevo.api.appdata.Environment.java

public RichIterable<FileObject> getSourceDirs() {
    if (this.sourceDirs == null) {
        // only keep the distinct list of files here
        LinkedHashSet<FileObject> fileObjects = new LinkedHashSet<FileObject>();
        if (coreSourcePath != null) {
            fileObjects.add(coreSourcePath);
        }
        if (additionalSourceDirs != null) {
            fileObjects.addAll(additionalSourceDirs.flatCollect(new Function<String, Iterable<FileObject>>() {
                @Override
                public Iterable<FileObject> valueOf(String path) {
                    MutableList<FileObject> resolvedFileObjects = Lists.mutable.empty();
                    for (FileResolverStrategy fileResolverStrategy : fileResolverStrategies) {
                        resolvedFileObjects.addAllIterable(fileResolverStrategy.resolveFileObjects(path));
                    }
                    if (resolvedFileObjects.isEmpty()) {
                        throw new IllegalArgumentException("Unable to find the given path [" + path
                                + "] via any of the fileResolverStrategies:"
                                + fileResolverStrategies.makeString(", "));
                    }
                    return resolvedFileObjects;
                }
            }).toList());
        }
        this.sourceDirs = Lists.mutable.withAll(fileObjects);
    }
    return this.sourceDirs;
}
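
Here the LinkedHashSet dedupes the configured source directories while keeping coreSourcePath ahead of anything contributed by additionalSourceDirs, since first-insertion order is preserved.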

From source file:org.pentaho.reporting.engine.classic.core.modules.misc.datafactory.sql.SimpleSQLReportDataFactory.java

public String[] getReferencedFields(final String query, final DataRow parameters)
        throws ReportDataFactoryException {

    final boolean isNewConnection = connection == null;
    try {
        final ParametrizationProviderFactory factory = createParametrizationProviderFactory();
        final Connection connection = getConnection(parameters);
        final ParametrizationProvider parametrizationProvider = factory.create(connection);
        final String computedQuery = computedQuery(query, parameters);
        parametrizationProvider.rewriteQueryForParametrization(connection, computedQuery, parameters);
        final LinkedHashSet<String> list = new LinkedHashSet<String>();
        list.addAll(Arrays.asList(parametrizationProvider.getPreparedParameterNames()));
        if (userField != null) {
            list.add(userField);
        }
        if (passwordField != null) {
            list.add(passwordField);
        }
        list.add(DataFactory.QUERY_LIMIT);
        return list.toArray(new String[list.size()]);
    } catch (ReportDataFactoryException e) {
        logger.warn("Unable to perform cache preparation", e);
        throw e;
    } catch (SQLException e) {
        logger.warn("Unable to perform cache preparation", e);
        throw new ReportDataFactoryException("Unable to perform cache preparation", e);
    } finally {
        if (isNewConnection) {
            close();
        }
    }
}
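
The LinkedHashSet both dedupes and fixes the order of the referenced fields: prepared-statement parameter names first, then the optional user and password fields, then the query-limit key.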

From source file:com.yahoo.bard.webservice.data.metric.TemplateDruidQuery.java

/**
 * Merges two template queries into one. The original queries are not mutated.
 *
 * @param sibling  the query to merge.
 *
 * @return merged query
 */
public TemplateDruidQuery merge(TemplateDruidQuery sibling) {

    // TODO: Handle merging with a null TDQ

    // Correct the queries to have the same depth by nesting if necessary.
    TemplateDruidQuery self = this;
    while (self.depth > sibling.depth) {
        sibling = sibling.nest();
    }
    while (sibling.depth > self.depth) {
        self = self.nest();
    }

    // Merge together all the aggregations and post aggregations for the outer query.
    Set<Aggregation> mergedAggregations = mergeAggregations(self.getAggregations(), sibling.getAggregations());
    LinkedHashSet<PostAggregation> mergedPostAggregations = new LinkedHashSet<>(self.getPostAggregations());
    mergedPostAggregations.addAll(sibling.getPostAggregations());

    // Merge the time grains
    ZonelessTimeGrain mergedGrain = mergeTimeGrains(self.getTimeGrain(), sibling.getTimeGrain());
    TemplateDruidQuery mergedNested = self.isNested() ? self.nestedQuery.merge(sibling.getInnerQuery()) : null;
    return new TemplateDruidQuery(mergedAggregations, mergedPostAggregations, mergedNested, mergedGrain);
}
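
The copy-then-addAll idiom builds an ordered union: self's post-aggregations come first, the sibling's follow, and any sibling post-aggregation equal to one already present is dropped.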

From source file:org.apache.geode.cache.lucene.internal.cli.LuceneIndexCommands.java

@SuppressWarnings("unchecked")
protected List<LuceneIndexDetails> getIndexListing() {
    final Execution functionExecutor = getMembersFunctionExecutor(getMembers(getCache()));

    if (functionExecutor instanceof AbstractExecution) {
        ((AbstractExecution) functionExecutor).setIgnoreDepartedMembers(true);
    }

    final ResultCollector resultsCollector = functionExecutor.execute(new LuceneListIndexFunction());
    final List<Set<LuceneIndexDetails>> results = (List<Set<LuceneIndexDetails>>) resultsCollector.getResult();

    List<LuceneIndexDetails> sortedResults = results.stream().flatMap(Collection::stream).sorted()
            .collect(Collectors.toList());
    LinkedHashSet<LuceneIndexDetails> uniqResults = new LinkedHashSet<>();
    uniqResults.addAll(sortedResults);
    sortedResults.clear();
    sortedResults.addAll(uniqResults);
    return sortedResults;
}
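
A side note on the dedupe step above: the sorted list is round-tripped through a LinkedHashSet solely to drop duplicates while keeping sorted order. On an ordered stream, distinct() is stable (it keeps the first occurrence in encounter order), so the same result could come from a single pipeline; a sketch under that assumption:

    List<LuceneIndexDetails> sortedResults = results.stream()
            .flatMap(Collection::stream)
            .sorted()
            .distinct()
            .collect(Collectors.toList());
    return sortedResults;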

From source file:com.streamsets.pipeline.stage.processor.fieldfilter.FieldFilterProcessor.java

@Override
protected void process(Record record, SingleLaneBatchMaker batchMaker) throws StageException {
    // use List to preserve the order of list fieldPaths - need to watch out for duplicates though
    List<String> allFieldPaths = record.getEscapedFieldPathsOrdered();
    // use LinkedHashSet to preserve order and dedupe as we go
    LinkedHashSet<String> fieldsToRemove;
    switch (filterOperation) {
    case REMOVE:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            fieldsToRemove.addAll(matchingFieldPaths);
        }
        break;
    case REMOVE_NULL:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() == null) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_EMPTY:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() != null
                        && record.get(fieldPath).getValue().equals("")) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_NULL_EMPTY:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && (record.get(fieldPath).getValue() == null
                        || record.get(fieldPath).getValue().equals(""))) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_CONSTANT:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() != null
                        && record.get(fieldPath).getValue().equals(constant)) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case KEEP:
        //Algorithm:
        // - Get all possible field paths in the record.
        //
        // - From that set, remove the argument fields that must be retained, along with their parent fields
        //   and child fields (accounting for wildcard characters while doing so). The remaining set is what
        //   must be removed from the record.
        //
        // - Keep fieldsToRemove in order - sorting is too costly.
        //List all the possible field paths in this record
        fieldsToRemove = new LinkedHashSet<>(allFieldPaths);
        for (String field : fields) {
            //Keep parent fields
            //get the parent fieldPaths for each of the fields to keep
            List<String> parentFieldPaths = getParentFields(field);
            //remove parent paths from the fieldsToRemove set
            //Note that parent names could contain wild card characters
            for (String parentField : parentFieldPaths) {
                List<String> matchingFieldPaths = FieldRegexUtil.getMatchingFieldPaths(parentField,
                        allFieldPaths);
                fieldsToRemove.removeAll(matchingFieldPaths);
            }

            //Keep the field itself
            //remove the field path itself from the fieldsToRemove set
            //Consider wild card characters
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            fieldsToRemove.removeAll(matchingFieldPaths);

            //Keep the children of the field
            //For each of the fieldPaths that match the argument field path, remove all the child paths
            // Remove children at the end to avoid ConcurrentModificationException
            Set<String> childrenToKeep = new HashSet<>();
            for (String matchingFieldPath : matchingFieldPaths) {
                for (String fieldToRemove : fieldsToRemove) {
                    // for the old way, startsWith is appropriate when we have
                    // different path structures, or "nested" (multiple dimensioned) index structures.
                    //  eg: /USA[0]/SanFrancisco/folsom/streets[0] must still match:
                    //      /USA[0]/SanFrancisco/folsom/streets[0][0]   hence: startsWith.
                    if (StringUtils.countMatches(fieldToRemove, "/") == StringUtils
                            .countMatches(matchingFieldPath, "/")
                            && StringUtils.countMatches(fieldToRemove, "[") == StringUtils
                                    .countMatches(matchingFieldPath, "[")) {
                        if (fieldToRemove.equals(matchingFieldPath)) {
                            childrenToKeep.add(fieldToRemove);
                        }
                    } else {
                        if (fieldToRemove.startsWith(matchingFieldPath)) {
                            childrenToKeep.add(fieldToRemove);
                        }
                    }
                }
            }
            fieldsToRemove.removeAll(childrenToKeep);
        }
        break;
    default:
        throw new IllegalStateException(
                Utils.format("Unexpected Filter Operation '{}'", filterOperation.name()));
    }
    // We don't sort because we maintained list fields in ascending order (but not a full ordering)
    // Instead we just iterate in reverse to delete
    Iterator<String> itr = (new LinkedList<>(fieldsToRemove)).descendingIterator();
    while (itr.hasNext()) {
        record.delete(itr.next());
    }
    batchMaker.addRecord(record);
}
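
The reverse iteration at the end is deliberate: allFieldPaths is in ascending order, so fieldsToRemove lists parent paths before their children, and walking the descendingIterator deletes children before their parents.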

From source file:org.openmrs.web.controller.report.CohortReportFormController.java

/**
 * Creates a command object and tries to fill it with data from the saved report schema with the
 * id given by the 'reportId' parameter.
 *
 * @see org.springframework.web.servlet.mvc.AbstractFormController#formBackingObject(javax.servlet.http.HttpServletRequest)
 */
protected Object formBackingObject(HttpServletRequest request) throws Exception {
    CommandObject command = new CommandObject();

    if (Context.isAuthenticated() && !isFormSubmission(request)) {
        // if this is an existing report, get its data
        String idString = request.getParameter("reportId");
        if (idString != null) {
            Integer id = Integer.valueOf(idString);
            ReportService rs = (ReportService) Context.getService(ReportService.class);
            ReportSchemaXml schemaXml = rs.getReportSchemaXml(id);
            ReportSchema schema = rs.getReportSchema(schemaXml);
            CohortDataSetDefinition cohorts = null;
            if (schema.getDataSetDefinitions() == null)
                schema.setDataSetDefinitions(new ArrayList<DataSetDefinition>());
            if (schema.getDataSetDefinitions().size() == 0)
                schema.getDataSetDefinitions().add(new CohortDataSetDefinition());
            for (DataSetDefinition d : schema.getDataSetDefinitions()) {
                if (d instanceof CohortDataSetDefinition) {
                    if (cohorts != null)
                        throw new Exception(
                                "You may not edit a report that contains more than one Cohort Dataset Definition");
                    cohorts = (CohortDataSetDefinition) d;
                } else {
                    throw new Exception(
                            "You may not edit a report that contains datasets besides Cohort Dataset Definition");
                }
            }
            if (cohorts == null)
                throw new Exception(
                        "You may only edit a report that has exactly one Cohort Dataset Definition");

            command.setReportId(id);
            command.setName(schema.getName());
            command.setDescription(schema.getDescription());
            command.getParameters().addAll(schema.getReportParameters());
            command.setUuid(schemaXml.getUuid());

            // populate command.rows, directly from XML
            Document xml = DocumentBuilderFactory.newInstance().newDocumentBuilder()
                    .parse(new InputSource(new StringReader(schemaXml.getXml())));
            // xml looks like <reportSchema>...<dataSets>...<dataSetDefinition class="org.openmrs.report.CohortDataSetDefinition">
            // TODO: do this with xpath
            Node temp = findChild(xml, "reportSchema");
            temp = findChild(temp, "dataSets");
            temp = findChildWithAttribute(temp, "dataSetDefinition", "class",
                    "org.openmrs.report.CohortDataSetDefinition");

            Map<String, String> nameToStrategy = new LinkedHashMap<String, String>();
            Node strategies = findChild(temp, "strategies");
            if (strategies != null) {
                NodeList nl = strategies.getChildNodes();
                // each is a <entry><string>name</string><cohort ...><specification>strategy</specification></cohort></entry>
                for (int i = 0; i < nl.getLength(); ++i) {
                    Node node = nl.item(i);
                    if ("entry".equals(node.getNodeName())) {
                        String name = findChild(node, "string").getFirstChild().getNodeValue();
                        String strategy = findChild(findChild(node, "cohort"), "specification").getFirstChild()
                                .getNodeValue();
                        nameToStrategy.put(name, strategy);
                    }
                }
            }

            Map<String, String> nameToDescription = new LinkedHashMap<String, String>();
            Node descriptions = findChild(temp, "descriptions");
            if (descriptions != null) {
                NodeList nl = descriptions.getChildNodes();
                // each is a <entry><string>name</string><string>descr</string></entry>
                for (int i = 0; i < nl.getLength(); ++i) {
                    Node node = nl.item(i);
                    if ("entry".equals(node.getNodeName())) {
                        String name = findChild(node, "string").getFirstChild().getNodeValue();
                        String descr = findChild(node, "string", 2).getFirstChild().getNodeValue();
                        nameToDescription.put(name, descr);
                    }
                }
            }

            LinkedHashSet<String> names = new LinkedHashSet<String>();
            names.addAll(nameToStrategy.keySet());
            names.addAll(nameToDescription.keySet());

            List<CohortReportRow> rows = new ArrayList<CohortReportRow>();
            for (String name : names) {
                String descr = nameToDescription.get(name);
                String strat = nameToStrategy.get(name);
                CohortReportRow row = new CohortReportRow();
                row.setName(name);
                row.setDescription(descr);
                row.setQuery(strat);
                rows.add(row);
            }
            command.setRows(rows);
        }
    }
    return command;
}
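
The two addAll calls near the end build an ordered union of the strategy and description names, so each row is emitted exactly once, in document order, even when a name appears in only one of the two maps.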

From source file:com.act.biointerpretation.desalting.ReactionDesalter.java

private List<Long> buildIdMapping(Long[] oldChemIds) {
    LinkedHashSet<Long> newIDs = new LinkedHashSet<>(oldChemIds.length);

    for (Long oldChemId : oldChemIds) {
        List<Long> newChemIds = oldChemicalIdToNewChemicalIds.get(oldChemId);
        if (newChemIds == null) {
            throw new RuntimeException(String
                    .format("Found old chemical id %d that is not in the old -> new chem id map", oldChemId));
        }

        newIDs.addAll(newChemIds);
    }

    List<Long> results = new ArrayList<>();
    // TODO: does ArrayList's constructor also add all the hashed elements in order?  I know addAll does.
    results.addAll(newIDs);
    return results;
}
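
To answer the TODO above: yes — ArrayList's copy constructor stores elements in the order they are returned by the source collection's iterator, so it preserves the LinkedHashSet's insertion order, and the last three lines could collapse to a single statement:

    return new ArrayList<>(newIDs);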

From source file:cz.incad.kramerius.pdf.impl.FirstPagePDFServiceImpl.java

void itemVals(Map<String, LinkedHashSet<String>> detailItemValues, List<String> list, String key) {
    LinkedHashSet<String> vals = detailItemValues.get(key);
    if (vals == null) {
        vals = new LinkedHashSet<String>();
        detailItemValues.put(key, vals);
    }
    vals.addAll(list);
}
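
On Java 8 and later, the get-or-create dance above collapses into a single computeIfAbsent call with the same behavior; a sketch:

void itemVals(Map<String, LinkedHashSet<String>> detailItemValues, List<String> list, String key) {
    // Create the value set on first access, then append the list's elements in encounter order.
    detailItemValues.computeIfAbsent(key, k -> new LinkedHashSet<>()).addAll(list);
}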

From source file:com.espertech.esper.epl.spec.PatternStreamSpecRaw.java

private PatternStreamSpecCompiled compileInternal(StatementContext context, Set<String> eventTypeReferences,
        boolean isInsertInto, Collection<Integer> assignedTypeNumberStack, MatchEventSpec tags,
        Set<String> priorAllTags) throws ExprValidationException {
    if (tags == null) {
        tags = new MatchEventSpec();
    }
    Deque<Integer> subexpressionIdStack = new ArrayDeque<Integer>(assignedTypeNumberStack);
    ExprEvaluatorContext evaluatorContextStmt = new ExprEvaluatorContextStatement(context);
    Stack<EvalFactoryNode> nodeStack = new Stack<EvalFactoryNode>();

    // determine ordered tags
    Set<EvalFactoryNode> filterFactoryNodes = EvalNodeUtil.recursiveGetChildNodes(evalFactoryNode,
            FilterForFilterFactoryNodes.INSTANCE);
    LinkedHashSet<String> allTagNamesOrdered = new LinkedHashSet<String>();
    if (priorAllTags != null) {
        allTagNamesOrdered.addAll(priorAllTags);
    }
    for (EvalFactoryNode filterNode : filterFactoryNodes) {
        EvalFilterFactoryNode factory = (EvalFilterFactoryNode) filterNode;
        int tagNumber;
        if (factory.getEventAsName() != null) {
            if (!allTagNamesOrdered.contains(factory.getEventAsName())) {
                allTagNamesOrdered.add(factory.getEventAsName());
                tagNumber = allTagNamesOrdered.size() - 1;
            } else {
                tagNumber = findTagNumber(factory.getEventAsName(), allTagNamesOrdered);
            }
            factory.setEventAsTagNumber(tagNumber);
        }
    }

    recursiveCompile(evalFactoryNode, context, evaluatorContextStmt, eventTypeReferences, isInsertInto, tags,
            subexpressionIdStack, nodeStack, allTagNamesOrdered);

    Audit auditPattern = AuditEnum.PATTERN.getAudit(context.getAnnotations());
    Audit auditPatternInstance = AuditEnum.PATTERNINSTANCES.getAudit(context.getAnnotations());
    EvalFactoryNode compiledEvalFactoryNode = evalFactoryNode;
    if (auditPattern != null || auditPatternInstance != null) {
        EvalAuditInstanceCount instanceCount = new EvalAuditInstanceCount();
        compiledEvalFactoryNode = recursiveAddAuditNode(null, auditPattern != null,
                auditPatternInstance != null, evalFactoryNode, evalNodeExpressions, instanceCount);
    }

    return new PatternStreamSpecCompiled(compiledEvalFactoryNode, tags.getTaggedEventTypes(),
            tags.getArrayEventTypes(), allTagNamesOrdered, this.getViewSpecs(), this.getOptionalStreamName(),
            this.getOptions());
}
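
The tag numbering above leans on LinkedHashSet's stable iteration order: a tag's number is simply its insertion position. The findTagNumber helper is not shown in this excerpt; a plausible implementation (a hypothetical sketch, not the Esper source) walks the set counting positions:

// Hypothetical sketch of the helper used above: returns the zero-based
// position of tag in the set's iteration (insertion) order.
private static int findTagNumber(String tag, LinkedHashSet<String> orderedTags) {
    int index = 0;
    for (String candidate : orderedTags) {
        if (candidate.equals(tag)) {
            return index;
        }
        index++;
    }
    throw new IllegalStateException("Tag not found: " + tag);
}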