Example usage for java.util LinkedHashSet add

List of usage examples for java.util LinkedHashSet add

Introduction

On this page you can find example usage for java.util.LinkedHashSet.add.

Prototype

boolean add(E e);

Document

Adds the specified element to this set if it is not already present (optional operation).
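
For reference, here is a minimal, self-contained sketch of the add semantics: add returns whether the element was newly inserted, duplicates are silently ignored, and iteration follows insertion order.

import java.util.LinkedHashSet;

public class LinkedHashSetAddDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        System.out.println(set.add("b")); // true: "b" was not present
        System.out.println(set.add("a")); // true: "a" was not present
        System.out.println(set.add("b")); // false: duplicate, set is unchanged
        System.out.println(set);          // [b, a] - insertion order preserved
    }
}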

Usage

From source file: Simulator.PerformanceCalculation.java

private CategoryDataset createDataset() {
    DefaultCategoryDataset dataset = new DefaultCategoryDataset();

    LinkedHashSet<Integer> no = new LinkedHashSet<>();
    LinkedHashMap<Integer, ArrayList<Double>> wait1 = new LinkedHashMap<>();

    for (Map.Entry<Integer, TraceObject> entry : l.getLocalTrace().entrySet()) {
        TraceObject traceObject = entry.getValue();

        if (wait1.get(traceObject.getSurgeonId()) == null) {
            ArrayList<Double> details = new ArrayList<>();
            details.add(traceObject.getWaitTime1());
            details.add(traceObject.getWaitTime2());
            wait1.put(traceObject.getSurgeonId(), details);
        } else {
            wait1.get(traceObject.getSurgeonId()).add(traceObject.getWaitTime1());
            wait1.get(traceObject.getSurgeonId()).add(traceObject.getWaitTime2());
        }

        no.add(traceObject.getSurgeonId());
    }
    String[] column = new String[no.size()];

    String series1 = "Minimum Cost";
    String series2 = "Acceptable Cost";
    String series3 = "Actual Cost";

    for (int i = 0; i < no.size(); i++) {
        column[i] = "Surgeon " + (i + 1);
    }

    LinkedHashMap<Integer, Double> average = new LinkedHashMap<>();
    for (Map.Entry<Integer, ArrayList<Double>> entry : wait1.entrySet()) {
        Integer integer = entry.getKey();
        ArrayList<Double> arrayList = entry.getValue();
        double total = 0;
        for (Double double1 : arrayList) {
            total += double1;
        }
        average.put(integer, total / 600);
    }

    for (int i = 1; i <= average.size(); i++) {
        int costMin = (Configuration.minimumSurgeryTime * (wait1.get(i).size() / 2)
                * Configuration.costOfPatientWaiting);
        int costAccept = (Configuration.acceptedSurgeryTime * (wait1.get(i).size() / 2)
                * Configuration.costOfPatientWaiting);
        int actualCost = (int) Math.round(average.get(i) * Configuration.costOfPatientWaiting);

        int x = actualCost - (costAccept + costMin);
        if (x > actualCost) {
            x = actualCost;
        }
        dataset.addValue(costMin, series1, column[i - 1]);
        dataset.addValue(costAccept - costMin, series2, column[i - 1]);
        dataset.addValue(x, series3, column[i - 1]);
    }

    return dataset;
}

From source file: org.ncic.bioinfo.sparkseq.algorithms.utils.GATKVariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniquifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name.
 * simpleMerge no longer verifies that sample names are unique, EVEN if genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE.
 * One should use SampleUtils.verifyUniqueSamplesNames to check that before calling simpleMerge.
 *
 * For more information on this method see: http://www.thedistractionnetwork.com/programmer-problem/
 *
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext       representing the merge of unsortedVCs
 */
public static VariantContext simpleMerge(final Collection<VariantContext> unsortedVCs,
        final List<String> priorityListOfVCs, final int originalNumOfVCs,
        final FilteredRecordMergeType filteredRecordMergeType, final GenotypeMergeType genotypeMergeOptions,
        final boolean annotateOrigin, final boolean printMessages, final String setKey,
        final boolean filteredAreUncalled, final boolean mergeInfoWithMaxAC) {
    if (unsortedVCs == null || unsortedVCs.size() == 0)
        return null;

    if (priorityListOfVCs != null && originalNumOfVCs != priorityListOfVCs.size())
        throw new IllegalArgumentException(
                "the number of the original VariantContexts must be the same as the number of VariantContexts in the priority list");

    if (annotateOrigin && priorityListOfVCs == null && originalNumOfVCs == 0)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts or the number of original VariantContexts");

    final List<VariantContext> preFilteredVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with reference base in case of indels if necessary
    List<VariantContext> VCs = new ArrayList<>();

    for (final VariantContext vc : preFilteredVCs) {
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(vc);
    }

    if (VCs.size() == 0) // everything is filtered out and we're filteredAreUncalled
        return null;

    // establish the baseline info from the first VC
    final VariantContext first = VCs.get(0);
    final String name = first.getSource();
    final Allele refAllele = determineReferenceAllele(VCs);

    final LinkedHashSet<Allele> alleles = new LinkedHashSet<>();
    final Set<String> filters = new HashSet<>();
    final Map<String, Object> attributes = new LinkedHashMap<>();
    final Set<String> inconsistentAttributes = new HashSet<>();
    final Set<String> variantSources = new HashSet<>(); // contains the set of sources we found in our set of VCs that are variant
    final Set<String> rsIDs = new LinkedHashSet<>(1); // most of the time there's one id

    VariantContext longestVC = first;
    int depth = 0;
    int maxAC = -1;
    final Map<String, Object> attributesWithMaxAC = new LinkedHashMap<>();
    double log10PError = CommonInfo.NO_LOG10_PERROR;
    boolean anyVCHadFiltersApplied = false;
    VariantContext vcWithMaxAC = null;
    GenotypesContext genotypes = GenotypesContext.create();

    // counting the number of filtered and variant VCs
    int nFiltered = 0;

    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches
    for (final VariantContext vc : VCs) {
        if (longestVC.getStart() != vc.getStart())
            throw new IllegalStateException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (VariantContextUtils.getSize(vc) > VariantContextUtils.getSize(longestVC))
            longestVC = vc; // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        if (vc.isVariant())
            variantSources.add(vc.getSource());

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        // We always take the QUAL of the first VC with a non-MISSING qual for the combined value
        if (log10PError == CommonInfo.NO_LOG10_PERROR)
            log10PError = vc.getLog10PError();

        filters.addAll(vc.getFilters());
        anyVCHadFiltersApplied |= vc.filtersWereApplied();

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += vc.getAttributeAsInt(VCFConstants.DEPTH_KEY, 0);
        if (vc.hasID())
            rsIDs.add(vc.getID());
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY, null);
            // let's see if the string contains a "," separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                final List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (final String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (final Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            final String key = p.getKey();
            final Object value = p.getValue();
            // only output annotations that have the same value in every input VC
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                final boolean alreadyFound = attributes.containsKey(key);
                final Object boundValue = attributes.get(key);
                final boolean boundIsMissingValue = alreadyFound
                        && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(value) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    attributes.put(key, value);
                }
            }
        }
    }

    // if we have more alternate alleles in the merged VC than in one or more of the
    // original VCs, we need to strip out the GL/PLs (because they are no longer accurate), as well as allele-dependent attributes like AC,AF, and AD
    for (final VariantContext vc : VCs) {
        if (vc.getAlleles().size() == 1)
            continue;
        if (hasPLIncompatibleAlleles(alleles, vc.getAlleles())) {
            genotypes = stripPLsAndAD(genotypes);
            // this will remove stale AC and AF attributes from the VC
            VariantContextUtils.calculateChromosomeCounts(vc, attributes, true);
            break;
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if ((filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
            || filteredRecordMergeType == FilteredRecordMergeType.KEEP_UNCONDITIONAL)
        filters.clear();

    if (annotateOrigin) { // we care about where the call came from
        String setValue;
        if (nFiltered == 0 && variantSources.size() == originalNumOfVCs) // everything was variant and unfiltered
            setValue = MERGE_INTERSECTION;
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = MERGE_FILTER_IN_ALL;
        else if (variantSources.isEmpty()) // everyone was reference
            setValue = MERGE_REF_IN_ALL;
        else {
            final LinkedHashSet<String> s = new LinkedHashSet<>();
            for (final VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? MERGE_FILTER_PREFIX + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, setValue);
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));

    final String ID = rsIDs.isEmpty() ? VCFConstants.EMPTY_ID_FIELD : Utils.join(",", rsIDs);

    final VariantContextBuilder builder = new VariantContextBuilder().source(name).id(ID);
    builder.loc(longestVC.getChr(), longestVC.getStart(), longestVC.getEnd());
    builder.alleles(alleles);
    builder.genotypes(genotypes);
    builder.log10PError(log10PError);
    if (anyVCHadFiltersApplied) {
        builder.filters(filters.isEmpty() ? filters : new TreeSet<>(filters));
    }
    builder.attributes(new TreeMap<>(mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));

    // Trim the padded bases of all alleles if necessary
    final VariantContext merged = builder.make();
    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}
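
The LinkedHashSet<Allele> above collects alleles across VCs in first-seen order while dropping duplicates, which is what keeps the merged allele list stable. A simplified, string-based sketch of that behavior (plain JDK types, not the GATK Allele API):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;

public class AlleleOrderSketch {
    public static void main(String[] args) {
        // Seed with the reference allele, then fold in each VC's alleles;
        // duplicates are dropped and first-seen order is kept.
        LinkedHashSet<String> alleles = new LinkedHashSet<>();
        alleles.add("A"); // reference allele first
        List<List<String>> vcs = Arrays.asList(
                Arrays.asList("A", "T"),
                Arrays.asList("A", "C", "T"));
        for (List<String> vcAlleles : vcs) {
            alleles.addAll(vcAlleles);
        }
        System.out.println(alleles); // [A, T, C]
    }
}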

From source file: org.apache.ws.scout.registry.BusinessQueryManagerImpl.java

/**
 * Finds organizations in the registry that match the specified parameters
 *
 * @param findQualifiers
 * @param namePatterns
 * @param classifications
 * @param specifications
 * @param externalIdentifiers
 * @param externalLinks
 * @return BulkResponse
 * @throws JAXRException
 */
public BulkResponse findOrganizations(Collection findQualifiers, Collection namePatterns,
        Collection classifications, Collection specifications, Collection externalIdentifiers,
        Collection externalLinks) throws JAXRException {
    IRegistry registry = (IRegistry) registryService.getRegistry();
    try {
        FindQualifiers juddiFindQualifiers = mapFindQualifiers(findQualifiers);
        Name[] nameArray = mapNamePatterns(namePatterns);
        BusinessList result = registry.findBusiness(nameArray, null,
                ScoutJaxrUddiHelper.getIdentifierBagFromExternalIdentifiers(externalIdentifiers),
                ScoutJaxrUddiHelper.getCategoryBagFromClassifications(classifications), null,
                juddiFindQualifiers, registryService.getMaxRows());

        BusinessInfo[] bizInfoArr = null;
        BusinessInfos bizInfos = result.getBusinessInfos();
        LinkedHashSet<Organization> orgs = new LinkedHashSet<Organization>();
        if (bizInfos != null) {
            List<BusinessInfo> bizInfoList = bizInfos.getBusinessInfo();
            for (BusinessInfo businessInfo : bizInfoList) {
                //Now get the details on the individual biz
                BusinessDetail detail = registry.getBusinessDetail(businessInfo.getBusinessKey());
                orgs.add(((BusinessLifeCycleManagerImpl) registryService.getLifeCycleManagerImpl())
                        .createOrganization(detail));
            }
            bizInfoArr = new BusinessInfo[bizInfoList.size()];
            bizInfoList.toArray(bizInfoArr);
        }
        return new BulkResponseImpl(orgs);
    } catch (RegistryException e) {
        throw new JAXRException(e);
    }
}

From source file: edu.ksu.cis.santos.mdcf.dml.symbol.SymbolTable.java

private <T extends Member> Set<Pair<Feature, T>> allMembers(final Iterable<String> featureNames,
        final Class<T> clazz, final String memberName, final boolean isOverriden) {
    final LinkedHashSet<Pair<Feature, T>> b = new LinkedHashSet<>();
    final Multimap<String, String> map = isOverriden ? superTransitiveMap() : subTransitiveMap();
    for (final String featureName : featureNames) {
        Member m = declaredMemberMap(featureName).get(memberName);
        if ((m != null) && clazz.isAssignableFrom(m.getClass())) {
            @SuppressWarnings("unchecked")
            final T t = (T) m;
            b.add(ImmutablePair.<Feature, T>of(feature(featureName), t));
        }
        final Collection<String> superNames = map.get(featureName);
        if (superNames != null) {
            for (final String superName : superNames) {
                m = declaredMemberMap(superName).get(memberName);
                if ((m != null) && clazz.isAssignableFrom(m.getClass())) {
                    @SuppressWarnings("unchecked")
                    final T t = (T) m;
                    b.add(ImmutablePair.<Feature, T>of(feature(featureName), t));
                }
            }
        }
    }
    return Collections.unmodifiableSet(b);
}

From source file: org.codehaus.mojo.jsimport.AbstractImportMojo.java

/**
 * Go through all of the unassigned globals and enhance the file dependencies collection given the file that they are
 * declared in.
 * 
 * @throws MojoExecutionException if something goes wrong.
 */
protected void processSourceFilesForUnassignedSymbolDeclarations() throws MojoExecutionException {

    // For all of the js files containing unassigned vars...
    Set<Entry<String, Set<String>>> entrySet = fileUnassignedGlobals.entrySet();
    for (Entry<String, Set<String>> entry : entrySet) {

        // For each of the unassigned vars...
        String variableDeclFile = entry.getKey();
        for (String variableName : entry.getValue()) {
            // Resolve the file that contains the var's assignment and throw
            // an exception if it cannot be found.
            String variableAssignedFile = fileAssignedGlobals.get(variableName);
            if (variableAssignedFile == null && compileFileAssignedGlobals != null) {
                variableAssignedFile = compileFileAssignedGlobals.get(variableName);
            }

            // We've tried pretty hard, but we can't find a dependency. Time to barf.
            if (variableAssignedFile == null) {
                getLog().error("Dependency not found: " + variableName + " in file: " + variableDeclFile);
                throw new MojoExecutionException("Build stopping given dependency issue.");

            }

            // Enhance the declaring file's graph of dependencies.
            LinkedHashSet<String> variableDeclFileImports = fileDependencies.get(variableDeclFile);
            if (variableDeclFileImports == null) {
                variableDeclFileImports = new LinkedHashSet<String>();
                fileDependencies.put(variableDeclFile, variableDeclFileImports);
            }

            variableDeclFileImports.add(variableAssignedFile);
        }
    }
}
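
On Java 8 and later, the get-or-create step above (look up the LinkedHashSet for the declaring file, creating and registering it on a miss) can be collapsed with Map.computeIfAbsent. A minimal sketch of that equivalent pattern, using hypothetical file names:

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;

public class GetOrCreateSketch {
    public static void main(String[] args) {
        Map<String, LinkedHashSet<String>> fileDependencies = new LinkedHashMap<>();
        // One call replaces the null-check-then-put dance:
        fileDependencies.computeIfAbsent("a.js", k -> new LinkedHashSet<>()).add("util.js");
        fileDependencies.computeIfAbsent("a.js", k -> new LinkedHashSet<>()).add("dom.js");
        System.out.println(fileDependencies); // {a.js=[util.js, dom.js]}
    }
}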

From source file: com.alibaba.wasp.plan.parser.druid.DruidDQLParser.java

/**
 * Process a SELECT statement and generate a QueryPlan.
 */
private void getSelectPlan(ParseContext context, SQLSelectStatement sqlSelectStatement,
        MetaEventOperation metaEventOperation) throws IOException {
    SQLSelect select = sqlSelectStatement.getSelect();
    SQLSelectQuery sqlSelectQuery = select.getQuery();
    if (sqlSelectQuery instanceof MySqlSelectQueryBlock) {
        MySqlSelectQueryBlock sqlSelectQueryBlock = (MySqlSelectQueryBlock) sqlSelectQuery;
        // SELECT
        // FROM
        // WHERE

        // Parse The FROM clause
        String fTableName = parseFromClause(sqlSelectQueryBlock.getFrom());
        LOG.debug("SELECT SQL TableSource " + sqlSelectQueryBlock.getFrom());
        // check if table exists and get Table info(WTable)
        FTable table = metaEventOperation.checkAndGetTable(fTableName, false);

        LinkedHashSet<String> selectItem = null;
        AggregateInfo aggregateInfo = parseAggregateClause(sqlSelectQueryBlock.getSelectList(), table);
        if (aggregateInfo == null) {
            // Parse The SELECT clause
            if (sqlSelectQueryBlock.getSelectList().size() == 1
                    && sqlSelectQueryBlock.getSelectList().get(0).getExpr() instanceof SQLAllColumnExpr) {
                // This is SELECT * clause
                selectItem = parseFTable(table);
            } else {
                selectItem = parseSelectClause(sqlSelectQueryBlock.getSelectList());
            }
        } else {
            selectItem = new LinkedHashSet<String>();
            if (aggregateInfo.getField() == null) {
                //TODO
            }
            if (!aggregateInfo.getField().getName().equals("*")) {
                selectItem.add(aggregateInfo.getField().getName());
            }
        }
        LOG.debug("SELECT SQL:Select columns " + sqlSelectQueryBlock.getSelectList());
        // check if table has this columns
        metaEventOperation.checkAndGetFields(table, selectItem);

        // Parse The WHERE clause
        SQLExpr where = sqlSelectQueryBlock.getWhere();
        LOG.debug("SELECT SQL:where " + where);
        QueryInfo actionInfo = parseWhereClause(table, metaEventOperation, where,
                sqlSelectQueryBlock.isForUpdate());
        LOG.debug("ActionInfo " + actionInfo.toString());

        // Parse The Limit clause
        SQLExpr rowCount = null;
        if (sqlSelectQueryBlock.getLimit() != null) {
            rowCount = sqlSelectQueryBlock.getLimit().getRowCount();
        }
        int limit = -1;
        if (rowCount != null) {
            limit = convertToInt(rowCount);
        }

        // Convert to QueryPlan
        if (aggregateInfo == null) {
            convertToQueryPlan(table, context, actionInfo, metaEventOperation, selectItem, limit);
        } else {
            actionInfo.setType(QueryInfo.QueryType.AGGREGATE);
            actionInfo.setAggregateInfo(aggregateInfo);
            convertToQueryPlan(table, context, actionInfo, metaEventOperation);
        }
    } else if (sqlSelectQuery instanceof SQLUnionQuery) {
        throw new UnsupportedException("Union clause Unsupported");
    }
}

From source file: org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java

public LogicalNode visitWindowAgg(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block,
        WindowAggNode node, Stack<LogicalNode> stack) throws PlanningException {
    Context newContext = new Context(context);

    if (node.hasPartitionKeys()) {
        for (Column c : node.getPartitionKeys()) {
            newContext.addNecessaryReferences(new FieldEval(c));
        }
    }

    if (node.hasSortSpecs()) {
        for (SortSpec sortSpec : node.getSortSpecs()) {
            newContext.addNecessaryReferences(new FieldEval(sortSpec.getSortKey()));
        }
    }

    for (WindowFunctionEval winFunc : node.getWindowFunctions()) {
        if (winFunc.hasSortSpecs()) {
            for (SortSpec sortSpec : winFunc.getSortSpecs()) {
                newContext.addNecessaryReferences(new FieldEval(sortSpec.getSortKey()));
            }
        }
    }

    int nonFunctionColumnNum = node.getTargets().length - node.getWindowFunctions().length;
    LinkedHashSet<String> nonFunctionColumns = Sets.newLinkedHashSet();
    for (int i = 0; i < nonFunctionColumnNum; i++) {
        FieldEval fieldEval = (new FieldEval(node.getTargets()[i].getNamedColumn()));
        nonFunctionColumns.add(newContext.addExpr(fieldEval));
    }

    final String[] aggEvalNames;
    if (node.hasAggFunctions()) {
        final int evalNum = node.getWindowFunctions().length;
        aggEvalNames = new String[evalNum];
        for (int evalIdx = 0, targetIdx = nonFunctionColumnNum; targetIdx < node
                .getTargets().length; evalIdx++, targetIdx++) {
            Target target = node.getTargets()[targetIdx];
            WindowFunctionEval winFunc = node.getWindowFunctions()[evalIdx];
            aggEvalNames[evalIdx] = newContext.addExpr(new Target(winFunc, target.getCanonicalName()));
        }
    } else {
        aggEvalNames = null;
    }

    // visit a child node
    LogicalNode child = super.visitWindowAgg(newContext, plan, block, node, stack);

    node.setInSchema(child.getOutSchema());

    List<Target> targets = Lists.newArrayList();
    if (nonFunctionColumnNum > 0) {
        for (String column : nonFunctionColumns) {
            Target target = context.targetListMgr.getTarget(column);

            // This rewrites grouping keys: it sets the right column names and
            // eliminates duplicated grouping keys.
            if (context.targetListMgr.isEvaluated(column)) {
                targets.add(new Target(new FieldEval(target.getNamedColumn())));
            } else {
                if (target.getEvalTree().getType() == EvalType.FIELD) {
                    targets.add(target);
                }
            }
        }
    }

    // Getting projected targets
    if (node.hasAggFunctions() && aggEvalNames != null) {
        WindowFunctionEval[] aggEvals = new WindowFunctionEval[aggEvalNames.length];
        int i = 0;
        for (Iterator<String> it = getFilteredReferences(aggEvalNames, TUtil.newList(aggEvalNames)); it
                .hasNext();) {

            String referenceName = it.next();
            Target target = context.targetListMgr.getTarget(referenceName);

            if (LogicalPlanner.checkIfBeEvaluatedAtWindowAgg(target.getEvalTree(), node)) {
                aggEvals[i++] = target.getEvalTree();
                context.targetListMgr.markAsEvaluated(target);

                targets.add(new Target(new FieldEval(target.getNamedColumn())));
            }
        }
        if (aggEvals.length > 0) {
            node.setWindowFunctions(aggEvals);
        }
    }

    node.setTargets(targets.toArray(new Target[targets.size()]));
    return node;
}

From source file: org.codehaus.mojo.jsimport.AbstractGenerateHtmlMojo.java

/**
 * Given a set of file paths, build a new set of any dependencies each of these paths may have, any dependencies
 * that those dependencies have, and so on.
 * 
 * @param visitedNodes a set of nodes already visited so as to avoid infinite recursion.
 * @param filePaths the set of file paths to iterate over.
 * @param allImports the set to build.
 * @return if not null, a file path that revealed a cyclic dependency issue.
 */
private String buildImportsRecursively(Set<String> visitedNodes, LinkedHashSet<String> filePaths,
        LinkedHashSet<String> allImports) {
    String cyclicFilePath = null;
    for (String filePath : filePaths) {
        if (!visitedNodes.contains(filePath)) {
            visitedNodes.add(filePath);

            LinkedHashSet<String> filePathDependencies = fileDependencies.get(filePath);
            if (filePathDependencies == null && compileFileDependencies != null) {
                filePathDependencies = compileFileDependencies.get(filePath);
            }

            if (filePathDependencies != null) {
                cyclicFilePath = buildImportsRecursively(visitedNodes, filePathDependencies, allImports);
            } else if (allImports.contains(filePath)) {
                cyclicFilePath = filePath;
            }

            if (cyclicFilePath != null) {
                break;
            }

            allImports.add(filePath);
        }
    }
    return cyclicFilePath;
}
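
The visited set above is the standard depth-first guard: without it, a dependency cycle would recurse forever. A self-contained sketch of the same idea (hypothetical file names, not the plugin's API), where a LinkedHashSet accumulates transitive imports dependency-first:

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class TransitiveImportsSketch {
    // Dependencies are emitted before the files that need them; the visited
    // set guards against revisiting nodes (and so against looping on cycles).
    static void collect(String file, Map<String, List<String>> deps,
            Set<String> visited, LinkedHashSet<String> ordered) {
        if (!visited.add(file)) {
            return; // already seen: either finished, or part of a cycle
        }
        for (String dep : deps.getOrDefault(file, List.of())) {
            collect(dep, deps, visited, ordered);
        }
        ordered.add(file);
    }

    public static void main(String[] args) {
        Map<String, List<String>> deps = Map.of(
                "app.js", List.of("util.js", "dom.js"),
                "dom.js", List.of("util.js"));
        LinkedHashSet<String> ordered = new LinkedHashSet<>();
        collect("app.js", deps, new LinkedHashSet<>(), ordered);
        System.out.println(ordered); // [util.js, dom.js, app.js]
    }
}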

From source file: org.springframework.extensions.webscripts.AbstractWebScript.java

/**
 * <p>A locale-based lookup sequence is built using the supplied {@link Locale} and (if it is
 * different) the default {@link Locale}:
 * <ol><li>Lookup <descid>_<language>_<country>_<variant>.properties</li>
 * <li>Lookup <descid>_<language>_<country>.properties</li>
 * <li>Lookup <descid>_<language>.properties</li>
 * </ol>
 * Then repeat with the default {@link Locale}. Finally, lookup <descid>.properties.
 * </p>
 * @param path String
 * @param locale The requested {@link Locale}.
 * @return LinkedHashSet<String>
 */
private LinkedHashSet<String> buildLocalePathList(final String path, final Locale locale) {
    final LinkedHashSet<String> pathSet = new LinkedHashSet<String>();

    // Add the paths for the current locale...
    pathSet.add(path + '_' + locale.toString() + DOT_PROPS);
    if (locale.getCountry().length() != 0) {
        pathSet.add(path + '_' + locale.getLanguage() + '_' + locale.getCountry() + DOT_PROPS);
    }
    pathSet.add(path + '_' + locale.getLanguage() + DOT_PROPS);

    if (!locale.equals(Locale.getDefault())) {
        // Use the default locale to add some more possible paths...
        final Locale defLocale = Locale.getDefault();
        pathSet.add(path + '_' + defLocale.toString() + DOT_PROPS);
        if (defLocale.getCountry().length() != 0) {
            pathSet.add(path + '_' + defLocale.getLanguage() + '_' + defLocale.getCountry() + DOT_PROPS);
        }
        pathSet.add(path + '_' + defLocale.getLanguage() + DOT_PROPS);
    }

    // Finally add a path with no locale information...
    pathSet.add(path + DOT_PROPS);
    return pathSet;
}
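
To make the resulting lookup order concrete, here is a trimmed, runnable restatement of the logic above (assuming DOT_PROPS is ".properties" and using a fixed stand-in for the default Locale); note how LinkedHashSet.add silently drops the duplicate language_country entry when the Locale has no variant:

import java.util.LinkedHashSet;
import java.util.Locale;

public class LocalePathDemo {
    public static void main(String[] args) {
        Locale requested = Locale.CANADA_FRENCH; // fr_CA
        Locale fallback = Locale.US;             // stand-in for Locale.getDefault()
        LinkedHashSet<String> paths = new LinkedHashSet<>();
        for (Locale l : new Locale[] { requested, fallback }) {
            paths.add("msg_" + l + ".properties");
            if (!l.getCountry().isEmpty()) {
                paths.add("msg_" + l.getLanguage() + '_' + l.getCountry() + ".properties");
            }
            paths.add("msg_" + l.getLanguage() + ".properties");
        }
        paths.add("msg.properties");
        // [msg_fr_CA.properties, msg_fr.properties,
        //  msg_en_US.properties, msg_en.properties, msg.properties]
        System.out.println(paths);
    }
}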

From source file: com.android.email.mail.store.ImapFolder.java

public void fetchInternal(Message[] messages, FetchProfile fp, MessageRetrievalListener listener)
        throws MessagingException {
    if (messages.length == 0) {
        return;
    }
    checkOpen();
    HashMap<String, Message> messageMap = new HashMap<String, Message>();
    for (Message m : messages) {
        messageMap.put(m.getUid(), m);
    }

    /*
     * Figure out what command we are going to run:
     * FLAGS     - UID FETCH (FLAGS)
     * ENVELOPE  - UID FETCH (INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[
     *                            HEADER.FIELDS (date subject from content-type to cc)])
     * STRUCTURE - UID FETCH (BODYSTRUCTURE)
     * BODY_SANE - UID FETCH (BODY.PEEK[]<0.N>) where N = max bytes returned
     * BODY      - UID FETCH (BODY.PEEK[])
     * Part      - UID FETCH (BODY.PEEK[ID]) where ID = mime part ID
     */

    final LinkedHashSet<String> fetchFields = new LinkedHashSet<String>();

    fetchFields.add(ImapConstants.UID);
    if (fp.contains(FetchProfile.Item.FLAGS)) {
        fetchFields.add(ImapConstants.FLAGS);
    }
    if (fp.contains(FetchProfile.Item.ENVELOPE)) {
        fetchFields.add(ImapConstants.INTERNALDATE);
        fetchFields.add(ImapConstants.RFC822_SIZE);
        fetchFields.add(ImapConstants.FETCH_FIELD_HEADERS);
    }
    if (fp.contains(FetchProfile.Item.STRUCTURE)) {
        fetchFields.add(ImapConstants.BODYSTRUCTURE);
    }

    if (fp.contains(FetchProfile.Item.BODY_SANE)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_SANE);
    }
    if (fp.contains(FetchProfile.Item.BODY)) {
        fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK);
    }

    // TODO Why are we only fetching the first part given?
    final Part fetchPart = fp.getFirstPart();
    if (fetchPart != null) {
        final String[] partIds = fetchPart.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
        // TODO Why can a single part have more than one Id? And why should we only fetch
        // the first id if there are more than one?
        if (partIds != null) {
            fetchFields.add(ImapConstants.FETCH_FIELD_BODY_PEEK_BARE + "[" + partIds[0] + "]");
        }
    }

    try {
        mConnection.sendCommand(String.format(Locale.US, ImapConstants.UID_FETCH + " %s (%s)",
                ImapStore.joinMessageUids(messages),
                Utility.combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')), false);
        ImapResponse response;
        do {
            response = null;
            try {
                response = mConnection.readResponse();

                if (!response.isDataResponse(1, ImapConstants.FETCH)) {
                    continue; // Ignore
                }
                final ImapList fetchList = response.getListOrEmpty(2);
                final String uid = fetchList.getKeyedStringOrEmpty(ImapConstants.UID).getString();
                if (TextUtils.isEmpty(uid))
                    continue;

                ImapMessage message = (ImapMessage) messageMap.get(uid);
                if (message == null)
                    continue;

                if (fp.contains(FetchProfile.Item.FLAGS)) {
                    final ImapList flags = fetchList.getKeyedListOrEmpty(ImapConstants.FLAGS);
                    for (int i = 0, count = flags.size(); i < count; i++) {
                        final ImapString flag = flags.getStringOrEmpty(i);
                        if (flag.is(ImapConstants.FLAG_DELETED)) {
                            message.setFlagInternal(Flag.DELETED, true);
                        } else if (flag.is(ImapConstants.FLAG_ANSWERED)) {
                            message.setFlagInternal(Flag.ANSWERED, true);
                        } else if (flag.is(ImapConstants.FLAG_SEEN)) {
                            message.setFlagInternal(Flag.SEEN, true);
                        } else if (flag.is(ImapConstants.FLAG_FLAGGED)) {
                            message.setFlagInternal(Flag.FLAGGED, true);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.ENVELOPE)) {
                    final Date internalDate = fetchList.getKeyedStringOrEmpty(ImapConstants.INTERNALDATE)
                            .getDateOrNull();
                    final int size = fetchList.getKeyedStringOrEmpty(ImapConstants.RFC822_SIZE)
                            .getNumberOrZero();
                    final String header = fetchList
                            .getKeyedStringOrEmpty(ImapConstants.BODY_BRACKET_HEADER, true).getString();

                    message.setInternalDate(internalDate);
                    message.setSize(size);
                    message.parse(Utility.streamFromAsciiString(header));
                }
                if (fp.contains(FetchProfile.Item.STRUCTURE)) {
                    ImapList bs = fetchList.getKeyedListOrEmpty(ImapConstants.BODYSTRUCTURE);
                    if (!bs.isEmpty()) {
                        try {
                            parseBodyStructure(bs, message, ImapConstants.TEXT);
                        } catch (MessagingException e) {
                            if (Logging.LOGD) {
                                LogUtils.v(Logging.LOG_TAG, e, "Error handling message");
                            }
                            message.setBody(null);
                        }
                    }
                }
                if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
                    // Body is keyed by "BODY[]...".
                    // Previously used "BODY[..." but this can be confused with "BODY[HEADER..."
                    // TODO Should we accept "RFC822" as well??
                    ImapString body = fetchList.getKeyedStringOrEmpty("BODY[]", true);
                    InputStream bodyStream = body.getAsStream();
                    message.parse(bodyStream);
                }
                if (fetchPart != null) {
                    InputStream bodyStream = fetchList.getKeyedStringOrEmpty("BODY[", true).getAsStream();
                    String[] encodings = fetchPart.getHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING);

                    String contentTransferEncoding = null;
                    if (encodings != null && encodings.length > 0) {
                        contentTransferEncoding = encodings[0];
                    } else {
                        // According to http://tools.ietf.org/html/rfc2045#section-6.1
                        // "7bit" is the default.
                        contentTransferEncoding = "7bit";
                    }

                    try {
                        // TODO Don't create 2 temp files.
                        // decodeBody creates BinaryTempFileBody, but we could avoid this
                        // if we implement ImapStringBody.
                        // (We'll need to share a temp file.  Protect it with a ref-count.)
                        fetchPart.setBody(
                                decodeBody(bodyStream, contentTransferEncoding, fetchPart.getSize(), listener));
                    } catch (Exception e) {
                        // TODO: Figure out what kinds of exceptions might actually be thrown
                        // from here. This blanket catch-all is because we're not sure what to
                        // do if we don't have a contentTransferEncoding, and we don't have
                        // time to figure out what exceptions might be thrown.
                        LogUtils.e(Logging.LOG_TAG, "Error fetching body %s", e);
                    }
                }

                if (listener != null) {
                    listener.messageRetrieved(message);
                }
            } finally {
                destroyResponses();
            }
        } while (!response.isTagged());
    } catch (IOException ioe) {
        throw ioExceptionHandler(mConnection, ioe);
    }
}