Example usage for java.util LinkedHashSet add

List of usage examples for java.util LinkedHashSet add

Introduction

On this page you can find usage examples for java.util.LinkedHashSet.add.

Prototype

boolean add(E e);

Document

Adds the specified element to this set if it is not already present (optional operation).
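
As a quick illustration of the method's behavior, here is a minimal, self-contained sketch (not taken from any of the projects listed below): add returns true when the element was actually inserted and false when an equal element was already present, and a LinkedHashSet iterates in insertion order.

import java.util.LinkedHashSet;

public class LinkedHashSetAddExample {
    public static void main(String[] args) {
        LinkedHashSet<String> names = new LinkedHashSet<String>();

        // first insertions succeed and return true
        System.out.println(names.add("alpha")); // true
        System.out.println(names.add("beta"));  // true

        // adding a duplicate leaves the set unchanged and returns false
        System.out.println(names.add("alpha")); // false

        // iteration order matches insertion order: [alpha, beta]
        System.out.println(names);
    }
}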

Usage

From source file:org.pentaho.reporting.engine.classic.extensions.datasources.mondrian.AbstractMDXDataFactory.java

public String[] getReferencedFields(final String queryName, final DataRow parameters)
        throws ReportDataFactoryException {
    final boolean isNewConnection = connection == null;
    try {
        if (connection == null) {
            connection = mondrianConnectionProvider.createConnection(computeProperties(parameters),
                    dataSourceProvider.getDataSource());
        }
    } catch (SQLException e) {
        logger.error(e);
        throw new ReportDataFactoryException("Failed to create DataSource (SQL Exception - error code: "
                + e.getErrorCode() + "):" + e.toString(), e);
    } catch (MondrianException e) {
        logger.error(e);
        throw new ReportDataFactoryException("Failed to create DataSource (Mondrian Exception):" + e.toString(),
                e);
    }

    try {
        if (connection == null) {
            throw new ReportDataFactoryException("Factory is closed.");
        }
        final LinkedHashSet<String> parameter = new LinkedHashSet<String>();

        final MDXCompiler compiler = new MDXCompiler(parameters, getLocale());
        final String computedQuery = computedQuery(queryName, parameters);
        final String mdxQuery = compiler.translateAndLookup(computedQuery, parameters);
        parameter.addAll(compiler.getCollectedParameter());
        // Alternatively, JNDI is possible. Maybe even more ..
        final Query query = connection.parseQuery(mdxQuery);
        final Parameter[] queryParameters = query.getParameters();
        for (int i = 0; i < queryParameters.length; i++) {
            final Parameter queryParameter = queryParameters[i];
            parameter.add(queryParameter.getName());
        }
        if (jdbcUserField != null) {
            parameter.add(jdbcUserField);
        }
        if (roleField != null) {
            parameter.add(roleField);
        }
        parameter.add(DataFactory.QUERY_LIMIT);
        return parameter.toArray(new String[parameter.size()]);
    } catch (MondrianException e) {
        throw new ReportDataFactoryException("Failed to create datasource:" + e.getLocalizedMessage(), e);
    } finally {
        if (isNewConnection) {
            close();
        }
    }
}

From source file:com.geewhiz.pacify.managers.FilterManager.java

private LinkedHashSet<Defect> filterPArchive(PArchive pArchive) {
    logger.info("      Customize Archive [{}]", pArchive.getRelativePath());

    LinkedHashSet<Defect> defects = new LinkedHashSet<Defect>();

    Map<PFile, File> replaceFiles = new HashMap<PFile, File>();

    for (PFile pFile : pArchive.getPFiles()) {
        logger.info("         Customize File [{}]", pFile.getRelativePath());
        logger.debug("             Filtering [{}] in archive [{}] using encoding [{}] and filter [{}]",
                pFile.getRelativePath(), pMarker.getAbsoluteFileFor(pArchive).getAbsolutePath(),
                pFile.getEncoding(), pFile.getFilterClass());

        File fileToFilter = extractFile(pArchive, pFile);
        PacifyFilter pacifyFilter = getFilterForPFile(pArchive, pFile);

        Map<String, String> propertyValues = new HashMap<String, String>();
        LinkedHashSet<Defect> propertyValueDefects = fillPropertyValuesFor(propertyValues, pFile);
        if (propertyValueDefects.size() > 0) {
            return propertyValueDefects;
        }

        String beginToken = pMarker.getBeginTokenFor(pArchive, pFile);
        String endToken = pMarker.getEndTokenFor(pArchive, pFile);
        String encoding = pFile.getEncoding();

        defects.addAll(pacifyFilter.filter(propertyValues, beginToken, endToken, fileToFilter, encoding));

        replaceFiles.put(pFile, fileToFilter);
        logger.info("             [{}] placeholders replaced.", pFile.getPProperties().size());
    }

    try {
        FileUtils.replaceFilesInArchive(pMarker, pArchive, replaceFiles);
    } catch (ArchiveDefect e) {
        defects.add(e);
    }

    for (Entry<PFile, File> entry : replaceFiles.entrySet()) {
        entry.getValue().delete();
    }

    return defects;
}

From source file:org.apache.geronimo.mavenplugins.car.AbstractCarMojo.java

protected LinkedHashSet<DependencyType> toDependencies(List<Dependency> explicitDependencies,
        UseMavenDependencies useMavenDependencies, boolean includeImport)
        throws InvalidDependencyVersionException, ArtifactResolutionException, ProjectBuildingException,
        MojoExecutionException {
    List<DependencyType> dependencyTypes = new ArrayList<DependencyType>();
    for (Dependency dependency : explicitDependencies) {
        dependencyTypes.add(dependency.toDependencyType());
    }
    LinkedHashSet<DependencyType> dependencies = new LinkedHashSet<DependencyType>();

    if (useMavenDependencies == null || !useMavenDependencies.isValue()) {
        dependencies.addAll(dependencyTypes);
        localDependencies = new HashSet<Artifact>();
        for (DependencyType dependency : dependencies) {
            localDependencies.add(geronimoToMavenArtifact(dependency.toArtifact()));
        }
    } else {
        Map<String, DependencyType> explicitDependencyMap = new HashMap<String, DependencyType>();
        for (DependencyType dependency : dependencyTypes) {
            explicitDependencyMap.put(getKey(dependency), dependency);
        }

        getDependencies(project, useMavenDependencies.isUseTransitiveDependencies());
        for (Artifact entry : localDependencies) {
            dependencies.add(toDependencyType(entry, explicitDependencyMap,
                    useMavenDependencies.isIncludeVersion(), includeImport));
        }
    }

    return dependencies;
}

From source file:org.broadinstitute.gatk.utils.variant.GATKVariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniquifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name.
 * simpleMerge no longer verifies that sample names are unique, even if genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE. One should use
 * SampleUtils.verifyUniqueSamplesNames to check this before calling simpleMerge.
 *
 * For more information on this method see: http://www.thedistractionnetwork.com/programmer-problem/
 *
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext       representing the merge of unsortedVCs
 */
public static VariantContext simpleMerge(final Collection<VariantContext> unsortedVCs,
        final List<String> priorityListOfVCs, final int originalNumOfVCs,
        final FilteredRecordMergeType filteredRecordMergeType, final GenotypeMergeType genotypeMergeOptions,
        final boolean annotateOrigin, final boolean printMessages, final String setKey,
        final boolean filteredAreUncalled, final boolean mergeInfoWithMaxAC) {
    if (unsortedVCs == null || unsortedVCs.isEmpty())
        return null;

    if (priorityListOfVCs != null && originalNumOfVCs != priorityListOfVCs.size())
        throw new IllegalArgumentException(
                "the number of the original VariantContexts must be the same as the number of VariantContexts in the priority list");

    if (annotateOrigin && priorityListOfVCs == null && originalNumOfVCs == 0)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts or the number of original VariantContexts");

    final List<VariantContext> preFilteredVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with reference base in case of indels if necessary
    List<VariantContext> VCs = new ArrayList<>();

    for (final VariantContext vc : preFilteredVCs) {
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(vc);
    }

    if (VCs.isEmpty()) // everything is filtered out and we're filteredAreUncalled
        return null;

    // establish the baseline info from the first VC
    final VariantContext first = VCs.get(0);
    final String name = first.getSource();
    final Allele refAllele = determineReferenceAllele(VCs);

    final LinkedHashSet<Allele> alleles = new LinkedHashSet<>();
    final Set<String> filters = new HashSet<>();
    final Map<String, Object> attributes = new LinkedHashMap<>();
    final Set<String> inconsistentAttributes = new HashSet<>();
    final Set<String> variantSources = new HashSet<>(); // contains the set of sources we found in our set of VCs that are variant
    final Set<String> rsIDs = new LinkedHashSet<>(1); // most of the time there's one id

    VariantContext longestVC = first;
    int depth = 0;
    int maxAC = -1;
    final Map<String, Object> attributesWithMaxAC = new LinkedHashMap<>();
    double log10PError = CommonInfo.NO_LOG10_PERROR;
    boolean anyVCHadFiltersApplied = false;
    VariantContext vcWithMaxAC = null;
    GenotypesContext genotypes = GenotypesContext.create();

    // counting the number of filtered and variant VCs
    int nFiltered = 0;

    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches
    for (final VariantContext vc : VCs) {
        if (longestVC.getStart() != vc.getStart())
            throw new IllegalStateException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (VariantContextUtils.getSize(vc) > VariantContextUtils.getSize(longestVC))
            longestVC = vc; // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        if (vc.isVariant())
            variantSources.add(vc.getSource());

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        // We always take the QUAL of the first VC with a non-MISSING qual for the combined value
        if (log10PError == CommonInfo.NO_LOG10_PERROR)
            log10PError = vc.getLog10PError();

        filters.addAll(vc.getFilters());
        anyVCHadFiltersApplied |= vc.filtersWereApplied();

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += vc.getAttributeAsInt(VCFConstants.DEPTH_KEY, 0);
        if (vc.hasID())
            rsIDs.add(vc.getID());
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY, null);
            // let's see if the string contains a "," separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                final List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (final String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (final Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            final String key = p.getKey();
            final Object value = p.getValue();
            // only output annotations that have the same value in every input VC
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                final boolean alreadyFound = attributes.containsKey(key);
                final Object boundValue = attributes.get(key);
                final boolean boundIsMissingValue = alreadyFound
                        && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(value) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    attributes.put(key, value);
                }
            }
        }
    }

    // if we have more alternate alleles in the merged VC than in one or more of the
    // original VCs, we need to strip out the GL/PLs (because they are no longer accurate), as well as allele-dependent attributes like AC,AF, and AD
    for (final VariantContext vc : VCs) {
        if (vc.getAlleles().size() == 1)
            continue;
        if (hasPLIncompatibleAlleles(alleles, vc.getAlleles())) {
            if (!genotypes.isEmpty()) {
                logger.debug(String.format(
                        "Stripping PLs at %s:%d-%d due to incompatible alleles merged=%s vs. single=%s",
                        vc.getChr(), vc.getStart(), vc.getEnd(), alleles, vc.getAlleles()));
            }
            genotypes = stripPLsAndAD(genotypes);
            // this will remove stale AC,AF attributed from vc
            VariantContextUtils.calculateChromosomeCounts(vc, attributes, true);
            break;
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if ((filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
            || filteredRecordMergeType == FilteredRecordMergeType.KEEP_UNCONDITIONAL)
        filters.clear();

    if (annotateOrigin) { // we care about where the call came from
        String setValue;
        if (nFiltered == 0 && variantSources.size() == originalNumOfVCs) // nothing was unfiltered
            setValue = MERGE_INTERSECTION;
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = MERGE_FILTER_IN_ALL;
        else if (variantSources.isEmpty()) // everyone was reference
            setValue = MERGE_REF_IN_ALL;
        else {
            final LinkedHashSet<String> s = new LinkedHashSet<>();
            for (final VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? MERGE_FILTER_PREFIX + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, setValue);
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));

    final String ID = rsIDs.isEmpty() ? VCFConstants.EMPTY_ID_FIELD : Utils.join(",", rsIDs);

    final VariantContextBuilder builder = new VariantContextBuilder().source(name).id(ID);
    builder.loc(longestVC.getChr(), longestVC.getStart(), longestVC.getEnd());
    builder.alleles(alleles);
    builder.genotypes(genotypes);
    builder.log10PError(log10PError);
    if (anyVCHadFiltersApplied) {
        builder.filters(filters.isEmpty() ? filters : new TreeSet<>(filters));
    }
    builder.attributes(new TreeMap<>(mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));

    // Trim the padded bases of all alleles if necessary
    final VariantContext merged = builder.make();
    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}

From source file:pt.lsts.neptus.util.logdownload.LogsDownloaderWorkerActions.java

private void testingForLogFilesFromEachLogFolderAndFillInfo(LinkedList<LogFolderInfo> tmpLogFolderList) {

    long timeF1 = System.currentTimeMillis();

    Object[] objArray = new Object[gui.logFolderList.myModel.size()];
    gui.logFolderList.myModel.copyInto(objArray);
    for (Object comp : objArray) {
        if (stopLogListProcessing)
            break;

        try {
            LogFolderInfo logFolder = (LogFolderInfo) comp;

            int indexLFolder = tmpLogFolderList.indexOf(logFolder);
            LinkedHashSet<LogFileInfo> logFilesTmp = (indexLFolder != -1)
                    ? tmpLogFolderList.get(indexLFolder).getLogFiles()
                    : new LinkedHashSet<LogFileInfo>();
            for (LogFileInfo logFx : logFilesTmp) {
                if (stopLogListProcessing)
                    break;

                if (!logFolder.getLogFiles().contains(logFx)) {
                    // The file or directory is new
                    logFolder.addFile(logFx);
                } else {
                    // The file or directory is already known so let us update
                    LogFileInfo lfx = logFolder.getLogFile(logFx.getName()/* fxStr */);
                    if (lfx.getSize() == -1) {
                        lfx.setSize(logFx.getSize());
                    } else if (lfx.getSize() != logFx.getSize()) {
                        // System.out.println("//////////// " + lfx.getSize() + "  " + logFx.getSize());
                        if (lfx.getState() == LogFolderInfo.State.SYNC)
                            lfx.setState(LogFolderInfo.State.INCOMPLETE);
                        else if (lfx.getState() == LogFolderInfo.State.LOCAL)
                            lfx.setState(LogFolderInfo.State.INCOMPLETE);
                        lfx.setSize(logFx.getSize());
                        lfx.setFile(logFx.getFile());
                    } else if (lfx.getSize() == logFx.getSize()) {
                        if (lfx.getState() == LogFolderInfo.State.LOCAL)
                            lfx.setState(LogFolderInfo.State.SYNC);
                    }
                    lfx.setHost(logFx.getHost());

                    if (logFx.isDirectory()) {
                        ArrayList<LogFileInfo> notMatchElements = new ArrayList<>();
                        notMatchElements.addAll(lfx.getDirectoryContents());
                        for (LogFileInfo lfi : logFx.getDirectoryContents()) {
                            boolean alreadyExists = false;
                            for (LogFileInfo lfiLocal : lfx.getDirectoryContents()) {
                                if (lfi.equals(lfiLocal)) {
                                    alreadyExists = true;
                                    notMatchElements.remove(lfiLocal);
                                    lfi.setSize(lfiLocal.getSize());
                                    lfi.setFile(lfiLocal.getFile());
                                    lfi.setHost(lfiLocal.getHost());
                                }
                            }
                            if (!alreadyExists) {
                                lfx.getDirectoryContents().add(lfi);
                                lfx.setState(LogFolderInfo.State.INCOMPLETE);
                            }
                        }
                        for (LogFileInfo lfi : notMatchElements) {
                            lfx.getDirectoryContents().remove(lfi);
                        }
                    }

                    if (lfx.isDirectory()) {
                        if (!LogsDownloaderWorkerUtil.getFileTarget(lfx.getName(),
                                worker.getDirBaseToStoreFiles(), worker.getLogLabel()).exists()) {
                            for (LogFileInfo lfi : lfx.getDirectoryContents()) {
                                if (!LogsDownloaderWorkerUtil.getFileTarget(lfi.getName(),
                                        worker.getDirBaseToStoreFiles(), worker.getLogLabel()).exists()) {
                                    if (lfx.getState() != LogFolderInfo.State.NEW
                                            && lfx.getState() != LogFolderInfo.State.DOWNLOADING)
                                        lfx.setState(LogFolderInfo.State.INCOMPLETE);
                                    break;
                                }
                            }
                        } else {
                            long sizeD = LogsDownloaderWorkerUtil.getDiskSizeFromLocal(lfx, worker);
                            if (lfx.getSize() != sizeD && lfx.getState() == LogFolderInfo.State.SYNC)
                                lfx.setState(LogFolderInfo.State.INCOMPLETE);
                        }
                    } else {
                        if (!LogsDownloaderWorkerUtil.getFileTarget(lfx.getName(),
                                worker.getDirBaseToStoreFiles(), worker.getLogLabel()).exists()) {
                            if (lfx.getState() != LogFolderInfo.State.NEW
                                    && lfx.getState() != LogFolderInfo.State.DOWNLOADING) {
                                lfx.setState(LogFolderInfo.State.INCOMPLETE);
                                // System.out.println("//////////// " + lfx.getName() + "  " + LogsDownloaderUtil.getFileTarget(lfx.getName()).exists());
                            }
                        } else {
                            long sizeD = LogsDownloaderWorkerUtil.getDiskSizeFromLocal(lfx, worker);
                            if (lfx.getSize() != sizeD && lfx.getState() == LogFolderInfo.State.SYNC)
                                lfx.setState(LogFolderInfo.State.INCOMPLETE);
                        }
                    }
                }
            }

            // Put LOCAL state on files not in server
            LinkedHashSet<LogFileInfo> toDelFL = new LinkedHashSet<LogFileInfo>();
            for (LogFileInfo lfx : logFolder.getLogFiles()) {
                if (!logFilesTmp.contains(lfx)
                /* !res.keySet().contains(lfx.getName()) */) {
                    lfx.setState(LogFolderInfo.State.LOCAL);
                    if (!LogsDownloaderWorkerUtil
                            .getFileTarget(lfx.getName(), worker.getDirBaseToStoreFiles(), worker.getLogLabel())
                            .exists()) {
                        toDelFL.add(lfx);
                        // logFolder.getLogFiles().remove(lfx); //This cannot be done here
                    }
                }
            }
            for (LogFileInfo lfx : toDelFL)
                logFolder.getLogFiles().remove(lfx);
        } catch (Exception e) {
            NeptusLog.pub().debug(e.getMessage());
        }
    }

    NeptusLog.pub().warn(".......Testing for log files from each log folder "
            + (System.currentTimeMillis() - timeF1) + "ms");
}

From source file:com.prasanna.android.stacknetwork.service.UserServiceHelper.java

public LinkedHashSet<Tag> getTags(String site, int pageSize, boolean meTags) {
    int page = 1;
    LinkedHashSet<Tag> tags = null;
    String restEndPoint = meTags ? "/me/tags" : "/tags";
    boolean hasMore = true;

    Map<String, String> queryParams = AppUtils.getDefaultQueryParams();
    queryParams.put(StackUri.QueryParams.SORT, StackUri.Sort.ACTIVITY);
    queryParams.put(StackUri.QueryParams.SITE, OperatingSite.getSite().apiSiteParameter);
    queryParams.put(StackUri.QueryParams.PAGE_SIZE, String.valueOf(pageSize));
    queryParams.put(StackUri.QueryParams.SORT, meTags ? Sort.NAME : Sort.POPULAR);
    queryParams.put(StackUri.QueryParams.ORDER, meTags ? Order.ASC : Order.DESC);

    while (hasMore) {
        queryParams.put(StackUri.QueryParams.PAGE, String.valueOf(page++));
        JSONObjectWrapper jsonObjectWrapper = executeHttpGetRequest(restEndPoint, queryParams);

        if (jsonObjectWrapper != null) {
            JSONArray jsonArray = jsonObjectWrapper.getJSONArray(JsonFields.ITEMS);
            if (jsonArray != null && jsonArray.length() > 0) {
                if (tags == null)
                    tags = new LinkedHashSet<Tag>();

                for (int i = 0; i < jsonArray.length(); i++) {
                    try {
                        JSONObjectWrapper tagJson = JSONObjectWrapper.wrap(jsonArray.getJSONObject(i));
                        tags.add(new Tag(tagJson.getString(JsonFields.Tag.NAME)));
                    } catch (JSONException e) {
                        LogWrapper.d(getLogTag(), e.getMessage());
                    }
                }
            }

            /* Get all the tags only for a registered user */
            hasMore = meTags ? jsonObjectWrapper.getBoolean(JsonFields.HAS_MORE) : false;

            /*
             * Don't bombard the server if the user has like 10-15 pages of tags,
             * delay each request by 100ms
             */
            sleep(100);
        }
    }

    return tags;
}

From source file:org.apache.tajo.plan.rewrite.rules.ProjectionPushDownRule.java

public LogicalNode visitGroupBy(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block,
        GroupbyNode node, Stack<LogicalNode> stack) throws TajoException {
    Context newContext = new Context(context);

    int groupingKeyNum = node.getGroupingColumns().length;
    LinkedHashSet<String> groupingKeyNames = null;
    String[] aggEvalNames = null;

    // if this query block is distinct, this groupby node have the same target to that of its above operator.
    // So, it does not need to add new expression to newContext.
    if (!node.isForDistinctBlock()) {
        // Getting grouping key names
        if (groupingKeyNum > 0) {
            groupingKeyNames = Sets.newLinkedHashSet();
            for (int i = 0; i < groupingKeyNum; i++) {
                FieldEval fieldEval = new FieldEval(node.getGroupingColumns()[i]);
                groupingKeyNames.add(newContext.addExpr(fieldEval));
            }
        }

        // Getting eval names
        if (node.hasAggFunctions()) {
            final int evalNum = node.getAggFunctions().size();
            aggEvalNames = new String[evalNum];
            for (int evalIdx = 0, targetIdx = node.getGroupingColumns().length; targetIdx < node.getTargets()
                    .size(); evalIdx++, targetIdx++) {
                Target target = node.getTargets().get(targetIdx);
                EvalNode evalNode = node.getAggFunctions().get(evalIdx);
                aggEvalNames[evalIdx] = newContext.addExpr(new Target(evalNode, target.getCanonicalName()));
            }
        }
    }

    // visit a child node
    LogicalNode child = super.visitGroupBy(newContext, plan, block, node, stack);

    node.setInSchema(child.getOutSchema());
    if (node.isForDistinctBlock()) { // the grouping columns should be updated according to the schema of child node.
        node.setGroupingColumns(child.getOutSchema().toArray());
        node.setTargets(PlannerUtil.schemaToTargets(child.getOutSchema()));

        // Because it updates grouping columns and targets, it should refresh grouping key num and names.
        groupingKeyNum = node.getGroupingColumns().length;
        groupingKeyNames = Sets.newLinkedHashSet();
        for (int i = 0; i < groupingKeyNum; i++) {
            FieldEval fieldEval = new FieldEval(node.getGroupingColumns()[i]);
            groupingKeyNames.add(newContext.addExpr(fieldEval));
        }
    }

    List<Target> targets = Lists.newArrayList();
    if (groupingKeyNum > 0 && groupingKeyNames != null) {
        // Restoring grouping key columns
        final List<Column> groupingColumns = new ArrayList<>();
        for (String groupingKey : groupingKeyNames) {
            Target target = context.targetListMgr.getTarget(groupingKey);

            // it rewrite grouping keys.
            // This rewrite sets right column names and eliminates duplicated grouping keys.
            if (context.targetListMgr.isEvaluated(groupingKey)) {
                Column c = target.getNamedColumn();
                if (!groupingColumns.contains(c)) {
                    groupingColumns.add(c);
                    targets.add(new Target(new FieldEval(target.getNamedColumn())));
                }
            } else {
                if (target.getEvalTree().getType() == EvalType.FIELD) {
                    Column c = ((FieldEval) target.getEvalTree()).getColumnRef();
                    if (!groupingColumns.contains(c)) {
                        groupingColumns.add(c);
                        targets.add(target);
                        context.targetListMgr.markAsEvaluated(target);
                    }
                } else {
                    throw new TajoInternalError(
                            "Cannot evaluate this expression in grouping keys: " + target.getEvalTree());
                }
            }
        }

        node.setGroupingColumns(groupingColumns.toArray(new Column[groupingColumns.size()]));
    }

    // Getting projected targets
    if (node.hasAggFunctions() && aggEvalNames != null) {
        List<AggregationFunctionCallEval> aggEvals = new ArrayList<>();
        for (Iterator<String> it = getFilteredReferences(aggEvalNames, Arrays.asList(aggEvalNames)); it
                .hasNext();) {

            String referenceName = it.next();
            Target target = context.targetListMgr.getTarget(referenceName);

            if (LogicalPlanner.checkIfBeEvaluatedAtGroupBy(target.getEvalTree(), node)) {
                aggEvals.add(target.getEvalTree());
                context.targetListMgr.markAsEvaluated(target);
            }
        }
        if (aggEvals.size() > 0) {
            node.setAggFunctions(aggEvals);
        }
    }
    List<Target> finalTargets = buildGroupByTarget(node, targets, aggEvalNames);
    node.setTargets(finalTargets);

    LogicalPlanner.verifyProjectedFields(block, node);

    return node;
}

From source file:org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java

public LogicalNode visitJoin(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block, JoinNode node,
        Stack<LogicalNode> stack) throws PlanningException {
    Context newContext = new Context(context);

    String joinQualReference = null;
    if (node.hasJoinQual()) {
        for (EvalNode eachQual : AlgebraicUtil.toConjunctiveNormalFormArray(node.getJoinQual())) {
            if (eachQual instanceof BinaryEval) {
                BinaryEval binaryQual = (BinaryEval) eachQual;

                for (int i = 0; i < 2; i++) {
                    EvalNode term = binaryQual.getChild(i);
                    pushDownIfComplexTermInJoinCondition(newContext, eachQual, term);
                }
            }
        }

        joinQualReference = newContext.addExpr(node.getJoinQual());
        newContext.addNecessaryReferences(node.getJoinQual());
    }

    String[] referenceNames = null;
    if (node.hasTargets()) {
        referenceNames = new String[node.getTargets().length];
        int i = 0;
        for (Iterator<Target> it = getFilteredTarget(node.getTargets(), context.requiredSet); it.hasNext();) {
            Target target = it.next();
            referenceNames[i++] = newContext.addExpr(target);
        }
    }

    stack.push(node);
    LogicalNode left = visit(newContext, plan, block, node.getLeftChild(), stack);
    LogicalNode right = visit(newContext, plan, block, node.getRightChild(), stack);
    stack.pop();

    Schema merged = SchemaUtil.merge(left.getOutSchema(), right.getOutSchema());

    node.setInSchema(merged);

    if (node.hasJoinQual()) {
        Target target = context.targetListMgr.getTarget(joinQualReference);
        if (newContext.targetListMgr.isEvaluated(joinQualReference)) {
            throw new PlanningException(
                    "Join condition must be evaluated in the proper Join Node: " + joinQualReference);
        } else {
            node.setJoinQual(target.getEvalTree());
            newContext.targetListMgr.markAsEvaluated(target);
        }
    }

    LinkedHashSet<Target> projectedTargets = Sets.newLinkedHashSet();
    for (Iterator<String> it = getFilteredReferences(context.targetListMgr.getNames(), context.requiredSet); it
            .hasNext();) {
        String referenceName = it.next();
        Target target = context.targetListMgr.getTarget(referenceName);

        if (context.targetListMgr.isEvaluated(referenceName)) {
            Target fieldReference = new Target(new FieldEval(target.getNamedColumn()));
            if (LogicalPlanner.checkIfBeEvaluatedAtJoin(block, fieldReference.getEvalTree(), node,
                    stack.peek().getType() != NodeType.JOIN)) {
                projectedTargets.add(fieldReference);
            }
        } else if (LogicalPlanner.checkIfBeEvaluatedAtJoin(block, target.getEvalTree(), node,
                stack.peek().getType() != NodeType.JOIN)) {
            projectedTargets.add(target);
            context.targetListMgr.markAsEvaluated(target);
        }
    }

    node.setTargets(projectedTargets.toArray(new Target[projectedTargets.size()]));
    LogicalPlanner.verifyProjectedFields(block, node);
    return node;
}

From source file:org.opencb.opencga.storage.core.variant.io.VariantVcfDataWriter.java

private void addAnnotationInfo(LinkedHashSet<VCFHeaderLine> meta) {
    // check if variant annotations are exported in the INFO column
    annotations = null;
    if (queryOptions != null && queryOptions.getString("annotations") != null
            && !queryOptions.getString("annotations").isEmpty()) {
        String annotationString;
        switch (queryOptions.getString("annotations")) {
        case "all":
            annotationString = ALL_ANNOTATIONS.replaceAll(",", "|");
            break;
        case "default":
            annotationString = DEFAULT_ANNOTATIONS.replaceAll(",", "|");
            break;
        default:
            annotationString = queryOptions.getString("annotations").replaceAll(",", "|");
            break;
        }
        //            String annotationString = queryOptions.getString("annotations", DEFAULT_ANNOTATIONS).replaceAll(",", "|");
        annotations = Arrays.asList(annotationString.split("\\|"));
        meta.add(new VCFInfoHeaderLine("CSQ", 1, VCFHeaderLineType.String,
                "Consequence annotations from CellBase. " + "Format: " + annotationString));
    }
}

From source file:com.sonicle.webtop.calendar.Service.java

private LinkedHashSet<Integer> getActiveFolderIds() {
    LinkedHashSet<Integer> ids = new LinkedHashSet<>();
    synchronized (roots) {
        for (ShareRootCalendar root : getActiveRoots()) {
            for (ShareFolderCalendar folder : foldersByRoot.get(root.getShareId())) {
                if (inactiveFolders.contains(folder.getCalendar().getCalendarId()))
                    continue;
                ids.add(folder.getCalendar().getCalendarId());
            }
        }
    }
    return ids;
}