Example usage for java.util LinkedHashSet add

Introduction

On this page you can find example usages of java.util.LinkedHashSet.add.

Prototype

boolean add(E e);

Document

Adds the specified element to this set if it is not already present (optional operation).
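
For illustration, here is a minimal, self-contained sketch (the AddDemo class and its values are invented for this page) showing add's return value and the insertion-order guarantee of LinkedHashSet:

import java.util.LinkedHashSet;

public class AddDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> set = new LinkedHashSet<>();
        System.out.println(set.add("a")); // true: "a" was not yet present
        System.out.println(set.add("b")); // true
        System.out.println(set.add("a")); // false: already present, set unchanged
        System.out.println(set);          // prints [a, b] -- insertion order is kept
    }
}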

Usage

From source file:org.apache.hive.ptest.execution.conf.UnitTestPropertiesParser.java

private void addTestToResult(Map<String, LinkedHashSet<TestInfo>> result, TestInfo testInfo) {
    LinkedHashSet<TestInfo> moduleSet = result.get(testInfo.moduleName);
    if (moduleSet == null) {
        moduleSet = new LinkedHashSet<>();
        result.put(testInfo.moduleName, moduleSet);
    }
    moduleSet.add(testInfo);
}
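
Since Java 8, this get-or-create idiom can also be written with Map.computeIfAbsent; a possible equivalent of the method above (a sketch, assuming the same result map and TestInfo type) is:

private void addTestToResult(Map<String, LinkedHashSet<TestInfo>> result, TestInfo testInfo) {
    // computeIfAbsent creates and stores the set on first use, otherwise returns the existing one
    result.computeIfAbsent(testInfo.moduleName, k -> new LinkedHashSet<>()).add(testInfo);
}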

From source file:org.overlord.sramp.server.mvn.services.MavenFacadeServlet.java

/**
 * Generates the maven-metadata.xml file dynamically for a given groupId/artifactId pair.  This will
 * list all of the versions available for that groupId+artifactId, along with the latest release and
 * snapshot versions.
 * @param gavInfo
 */
private String doGenerateArtifactDirMavenMetaData(MavenGavInfo gavInfo) throws Exception {
    List<BaseArtifactType> artifacts = queryService.query("/s-ramp[@maven.groupId = '" + gavInfo.getGroupId()
            + "' and @maven.artifactId = '" + gavInfo.getArtifactId() + "']", "createdTimestamp", true);
    if (artifacts.size() == 0) {
        return null;
    }

    String groupId = gavInfo.getGroupId();
    String artifactId = gavInfo.getArtifactId();
    String latest = null;
    String release = null;
    String lastUpdated = null;

    LinkedHashSet<String> versions = new LinkedHashSet<String>();
    SimpleDateFormat format = new SimpleDateFormat("yyyyMMddHHmmss");
    for (BaseArtifactType artifact : artifacts) {
        String version = SrampModelUtils.getCustomProperty(artifact, "maven.version");
        if (versions.add(version)) {
            latest = version;
            if (!version.endsWith("-SNAPSHOT")) {
                release = version;
            }
        }
        lastUpdated = format.format(artifact.getCreatedTimestamp().toGregorianCalendar().getTime());
    }

    StringBuilder mavenMetadata = new StringBuilder();
    mavenMetadata.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
    mavenMetadata.append("<metadata>\n");
    mavenMetadata.append("  <groupId>").append(groupId).append("</groupId>\n");
    mavenMetadata.append("  <artifactId>").append(artifactId).append("</artifactId>\n");
    mavenMetadata.append("  <versioning>\n");
    mavenMetadata.append("    <latest>").append(latest).append("</latest>\n");
    mavenMetadata.append("    <release>").append(release).append("</release>\n");
    mavenMetadata.append("    <versions>\n");
    for (String version : versions) {
        mavenMetadata.append("      <version>").append(version).append("</version>\n");
    }
    mavenMetadata.append("    </versions>\n");
    mavenMetadata.append("    <lastUpdated>").append(lastUpdated).append("</lastUpdated>\n");
    mavenMetadata.append("  </versioning>\n");
    mavenMetadata.append("</metadata>\n");

    if (!gavInfo.isHash()) {
        return mavenMetadata.toString();
    } else {
        return generateHash(mavenMetadata.toString(), gavInfo.getHashAlgorithm());
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.java

public void addPartitionToUnderServedQueues(String queueName, String partition) {
    LinkedHashSet<String> underServedQueues = partitionToUnderServedQueues.get(partition);
    if (null == underServedQueues) {
        underServedQueues = new LinkedHashSet<String>();
        partitionToUnderServedQueues.put(partition, underServedQueues);
    }
    underServedQueues.add(queueName);
}

From source file:cz.incad.kramerius.pdf.impl.FirstPagePDFServiceImpl.java

String templateSelection(PreparedDocument rdoc, String... pids)
        throws XPathExpressionException, IOException, ParserConfigurationException, SAXException {
    ResourceBundle resourceBundle = resourceBundleService.getResourceBundle("base", localesProvider.get());

    StringTemplate template = new StringTemplate(IOUtils.readAsString(
            this.getClass().getResourceAsStream("templates/_first_page.st"), Charset.forName("UTF-8"), true));
    FirstPageViewObject fpvo = prepareViewObject(resourceBundle);

    // printed item
    GeneratedItem itm = new GeneratedItem();

    Map<String, LinkedHashSet<String>> detailItemValues = new HashMap<String, LinkedHashSet<String>>();
    Map<String, ObjectPidsPath> pathsMapping = new HashMap<String, ObjectPidsPath>();
    LinkedHashSet<String> roots = new LinkedHashSet<String>();
    for (String pid : pids) {
        ObjectPidsPath sPath = selectOnePath(pid);
        pathsMapping.put(pid, sPath);
        roots.add(sPath.getRoot());
    }

    for (String pid : pids) {
        ObjectPidsPath path = pathsMapping.get(pid);
        Map<String, Map<String, List<String>>> mods = processModsFromPath(path, null);
        String rootPid = path.getRoot();
        if (mods.get(rootPid).containsKey(TitleBuilder.MODS_TITLE)) {
            List<String> list = mods.get(rootPid).get(TitleBuilder.MODS_TITLE);
            if (!list.isEmpty()) {
                String key = TitleBuilder.MODS_TITLE;
                itemVals(detailItemValues, list, key);
            }
        }

        String[] rProps = renderedProperties(roots.size() == 1);
        String[] fromRootToLeaf = path.getPathFromRootToLeaf();
        for (int i = 0; i < fromRootToLeaf.length; i++) {
            String pidPath = fromRootToLeaf[i];
            for (String prop : rProps) {

                if (mods.get(pidPath).containsKey(prop)) {
                    List<String> list = mods.get(pidPath).get(prop);
                    itemVals(detailItemValues, list, prop);
                }
            }
        }
    }

    // main title
    List<DetailItem> details = new ArrayList<FirstPagePDFServiceImpl.DetailItem>();
    LinkedHashSet<String> maintitles = detailItemValues.get(TitleBuilder.MODS_TITLE);
    String key = maintitles != null && maintitles.size() > 1 ? resourceBundle.getString("pdf.fp.titles")
            : resourceBundle.getString("pdf.fp.title");
    if (maintitles != null && (!maintitles.isEmpty())) {
        details.add(new DetailItem(key, vals(maintitles).toString()));
    }

    for (String prop : renderedProperties(roots.size() == 1)) {
        LinkedHashSet<String> vals = detailItemValues.get(prop);
        key = vals != null && vals.size() > 1 ? resourceBundle.getString(i18nKey(prop) + "s")
                : resourceBundle.getString(i18nKey(prop));
        if (vals != null && (!vals.isEmpty())) {
            details.add(new DetailItem(key, vals(vals).toString()));
        }
    }

    // pages in the PDF
    pagesInSelectiontPdf(rdoc, resourceBundle, details);

    itm.setDetailItems((DetailItem[]) details.toArray(new DetailItem[details.size()]));
    fpvo.setGeneratedItems(new GeneratedItem[] { itm });

    template.setAttribute("viewinfo", fpvo);

    String templateText = template.toString();

    return templateText;
}

From source file:com.streamsets.datacollector.definition.ConfigDefinitionExtractor.java

void resolveDependencies(String configPrefix, List<ConfigDefinition> defs, Object contextMsg) {
    Map<String, ConfigDefinition> definitionsMap = new HashMap<>();
    Map<String, Map<String, Set<Object>>> dependencyMap = new HashMap<>();
    Map<String, Boolean> isFullyProcessed = new HashMap<>();
    for (ConfigDefinition def : defs) {
        definitionsMap.put(def.getName(), def);
        dependencyMap.put(def.getName(), new HashMap<String, Set<Object>>());
        isFullyProcessed.put(def.getName(), false);
    }

    cycles.clear();

    for (ConfigDefinition def : defs) {
        String dependsOnKey = def.getDependsOn();
        if (!StringUtils.isEmpty(dependsOnKey)) {
            verifyDependencyExists(definitionsMap, def, dependsOnKey, contextMsg);
            ConfigDefinition dependsOnDef = definitionsMap.get(dependsOnKey);
            // evaluate dependsOn triggers
            ConfigDef annotation = def.getConfigField().getAnnotation(ConfigDef.class);
            Set<Object> triggers = new HashSet<>();
            for (String trigger : annotation.triggeredByValue()) {
                triggers.add(ConfigValueExtractor.get().extract(dependsOnDef.getConfigField(),
                        dependsOnDef.getType(), trigger, contextMsg, true));
            }
            dependencyMap.get(def.getName()).put(dependsOnDef.getName(), triggers);
        }
        // Add direct dependencies to dependencyMap
        if (!def.getDependsOnMap().isEmpty()) {
            // Copy same as above.
            for (Map.Entry<String, List<Object>> dependsOn : def.getDependsOnMap().entrySet()) {
                dependsOnKey = dependsOn.getKey();
                if (!StringUtils.isEmpty(dependsOnKey)) {
                    verifyDependencyExists(definitionsMap, def, dependsOnKey, contextMsg);
                    Set<Object> triggers = new HashSet<>();
                    ConfigDefinition dependsOnDef = definitionsMap.get(dependsOnKey);
                    for (Object trigger : dependsOn.getValue()) {
                        triggers.add(ConfigValueExtractor.get().extract(dependsOnDef.getConfigField(),
                                dependsOnDef.getType(), (String) trigger, contextMsg, true));
                    }
                    Map<String, Set<Object>> dependencies = dependencyMap.get(def.getName());
                    if (dependencies.containsKey(dependsOnKey)) {
                        dependencies.get(dependsOnKey).addAll(triggers);
                    } else {
                        dependencies.put(dependsOnKey, triggers);
                    }
                }
            }
        }
    }

    for (ConfigDefinition def : defs) {

        if (isFullyProcessed.get(def.getName())) {
            continue;
        }
        // Now find all indirect dependencies
        Deque<StackNode> stack = new ArrayDeque<>();
        stack.push(new StackNode(def, new LinkedHashSet<String>()));
        while (!stack.isEmpty()) {
            StackNode current = stack.peek();
            // We processed this one's dependencies before, don't bother adding its children
            // The dependencies of this one have all been processed
            if (current.childrenAddedToStack) {
                stack.pop();
                Map<String, Set<Object>> currentDependencies = dependencyMap.get(current.def.getName());
                Set<String> children = new HashSet<>(current.def.getDependsOnMap().keySet());
                for (String child : children) {
                    if (StringUtils.isEmpty(child)) {
                        continue;
                    }
                    Map<String, Set<Object>> depsOfChild = dependencyMap.get(child);
                    for (Map.Entry<String, Set<Object>> depOfChild : depsOfChild.entrySet()) {
                        if (currentDependencies.containsKey(depOfChild.getKey())) {
                            // Add only the common trigger values,
                            // since it has to be one of those for both these to be triggered.
                            Set<Object> currentTriggers = currentDependencies.get(depOfChild.getKey());
                            Set<Object> childTriggers = depOfChild.getValue();
                            currentDependencies.put(depOfChild.getKey(),
                                    Sets.intersection(currentTriggers, childTriggers));
                        } else {
                            currentDependencies.put(depOfChild.getKey(), new HashSet<>(depOfChild.getValue()));
                        }
                    }
                }
                isFullyProcessed.put(current.def.getName(), true);
            } else {
                Set<String> children = current.def.getDependsOnMap().keySet();
                String dependsOn = current.def.getDependsOn();
                LinkedHashSet<String> dependencyAncestors = new LinkedHashSet<>(current.ancestors);
                dependencyAncestors.add(current.def.getName());
                if (!StringUtils.isEmpty(dependsOn) && !isFullyProcessed.get(current.def.getDependsOn())
                        && !detectCycle(dependencyAncestors, cycles, dependsOn)) {
                    stack.push(
                            new StackNode(definitionsMap.get(current.def.getDependsOn()), dependencyAncestors));
                }
                for (String child : children) {
                    if (!StringUtils.isEmpty(child) && !isFullyProcessed.get(child)
                            && !detectCycle(dependencyAncestors, cycles, child)) {
                        stack.push(new StackNode(definitionsMap.get(child), dependencyAncestors));
                    }
                }
                current.childrenAddedToStack = true;
            }
        }
    }
    Preconditions.checkState(cycles.isEmpty(),
            "The following cycles were detected in the configuration dependencies:\n"
                    + Joiner.on("\n").join(cycles));
    for (Map.Entry<String, Map<String, Set<Object>>> entry : dependencyMap.entrySet()) {
        Map<String, List<Object>> dependencies = new HashMap<>();
        definitionsMap.get(entry.getKey()).setDependsOnMap(dependencies);
        for (Map.Entry<String, Set<Object>> trigger : entry.getValue().entrySet()) {
            List<Object> triggerValues = new ArrayList<>();
            triggerValues.addAll(trigger.getValue());
            dependencies.put(trigger.getKey(), triggerValues);
        }
        definitionsMap.get(entry.getKey()).setDependsOn("");
    }
}

From source file:de.andreasschoknecht.LS3.DocumentCollection.java

/**
 * Delete a model from the Term-Document Matrix search structure.
 *
 * @param modelName The model name of the model to be removed.
 */
public void deleteModel(String modelName) {
    // Make sure file name is correct
    if (!modelName.endsWith(".pnml"))
        modelName = modelName + ".pnml";

    // Delete column from TD Matrix and set correct number of columns
    int deletionIndex = 0;
    for (int i = 0, l = fileList.length; i < l; i++) {
        if (fileList[i].equals(modelName)) {
            tdMatrix.deleteColumn(i);
            deletionIndex = i;
        }
    }

    // Delete model name from fileList (update to new file list).
    String[] newFileList = new String[fileList.length - 1];
    int counter = 0;
    for (int i = 0, l = fileList.length; i < l; i++) {
        if (i != deletionIndex) {
            newFileList[counter] = fileList[i];
            counter++;
        }
    }
    setFileList(newFileList);

    // Delete LS3Document representation of file "modelName" (update to new ArrayList of LS3Documents).
    for (int i = 0, l = ls3Documents.size(); i < l; i++) {
        if (ls3Documents.get(i).getPNMLPath().endsWith(modelName)) {
            ls3Documents.remove(i);
            i = l;
        }
    }

    // Delete term rows that contain only 0.0 values, i.e., delete terms that no longer occur.
    ArrayList<Integer> termDeletionIndices = new ArrayList<Integer>();
    boolean delete = true;

    double[][] matrix = tdMatrix.getMatrix();
    for (int i = 0, k = tdMatrix.getRowNumber(); i < k; i++) {
        for (int j = 0, l = tdMatrix.getColumnNumber(); j < l; j++) {
            if (matrix[i][j] != 0.0) {
                delete = false;
                j = l;
            }
        }
        if (delete)
            termDeletionIndices.add(i);
        else
            delete = true;
    }

    int deletionCounter = 0;
    for (int index : termDeletionIndices) {
        tdMatrix.deleteRow(index - deletionCounter);
        deletionCounter++;
    }

    // Update term list of document collection.
    deletionCounter = 0;
    LinkedHashSet<String> newTermList = new LinkedHashSet<String>();
    for (String term : termCollection) {
        if (!termDeletionIndices.contains(deletionCounter))
            newTermList.add(term);

        deletionCounter++;
    }

    setTermCollection(newTermList);

    // Update term list of TDMatrix object
    tdMatrix.setTermArray(termCollection.toArray(new String[0]));

}

From source file:eionet.cr.web.action.HarvestSourcesActionBean.java

/**
 *
 * @return
 * @throws DAOException
 */
public Resolution delete() throws DAOException {

    if (isUserLoggedIn()) {
        if (sourceUrl != null && !sourceUrl.isEmpty()) {

            // An authenticated user can delete sources they own. An
            // administrator can delete any source.
            // A priority source cannot be deleted. The administrator must
            // first change it to a non-priority source, then delete it.

            LinkedHashSet<String> sourcesToDelete = new LinkedHashSet<String>();
            LinkedHashSet<String> notOwnedSources = new LinkedHashSet<String>();
            LinkedHashSet<String> prioritySources = new LinkedHashSet<String>();
            LinkedHashSet<String> currentlyHarvested = new LinkedHashSet<String>();

            for (String url : sourceUrl) {

                HarvestSourceDTO source = factory.getDao(HarvestSourceDAO.class).getHarvestSourceByUrl(url);
                if (source != null) {

                    if (CurrentHarvests.contains(url)) {
                        currentlyHarvested.add(url);
                    } else {
                        if (userCanDelete(source)) {
                            sourcesToDelete.add(url);
                        } else if (source.isPrioritySource()) {
                            prioritySources.add(url);
                        } else if (!getUserName().equals(source.getOwner())) {
                            notOwnedSources.add(url);
                        }
                    }
                }
            }

            logger.debug("Deleting the following sources: " + sourcesToDelete);
            factory.getDao(HarvestSourceDAO.class).removeHarvestSources(sourcesToDelete);

            if (!sourcesToDelete.isEmpty()) {
                StringBuffer msg = new StringBuffer();
                msg.append("The following sources were successfully removed from the system: <ul>");
                for (String uri : sourcesToDelete) {
                    msg.append("<li>").append(uri).append("</li>");
                }
                msg.append("</ul>");
                addSystemMessage(msg.toString());
            }

            StringBuffer warnings = new StringBuffer();
            if (!prioritySources.isEmpty()) {
                warnings.append(
                        "The following sources could not be deleted, because they are priority sources: <ul>");
                for (String url : prioritySources) {
                    warnings.append("<li>").append(url).append("</li>");
                }
                warnings.append("</ul>");
            }
            if (!notOwnedSources.isEmpty()) {
                warnings.append(
                        "The following sources could not be deleted, because you are not their owner: <ul>");
                for (String url : notOwnedSources) {
                    warnings.append("<li>").append(url).append("</li>");
                }
                warnings.append("</ul>");
            }
            if (!currentlyHarvested.isEmpty()) {
                warnings.append(
                        "The following sources could not be deleted, because they are curently being harvested: <ul>");
                for (String url : currentlyHarvested) {
                    warnings.append("<li>").append(url).append("</li>");
                }
                warnings.append("</ul>");
            }

            if (warnings.length() > 0) {
                addWarningMessage(warnings.toString());
            }
        }
    } else {
        addWarningMessage(getBundle().getString("not.logged.in"));
    }
    return search();
}

From source file:com.clust4j.utils.VecUtils.java

public static LinkedHashSet<Double> unique(final double[] arr) {
    final LinkedHashSet<Double> out = new LinkedHashSet<>();
    for (Double t : arr)
        out.add(t);
    return out;
}

From source file:com.clust4j.utils.VecUtils.java

public static LinkedHashSet<Integer> unique(final int[] arr) {
    final LinkedHashSet<Integer> out = new LinkedHashSet<>();
    for (Integer t : arr)
        out.add(t);
    return out;
}
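
A brief usage note on these helpers: because the result is a LinkedHashSet, duplicates are dropped while the first-seen order is preserved. For example (values invented for illustration):

LinkedHashSet<Integer> u = VecUtils.unique(new int[] { 3, 1, 3, 2 });
System.out.println(u); // prints [3, 1, 2]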

From source file:org.broadinstitute.sting.utils.variant.GATKVariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniquifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name.
 * simpleMerge no longer verifies that sample names are unique, EVEN if genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE. One should use
 * SampleUtils.verifyUniqueSamplesNames to check that before calling simpleMerge.
 *
 * For more information on this method see: http://www.thedistractionnetwork.com/programmer-problem/
 *
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext       representing the merge of unsortedVCs
 */
public static VariantContext simpleMerge(final Collection<VariantContext> unsortedVCs,
        final List<String> priorityListOfVCs, final int originalNumOfVCs,
        final FilteredRecordMergeType filteredRecordMergeType, final GenotypeMergeType genotypeMergeOptions,
        final boolean annotateOrigin, final boolean printMessages, final String setKey,
        final boolean filteredAreUncalled, final boolean mergeInfoWithMaxAC) {

    if (unsortedVCs == null || unsortedVCs.size() == 0)
        return null;

    if (priorityListOfVCs != null && originalNumOfVCs != priorityListOfVCs.size())
        throw new IllegalArgumentException(
                "the number of the original VariantContexts must be the same as the number of VariantContexts in the priority list");

    if (annotateOrigin && priorityListOfVCs == null && originalNumOfVCs == 0)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts or the number of original VariantContexts");

    final List<VariantContext> preFilteredVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with reference base in case of indels if necessary
    final List<VariantContext> VCs = new ArrayList<VariantContext>();

    for (final VariantContext vc : preFilteredVCs) {
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(vc);
    }
    if (VCs.size() == 0) // everything was filtered out and filteredAreUncalled is set
        return null;

    // establish the baseline info from the first VC
    final VariantContext first = VCs.get(0);
    final String name = first.getSource();
    final Allele refAllele = determineReferenceAllele(VCs);

    final Set<Allele> alleles = new LinkedHashSet<Allele>();
    final Set<String> filters = new HashSet<String>();
    final Map<String, Object> attributes = new LinkedHashMap<String, Object>();
    final Set<String> inconsistentAttributes = new HashSet<String>();
    final Set<String> variantSources = new HashSet<String>(); // contains the set of sources we found in our set of VCs that are variant
    final Set<String> rsIDs = new LinkedHashSet<String>(1); // most of the time there's one id

    VariantContext longestVC = first;
    int depth = 0;
    int maxAC = -1;
    final Map<String, Object> attributesWithMaxAC = new LinkedHashMap<String, Object>();
    double log10PError = CommonInfo.NO_LOG10_PERROR;
    VariantContext vcWithMaxAC = null;
    GenotypesContext genotypes = GenotypesContext.create();

    // counting the number of filtered and variant VCs
    int nFiltered = 0;

    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches

    for (final VariantContext vc : VCs) {
        if (longestVC.getStart() != vc.getStart())
            throw new IllegalStateException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (VariantContextUtils.getSize(vc) > VariantContextUtils.getSize(longestVC))
            longestVC = vc; // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        if (vc.isVariant())
            variantSources.add(vc.getSource());

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        // We always take the QUAL of the first VC with a non-MISSING qual for the combined value
        if (log10PError == CommonInfo.NO_LOG10_PERROR)
            log10PError = vc.getLog10PError();

        filters.addAll(vc.getFilters());

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += vc.getAttributeAsInt(VCFConstants.DEPTH_KEY, 0);
        if (vc.hasID())
            rsIDs.add(vc.getID());
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY, null);
            // let's see if the string contains a ',' separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (final Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            String key = p.getKey();
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                final boolean alreadyFound = attributes.containsKey(key);
                final Object boundValue = attributes.get(key);
                final boolean boundIsMissingValue = alreadyFound
                        && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(p.getValue()) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    //System.out.printf("Inconsistent INFO values: %s => %s and %s%n", key, boundValue, p.getValue());
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    //if ( vc != first ) System.out.printf("Adding key %s => %s%n", p.getKey(), p.getValue());
                    attributes.put(key, p.getValue());
                }
            }
        }
    }

    // if we have more alternate alleles in the merged VC than in one or more of the
    // original VCs, we need to strip out the GL/PLs (because they are no longer accurate), as well as allele-dependent attributes like AC, AF, and AD
    for (final VariantContext vc : VCs) {
        if (vc.getAlleles().size() == 1)
            continue;
        if (hasPLIncompatibleAlleles(alleles, vc.getAlleles())) {
            if (!genotypes.isEmpty()) {
                logger.debug(String.format(
                        "Stripping PLs at %s:%d-%d due to incompatible alleles merged=%s vs. single=%s",
                        vc.getChr(), vc.getStart(), vc.getEnd(), alleles, vc.getAlleles()));
            }
            genotypes = stripPLsAndAD(genotypes);
            // this will remove stale AC, AF attributes from vc
            VariantContextUtils.calculateChromosomeCounts(vc, attributes, true);
            break;
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if ((filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
            || filteredRecordMergeType == FilteredRecordMergeType.KEEP_UNCONDITIONAL)
        filters.clear();

    if (annotateOrigin) { // we care about where the call came from
        String setValue;
        if (nFiltered == 0 && variantSources.size() == originalNumOfVCs) // nothing was filtered and every input VC was variant
            setValue = MERGE_INTERSECTION;
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = MERGE_FILTER_IN_ALL;
        else if (variantSources.isEmpty()) // everyone was reference
            setValue = MERGE_REF_IN_ALL;
        else {
            final LinkedHashSet<String> s = new LinkedHashSet<String>();
            for (final VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? MERGE_FILTER_PREFIX + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, setValue);
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));

    final String ID = rsIDs.isEmpty() ? VCFConstants.EMPTY_ID_FIELD : Utils.join(",", rsIDs);

    final VariantContextBuilder builder = new VariantContextBuilder().source(name).id(ID);
    builder.loc(longestVC.getChr(), longestVC.getStart(), longestVC.getEnd());
    builder.alleles(alleles);
    builder.genotypes(genotypes);
    builder.log10PError(log10PError);
    builder.filters(filters.isEmpty() ? filters : new TreeSet<String>(filters));
    builder.attributes(new TreeMap<String, Object>(mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));

    // Trim the padded bases of all alleles if necessary
    final VariantContext merged = builder.make();
    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}