Example usage for java.util HashMap containsValue

List of usage examples for java.util HashMap containsValue

Introduction

On this page you can find example usages of java.util HashMap containsValue.

Prototype

public boolean containsValue(Object value) 

Source Link

Document

Returns true if this map maps one or more keys to the specified value.

Usage

From source file:org.xwiki.contrib.mail.internal.JavamailMessageParser.java

/**
 * Builds a map from attachment Content-ID to a unique, valid file name.
 *
 * <p>For each body part that has both a non-blank Content-ID and a file name, the
 * file name is normalized via {@code getAttachmentValidName}, given a {@code .ext}
 * suffix if it has no extension, and suffixed with {@code -1}, {@code -2}, ... until
 * it no longer collides with a name already in the map.
 *
 * @param bodyparts MIME body parts to examine
 * @return map of Content-ID to unique attachment file name (never null)
 */
public HashMap<String, String> fillAttachmentContentIds(ArrayList<MimeBodyPart> bodyparts) {
    HashMap<String, String> attmap = new HashMap<String, String>();

    for (MimeBodyPart bodypart : bodyparts) {
        String fileName = null;
        String cid = null;
        try {
            fileName = bodypart.getFileName();
            cid = bodypart.getContentID();
        } catch (MessagingException e) {
            logger.warn("Failed to retrieve attachment information", e);
        }
        if (!StringUtils.isBlank(cid) && fileName != null) {
            logger.debug("fillAttachmentContentIds: Treating attachment: {} with contentid {}", fileName, cid);
            String name = getAttachmentValidName(fileName);
            int nb = 1;
            if (!name.contains(".")) {
                name += ".ext";
            }
            String newName = name;
            while (attmap.containsValue(newName)) {
                // Compute the renamed candidate once instead of duplicating the
                // replaceAll call in both the log statement and the assignment.
                String renamed = name.replaceAll("(.*)\\.([^.]*)", "$1-" + nb + ".$2");
                logger.debug("fillAttachmentContentIds: " + newName + " attachment already exists, renaming to "
                        + renamed);
                newName = renamed;
                nb++;
            }
            attmap.put(cid, newName);
        } else {
            logger.debug("fillAttachmentContentIds: content ID is null, nothing to do");
        }
    }

    return attmap;
}

From source file:org.kuali.coeus.common.budget.impl.struts.BudgetPersonnelAction.java

/**
 * Deletes the selected personnel detail from the selected budget line item, then
 * recomputes the line item's quantity as the number of distinct persons remaining.
 * If no personnel remain, the line item itself is removed before the budget period
 * is recalculated.
 *
 * @param mapping  current Struts action mapping
 * @param form     the {@code BudgetForm} backing the request
 * @param request  used to resolve the selected line/personnel indices
 * @param response unused
 * @return forward to the basic mapping
 * @throws Exception propagated from the service and recalculation layers
 */
public ActionForward deleteBudgetPersonnelDetails(ActionMapping mapping, ActionForm form,
        HttpServletRequest request, HttpServletResponse response) throws Exception {
    BudgetForm budgetForm = (BudgetForm) form;
    Budget budget = budgetForm.getBudget();
    int selectedBudgetPeriodIndex = budgetForm.getViewBudgetPeriod() - 1;
    int selectedBudgetLineItemIndex = getSelectedLine(request);
    getBudgetPersonnelBudgetService().deleteBudgetPersonnelDetails(budget, selectedBudgetPeriodIndex,
            selectedBudgetLineItemIndex, getSelectedPersonnel(request));

    // Count distinct person ids on the line item. Keying the map by personId lets
    // us use the O(1) containsKey instead of the original O(n) containsValue scan.
    HashMap uniqueBudgetPersonnelCount = new HashMap();
    int qty = 0;
    for (BudgetPersonnelDetails budgetPersonnelDetails : budget.getBudgetPeriod(selectedBudgetPeriodIndex)
            .getBudgetLineItem(selectedBudgetLineItemIndex).getBudgetPersonnelDetailsList()) {
        if (!uniqueBudgetPersonnelCount.containsKey(budgetPersonnelDetails.getPersonId())) {
            uniqueBudgetPersonnelCount.put(budgetPersonnelDetails.getPersonId(), Boolean.TRUE);
            qty = qty + 1;
        }
    }
    budget.getBudgetPeriod(selectedBudgetPeriodIndex).getBudgetLineItem(selectedBudgetLineItemIndex)
            .setQuantity(Integer.valueOf(qty));

    // If it was the last person on the line item, remove the line item as well.
    if (qty == 0) {
        budget.getBudgetPeriod(selectedBudgetPeriodIndex).getBudgetLineItems()
                .remove(selectedBudgetLineItemIndex);
    }

    recalculateBudgetPeriod(budgetForm, budget, budget.getBudgetPeriod(selectedBudgetPeriodIndex));
    return mapping.findForward(Constants.MAPPING_BASIC);
}

From source file:org.kuali.kra.budget.web.struts.action.BudgetPersonnelAction.java

/**
 * Deletes the selected personnel detail from the selected budget line item, then
 * recomputes the line item's quantity as the number of distinct persons remaining.
 * If no personnel remain, the line item itself is removed before the budget period
 * is recalculated.
 *
 * @param mapping  current Struts action mapping
 * @param form     the {@code BudgetForm} backing the request
 * @param request  used to resolve the selected line/personnel indices
 * @param response unused
 * @return forward to the basic mapping
 * @throws Exception propagated from the service and recalculation layers
 */
public ActionForward deleteBudgetPersonnelDetails(ActionMapping mapping, ActionForm form,
        HttpServletRequest request, HttpServletResponse response) throws Exception {
    BudgetForm budgetForm = (BudgetForm) form;
    BudgetDocument budgetDocument = budgetForm.getDocument();
    Budget budget = budgetDocument.getBudget();
    int selectedBudgetPeriodIndex = budgetForm.getViewBudgetPeriod() - 1;
    int selectedBudgetLineItemIndex = getSelectedLine(request);
    BudgetPersonnelBudgetService budgetPersonnelBudgetService = KraServiceLocator
            .getService(BudgetPersonnelBudgetService.class);
    budgetPersonnelBudgetService.deleteBudgetPersonnelDetails(budget, selectedBudgetPeriodIndex,
            selectedBudgetLineItemIndex, getSelectedPersonnel(request));

    // Count distinct person ids on the line item. Keying the map by personId lets
    // us use the O(1) containsKey instead of the original O(n) containsValue scan.
    HashMap uniqueBudgetPersonnelCount = new HashMap();
    int qty = 0;
    for (BudgetPersonnelDetails budgetPersonnelDetails : budget.getBudgetPeriod(selectedBudgetPeriodIndex)
            .getBudgetLineItem(selectedBudgetLineItemIndex).getBudgetPersonnelDetailsList()) {
        if (!uniqueBudgetPersonnelCount.containsKey(budgetPersonnelDetails.getPersonId())) {
            uniqueBudgetPersonnelCount.put(budgetPersonnelDetails.getPersonId(), Boolean.TRUE);
            qty = qty + 1;
        }
    }
    budget.getBudgetPeriod(selectedBudgetPeriodIndex).getBudgetLineItem(selectedBudgetLineItemIndex)
            .setQuantity(Integer.valueOf(qty));

    // If it was the last person on the line item, remove the line item as well.
    if (qty == 0) {
        budget.getBudgetPeriod(selectedBudgetPeriodIndex).getBudgetLineItems()
                .remove(selectedBudgetLineItemIndex);
    }

    recalculateBudgetPeriod(budgetForm, budget, budget.getBudgetPeriod(selectedBudgetPeriodIndex));
    return mapping.findForward(Constants.MAPPING_BASIC);
}

From source file:org.kuali.ole.select.service.impl.OleDocStoreSearchService.java

/**
 * Runs a docstore item search and returns the matching documents as {@code DocData} beans.
 *
 * <p>The title criterion (if any) is taken from {@code criteria}. In multi-valued mode each
 * value in {@code vals} becomes a phrase condition on the bib identifier (ANDed with the
 * title when present, ORed otherwise), capped at the configured order-queue limit; in
 * single-valued mode only the title condition is used. On any failure an "Item Exists"
 * error is recorded and whatever results were gathered are returned.
 *
 * @param criteria             search criteria; only the "title" entry is consulted
 * @param attr                 unused here; kept for interface compatibility
 * @param vals                 bib identifier values for multi-valued searches
 * @param isMultiValuedSearch  whether to build per-identifier phrase conditions
 * @return list of result beans (possibly empty, never null)
 */
public List<DocData> getDocSearchResults(Map<String, String> criteria, String attr, List<Object> vals,
        boolean isMultiValuedSearch) {
    // Result cap and per-search condition limit, taken from configuration.
    int maxLimit = Integer.parseInt(SpringContext.getBean(ConfigurationService.class)
            .getPropertyValueAsString(OLEConstants.DOCSEARCH_ORDERQUEUE_LIMIT_KEY));
    String title = "";
    if (criteria != null) {
        for (Map.Entry<String, String> entry : criteria.entrySet()) {
            if (entry.getKey().equals("title")) {
                title = entry.getValue();
            }
        }
    }
    List<DocData> results = new ArrayList<DocData>();
    try {
        // NOTE: the original created an unused ItemOleml instance and an unused
        // titleIdMap (queried via containsValue but never populated); both removed.
        org.kuali.ole.docstore.common.search.SearchParams searchParams = new org.kuali.ole.docstore.common.search.SearchParams();
        searchParams.setPageSize(maxLimit);
        SearchResponse searchResponse = null;
        if (isMultiValuedSearch) {
            boolean isTitleExist = false;
            if (StringUtils.isNotBlank(title)) {
                searchParams.getSearchConditions()
                        .add(searchParams.buildSearchCondition("AND", searchParams.buildSearchField(
                                org.kuali.ole.docstore.common.document.content.enums.DocType.ITEM.getCode(),
                                BibConstants.TITLE_SEARCH, title), "AND"));
                isTitleExist = true;
            }
            if (CollectionUtils.isNotEmpty(vals)) {
                int loop = 0;
                for (Object iv : vals) {
                    String id = iv.toString();
                    // AND the identifier with the title condition when one exists,
                    // otherwise OR the identifiers together.
                    if (isTitleExist) {
                        searchParams.getSearchConditions()
                                .add(searchParams.buildSearchCondition("phrase", searchParams.buildSearchField(
                                        org.kuali.ole.docstore.common.document.content.enums.DocType.ITEM
                                                .getCode(),
                                        ItemConstants.BIB_IDENTIFIER, id), "AND"));
                    } else {
                        searchParams.getSearchConditions()
                                .add(searchParams.buildSearchCondition("phrase", searchParams.buildSearchField(
                                        org.kuali.ole.docstore.common.document.content.enums.DocType.ITEM
                                                .getCode(),
                                        ItemConstants.BIB_IDENTIFIER, id), "OR"));
                    }

                    loop++;
                    if (loop == maxLimit)
                        break;
                }
            }
        } else {
            if (StringUtils.isNotBlank(title)) {
                searchParams.getSearchConditions()
                        .add(searchParams.buildSearchCondition("any", searchParams.buildSearchField(
                                org.kuali.ole.docstore.common.document.content.enums.DocType.ITEM.getCode(),
                                BibConstants.TITLE_SEARCH, title), ""));
            }
        }

        // Request every display field needed to populate a DocData bean.
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), ItemConstants.BIB_IDENTIFIER));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), Bib.TITLE));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), Bib.AUTHOR));
        searchParams.getSearchResultFields().add(searchParams.buildSearchResultField(DocType.ITEM.getCode(),
                BibConstants.PUBLICATIONDATE_DISPLAY));
        searchParams.getSearchResultFields().add(
                searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.PUBLISHER_DISPLAY));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.ISBN_DISPLAY));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.LOCALID_DISPLAY));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.UNIQUE_ID));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), Item.ID));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.FORMAT_DISPLAY));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.DOC_FORMAT));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.DOC_CATEGORY));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), BibConstants.DOC_TYPE));
        searchParams.getSearchResultFields()
                .add(searchParams.buildSearchResultField(DocType.ITEM.getCode(), "format"));

        searchResponse = getDocstoreClientLocator().getDocstoreClient().search(searchParams);
        for (SearchResult searchResult : searchResponse.getSearchResults()) {
            DocData docData = new DocData();
            docData = buildDocInfoBean(docData, searchResult);
            results.add(docData);
        }
    } catch (Exception ex) {
        GlobalVariables.getMessageMap().putError(KRADConstants.GLOBAL_ERRORS, "Item Exists");
        LOG.error(org.kuali.ole.OLEConstants.ITEM_EXIST + ex);
    }
    return results;
}

From source file:com.tonbeller.jpivot.chart.ChartComponent.java

/**
 * Builds a unique name string for a data item from its member chain.
 *
 * <p>Walks each member (and its ancestors via {@code myTree.getParent}) from the end of
 * the array to the start, prepending labels so the result reads root-first. A level is
 * contributed at most once; a hierarchy's root member is skipped when the hierarchy is
 * already represented, unless that hierarchy has more than one root member.
 *
 * @param myTree  full member tree
 * @param members the list to be processed (either X/Y axis)
 * @return dot-separated label chain (with trailing dot), possibly empty
 */
private String buildName(MemberTree myTree, Member[] members) {
    String retValue = "";
    // Levels already contributed, keyed by the level's string form.
    // NOTE(review): entries are looked up with containsValue; assumes Level defines
    // a meaningful equals - confirm before switching to the cheaper containsKey.
    HashMap levelMap = new HashMap();
    // Hierarchies already contributed, keyed by the hierarchy's string form.
    HashMap hierarchyMap = new HashMap();
    for (int j = members.length - 1; j >= 0; j--) {
        Member member = members[j];
        while (member != null) {
            // only process if no other items from this level processed - should not be duplicates!
            if (!levelMap.containsValue(member.getLevel())) {
                levelMap.put(member.getLevel().toString(), member.getLevel());
                if (member.getRootDistance() == 0) {
                    // if root member, only add to name if no other members of the hierarchy are already added
                    if (!hierarchyMap.containsValue(member.getLevel().getHierarchy())
                            || myTree.getRootMembers(member.getLevel().getHierarchy()).length > 1) {
                        hierarchyMap.put(member.getLevel().getHierarchy().toString(),
                                member.getLevel().getHierarchy());
                        retValue = member.getLabel() + "." + retValue;
                    }
                } else {
                    hierarchyMap.put(member.getLevel().getHierarchy().toString(),
                            member.getLevel().getHierarchy());
                    retValue = member.getLabel() + "." + retValue;
                }
            }
            member = myTree.getParent(member);
        }
    }
    return retValue;
}

From source file:org.apache.hadoop.hive.ql.optimizer.AbstractBucketJoinProc.java

/**
 * Decides whether the join over {@code joinAliases} is eligible for conversion to a
 * bucket map join. For each alias it verifies that the join keys line up with the
 * table's bucket columns and that the on-disk file count matches the declared bucket
 * count, collecting per-alias bucket file paths and bucket numbers along the way.
 * On success the collected metadata is stored on {@code context} and {@code true}
 * is returned; any disqualifying condition returns {@code false}.
 *
 * @param context       receives the collected bucket metadata when conversion is possible
 * @param aliasToOpInfo top operator for each join alias
 * @param keysMap       join key expressions per join position (byte-indexed)
 * @param baseBigAlias  alias of the big (streamed) table; may be remapped below
 * @param joinAliases   aliases participating in the join; entries may be rewritten
 *                      in place when sub-query aliases are remapped
 * @return true if the join can be converted to a bucket map join
 * @throws SemanticException if bucket metadata disagrees with the files on disk
 */
protected boolean checkConvertBucketMapJoin(BucketJoinProcCtx context,
        Map<String, Operator<? extends OperatorDesc>> aliasToOpInfo, Map<Byte, List<ExprNodeDesc>> keysMap,
        String baseBigAlias, List<String> joinAliases) throws SemanticException {

    LinkedHashMap<String, List<Integer>> tblAliasToNumberOfBucketsInEachPartition = new LinkedHashMap<String, List<Integer>>();
    LinkedHashMap<String, List<List<String>>> tblAliasToBucketedFilePathsInEachPartition = new LinkedHashMap<String, List<List<String>>>();

    HashMap<String, Operator<? extends OperatorDesc>> topOps = pGraphContext.getTopOps();

    HashMap<String, String> aliasToNewAliasMap = new HashMap<String, String>();

    // (partition to bucket file names) and (partition to bucket number) for
    // the big table;
    LinkedHashMap<Partition, List<String>> bigTblPartsToBucketFileNames = new LinkedHashMap<Partition, List<String>>();
    LinkedHashMap<Partition, Integer> bigTblPartsToBucketNumber = new LinkedHashMap<Partition, Integer>();

    Integer[] joinKeyOrder = null; // accessing order of join cols to bucket cols, should be same
    boolean bigTablePartitioned = true;
    for (int index = 0; index < joinAliases.size(); index++) {
        String alias = joinAliases.get(index);
        Operator<? extends OperatorDesc> topOp = aliasToOpInfo.get(alias);
        // The alias may not be present in case of a sub-query
        if (topOp == null) {
            return false;
        }
        List<String> keys = toColumns(keysMap.get((byte) index));
        if (keys == null || keys.isEmpty()) {
            return false;
        }
        int oldKeySize = keys.size();
        TableScanOperator tso = TableAccessAnalyzer.genRootTableScan(topOp, keys);
        if (tso == null) {
            // We cannot get to root TableScan operator, likely because there is a join or group-by
            // between topOp and root TableScan operator. We don't handle that case, and simply return
            return false;
        }

        // For nested sub-queries, the alias mapping is not maintained in QB currently.
        // containsValue is used because topOps maps alias -> operator and we only hold
        // the operator; the matching entry's key recovers the real alias.
        if (topOps.containsValue(tso)) {
            for (Map.Entry<String, Operator<? extends OperatorDesc>> topOpEntry : topOps.entrySet()) {
                if (topOpEntry.getValue() == tso) {
                    String newAlias = topOpEntry.getKey();
                    if (!newAlias.equals(alias)) {
                        // Remap the alias everywhere it is referenced below.
                        joinAliases.set(index, newAlias);
                        if (baseBigAlias.equals(alias)) {
                            baseBigAlias = newAlias;
                        }
                        aliasToNewAliasMap.put(alias, newAlias);
                        alias = newAlias;
                    }
                    break;
                }
            }
        } else {
            // Ideally, this should never happen, and this should be an assert.
            return false;
        }

        // The join keys cannot be transformed in the sub-query currently.
        // TableAccessAnalyzer.genRootTableScan will only return the base table scan
        // if the join keys are constants or a column. Even a simple cast of the join keys
        // will result in a null table scan operator. In case of constant join keys, they would
        // be removed, and the size before and after the genRootTableScan will be different.
        if (keys.size() != oldKeySize) {
            return false;
        }

        if (joinKeyOrder == null) {
            joinKeyOrder = new Integer[keys.size()];
        }

        Table tbl = tso.getConf().getTableMetadata();
        if (tbl.isPartitioned()) {
            PrunedPartitionList prunedParts = pGraphContext.getPrunedPartitions(alias, tso);
            List<Partition> partitions = prunedParts.getNotDeniedPartns();
            // construct a mapping of (Partition->bucket file names) and (Partition -> bucket number)
            if (partitions.isEmpty()) {
                // No surviving partitions: record empty metadata for small tables.
                if (!alias.equals(baseBigAlias)) {
                    tblAliasToNumberOfBucketsInEachPartition.put(alias, Arrays.<Integer>asList());
                    tblAliasToBucketedFilePathsInEachPartition.put(alias, new ArrayList<List<String>>());
                }
            } else {
                List<Integer> buckets = new ArrayList<Integer>();
                List<List<String>> files = new ArrayList<List<String>>();
                for (Partition p : partitions) {
                    if (!checkBucketColumns(p.getBucketCols(), keys, joinKeyOrder)) {
                        return false;
                    }
                    List<String> fileNames = getBucketFilePathsOfPartition(p.getDataLocation(), pGraphContext);
                    // The number of files for the table should be same as number of buckets.
                    int bucketCount = p.getBucketCount();

                    if (fileNames.size() != 0 && fileNames.size() != bucketCount) {
                        String msg = "The number of buckets for table " + tbl.getTableName() + " partition "
                                + p.getName() + " is " + p.getBucketCount()
                                + ", whereas the number of files is " + fileNames.size();
                        throw new SemanticException(ErrorMsg.BUCKETED_TABLE_METADATA_INCORRECT.getMsg(msg));
                    }

                    if (alias.equals(baseBigAlias)) {
                        bigTblPartsToBucketFileNames.put(p, fileNames);
                        bigTblPartsToBucketNumber.put(p, bucketCount);
                    } else {
                        files.add(fileNames);
                        buckets.add(bucketCount);
                    }
                }
                if (!alias.equals(baseBigAlias)) {
                    tblAliasToNumberOfBucketsInEachPartition.put(alias, buckets);
                    tblAliasToBucketedFilePathsInEachPartition.put(alias, files);
                }
            }
        } else {
            // Unpartitioned table: validate once against the table-level metadata.
            if (!checkBucketColumns(tbl.getBucketCols(), keys, joinKeyOrder)) {
                return false;
            }
            List<String> fileNames = getBucketFilePathsOfPartition(tbl.getDataLocation(), pGraphContext);
            Integer num = new Integer(tbl.getNumBuckets());

            // The number of files for the table should be same as number of buckets.
            if (fileNames.size() != 0 && fileNames.size() != num) {
                String msg = "The number of buckets for table " + tbl.getTableName() + " is "
                        + tbl.getNumBuckets() + ", whereas the number of files is " + fileNames.size();
                throw new SemanticException(ErrorMsg.BUCKETED_TABLE_METADATA_INCORRECT.getMsg(msg));
            }

            if (alias.equals(baseBigAlias)) {
                // Null partition key represents the single, unpartitioned "partition".
                bigTblPartsToBucketFileNames.put(null, fileNames);
                bigTblPartsToBucketNumber.put(null, tbl.getNumBuckets());
                bigTablePartitioned = false;
            } else {
                tblAliasToNumberOfBucketsInEachPartition.put(alias, Arrays.asList(num));
                tblAliasToBucketedFilePathsInEachPartition.put(alias, Arrays.asList(fileNames));
            }
        }
    }

    // All tables or partitions are bucketed, and their bucket number is
    // stored in 'bucketNumbers', we need to check if the number of buckets in
    // the big table can be divided by no of buckets in small tables.
    for (Integer numBucketsInPartitionOfBigTable : bigTblPartsToBucketNumber.values()) {
        if (!checkNumberOfBucketsAgainstBigTable(tblAliasToNumberOfBucketsInEachPartition,
                numBucketsInPartitionOfBigTable)) {
            return false;
        }
    }

    // All checks passed: publish the collected metadata on the context.
    context.setTblAliasToNumberOfBucketsInEachPartition(tblAliasToNumberOfBucketsInEachPartition);
    context.setTblAliasToBucketedFilePathsInEachPartition(tblAliasToBucketedFilePathsInEachPartition);
    context.setBigTblPartsToBucketFileNames(bigTblPartsToBucketFileNames);
    context.setBigTblPartsToBucketNumber(bigTblPartsToBucketNumber);
    context.setJoinAliases(joinAliases);
    context.setBaseBigAlias(baseBigAlias);
    context.setBigTablePartitioned(bigTablePartitioned);
    if (!aliasToNewAliasMap.isEmpty()) {
        context.setAliasToNewAliasMap(aliasToNewAliasMap);
    }

    return true;
}

From source file:org.apache.padaf.xmpbox.parser.XMPDocumentBuilder.java

/**
 * Treat each rdf:Description (which must represent a schema), instantiate the class
 * representation of this schema and add it to the metadata.
 *
 * @param metadata
 *            Metadata to attach new elements
 * @throws XmpParsingException
 *             When an expected element is not found
 * @throws XMLStreamException
 *             When an error occurs while reading the rest of the xmp stream
 * @throws XmpSchemaException
 *             When instancing the schema object failed or, in the PDF/A Extension
 *             case, if its namespace is missing
 * @throws XmpUnknownValueTypeException
 *             When a ValueType found does not correspond to a basic type and has not
 *             been declared in the current schema
 * @throws XmpExpectedRdfAboutAttribute
 *             When rdf:Description does not contain an rdf:about attribute
 * @throws BadFieldValueException
 *             When a bad value is found in the schema description content
 */
protected void parseDescription(XMPMetadata metadata) throws XmpParsingException, XMLStreamException,
        XmpSchemaException, XmpUnknownValueTypeException, XmpExpectedRdfAboutAttribute, BadFieldValueException {
    nsMap.resetComplexBasicTypesDeclarationInSchemaLevel();
    // Collect all namespace declarations on this rdf:Description element,
    // registering any complex basic types declared at schema level.
    int cptNS = reader.get().getNamespaceCount();
    HashMap<String, String> namespaces = new HashMap<String, String>();
    for (int i = 0; i < cptNS; i++) {
        namespaces.put(reader.get().getNamespacePrefix(i), reader.get().getNamespaceURI(i));
        if (nsMap.isComplexBasicTypes(reader.get().getNamespaceURI(i))) {
            nsMap.setComplexBasicTypesDeclarationForLevelSchema(reader.get().getNamespaceURI(i),
                    reader.get().getNamespacePrefix(i));
        }
    }
    // Different treatment for PDF/A Extension schema: all three prefixes must be
    // declared and must map to the expected URIs.
    if (namespaces.containsKey(PDFAExtensionSchema.PDFAEXTENSION)) {
        if (namespaces.containsKey(PDFAExtensionSchema.PDFAPROPERTY)
                && namespaces.containsKey(PDFAExtensionSchema.PDFASCHEMA)) {
            if (namespaces.containsValue(PDFAExtensionSchema.PDFAEXTENSIONURI)
                    && namespaces.containsValue(PDFAExtensionSchema.PDFAPROPERTYURI)
                    && namespaces.containsValue(PDFAExtensionSchema.PDFASCHEMAURI)) {
                PDFAExtensionSchema schema = metadata.createAndAddPDFAExtensionSchemaWithNS(namespaces);
                treatDescriptionAttributes(metadata, schema);
                parseExtensionSchema(schema, metadata);

            } else {
                throw new XmpUnexpectedNamespaceURIException(
                        "Unexpected namespaceURI in PDFA Extension Schema encountered");
            }
        } else {
            throw new XmpUnexpectedNamespacePrefixException(
                    "Unexpected namespace Prefix in PDFA Extension Schema");
        }

    } else {
        // Ordinary schema: try each declared namespace in turn until one maps to a
        // known schema object.
        int c = 0;
        String namespaceUri = reader.get().getNamespaceURI(c);
        String namespacePrefix = reader.get().getNamespacePrefix(c);
        c++;
        XMPSchema schema = nsMap.getAssociatedSchemaObject(metadata, namespaceUri, namespacePrefix);
        while (c < reader.get().getNamespaceCount() && schema == null) {
            // try next
            namespaceUri = reader.get().getNamespaceURI(c);
            namespacePrefix = reader.get().getNamespacePrefix(c);
            schema = nsMap.getAssociatedSchemaObject(metadata, namespaceUri, namespacePrefix);
            c++;
        }

        if (schema != null) {
            namespaces.remove(namespacePrefix);
        } else {
            // Unknown namespace: fall back to a default schema.
            schema = metadata.createAndAddDefaultSchema(namespacePrefix, namespaceUri);
        }

        // Mirror the remaining namespace declarations onto the schema as xmlns attributes.
        for (int i = 1; i < cptNS; i++) {
            schema.setAttribute(new Attribute(XMPSchema.NS_NAMESPACE, "xmlns",
                    reader.get().getNamespacePrefix(i), reader.get().getNamespaceURI(i)));
        }
        treatDescriptionAttributes(metadata, schema);
        // Parse every child property element of this description.
        while (reader.get().nextTag() == XMLStreamReader.START_ELEMENT) {
            parseProperty(schema, metadata);
        }
    }

}

From source file:org.lexgrid.valuesets.helper.VSDServiceHelper.java

/**
 * Return the absolute URI/version reference for the supplied coding scheme name,
 * adding the resolved entry to {@code refVersions} when it is not already there.
 *
 * <p>Resolution order: (1) an entry already in {@code refVersions}; (2) the supplied
 * {@code versionTag}; (3) the PRODUCTION tag; (4) a service version matching one
 * already resolved; (5) the first version the service reports.
 *
 * @param csName
 *            the local identifier of the coding scheme to be resolved
 * @param maps
 *            mappings that contain local ids to URIs
 * @param versionTag
 *            the version tag to use if there is more than one version in the service
 * @param refVersions
 *            URI/version pairs that are already resolved; must not be null, and may
 *            gain an entry as a side effect
 * @return the URI/Version to use, or null if none can be found
 * @throws LBException on service resolution failure
 */
public AbsoluteCodingSchemeVersionReference resolveCSVersion(String csName, Mappings maps, String versionTag,
        HashMap<String, String> refVersions) throws LBException {
    String csURI = getURIForCodingSchemeName(maps, csName);
    if (!StringUtils.isEmpty(csURI)) {
        // If it is already in the list, use it
        if (refVersions.containsKey(csURI))
            return Constructors.createAbsoluteCodingSchemeVersionReference(csURI, refVersions.get(csURI));

        // If it is a named version, try to resolve it
        if (!StringUtils.isEmpty(versionTag)) {
            String tagVersion = rm_.getInternalVersionStringForTag(csURI, versionTag);
            if (!StringUtils.isEmpty(tagVersion)) {
                return Constructors.createAbsoluteCodingSchemeVersionReference(csURI, tagVersion);
            }
        }

        // Default to the named version - KnownTags.PRODUCTION, if it exists
        String tagVersion = null;
        try {
            tagVersion = rm_.getInternalVersionStringForTag(csURI, KnownTags.PRODUCTION.toString());
            if (!StringUtils.isEmpty(tagVersion)) {
                // Cache the resolution for subsequent calls.
                refVersions.put(csURI, tagVersion);

                return Constructors.createAbsoluteCodingSchemeVersionReference(csURI, tagVersion);
            }
        } catch (LBParameterException e) {
            // No PRODUCTION tag for this scheme - fall through to the service list.
        }
        // Take whatever is most appropriate from the service
        AbsoluteCodingSchemeVersionReferenceList serviceCsVersions = getAbsoluteCodingSchemeVersionReference(
                csURI);

        if (serviceCsVersions == null)
            return null;

        // Prefer a service version that matches one already resolved elsewhere.
        // (The original also null-checked refVersions here; that guard was dead code,
        // since refVersions is dereferenced unconditionally above.)
        if (refVersions.size() > 0) {
            for (AbsoluteCodingSchemeVersionReference serviceCsVersion : serviceCsVersions
                    .getAbsoluteCodingSchemeVersionReference()) {
                if (refVersions.containsValue(serviceCsVersion.getCodingSchemeVersion()))
                    return serviceCsVersion;
            }
        }

        // Otherwise take the first version the service knows about.
        if (serviceCsVersions.getAbsoluteCodingSchemeVersionReferenceCount() > 0) {
            refVersions.put(csURI,
                    serviceCsVersions.getAbsoluteCodingSchemeVersionReference(0).getCodingSchemeVersion());
            return serviceCsVersions.getAbsoluteCodingSchemeVersionReference(0);
        }
    }
    return null;

}

From source file:net.sf.mzmine.modules.peaklistmethods.alignment.ransac.RansacAlignerTask.java

/**
 * Builds the best-score mapping between rows of the given peak list and rows of
 * the already-aligned peak list, using a RANSAC-corrected retention time model.
 *
 * @param peakList peak list to align against the accumulated result
 * @return map from each source row to its matched aligned row; null if scoring fails
 */
private HashMap<PeakListRow, PeakListRow> getAlignmentMap(PeakList peakList) {

    // Best-score mappings accumulated so far.
    HashMap<PeakListRow, PeakListRow> mapping = new HashMap<PeakListRow, PeakListRow>();

    if (alignedPeakList.getNumberOfRows() < 1) {
        return mapping;
    }

    // Candidate scores, kept in the natural order of RowVsRowScore.
    TreeSet<RowVsRowScore> rankedScores = new TreeSet<RowVsRowScore>();

    // Fit the retention-time correction model via RANSAC.
    List<AlignStructMol> ransacPairs = ransacPeakLists(alignedPeakList, peakList);
    PolynomialFunction rtModel = this.getPolynomialFunction(ransacPairs);

    for (PeakListRow row : peakList.getRows()) {
        // m/z window within which this row may be matched.
        Range mzRange = mzTolerance.getToleranceRange(row.getAverageMZ());

        // Corrected retention time; fall back to the raw RT when the model is
        // unavailable or produces an unusable value.
        double rt;
        try {
            rt = rtModel.value(row.getAverageRT());
        } catch (NullPointerException e) {
            rt = row.getAverageRT();
        }
        if (Double.isNaN(rt) || rt == -1) {
            rt = row.getAverageRT();
        }

        Range rtRange = rtToleranceAfter.getToleranceRange(rt);

        // Candidate aligned rows inside both tolerance windows.
        PeakListRow[] candidates = alignedPeakList.getRowsInsideScanAndMZRange(rtRange, mzRange);

        for (PeakListRow candidate : candidates) {
            if (sameChargeRequired && (!PeakUtils.compareChargeState(row, candidate))) {
                continue;
            }

            try {
                RowVsRowScore score = new RowVsRowScore(row, candidate, mzRange.getSize() / 2,
                        rtRange.getSize() / 2, rt);
                rankedScores.add(score);
                errorMessage = score.getErrorMessage();
            } catch (Exception e) {
                e.printStackTrace();
                setStatus(TaskStatus.ERROR);
                return null;
            }
        }
        processedRows++;
    }

    // Accept scores greedily in rank order, never reusing either endpoint.
    for (RowVsRowScore score : rankedScores) {
        if (mapping.containsKey(score.getPeakListRow())) {
            continue;
        }
        if (mapping.containsValue(score.getAlignedRow())) {
            continue;
        }
        mapping.put(score.getPeakListRow(), score.getAlignedRow());
    }

    return mapping;
}

From source file:guineu.modules.filter.Alignment.RANSAC.RansacAlignerTask.java

/**
 * Builds the best-score mapping between rows of the given dataset and rows of the
 * already-aligned dataset, using a RANSAC-corrected retention time model.
 *
 * @param peakList dataset to align against the accumulated result
 * @return map from each source row to its matched aligned row; null if scoring fails
 */
private HashMap<PeakListRow, PeakListRow> getAlignmentMap(Dataset peakList) {

    // Create a table of mappings for best scores
    HashMap<PeakListRow, PeakListRow> alignmentMapping = new HashMap<PeakListRow, PeakListRow>();

    if (alignedPeakList.getNumberRows() < 1) {
        return alignmentMapping;
    }

    // Create a sorted set of scores matching
    TreeSet<RowVsRowScore> scoreSet = new TreeSet<RowVsRowScore>();

    // RANSAC algorithm: fit the retention-time correction polynomial.
    List<AlignStructMol> list = ransacPeakLists(alignedPeakList, peakList);
    PolynomialFunction function = this.getPolynomialFunction(list,
            ((SimpleLCMSDataset) alignedPeakList).getRowsRTRange());

    PeakListRow allRows[] = peakList.getRows().toArray(new PeakListRow[0]);

    for (PeakListRow row : allRows) {
        // Corrected retention time; fall back to the raw RT when the model is
        // unavailable or produces an unusable value.
        double rt = 0.0;
        try {
            rt = function.value(((SimplePeakListRowLCMS) row).getRT());
        } catch (NullPointerException e) {
            rt = ((SimplePeakListRowLCMS) row).getRT();
        }

        if (Double.isNaN(rt) || rt == -1) {
            rt = ((SimplePeakListRowLCMS) row).getRT();
        }

        Range mzRange = this.mzTolerance.getToleranceRange(((SimplePeakListRowLCMS) row).getMZ());
        Range rtRange = this.rtToleranceAfterRTcorrection.getToleranceRange(rt);
        // Get all rows of the aligned peaklist within parameter limits
        PeakListRow candidateRows[] = ((SimpleLCMSDataset) alignedPeakList).getRowsInsideRTAndMZRange(rtRange,
                mzRange);

        for (PeakListRow candidate : candidateRows) {
            RowVsRowScore score;
            try {
                score = new RowVsRowScore(row, candidate, mzTolerance.getTolerance(),
                        rtToleranceAfterRTcorrection.getTolerance(), rt);

                scoreSet.add(score);
                errorMessage = score.getErrorMessage();

            } catch (Exception e) {
                // Any scoring failure aborts the whole alignment with a null result.
                e.printStackTrace();
                setStatus(TaskStatus.ERROR);
                return null;
            }
        }
        progress = (double) processedRows++ / (double) totalRows;
    }

    // Iterate scores by descending order, accepting each pair greedily so that
    // neither endpoint of an accepted pair is ever reused.
    Iterator<RowVsRowScore> scoreIterator = scoreSet.iterator();
    while (scoreIterator.hasNext()) {

        RowVsRowScore score = scoreIterator.next();

        // Check if the row is already mapped
        if (alignmentMapping.containsKey(score.getPeakListRow())) {
            continue;
        }

        // Check if the aligned row is already filled
        if (alignmentMapping.containsValue(score.getAlignedRow())) {
            continue;
        }

        alignmentMapping.put(score.getPeakListRow(), score.getAlignedRow());

    }

    return alignmentMapping;
}