Example usage for java.util.Set.clear()

List of usage examples for java.util.Set.clear()

Introduction

This page collects example usage of java.util.Set.clear() drawn from real open-source projects.

Prototype

void clear();

Document

Removes all of the elements from this set (optional operation).
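
As a quick illustration of the method itself (a minimal sketch, not taken from any of the projects below): clear() removes every element but leaves the set instance intact and reusable.

import java.util.HashSet;
import java.util.Set;

public class SetClearDemo {
    public static void main(String[] args) {
        Set<String> names = new HashSet<>();
        names.add("alice");
        names.add("bob");
        System.out.println(names.size());    // 2

        // clear() removes all elements; the same set object stays valid
        names.clear();
        System.out.println(names.isEmpty()); // true

        names.add("carol");                  // the instance can be repopulated
        System.out.println(names.size());    // 1
    }
}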

Usage

From source file:gov.nih.nci.security.dao.AuthorizationDAOImpl.java

public Set getProtectionGroups(String protectionElementId) throws CSObjectNotFoundException {
    Session s = null;
    Set result = new HashSet();
    try {
        s = HibernateSessionFactoryHelper.getAuditSession(sf);
        if (StringUtilities.isBlank(protectionElementId)) {
            throw new CSObjectNotFoundException("Primary key can't be null");
        }
        ProtectionElement protectionElement = (ProtectionElement) this.getObjectByPrimaryKey(s,
                ProtectionElement.class, new Long(protectionElementId));
        result = protectionElement.getProtectionGroups();

        List list = new ArrayList();
        Iterator toSortIterator = result.iterator();
        while (toSortIterator.hasNext()) {
            list.add(toSortIterator.next());
        }
        Collections.sort(list);
        result.clear();
        result.addAll(list);

        log.debug("The result size:" + result.size());

    } catch (Exception ex) {
        if (log.isDebugEnabled())
            log.debug(
                    "Authorization|||getProtectionGroups|Failure|Error in obtaining Protection Groups for Protection Element Id "
                            + protectionElementId + "|" + ex.getMessage());
        throw new CSObjectNotFoundException(
                "An error occurred while obtaining Associated Protection Groups for the Protection Element\n"
                        + ex.getMessage(),
                ex);
    } finally {
        try {
            s.close();
        } catch (Exception ex2) {
            if (log.isDebugEnabled())
                log.debug("Authorization|||getProtectionGroups|Failure|Error in Closing Session |"
                        + ex2.getMessage());
        }
    }
    if (log.isDebugEnabled())
        log.debug(
                "Authorization|||getProtectionGroups|Success|Successful in obtaining Protection Groups for Protection Element Id "
                        + protectionElementId + "|");
    return result;
}
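
The method above sorts a Hibernate-backed set by copying it into a list, sorting the list, then calling clear() and addAll() to refill the same set instance. A stand-alone sketch of that pattern (hypothetical data, not the CSM types used above):

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SortSetInPlace {
    public static void main(String[] args) {
        Set<String> result = new HashSet<>();
        Collections.addAll(result, "gamma", "alpha", "beta");

        // Copy to a list, sort, then refill the original set instance.
        List<String> list = new ArrayList<>(result);
        Collections.sort(list);
        result.clear();
        result.addAll(list);

        // Note: a plain HashSet does not preserve insertion order, so the sorted
        // order only survives with an order-preserving set (e.g. LinkedHashSet)
        // or when the caller iterates immediately, as the DAO above does.
        System.out.println(result);
    }
}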

From source file:gov.nih.nci.security.dao.AuthorizationDAOImpl.java

public Set getUsers(String groupId) throws CSObjectNotFoundException {
    //todo
    Session s = null;
    Set users = new HashSet();
    try {
        s = HibernateSessionFactoryHelper.getAuditSession(sf);

        Group group = (Group) this.getObjectByPrimaryKey(s, Group.class, new Long(groupId));
        users = group.getUsers();

        List list = new ArrayList();
        Iterator toSortIterator = users.iterator();
        while (toSortIterator.hasNext()) {
            User user = (User) toSortIterator.next();
            try {
                user = (User) performEncrytionDecryption(user, false);
            } catch (EncryptionException e) {
                throw new CSObjectNotFoundException(e);
            }
            list.add(user);

        }
        Collections.sort(list);
        users.clear();
        users.addAll(list);

        log.debug("The result size:" + users.size());

    } catch (Exception ex) {
        log.error(ex);
        if (log.isDebugEnabled())
            log.debug("Authorization|||getUsers|Failure|Error in obtaining Users for Group Id " + groupId + "|"
                    + ex.getMessage());
        throw new CSObjectNotFoundException(
                "An error occurred while obtaining Associated Users for the Group\n" + ex.getMessage(), ex);
    } finally {
        try {
            s.close();
        } catch (Exception ex2) {
            if (log.isDebugEnabled())
                log.debug("Authorization|||getUsers|Failure|Error in Closing Session |" + ex2.getMessage());
        }
    }
    if (log.isDebugEnabled())
        log.debug(
                "Authorization|||getUsers|Success|Successful in obtaining Users for Group Id " + groupId + "|");
    return users;

}

From source file:com.edgenius.wiki.service.impl.PageServiceImpl.java

public Page savePage(Page pageValue, int requireNotified, boolean forceSave)
        throws PageException, VersionConflictException, DuplicatedPageException, PageSaveTiemoutExcetpion {
    Page page = null;
    String spaceUname = pageValue.getSpace().getUnixName();
    String newPageTitle = pageValue.getTitle();
    Integer newPageUid = pageValue.getUid();

    log.info("Page saving for " + pageValue.getTitle() + " on space " + spaceUname);
    Space space;
    //page already exist, need clone then save a new record in database
    String oldTitle = null;
    boolean needRefreshCache = false;

    if (newPageUid != null) {
        //The page will create  old version to new record but update same UID as current 
        //it would get same result by pageDAO.getCurrentByUuid() but a little bit faster in performance.
        page = pageDAO.get(newPageUid);
    } else if (!StringUtils.isBlank(pageValue.getPageUuid())) {
        //if user choose a item from My Draft in Dashboard, this won't bring in a newPageUid 
        //There are 3 scenarios for this case. 
        //1. it is a existed page draft.Following method returns current page,
        //2. non-existed page draft. Following method returns null.
        //3. non-existed page but page has a copy in trash bin! The below method return null as well, but the uuid is already invalid
        // as it is used by trashed page - so need further check - if it has trashed page, reset pageUUID
        page = pageDAO.getCurrentByUuid(pageValue.getPageUuid());

        if (page == null) {
            Page removedPage = pageDAO.getByUuid(pageValue.getPageUuid());
            if (removedPage != null && removedPage.isRemoved()) {
                //case 3, treat it as new page
                pageValue.setPageUuid(null);
            }
        }
    }

    if (!forceSave && !checkVersion(pageValue, page)) {
        throw new VersionConflictException(page.getVersion());
    }

    //!!!Title duplicated problem: user try to create a new page or rename a page but same title already exist in space 
    Page sameTitlePage = pageDAO.getCurrentPageByTitle(spaceUname, newPageTitle);
    if (page != null) {
        if (sameTitlePage != null) {
            if (!sameTitlePage.getPageUuid().equals(page.getPageUuid()))
                throw new DuplicatedPageException();
        }

        //keep old page :NOTE: this piece code has duplicate with fixLinksToTitle() method
        History oldPage = (History) page.cloneToHistory();
        //put this page to history page:create a new record with cloned value except Uid
        //         history page does not save link, tag and attachment info. 
        //         The key is save content change!
        oldPage.setAttachments(null);
        oldPage.setParent(null);
        historyDAO.saveOrUpdate(oldPage);

        if (!StringUtils.equalsIgnoreCase(oldPage.getTitle(), newPageTitle)) {
            // oldTitle is not null, so that update PageLink on below
            oldTitle = oldPage.getTitle();
            needRefreshCache = true;
            //remove old page with old title from cache first, new page should add after page saved
            removePageCache(spaceUname, page, false);
        }
        //update current page with new value
        space = page.getSpace();

        copyValueFromView(page, pageValue);
        //         page.setUnixName(WikiUtil.getPageUnixname(newPageTitle));
        WikiUtil.setTouchedInfo(userReadingService, page);
        page.setVersion(page.getVersion() + 1);
    } else {
        //for new create page: same title page must not exist
        if (sameTitlePage != null) {
            throw new DuplicatedPageException("Page has duplicated title:" + newPageTitle);
        }

        needRefreshCache = true;
        //a new page first time save:
        page = new Page();
        copyValueFromView(page, pageValue);

        space = spaceDAO.getByUname(spaceUname);
        page.setSpace(space);

        //??? CascadeType.PERSIST seems does not work well. I must explicit call save(), but in CascadeType.ALL, it is not necessary.
        pageProgressDAO.saveOrUpdate(page.getPageProgress());

        page.setVersion(1);
        //if there is draft existed before page first create, keep draft uuid as page uuid!!! 
        if (StringUtils.isBlank(pageValue.getPageUuid()))
            page.setPageUuid(WikiUtil.createPageUuid(spaceUname, spaceUname, spaceUname, repositoryService));
        else
            page.setPageUuid(pageValue.getPageUuid());
        //         page.setUnixName(WikiUtil.getPageUnixname(newPageTitle));
        WikiUtil.setTouchedInfo(userReadingService, page);

        if (pageValue.getParent() != null && !StringUtils.isBlank(pageValue.getParent().getPageUuid())) {
            Page parentPage = pageDAO.getCurrentByUuid(pageValue.getParent().getPageUuid());
            if (parentPage != null) {
                //maybe parent page is deleted as well.
                page.setParent(parentPage);
                page.setLevel(parentPage.getLevel() + 1);
            } else {
                log.warn("page parent page does not exist. Page title is " + pageValue.getTitle()
                        + ". Parent page uuid is " + pageValue.getParent().getPageUuid());
            }
        } else
            //root page, such as home page
            page.setLevel(0);
    }

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    //update page tags
    tagService.saveUpdatePageTag(page, pageValue.getTagString());

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // !!!! Important: this update attachments status must before  renderService.renderHTML(page)
    // otherwise, the drafts attachment won't render in {attach} or {gallery} macro....

    // Update page attachment status 
    //   remove this user's draft, does not use getDraft() then remove, because following error: 
    //      org.hibernate.HibernateException: Found shared references to a collection: com.edgenius.wiki.model.Page.tags
    try {
        User viewer = WikiUtil.getUser(userReadingService);
        mergeAttahment(getPageAttachment(spaceUname, page.getPageUuid(), true, true, viewer),
                pageValue.getAttachments(), spaceUname, viewer, PageType.NONE_DRAFT);
        upgradeAttachmentStatus(spaceUname, page.getPageUuid(), page.getModifier(), PageType.NONE_DRAFT);
    } catch (RepositoryException e) {
        //not critical exception, just log:
        log.error("Update attachment status during saving page:" + page.getPageUuid() + " in space "
                + spaceUname + ".Error: ", e);
    } catch (RepositoryTiemoutExcetpion e) {
        log.error("Merge attachment saving page:" + page.getPageUuid() + " in space " + spaceUname + ".Error: ",
                e);
    }

    List<RenderPiece> pieces = renderService.renderHTML(page);
    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    //update page links
    Set<PageLink> links = page.getLinks();
    if (links == null) {
        links = new HashSet<PageLink>();
        page.setLinks(links);
    }

    List<PageLink> newLinks = new ArrayList<PageLink>();
    for (RenderPiece object : pieces) {
        if (object instanceof LinkModel) {
            LinkModel ln = (LinkModel) object;
            //!!! Only linkToCreate and LinkToView support at moment(29/10/2008)
            if (ln.getType() == LinkModel.LINK_TO_CREATE_FLAG || ln.getType() == LinkModel.LINK_TO_VIEW_FLAG) {
                if (StringUtils.length(ln.getLink()) > SharedConstants.TITLE_MAX_LEN) {
                    log.warn("Found invalid link(too long), skip it on PageLink table:" + ln.getLink()
                            + " on page " + newPageTitle);
                } else {
                    PageLink link = PageLink.copyFrom(page, ln);
                    newLinks.add(link);
                }
            }
        }
    }

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    //update menu item for space
    //found other if current page has menuItem macro.

    MenuItem menuItem = null;
    //if this marked true, it will trigger the Shell update space request, so update Shell menu.
    page.setMenuUpdated(false);

    for (RenderPiece renderPiece : pieces) {
        if (renderPiece instanceof MacroModel
                && MenuItemMacro.NAME.equalsIgnoreCase(((MacroModel) renderPiece).macroName)) {
            //copy value to MenuItem object
            menuItem = new MenuItem();
            HashMap<String, String> values = ((MacroModel) renderPiece).values;
            if (values != null) {
                menuItem.setTitle(values.get(NameConstants.TITLE));
                menuItem.setOrder(NumberUtils.toInt(values.get(NameConstants.ORDER)));
                menuItem.setParent(values.get(NameConstants.PARENT_UUID));
            }
            menuItem.setPageTitle(page.getTitle());
            menuItem.setPageUuid(page.getPageUuid());
            //suppose only one menuItem in a page, if multiple, even also only use first of them.
            break;
        }
    }

    Set<MenuItem> menuItems = space.getSetting().getMenuItems();
    if (menuItem != null) {
        //update menu list in current space setting
        if (menuItems == null) {
            menuItems = new TreeSet<MenuItem>(new MenuItemComparator());
            space.getSetting().setMenuItems(menuItems);
        } else {
            //try to remove old value
            menuItems.remove(menuItem);
        }

        log.info("Menu item is add or update to page {}.", page.getPageUuid());
        menuItems.add(menuItem);
        settingService.saveOrUpdateSpaceSetting(space, space.getSetting());
        page.setMenuUpdated(true);
    } else if (menuItems != null) {
        //need check if menu item is deleted from page if it had. Try to remove it.
        if (menuItems.remove(new MenuItem(page.getPageUuid()))) {
            log.info("Menu item is removed from page {}.", page.getPageUuid());
            settingService.saveOrUpdateSpaceSetting(space, space.getSetting());
            page.setMenuUpdated(true);
        }
    }

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    //merge new links and existed links
    //delete non-existed
    for (Iterator<PageLink> iter = links.iterator(); iter.hasNext();) {
        PageLink ln = iter.next();
        ln.setAmount(0);
        for (Iterator<PageLink> newIter = newLinks.iterator(); newIter.hasNext();) {
            PageLink nlnk = newIter.next();
            if (ln.equals(nlnk)) {
                ln.setAmount(ln.getAmount() + 1);
                newIter.remove();
            }
        }
        if (ln.getAmount() == 0) {
            iter.remove();
        }
    }
    if (newLinks.size() > 0) {
        ArrayList<PageLink> linksList = new ArrayList<PageLink>(links);
        //there some new added links
        int idx;
        for (PageLink newLnk : newLinks) {
            if ((idx = linksList.indexOf(newLnk)) != -1) {
                PageLink ln = linksList.get(idx);
                ln.setAmount(ln.getAmount() + 1);
            } else {
                linksList.add(newLnk);
            }
        }
        links.clear();
        links.addAll(linksList);
    }

    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    //persistent
    page.setType(PageType.NONE_DRAFT);
    pageDAO.saveOrUpdate(page);

    //!!!NOTE: follow 3 lines code must after pageDAO.saveOrUpdate(),otherwise, if home page
    // is new page and contain link, method pageDAO.getCurrentByTitle() in LinkRenderHelper.exist() 
    //method will throw exception!!!(15/03/2007_my new car Honda Accord arrives in home:)
    if (pageValue.getNewPageType() == PageAttribute.NEW_HOMEPAGE) {
        space.setHomepage(page);
        spaceDAO.saveOrUpdate(space);
    }

    //update cache only when a new page created or page title updated
    if (needRefreshCache) {
        addPageCache(spaceUname, page);
    }
    refreshAncestors(spaceUname, page);

    //page title change so change all page which refer link to this page.
    //only title change,oldTitle is not null.
    if ((Global.AutoFixLinks & WikiConstants.AUTO_FIX_TITLE_CHANGE_LINK) > 0 && oldTitle != null) {
        String newTitle = page.getTitle();
        try {
            fixLinksToTitle(spaceUname, oldTitle, newTitle);
        } catch (Exception e) {
            log.error("Unable to fix page title change on other pages content.", e);
        }
    }

    //remove all draft whatever auto or manual
    removeDraftInternal(spaceUname, page.getPageUuid(), page.getModifier(), PageType.NONE_DRAFT, false);

    //MOVE to PageIndexInterceptor
    //      if(requireNotified)
    //         sendNodification(page);

    log.info("Page saved " + newPageTitle + " on space " + spaceUname + ". Page uid: " + newPageUid);

    PageEventListener[] listeners = eventContainer.getPageEventListeners(page.getPageUuid());
    if (listeners != null && listeners.length > 0) {
        log.info("Page saved event dispatching...");
        for (PageEventListener listener : listeners) {
            try {
                listener.pageSaving(page.getPageUuid());
            } catch (PageEventHanderException e) {
                log.error("Page saved event processed failed on " + listener.getClass().getName(), e);
            }
        }
    }
    return page;

}

From source file:gov.nih.nci.security.dao.AuthorizationDAOImpl.java

public Set getGroups(String userId) throws CSObjectNotFoundException {
    Session s = null;
    Set groups = new HashSet();
    try {
        s = HibernateSessionFactoryHelper.getAuditSession(sf);

        User user = (User) this.getObjectByPrimaryKey(s, User.class, new Long(userId));
        groups = user.getGroups();

        Iterator groupIterator = groups.iterator();
        Set removedGroups = new HashSet();
        while (groupIterator.hasNext()) {
            Group g = (Group) groupIterator.next();
            if (g.getApplication().getApplicationId().intValue() != this.application.getApplicationId()
                    .intValue()) {
                removedGroups.add(g);
            }
        }
        groups.removeAll(removedGroups);
        List list = new ArrayList();
        Iterator toSortIterator = groups.iterator();
        while (toSortIterator.hasNext()) {
            list.add(toSortIterator.next());
        }
        Collections.sort(list);
        groups.clear();
        groups.addAll(list);

        log.debug("The result size:" + groups.size());

    } catch (Exception ex) {
        log.error(ex);
        if (log.isDebugEnabled())
            log.debug("Authorization|||getGroups|Failure|Error in obtaining Groups for User Id " + userId + "|"
                    + ex.getMessage());
        throw new CSObjectNotFoundException(
                "An error occurred while obtaining Associated Groups for the User\n" + ex.getMessage(), ex);
    } finally {
        try {
            s.close();
        } catch (Exception ex2) {
            if (log.isDebugEnabled())
                log.debug("Authorization|||getGroups|Failure|Error in Closing Session |" + ex2.getMessage());
        }
    }
    if (log.isDebugEnabled())
        log.debug(
                "Authorization|||getGroups|Success|Successful in obtaining Groups for User Id " + userId + "|");
    return groups;

}

From source file:org.broadinstitute.sting.utils.variant.GATKVariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniquifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name.
 * simpleMerge does not verify any more unique sample names EVEN if genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE. One should use
 * SampleUtils.verifyUniqueSamplesNames to check that before using simpleMerge.
 *
 * For more information on this method see: http://www.thedistractionnetwork.com/programmer-problem/
 *
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext       representing the merge of unsortedVCs
 */
public static VariantContext simpleMerge(final Collection<VariantContext> unsortedVCs,
        final List<String> priorityListOfVCs, final int originalNumOfVCs,
        final FilteredRecordMergeType filteredRecordMergeType, final GenotypeMergeType genotypeMergeOptions,
        final boolean annotateOrigin, final boolean printMessages, final String setKey,
        final boolean filteredAreUncalled, final boolean mergeInfoWithMaxAC) {

    if (unsortedVCs == null || unsortedVCs.size() == 0)
        return null;

    if (priorityListOfVCs != null && originalNumOfVCs != priorityListOfVCs.size())
        throw new IllegalArgumentException(
                "the number of the original VariantContexts must be the same as the number of VariantContexts in the priority list");

    if (annotateOrigin && priorityListOfVCs == null && originalNumOfVCs == 0)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts or the number of original VariantContexts");

    final List<VariantContext> preFilteredVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with reference base in case of indels if necessary
    final List<VariantContext> VCs = new ArrayList<VariantContext>();

    for (final VariantContext vc : preFilteredVCs) {
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(vc);
    }
    if (VCs.size() == 0) // everything is filtered out and we're filteredAreUncalled
        return null;

    // establish the baseline info from the first VC
    final VariantContext first = VCs.get(0);
    final String name = first.getSource();
    final Allele refAllele = determineReferenceAllele(VCs);

    final Set<Allele> alleles = new LinkedHashSet<Allele>();
    final Set<String> filters = new HashSet<String>();
    final Map<String, Object> attributes = new LinkedHashMap<String, Object>();
    final Set<String> inconsistentAttributes = new HashSet<String>();
    final Set<String> variantSources = new HashSet<String>(); // contains the set of sources we found in our set of VCs that are variant
    final Set<String> rsIDs = new LinkedHashSet<String>(1); // most of the time there's one id

    VariantContext longestVC = first;
    int depth = 0;
    int maxAC = -1;
    final Map<String, Object> attributesWithMaxAC = new LinkedHashMap<String, Object>();
    double log10PError = CommonInfo.NO_LOG10_PERROR;
    VariantContext vcWithMaxAC = null;
    GenotypesContext genotypes = GenotypesContext.create();

    // counting the number of filtered and variant VCs
    int nFiltered = 0;

    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches

    for (final VariantContext vc : VCs) {
        if (longestVC.getStart() != vc.getStart())
            throw new IllegalStateException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (VariantContextUtils.getSize(vc) > VariantContextUtils.getSize(longestVC))
            longestVC = vc; // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        if (vc.isVariant())
            variantSources.add(vc.getSource());

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        // We always take the QUAL of the first VC with a non-MISSING qual for the combined value
        if (log10PError == CommonInfo.NO_LOG10_PERROR)
            log10PError = vc.getLog10PError();

        filters.addAll(vc.getFilters());

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += vc.getAttributeAsInt(VCFConstants.DEPTH_KEY, 0);
        if (vc.hasID())
            rsIDs.add(vc.getID());
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY, null);
            // lets see if the string contains a , separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (final Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            String key = p.getKey();
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                final boolean alreadyFound = attributes.containsKey(key);
                final Object boundValue = attributes.get(key);
                final boolean boundIsMissingValue = alreadyFound
                        && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(p.getValue()) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    //System.out.printf("Inconsistent INFO values: %s => %s and %s%n", key, boundValue, p.getValue());
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    //if ( vc != first ) System.out.printf("Adding key %s => %s%n", p.getKey(), p.getValue());
                    attributes.put(key, p.getValue());
                }
            }
        }
    }

    // if we have more alternate alleles in the merged VC than in one or more of the
    // original VCs, we need to strip out the GL/PLs (because they are no longer accurate), as well as allele-dependent attributes like AC,AF, and AD
    for (final VariantContext vc : VCs) {
        if (vc.getAlleles().size() == 1)
            continue;
        if (hasPLIncompatibleAlleles(alleles, vc.getAlleles())) {
            if (!genotypes.isEmpty()) {
                logger.debug(String.format(
                        "Stripping PLs at %s:%d-%d due to incompatible alleles merged=%s vs. single=%s",
                        vc.getChr(), vc.getStart(), vc.getEnd(), alleles, vc.getAlleles()));
            }
            genotypes = stripPLsAndAD(genotypes);
            // this will remove stale AC,AF attributed from vc
            VariantContextUtils.calculateChromosomeCounts(vc, attributes, true);
            break;
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if ((filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
            || filteredRecordMergeType == FilteredRecordMergeType.KEEP_UNCONDITIONAL)
        filters.clear();

    if (annotateOrigin) { // we care about where the call came from
        String setValue;
        if (nFiltered == 0 && variantSources.size() == originalNumOfVCs) // nothing was unfiltered
            setValue = MERGE_INTERSECTION;
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = MERGE_FILTER_IN_ALL;
        else if (variantSources.isEmpty()) // everyone was reference
            setValue = MERGE_REF_IN_ALL;
        else {
            final LinkedHashSet<String> s = new LinkedHashSet<String>();
            for (final VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? MERGE_FILTER_PREFIX + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, setValue);
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));

    final String ID = rsIDs.isEmpty() ? VCFConstants.EMPTY_ID_FIELD : Utils.join(",", rsIDs);

    final VariantContextBuilder builder = new VariantContextBuilder().source(name).id(ID);
    builder.loc(longestVC.getChr(), longestVC.getStart(), longestVC.getEnd());
    builder.alleles(alleles);
    builder.genotypes(genotypes);
    builder.log10PError(log10PError);
    builder.filters(filters.isEmpty() ? filters : new TreeSet<String>(filters));
    builder.attributes(new TreeMap<String, Object>(mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));

    // Trim the padded bases of all alleles if necessary
    final VariantContext merged = builder.make();
    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}

From source file:com.net2plan.interfaces.networkDesign.NetPlan.java

/**
 * <p>Returns the set of links that are a bottleneck, i.e the fraction of occupied capacity respect to the total
 * is highest. If no layer is provided, the default layer is assumed.</p>
 *
 * @param optionalLayerParameter Network layer (optional)
 * @return The {@code Set} of bottleneck links
 */
public Set<Link> getLinksAreBottleneck(NetworkLayer... optionalLayerParameter) {
    NetworkLayer layer = checkInThisNetPlanOptionalLayerParameter(optionalLayerParameter);
    double maxRho = 0;
    final double PRECISION_FACTOR = Double.parseDouble(Configuration.getOption("precisionFactor"));
    Set<Link> res = new HashSet<Link>();
    for (Link e : layer.links)
        if (Math.abs(e.cache_occupiedCapacity / e.capacity - maxRho) < PRECISION_FACTOR)
            res.add(e);
        else if (e.cache_occupiedCapacity / e.capacity > maxRho) {
            maxRho = e.cache_occupiedCapacity / e.capacity;
            res.clear();
            res.add(e);
        }
    return res;
}
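
In this example clear() resets the running result whenever a strictly larger utilization is found, so res always holds exactly the links tied at the maximum. A self-contained sketch of that clear-and-restart argmax pattern (hypothetical data, not the Net2Plan API):

import java.util.HashSet;
import java.util.Set;

public class ArgMaxCollector {
    public static void main(String[] args) {
        double[] utilization = { 0.25, 0.80, 0.80, 0.40 };
        final double EPSILON = 1e-9;

        double max = 0;
        Set<Integer> argMax = new HashSet<>();
        for (int i = 0; i < utilization.length; i++) {
            if (Math.abs(utilization[i] - max) < EPSILON) {
                argMax.add(i);               // ties with the current maximum
            } else if (utilization[i] > max) {
                max = utilization[i];
                argMax.clear();              // a new maximum invalidates earlier candidates
                argMax.add(i);
            }
        }
        System.out.println(argMax); // [1, 2]
    }
}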

From source file:org.openmrs.module.sync.api.db.hibernate.HibernateSyncDAO.java

/**
 * @see org.openmrs.module.sync.api.db.SyncDAO#processCollection(java.lang.Class,
 *      java.lang.String, java.lang.String)
 */
public void processCollection(Class collectionType, String incoming, String originalRecordUuid)
        throws Exception {

    OpenmrsObject owner = null;
    String ownerClassName = null;
    String ownerCollectionPropertyName = null;
    String ownerUuid = null;
    String ownerCollectionAction = null; //is this coll update or recreate?
    NodeList nodes = null;
    Set entries = null;
    int i = 0;
    boolean needsRecreate = false;

    //first find out what kind of set we are dealing with:
    //Hibernate PersistentSortedSet == TreeSet, note this is derived from PersistentSet so we have to test for it first
    //Hibernate PersistentSet == HashSet
    if (!org.hibernate.collection.PersistentSet.class.isAssignableFrom(collectionType)) {
        //don't know how to process this collection type
        log.error("Do not know how to process this collection type: " + collectionType.getName());
        throw new SyncIngestException(SyncConstants.ERROR_ITEM_BADXML_MISSING, null, incoming, null);
    }

    //next, pull out the owner node and get owner instance: 
    //we need reference to owner object before we start messing with collection entries
    nodes = SyncUtil.getChildNodes(incoming);
    if (nodes == null) {
        throw new SyncIngestException(SyncConstants.ERROR_ITEM_BADXML_MISSING, null, incoming, null);
    }
    for (i = 0; i < nodes.getLength(); i++) {
        if ("owner".equals(nodes.item(i).getNodeName())) {
            //pull out collection owner info: class name of owner, its uuid, and name of property on owner that holds this collection
            ownerClassName = ((Element) nodes.item(i)).getAttribute("type");
            ownerCollectionPropertyName = ((Element) nodes.item(i)).getAttribute("properyName");
            ownerCollectionAction = ((Element) nodes.item(i)).getAttribute("action");
            ownerUuid = ((Element) nodes.item(i)).getAttribute("uuid");
            break;
        }
    }
    if (ownerUuid == null) {
        log.error("Owner uuid is null while processing collection.");
        throw new SyncIngestException(SyncConstants.ERROR_ITEM_BADXML_MISSING, null, incoming, null);
    }
    owner = (OpenmrsObject) SyncUtil.getOpenmrsObj(ownerClassName, ownerUuid);

    //we didn't get the owner record: throw an exception
    //TODO: in future, when we have conflict resolution, this may be handled differently
    if (owner == null) {
        log.error("Cannot retrieve the collection's owner object.");
        log.error("Owner info: " + "\nownerClassName:" + ownerClassName + "\nownerCollectionPropertyName:"
                + ownerCollectionPropertyName + "\nownerCollectionAction:" + ownerCollectionAction
                + "\nownerUuid:" + ownerUuid);
        throw new SyncIngestException(SyncConstants.ERROR_ITEM_BADXML_MISSING, null, incoming, null);
    }

    //NOTE: we cannot just new up a collection and assign to parent:
    //if hibernate mapping has cascade deletes, it will orphan existing collection and hibernate will throw error
    //to that effect: "A collection with cascade="all-delete-orphan" was no longer referenced by the owning entity instance"
    //*only* if this is recreate; clear up the existing collection and start over
    Method m = null;
    m = SyncUtil.getGetterMethod(owner.getClass(), ownerCollectionPropertyName);
    if (m == null) {
        log.error(
                "Cannot retrieve getter method for ownerCollectionPropertyName:" + ownerCollectionPropertyName);
        log.error("Owner info: " + "\nownerClassName:" + ownerClassName + "\nownerCollectionPropertyName:"
                + ownerCollectionPropertyName + "\nownerCollectionAction:" + ownerCollectionAction
                + "\nownerUuid:" + ownerUuid);
        throw new SyncIngestException(SyncConstants.ERROR_ITEM_BADXML_MISSING, null, incoming, null);
    }
    entries = (Set) m.invoke(owner, (Object[]) null);

    /*Two instances where even after this we may need to create a new collection:
     * a) when collection is lazy=false and it is newly created; then asking parent for it will
     * not return new & empty proxy, it will return null
     * b) Special recreate logic:
     * if fetched owner instance has nothing attached, then it is safe to just create brand new collection
     * and assign it to owner without worrying about getting orphaned deletes error
     * if owner has something attached, then we process recreate as delete/update; 
     * that is clear out the existing entries and then proceed to add ones received via sync. 
     * This code essentially mimics hibernate org.hibernate.engine.Collections.prepareCollectionForUpdate()
     * implementation. 
     * NOTE: The unfortunate bi-product of this approach is that this series of events will not produce 
     * 'recreate' event in the interceptor: thus parent's sync journal entries will look slightly diferently 
     * from what child was sending up: child sent up single 'recreate' collection action however
     * parent will instead have single 'update' with deletes & updates in it. Presumably, this is a distinction
     * without a difference.
     */

    if (entries == null) {
        if (org.hibernate.collection.PersistentSortedSet.class.isAssignableFrom(collectionType)) {
            needsRecreate = true;
            entries = new TreeSet();
        } else if (org.hibernate.collection.PersistentSet.class.isAssignableFrom(collectionType)) {
            needsRecreate = true;
            entries = new HashSet();
        }
    }

    if (entries == null) {
        log.error("Was not able to retrieve reference to the collection using owner object.");
        log.error("Owner info: " + "\nownerClassName:" + ownerClassName + "\nownerCollectionPropertyName:"
                + ownerCollectionPropertyName + "\nownerCollectionAction:" + ownerCollectionAction
                + "\nownerUuid:" + ownerUuid);
        throw new SyncIngestException(SyncConstants.ERROR_ITEM_BADXML_MISSING, null, incoming, null);
    }

    //clear existing entries before adding new ones:
    if ("recreate".equals(ownerCollectionAction)) {
        entries.clear();
    }

    //now, finally process nodes, phew!!
    for (i = 0; i < nodes.getLength(); i++) {
        if ("entry".equals(nodes.item(i).getNodeName())) {
            String entryClassName = ((Element) nodes.item(i)).getAttribute("type");
            String entryUuid = ((Element) nodes.item(i)).getAttribute("uuid");
            String entryAction = ((Element) nodes.item(i)).getAttribute("action");
            Object entry = SyncUtil.getOpenmrsObj(entryClassName, entryUuid);

            // objects like Privilege, Role, and GlobalProperty might have different
            // uuids for different objects
            if (entry == null && SyncUtil.hasNoAutomaticPrimaryKey(entryClassName)) {
                String key = ((Element) nodes.item(i)).getAttribute("primaryKey");
                entry = getOpenmrsObjectByPrimaryKey(entryClassName, key);
            }

            if (entry == null) {
                // blindly ignore this entry if it doesn't exist and we're trying to delete it
                if (!"delete".equals(entryAction)) {
                    //the object not found: most likely cause here is data collision

                    log.error("Was not able to retrieve reference to the collection entry object by uuid.");
                    log.error("Entry info: " + "\nentryClassName: " + entryClassName + "\nentryUuid: "
                            + entryUuid + "\nentryAction: " + entryAction);
                    log.error("Sync record original uuid: " + originalRecordUuid);
                    throw new SyncIngestException(SyncConstants.ERROR_ITEM_UUID_NOT_FOUND,
                            ownerClassName + " missing " + entryClassName + "," + entryUuid, incoming, null);
                }
            } else if ("update".equals(entryAction)) {
                if (!OpenmrsUtil.collectionContains(entries, entry)) {
                    entries.add(entry);
                }
            } else if ("delete".equals(entryAction)) {
                OpenmrsUtil.collectionContains(entries, entry);

                if (!entries.remove(entry)) {
                    //couldn't find entry in collection: hmm, bad implementation of equals?
                    //fall back to trying to find the item in entries by uuid
                    OpenmrsObject toBeRemoved = null;
                    for (Object o : entries) {
                        if (o instanceof OpenmrsObject) {
                            if (entryUuid.equals(((OpenmrsObject) o).getUuid())) {
                                toBeRemoved = (OpenmrsObject) o;
                                break;
                            }
                        }
                    }
                    if (toBeRemoved == null) {
                        //the item to be removed was not located in the collection: log it for reference and continue
                        log.warn("Was not able to process collection entry delete.");
                        log.warn("Owner info: " + "\nownerClassName:" + ownerClassName
                                + "\nownerCollectionPropertyName:" + ownerCollectionPropertyName
                                + "\nownerCollectionAction:" + ownerCollectionAction + "\nownerUuid:"
                                + ownerUuid);
                        log.warn("entry info: " + "\nentryClassName:" + entryClassName + "\nentryUuid:"
                                + entryUuid);
                        log.warn("Sync record original uuid: " + originalRecordUuid);
                    } else {
                        //finally, remove it from the collection
                        entries.remove(toBeRemoved);
                    }
                }

            } else {
                log.error("Unknown collection entry action, action was: " + entryAction);
                throw new SyncIngestException(SyncConstants.ERROR_ITEM_NOT_COMMITTED, ownerClassName, incoming,
                        null);
            }
        }
    }

    /*
     * Pass the original uuid to interceptor: this will prevent the change
     * from being sent back to originating server. 
     */
    HibernateSyncInterceptor.setOriginalRecordUuid(originalRecordUuid);

    //assign collection back to the owner if it is recreated
    if (needsRecreate) {
        SyncUtil.setProperty(owner, ownerCollectionPropertyName, entries);
    }

    //finally, trigger update
    try {
        //no need to mess around with precommit actions for collections, at least
        //at this point
        SyncUtil.updateOpenmrsObject(owner, ownerClassName, ownerUuid);
    } catch (Exception e) {
        log.error("Unexpected exception occurred while processing hibernate collections", e);
        throw new SyncIngestException(SyncConstants.ERROR_ITEM_NOT_COMMITTED, ownerClassName, incoming, null);
    }
}
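
In the sync code above, clear() empties the owning entity's persistent collection when the incoming action is "recreate", so the same collection instance is mutated in place rather than replaced, which avoids the "all-delete-orphan" error mentioned in the comments. A simplified, hypothetical sketch of that idea (not the OpenMRS API):

import java.util.HashSet;
import java.util.Set;

public class RecreateCollectionSketch {
    // Hypothetical stand-in for the ORM-managed collection on the owner entity.
    private final Set<String> tags = new HashSet<>();

    // Apply an incoming collection state; "recreate" means replace the contents
    // wholesale, but we must mutate the existing set rather than assign a new one
    // so the ORM keeps tracking the same collection instance.
    public void applyIncoming(String action, Set<String> incomingEntries) {
        if ("recreate".equals(action)) {
            tags.clear();              // drop existing entries in place
        }
        tags.addAll(incomingEntries);  // then add the entries received via sync
    }

    public Set<String> getTags() {
        return tags;
    }

    public static void main(String[] args) {
        RecreateCollectionSketch owner = new RecreateCollectionSketch();
        owner.applyIncoming("update", Set.of("a", "b"));
        owner.applyIncoming("recreate", Set.of("c"));
        System.out.println(owner.getTags()); // [c]
    }
}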

From source file:org.broadinstitute.gatk.utils.variant.GATKVariantContextUtils.java

/**
 * Merges VariantContexts into a single hybrid.  Takes genotypes for common samples in priority order, if provided.
 * If uniquifySamples is true, the priority order is ignored and names are created by concatenating the VC name with
 * the sample name.
 * simpleMerge does not verify any more unique sample names EVEN if genotypeMergeOptions == GenotypeMergeType.REQUIRE_UNIQUE. One should use
 * SampleUtils.verifyUniqueSamplesNames to check that before using simpleMerge.
 *
 * For more information on this method see: http://www.thedistractionnetwork.com/programmer-problem/
 *
 * @param unsortedVCs               collection of unsorted VCs
 * @param priorityListOfVCs         priority list detailing the order in which we should grab the VCs
 * @param filteredRecordMergeType   merge type for filtered records
 * @param genotypeMergeOptions      merge option for genotypes
 * @param annotateOrigin            should we annotate the set it came from?
 * @param printMessages             should we print messages?
 * @param setKey                    the key name of the set
 * @param filteredAreUncalled       are filtered records uncalled?
 * @param mergeInfoWithMaxAC        should we merge in info from the VC with maximum allele count?
 * @return new VariantContext       representing the merge of unsortedVCs
 */
public static VariantContext simpleMerge(final Collection<VariantContext> unsortedVCs,
        final List<String> priorityListOfVCs, final int originalNumOfVCs,
        final FilteredRecordMergeType filteredRecordMergeType, final GenotypeMergeType genotypeMergeOptions,
        final boolean annotateOrigin, final boolean printMessages, final String setKey,
        final boolean filteredAreUncalled, final boolean mergeInfoWithMaxAC) {
    if (unsortedVCs == null || unsortedVCs.isEmpty())
        return null;

    if (priorityListOfVCs != null && originalNumOfVCs != priorityListOfVCs.size())
        throw new IllegalArgumentException(
                "the number of the original VariantContexts must be the same as the number of VariantContexts in the priority list");

    if (annotateOrigin && priorityListOfVCs == null && originalNumOfVCs == 0)
        throw new IllegalArgumentException(
                "Cannot merge calls and annotate their origins without a complete priority list of VariantContexts or the number of original VariantContexts");

    final List<VariantContext> preFilteredVCs = sortVariantContextsByPriority(unsortedVCs, priorityListOfVCs,
            genotypeMergeOptions);
    // Make sure all variant contexts are padded with reference base in case of indels if necessary
    List<VariantContext> VCs = new ArrayList<>();

    for (final VariantContext vc : preFilteredVCs) {
        if (!filteredAreUncalled || vc.isNotFiltered())
            VCs.add(vc);
    }

    if (VCs.isEmpty()) // everything is filtered out and we're filteredAreUncalled
        return null;

    // establish the baseline info from the first VC
    final VariantContext first = VCs.get(0);
    final String name = first.getSource();
    final Allele refAllele = determineReferenceAllele(VCs);

    final LinkedHashSet<Allele> alleles = new LinkedHashSet<>();
    final Set<String> filters = new HashSet<>();
    final Map<String, Object> attributes = new LinkedHashMap<>();
    final Set<String> inconsistentAttributes = new HashSet<>();
    final Set<String> variantSources = new HashSet<>(); // contains the set of sources we found in our set of VCs that are variant
    final Set<String> rsIDs = new LinkedHashSet<>(1); // most of the time there's one id

    VariantContext longestVC = first;
    int depth = 0;
    int maxAC = -1;
    final Map<String, Object> attributesWithMaxAC = new LinkedHashMap<>();
    double log10PError = CommonInfo.NO_LOG10_PERROR;
    boolean anyVCHadFiltersApplied = false;
    VariantContext vcWithMaxAC = null;
    GenotypesContext genotypes = GenotypesContext.create();

    // counting the number of filtered and variant VCs
    int nFiltered = 0;

    boolean remapped = false;

    // cycle through and add info from the other VCs, making sure the loc/reference matches
    for (final VariantContext vc : VCs) {
        if (longestVC.getStart() != vc.getStart())
            throw new IllegalStateException(
                    "BUG: attempting to merge VariantContexts with different start sites: first="
                            + first.toString() + " second=" + vc.toString());

        if (VariantContextUtils.getSize(vc) > VariantContextUtils.getSize(longestVC))
            longestVC = vc; // get the longest location

        nFiltered += vc.isFiltered() ? 1 : 0;
        if (vc.isVariant())
            variantSources.add(vc.getSource());

        AlleleMapper alleleMapping = resolveIncompatibleAlleles(refAllele, vc, alleles);
        remapped = remapped || alleleMapping.needsRemapping();

        alleles.addAll(alleleMapping.values());

        mergeGenotypes(genotypes, vc, alleleMapping, genotypeMergeOptions == GenotypeMergeType.UNIQUIFY);

        // We always take the QUAL of the first VC with a non-MISSING qual for the combined value
        if (log10PError == CommonInfo.NO_LOG10_PERROR)
            log10PError = vc.getLog10PError();

        filters.addAll(vc.getFilters());
        anyVCHadFiltersApplied |= vc.filtersWereApplied();

        //
        // add attributes
        //
        // special case DP (add it up) and ID (just preserve it)
        //
        if (vc.hasAttribute(VCFConstants.DEPTH_KEY))
            depth += vc.getAttributeAsInt(VCFConstants.DEPTH_KEY, 0);
        if (vc.hasID())
            rsIDs.add(vc.getID());
        if (mergeInfoWithMaxAC && vc.hasAttribute(VCFConstants.ALLELE_COUNT_KEY)) {
            String rawAlleleCounts = vc.getAttributeAsString(VCFConstants.ALLELE_COUNT_KEY, null);
            // lets see if the string contains a "," separator
            if (rawAlleleCounts.contains(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR)) {
                final List<String> alleleCountArray = Arrays
                        .asList(rawAlleleCounts.substring(1, rawAlleleCounts.length() - 1)
                                .split(VCFConstants.INFO_FIELD_ARRAY_SEPARATOR));
                for (final String alleleCount : alleleCountArray) {
                    final int ac = Integer.valueOf(alleleCount.trim());
                    if (ac > maxAC) {
                        maxAC = ac;
                        vcWithMaxAC = vc;
                    }
                }
            } else {
                final int ac = Integer.valueOf(rawAlleleCounts);
                if (ac > maxAC) {
                    maxAC = ac;
                    vcWithMaxAC = vc;
                }
            }
        }

        for (final Map.Entry<String, Object> p : vc.getAttributes().entrySet()) {
            final String key = p.getKey();
            final Object value = p.getValue();
            // only output annotations that have the same value in every input VC
            // if we don't like the key already, don't go anywhere
            if (!inconsistentAttributes.contains(key)) {
                final boolean alreadyFound = attributes.containsKey(key);
                final Object boundValue = attributes.get(key);
                final boolean boundIsMissingValue = alreadyFound
                        && boundValue.equals(VCFConstants.MISSING_VALUE_v4);

                if (alreadyFound && !boundValue.equals(value) && !boundIsMissingValue) {
                    // we found the value but we're inconsistent, put it in the exclude list
                    inconsistentAttributes.add(key);
                    attributes.remove(key);
                } else if (!alreadyFound || boundIsMissingValue) { // no value
                    attributes.put(key, value);
                }
            }
        }
    }

    // if we have more alternate alleles in the merged VC than in one or more of the
    // original VCs, we need to strip out the GL/PLs (because they are no longer accurate), as well as allele-dependent attributes like AC,AF, and AD
    for (final VariantContext vc : VCs) {
        if (vc.getAlleles().size() == 1)
            continue;
        if (hasPLIncompatibleAlleles(alleles, vc.getAlleles())) {
            if (!genotypes.isEmpty()) {
                logger.debug(String.format(
                        "Stripping PLs at %s:%d-%d due to incompatible alleles merged=%s vs. single=%s",
                        vc.getChr(), vc.getStart(), vc.getEnd(), alleles, vc.getAlleles()));
            }
            genotypes = stripPLsAndAD(genotypes);
            // this will remove stale AC,AF attributed from vc
            VariantContextUtils.calculateChromosomeCounts(vc, attributes, true);
            break;
        }
    }

    // take the VC with the maxAC and pull the attributes into a modifiable map
    if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
        attributesWithMaxAC.putAll(vcWithMaxAC.getAttributes());
    }

    // if at least one record was unfiltered and we want a union, clear all of the filters
    if ((filteredRecordMergeType == FilteredRecordMergeType.KEEP_IF_ANY_UNFILTERED && nFiltered != VCs.size())
            || filteredRecordMergeType == FilteredRecordMergeType.KEEP_UNCONDITIONAL)
        filters.clear();

    if (annotateOrigin) { // we care about where the call came from
        String setValue;
        if (nFiltered == 0 && variantSources.size() == originalNumOfVCs) // nothing was unfiltered
            setValue = MERGE_INTERSECTION;
        else if (nFiltered == VCs.size()) // everything was filtered out
            setValue = MERGE_FILTER_IN_ALL;
        else if (variantSources.isEmpty()) // everyone was reference
            setValue = MERGE_REF_IN_ALL;
        else {
            final LinkedHashSet<String> s = new LinkedHashSet<>();
            for (final VariantContext vc : VCs)
                if (vc.isVariant())
                    s.add(vc.isFiltered() ? MERGE_FILTER_PREFIX + vc.getSource() : vc.getSource());
            setValue = Utils.join("-", s);
        }

        if (setKey != null) {
            attributes.put(setKey, setValue);
            if (mergeInfoWithMaxAC && vcWithMaxAC != null) {
                attributesWithMaxAC.put(setKey, setValue);
            }
        }
    }

    if (depth > 0)
        attributes.put(VCFConstants.DEPTH_KEY, String.valueOf(depth));

    final String ID = rsIDs.isEmpty() ? VCFConstants.EMPTY_ID_FIELD : Utils.join(",", rsIDs);

    final VariantContextBuilder builder = new VariantContextBuilder().source(name).id(ID);
    builder.loc(longestVC.getChr(), longestVC.getStart(), longestVC.getEnd());
    builder.alleles(alleles);
    builder.genotypes(genotypes);
    builder.log10PError(log10PError);
    if (anyVCHadFiltersApplied) {
        builder.filters(filters.isEmpty() ? filters : new TreeSet<>(filters));
    }
    builder.attributes(new TreeMap<>(mergeInfoWithMaxAC ? attributesWithMaxAC : attributes));

    // Trim the padded bases of all alleles if necessary
    final VariantContext merged = builder.make();
    if (printMessages && remapped)
        System.out.printf("Remapped => %s%n", merged);
    return merged;
}

From source file:org.apache.hadoop.hive.metastore.txn.TxnHandler.java

/**
 * Lock acquisition is meant to be fair, so every lock can only block on some lock with smaller
 * hl_lock_ext_id by only checking earlier locks.
 *
 * For any given SQL statment all locks required by it are grouped under single extLockId and are
 * granted all at once or all locks wait.
 *
 * This is expected to run at READ_COMMITTED.
 *
 * Note: this calls acquire() for (extLockId,intLockId) but extLockId is the same and we either take
 * all locks for given extLockId or none.  Would be more efficient to update state on all locks
 * at once.  Semantics are the same since this is all part of the same txn.
 *
 * If there is a concurrent commitTxn/rollbackTxn, those can only remove rows from HIVE_LOCKS.
 * If they happen to be for the same txnid, there will be a WW conflict (in MS DB), if different txnid,
 * checkLock() will in the worst case keep locks in Waiting state a little longer.
 */
@RetrySemantics.SafeToRetry("See @SafeToRetry")
private LockResponse checkLock(Connection dbConn, long extLockId)
        throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, MetaException, SQLException {
    TxnStore.MutexAPI.LockHandle handle = null;
    Statement stmt = null;
    ResultSet rs = null;
    LockResponse response = new LockResponse();
    /**
     * todo: Longer term we should pass this from client somehow - this would be an optimization;  once
     * that is in place make sure to build and test "writeSet" below using OperationType not LockType
     * With Static Partitions we assume that the query modifies exactly the partitions it locked.  (not entirely
     * realistic since Update/Delete may have some predicate that filters out all records out of
     * some partition(s), but plausible).  For DP, we acquire locks very wide (all known partitions),
     * but for most queries only a fraction will actually be updated.  #addDynamicPartitions() tells
     * us exactly which ones were written to.  Thus using this trick to kill a query early for
     * DP queries may be too restrictive.
     */
    boolean isPartOfDynamicPartitionInsert = true;
    try {
        /**
         * checkLock() must be mutexed against any other checkLock to make sure 2 conflicting locks
         * are not granted by parallel checkLock() calls.
         */
        handle = getMutexAPI().acquireLock(MUTEX_KEY.CheckLock.name());
        List<LockInfo> locksBeingChecked = getLockInfoFromLockId(dbConn, extLockId);//being acquired now
        response.setLockid(extLockId);

        LOG.debug("checkLock(): Setting savepoint. extLockId=" + JavaUtils.lockIdToString(extLockId));
        Savepoint save = dbConn.setSavepoint();
        StringBuilder query = new StringBuilder(
                "select hl_lock_ext_id, " + "hl_lock_int_id, hl_db, hl_table, hl_partition, hl_lock_state, "
                        + "hl_lock_type, hl_txnid from HIVE_LOCKS where hl_db in (");

        Set<String> strings = new HashSet<String>(locksBeingChecked.size());

        //This is the set of entities that the statement represented by extLockId wants to update
        List<LockInfo> writeSet = new ArrayList<>();

        for (LockInfo info : locksBeingChecked) {
            strings.add(info.db);
            if (!isPartOfDynamicPartitionInsert && info.type == LockType.SHARED_WRITE) {
                writeSet.add(info);
            }
        }
        if (!writeSet.isEmpty()) {
            if (writeSet.get(0).txnId == 0) {
                //Write operations always start a txn
                throw new IllegalStateException(
                        "Found Write lock for " + JavaUtils.lockIdToString(extLockId) + " but no txnid");
            }
            stmt = dbConn.createStatement();
            StringBuilder sb = new StringBuilder(
                    " ws_database, ws_table, ws_partition, " + "ws_txnid, ws_commit_id "
                            + "from WRITE_SET where ws_commit_id >= " + writeSet.get(0).txnId + " and (");//see commitTxn() for more info on this inequality
            for (LockInfo info : writeSet) {
                sb.append("(ws_database = ").append(quoteString(info.db)).append(" and ws_table = ")
                        .append(quoteString(info.table)).append(" and ws_partition ")
                        .append(info.partition == null ? "is null" : "= " + quoteString(info.partition))
                        .append(") or ");
            }
            sb.setLength(sb.length() - 4);//nuke trailing " or "
            sb.append(")");
            //1 row is sufficient to know we have to kill the query
            rs = stmt.executeQuery(sqlGenerator.addLimitClause(1, sb.toString()));
            if (rs.next()) {
                /**
                 * if here, it means we found an already committed txn which overlaps with the current one and
                 * it updated the same resource the current txn wants to update.  By the first-committer-wins
                 * rule, the current txn will not be allowed to commit, so we may as well kill it now;  this is just an
                 * optimization to prevent wasting cluster resources to run a query which is known to be DOA.
                 * {@link #commitTxn(CommitTxnRequest)} has the primary responsibility to ensure this.
                 * checkLock() runs at READ_COMMITTED so you could have another (Hive) txn running commitTxn()
                 * in parallel and thus writing to WRITE_SET.  commitTxn() logic is properly mutexed to ensure
                 * that we don't "miss" any WW conflicts. We could've mutexed the checkLock() and commitTxn()
                 * as well but this reduces concurrency for very little gain.
                 * Note that update/delete (which runs as dynamic partition insert) acquires a lock on the table,
                 * but WRITE_SET has entries for actual partitions updated.  Thus this optimization will "miss"
                 * the WW conflict but it will be caught in commitTxn() where actual partitions written are known.
                 * This is OK since we want 2 concurrent updates that update different sets of partitions to both commit.
                 */
                String resourceName = rs.getString(1) + '/' + rs.getString(2);
                String partName = rs.getString(3);
                if (partName != null) {
                    resourceName += '/' + partName;
                }

                String msg = "Aborting " + JavaUtils.txnIdToString(writeSet.get(0).txnId)
                        + " since a concurrent committed transaction [" + JavaUtils.txnIdToString(rs.getLong(4))
                        + "," + rs.getLong(5) + "] has already updated resouce '" + resourceName + "'";
                LOG.info(msg);
                if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), true) != 1) {
                    throw new IllegalStateException(msg + " FAILED!");
                }
                dbConn.commit();
                throw new TxnAbortedException(msg);
            }
            close(rs, stmt, null);
        }

        boolean first = true;
        for (String s : strings) {
            if (first)
                first = false;
            else
                query.append(", ");
            query.append('\'');
            query.append(s);
            query.append('\'');
        }
        query.append(")");

        // If any of the table requests are null, then I need to pull all the
        // table locks for this db.
        boolean sawNull = false;
        strings.clear();
        for (LockInfo info : locksBeingChecked) {
            if (info.table == null) {
                sawNull = true;
                break;
            } else {
                strings.add(info.table);
            }
        }
        if (!sawNull) {
            query.append(" and (hl_table is null or hl_table in(");
            first = true;
            for (String s : strings) {
                if (first)
                    first = false;
                else
                    query.append(", ");
                query.append('\'');
                query.append(s);
                query.append('\'');
            }
            query.append("))");

            // If any of the partition requests are null, then I need to pull all
            // partition locks for this table.
            sawNull = false;
            strings.clear();
            for (LockInfo info : locksBeingChecked) {
                if (info.partition == null) {
                    sawNull = true;
                    break;
                } else {
                    strings.add(info.partition);
                }
            }
            if (!sawNull) {
                query.append(" and (hl_partition is null or hl_partition in(");
                first = true;
                for (String s : strings) {
                    if (first)
                        first = false;
                    else
                        query.append(", ");
                    query.append('\'');
                    query.append(s);
                    query.append('\'');
                }
                query.append("))");
            }
        }
        query.append(" and hl_lock_ext_id <= ").append(extLockId);

        LOG.debug("Going to execute query <" + query.toString() + ">");
        stmt = dbConn.createStatement();
        rs = stmt.executeQuery(query.toString());
        SortedSet<LockInfo> lockSet = new TreeSet<LockInfo>(new LockInfoComparator());
        while (rs.next()) {
            lockSet.add(new LockInfo(rs));
        }
        // Turn the tree set into an array so we can move back and forth easily
        // in it.
        LockInfo[] locks = lockSet.toArray(new LockInfo[lockSet.size()]);
        if (LOG.isTraceEnabled()) {
            LOG.trace("Locks to check(full): ");
            for (LockInfo info : locks) {
                LOG.trace("  " + info);
            }
        }

        for (LockInfo info : locksBeingChecked) {
            // Find the lock record we're checking
            int index = -1;
            for (int i = 0; i < locks.length; i++) {
                if (locks[i].equals(info)) {
                    index = i;
                    break;
                }
            }

            // If we didn't find the lock, then it must not be in the table
            if (index == -1) {
                LOG.debug("Going to rollback");
                dbConn.rollback();
                throw new MetaException(
                        "How did we get here, we heartbeated our lock before we started! ( " + info + ")");
            }

            // If we've found it and it's already been marked acquired,
            // then just look at the other locks.
            if (locks[index].state == LockState.ACQUIRED) {
                /**this is what makes this method @SafeToRetry*/
                continue;
            }

            // Look at everything in front of this lock to see if it should block
            // it or not.
            boolean acquired = false;
            for (int i = index - 1; i >= 0; i--) {
                // Check if we're operating on the same database, if not, move on
                if (!locks[index].db.equals(locks[i].db)) {
                    continue;
                }

                // If table is null on either of these, then they are claiming to
                // lock the whole database and we need to check it.  Otherwise,
                // check if they are operating on the same table, if not, move on.
                if (locks[index].table != null && locks[i].table != null
                        && !locks[index].table.equals(locks[i].table)) {
                    continue;
                }

                // If partition is null on either of these, then they are claiming to
                // lock the whole table and we need to check it.  Otherwise,
                // check if they are operating on the same partition, if not, move on.
                if (locks[index].partition != null && locks[i].partition != null
                        && !locks[index].partition.equals(locks[i].partition)) {
                    continue;
                }

                // We've found something that matches what we're trying to lock,
                // so figure out if we can lock it too.
                LockAction lockAction = jumpTable.get(locks[index].type).get(locks[i].type).get(locks[i].state);
                LOG.debug("desired Lock: " + info + " checked Lock: " + locks[i] + " action: " + lockAction);
                switch (lockAction) {
                case WAIT:
                    if (!ignoreConflict(info, locks[i])) {
                        /* we acquire all locks for a given query atomically; if one blocks, all go into (or remain in)
                        * Waiting state.  wait() will undo any 'acquire()' which may have happened as part of
                        * this (metastore db) transaction and then we record which lock blocked the lock
                        * we were testing ('info').*/
                        wait(dbConn, save);
                        String sqlText = "update HIVE_LOCKS" + " set HL_BLOCKEDBY_EXT_ID=" + locks[i].extLockId
                                + ", HL_BLOCKEDBY_INT_ID=" + locks[i].intLockId + " where HL_LOCK_EXT_ID="
                                + info.extLockId + " and HL_LOCK_INT_ID=" + info.intLockId;
                        LOG.debug("Executing sql: " + sqlText);
                        int updCnt = stmt.executeUpdate(sqlText);
                        if (updCnt != 1) {
                            shouldNeverHappen(info.txnId, info.extLockId, info.intLockId);
                        }
                        LOG.debug("Going to commit");
                        dbConn.commit();
                        response.setState(LockState.WAITING);
                        LOG.debug("Lock(" + info + ") waiting for Lock(" + locks[i] + ")");
                        return response;
                    }
                    //fall through to ACQUIRE
                case ACQUIRE:
                    acquire(dbConn, stmt, extLockId, info);
                    acquired = true;
                    break;
                case KEEP_LOOKING:
                    continue;
                }
                if (acquired)
                    break; // We've acquired this lock component,
                // so get out of the loop and look at the next component.
            }

            // If we've arrived here and we have not already acquired, it means there's nothing in the
            // way of the lock, so acquire the lock.
            if (!acquired)
                acquire(dbConn, stmt, extLockId, info);
        }

        // We acquired all of the locks, so commit and return acquired.
        LOG.debug("Going to commit");
        dbConn.commit();
        response.setState(LockState.ACQUIRED);
    } finally {
        close(rs, stmt, null);
        if (handle != null) {
            handle.releaseLocks();
        }
    }
    return response;
}
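
checkLock() above builds SQL IN clauses in three passes over the locks being checked — database names, then table names, then partition names — reusing one HashSet and calling strings.clear() between passes instead of allocating a new set each time. The sketch below shows that reuse pattern on its own, with a simplified Lock holder standing in for LockInfo.

import java.util.HashSet;
import java.util.Set;

public class ReusedSetSketch {
    // Simplified stand-in for the LockInfo rows read from HIVE_LOCKS.
    static class Lock {
        final String db, table, partition;
        Lock(String db, String table, String partition) {
            this.db = db;
            this.table = table;
            this.partition = partition;
        }
    }

    // Render a set of names as a quoted SQL IN list, e.g. ('a', 'b').
    static String inClause(Set<String> values) {
        StringBuilder sb = new StringBuilder("(");
        boolean first = true;
        for (String v : values) {
            if (!first)
                sb.append(", ");
            first = false;
            sb.append('\'').append(v).append('\'');
        }
        return sb.append(')').toString();
    }

    public static void main(String[] args) {
        Lock[] locks = { new Lock("sales", "orders", "p=2023"), new Lock("sales", "orders", "p=2024") };

        Set<String> strings = new HashSet<String>();

        for (Lock l : locks)
            strings.add(l.db);
        System.out.println("hl_db in " + inClause(strings));

        strings.clear(); // reuse the same set for the table pass
        for (Lock l : locks)
            strings.add(l.table);
        System.out.println("hl_table in " + inClause(strings));

        strings.clear(); // and again for the partition pass
        for (Lock l : locks)
            strings.add(l.partition);
        System.out.println("hl_partition in " + inClause(strings));
    }
}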

From source file:org.apache.hadoop.hive.metastore.ObjectStore.java

@Override
public boolean grantPrivileges(PrivilegeBag privileges)
        throws InvalidObjectException, MetaException, NoSuchObjectException {
    boolean committed = false;
    int now = (int) (System.currentTimeMillis() / 1000);
    try {
        openTransaction();
        List<Object> persistentObjs = new ArrayList<Object>();

        List<HiveObjectPrivilege> privilegeList = privileges.getPrivileges();

        if (privilegeList != null && privilegeList.size() > 0) {
            Iterator<HiveObjectPrivilege> privIter = privilegeList.iterator();
            Set<String> privSet = new HashSet<String>();
            while (privIter.hasNext()) {
                HiveObjectPrivilege privDef = privIter.next();
                HiveObjectRef hiveObject = privDef.getHiveObject();
                String privilegeStr = privDef.getGrantInfo().getPrivilege();
                String[] privs = privilegeStr.split(",");
                String userName = privDef.getPrincipalName();
                PrincipalType principalType = privDef.getPrincipalType();
                String grantor = privDef.getGrantInfo().getGrantor();
                String grantorType = privDef.getGrantInfo().getGrantorType().toString();
                boolean grantOption = privDef.getGrantInfo().isGrantOption();
                privSet.clear();

                if (principalType == PrincipalType.ROLE) {
                    validateRole(userName);
                }

                if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) {
                    List<MGlobalPrivilege> globalPrivs = this.listPrincipalMGlobalGrants(userName,
                            principalType);
                    if (globalPrivs != null) {
                        for (MGlobalPrivilege priv : globalPrivs) {
                            if (priv.getGrantor().equalsIgnoreCase(grantor)) {
                                privSet.add(priv.getPrivilege());
                            }
                        }
                    }
                    for (String privilege : privs) {
                        if (privSet.contains(privilege)) {
                            throw new InvalidObjectException(privilege + " is already granted by " + grantor);
                        }
                        MGlobalPrivilege mGlobalPrivs = new MGlobalPrivilege(userName, principalType.toString(),
                                privilege, now, grantor, grantorType, grantOption);
                        persistentObjs.add(mGlobalPrivs);
                    }
                } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) {
                    MDatabase dbObj = getMDatabase(hiveObject.getDbName());
                    if (dbObj != null) {
                        List<MDBPrivilege> dbPrivs = this.listPrincipalMDBGrants(userName, principalType,
                                hiveObject.getDbName());
                        if (dbPrivs != null) {
                            for (MDBPrivilege priv : dbPrivs) {
                                if (priv.getGrantor().equalsIgnoreCase(grantor)) {
                                    privSet.add(priv.getPrivilege());
                                }
                            }
                        }
                        for (String privilege : privs) {
                            if (privSet.contains(privilege)) {
                                throw new InvalidObjectException(privilege + " is already granted on database "
                                        + hiveObject.getDbName() + " by " + grantor);
                            }
                            MDBPrivilege mDb = new MDBPrivilege(userName, principalType.toString(), dbObj,
                                    privilege, now, grantor, grantorType, grantOption);
                            persistentObjs.add(mDb);
                        }
                    }
                } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) {
                    MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject.getObjectName());
                    if (tblObj != null) {
                        List<MTablePrivilege> tablePrivs = this.listAllMTableGrants(userName, principalType,
                                hiveObject.getDbName(), hiveObject.getObjectName());
                        if (tablePrivs != null) {
                            for (MTablePrivilege priv : tablePrivs) {
                                if (priv.getGrantor() != null && priv.getGrantor().equalsIgnoreCase(grantor)) {
                                    privSet.add(priv.getPrivilege());
                                }
                            }
                        }
                        for (String privilege : privs) {
                            if (privSet.contains(privilege)) {
                                throw new InvalidObjectException(
                                        privilege + " is already granted on table [" + hiveObject.getDbName()
                                                + "," + hiveObject.getObjectName() + "] by " + grantor);
                            }
                            MTablePrivilege mTab = new MTablePrivilege(userName, principalType.toString(),
                                    tblObj, privilege, now, grantor, grantorType, grantOption);
                            persistentObjs.add(mTab);
                        }
                    }
                } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) {
                    MPartition partObj = this.getMPartition(hiveObject.getDbName(), hiveObject.getObjectName(),
                            hiveObject.getPartValues());
                    String partName = null;
                    if (partObj != null) {
                        partName = partObj.getPartitionName();
                        List<MPartitionPrivilege> partPrivs = this.listPrincipalMPartitionGrants(userName,
                                principalType, hiveObject.getDbName(), hiveObject.getObjectName(),
                                partObj.getPartitionName());
                        if (partPrivs != null) {
                            for (MPartitionPrivilege priv : partPrivs) {
                                if (priv.getGrantor().equalsIgnoreCase(grantor)) {
                                    privSet.add(priv.getPrivilege());
                                }
                            }
                        }
                        for (String privilege : privs) {
                            if (privSet.contains(privilege)) {
                                throw new InvalidObjectException(privilege
                                        + " is already granted on partition [" + hiveObject.getDbName() + ","
                                        + hiveObject.getObjectName() + "," + partName + "] by " + grantor);
                            }
                            MPartitionPrivilege mTab = new MPartitionPrivilege(userName,
                                    principalType.toString(), partObj, privilege, now, grantor, grantorType,
                                    grantOption);
                            persistentObjs.add(mTab);
                        }
                    }
                } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
                    MTable tblObj = getMTable(hiveObject.getDbName(), hiveObject.getObjectName());
                    if (tblObj != null) {
                        if (hiveObject.getPartValues() != null) {
                            MPartition partObj = null;
                            List<MPartitionColumnPrivilege> colPrivs = null;
                            partObj = this.getMPartition(hiveObject.getDbName(), hiveObject.getObjectName(),
                                    hiveObject.getPartValues());
                            if (partObj == null) {
                                continue;
                            }
                            colPrivs = this.listPrincipalMPartitionColumnGrants(userName, principalType,
                                    hiveObject.getDbName(), hiveObject.getObjectName(),
                                    partObj.getPartitionName(), hiveObject.getColumnName());

                            if (colPrivs != null) {
                                for (MPartitionColumnPrivilege priv : colPrivs) {
                                    if (priv.getGrantor().equalsIgnoreCase(grantor)) {
                                        privSet.add(priv.getPrivilege());
                                    }
                                }
                            }
                            for (String privilege : privs) {
                                if (privSet.contains(privilege)) {
                                    throw new InvalidObjectException(privilege
                                            + " is already granted on column " + hiveObject.getColumnName()
                                            + " [" + hiveObject.getDbName() + "," + hiveObject.getObjectName()
                                            + "," + partObj.getPartitionName() + "] by " + grantor);
                                }
                                MPartitionColumnPrivilege mCol = new MPartitionColumnPrivilege(userName,
                                        principalType.toString(), partObj, hiveObject.getColumnName(),
                                        privilege, now, grantor, grantorType, grantOption);
                                persistentObjs.add(mCol);
                            }

                        } else {
                            List<MTableColumnPrivilege> colPrivs = null;
                            colPrivs = this.listPrincipalMTableColumnGrants(userName, principalType,
                                    hiveObject.getDbName(), hiveObject.getObjectName(),
                                    hiveObject.getColumnName());

                            if (colPrivs != null) {
                                for (MTableColumnPrivilege priv : colPrivs) {
                                    if (priv.getGrantor().equalsIgnoreCase(grantor)) {
                                        privSet.add(priv.getPrivilege());
                                    }
                                }
                            }
                            for (String privilege : privs) {
                                if (privSet.contains(privilege)) {
                                    throw new InvalidObjectException(
                                            privilege + " is already granted on column "
                                                    + hiveObject.getColumnName() + " [" + hiveObject.getDbName()
                                                    + "," + hiveObject.getObjectName() + "] by " + grantor);
                                }
                                MTableColumnPrivilege mCol = new MTableColumnPrivilege(userName,
                                        principalType.toString(), tblObj, hiveObject.getColumnName(), privilege,
                                        now, grantor, grantorType, grantOption);
                                persistentObjs.add(mCol);
                            }
                        }
                    }
                }
            }
        }
        if (persistentObjs.size() > 0) {
            pm.makePersistentAll(persistentObjs);
        }
        committed = commitTransaction();
    } finally {
        if (!committed) {
            rollbackTransaction();
        }
    }
    return committed;
}
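
grantPrivileges() above keeps a single privSet, clearing it at the top of each HiveObjectPrivilege iteration and refilling it with the privileges the same grantor has already granted on that object, so a duplicate grant can be rejected. A minimal sketch of that clear-and-refill duplicate check follows; the request array and the existing-grant lists are hypothetical data in place of the metastore lookups.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class DuplicateGrantSketch {
    public static void main(String[] args) {
        // Hypothetical requests: object name -> comma-separated privileges (as in privilegeStr.split(",")).
        String[][] requests = { { "db1", "SELECT,INSERT" }, { "db2", "SELECT" } };

        // Hypothetical privileges already granted on each object by the same grantor.
        Map<String, List<String>> existing = new HashMap<String, List<String>>();
        existing.put("db1", Arrays.asList("INSERT"));
        existing.put("db2", Arrays.<String>asList());

        Set<String> privSet = new HashSet<String>();
        for (String[] request : requests) {
            String object = request[0];
            String[] privs = request[1].split(",");

            privSet.clear(); // reset before loading what is already granted on this object
            privSet.addAll(existing.get(object));

            for (String privilege : privs) {
                if (privSet.contains(privilege)) {
                    System.out.println(privilege + " is already granted on " + object);
                } else {
                    System.out.println("granting " + privilege + " on " + object);
                }
            }
        }
    }
}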