Example usage for java.util Set retainAll

List of usage examples for java.util Set retainAll

Introduction

On this page you can find example usage of java.util.Set.retainAll.

Prototype

boolean retainAll(Collection<?> c);

Source Link

Document

Retains only the elements in this set that are contained in the specified collection (optional operation).

Usage

From source file:no.sintef.ict.splcatool.CoveringArrayChvatal.java

/**
 * Returns the intersection of two sets of pairs without modifying either input.
 *
 * @param set1 first set (not modified)
 * @param set2 second set (not modified)
 * @return a new set containing only the elements present in both inputs
 */
public static Set<Pair2> intersect(Set<Pair2> set1, Set<Pair2> set2) {
    // Copy set1 so the caller's set is not mutated by retainAll.
    Set<Pair2> intersection = new HashSet<Pair2>(set1);
    // retainAll accepts any Collection; the previous extra copy of set2 into
    // a new HashSet was needless O(n) work and allocation.
    intersection.retainAll(set2);
    return intersection;
}

From source file:org.callimachusproject.server.chain.TransactionHandler.java

/**
 * Returns the origin from {@code origins} whose set of characters overlaps
 * most with the characters of {@code origin}, or null if origins is empty.
 */
private String closest(String origin, Set<String> origins) {
    final Set<Character> reference = toSet(origin.toCharArray());
    String bestMatch = null;
    int bestOverlap = -1;
    for (String candidate : origins) {
        Set<Character> overlap = toSet(candidate.toCharArray());
        overlap.retainAll(reference);
        if (overlap.size() > bestOverlap) {
            bestOverlap = overlap.size();
            bestMatch = candidate;
        }
    }
    return bestMatch;
}

From source file:org.jasig.portlet.contacts.control.PortletViewController.java

/**
 * Computes the set of contact domains to display: starts from the default-on
 * and user-enabled domains, removes user-disabled ones, and keeps only those
 * listed in the "domainsActive" preference. The result is ordered by each
 * domain's position in the "domainsActive" list.
 *
 * @param prefs the current portlet preferences
 * @return the active contact domains, in configured order
 */
@ModelAttribute("domains")
public Set<ContactDomain> getDomains(PortletPreferences prefs) {
    log.debug("finding Domains to return");
    final List<String> domainActive = Arrays.asList(prefs.getValues("domainsActive", new String[0]));

    String[] defaultOn = prefs.getValues("defaultOn", new String[0]);
    String[] userOn = prefs.getValues("domainOn", new String[0]);
    String[] userOff = prefs.getValues("domainOff", new String[0]);

    // Effective names = (defaults + user-enabled) - user-disabled, restricted to active.
    Set<String> domains = new HashSet<String>();
    domains.addAll(Arrays.asList(defaultOn));
    domains.addAll(Arrays.asList(userOn));
    domains.removeAll(Arrays.asList(userOff));
    domains.retainAll(domainActive);

    // Order by index in domainActive; Integer.compare avoids the overflow-prone
    // "index1 - index2" subtraction idiom.
    Set<ContactDomain> activeDomains = new TreeSet<ContactDomain>(new Comparator<ContactDomain>() {

        @Override
        public int compare(ContactDomain o1, ContactDomain o2) {
            return Integer.compare(domainActive.indexOf(o1.getName()), domainActive.indexOf(o2.getName()));
        }
    });
    for (ContactDomain domain : contactDomains) {
        if (domains.contains(domain.getName())) {
            activeDomains.add(domain);
        }
    }
    log.debug("returning " + activeDomains.size() + " domains");
    return activeDomains;
}

From source file:dk.netarkivet.harvester.harvesting.report.LegacyHarvestReport.java

/**
 * Post-processing happens on the scheduler side when ARC files
 * have been uploaded.
 *
 * For each domain harvested that also has a domain configuration in this job,
 * the crawl statistics (stop reason, object count, byte count) are gathered
 * and persisted as a new HarvestInfo on the corresponding Domain's history
 * via the DomainDAO. Domains harvested without a configuration for this job
 * are dropped (see TODO below).
 *
 * @param job the actual job.
 */
@Override
public void postProcess(Job job) {

    if (LOG.isInfoEnabled()) {
        LOG.info("Starting post-processing of harvest report for job " + job.getJobID());
    }
    long startTime = System.currentTimeMillis();

    // Get the map from domain names to domain configurations
    Map<String, String> configurationMap = job.getDomainConfigurationMap();

    // For each domain harvested, check if it corresponds to a
    // domain configuration for this Job and if so add a new HarvestInfo
    // to the DomainHistory of the corresponding Domain object.
    // TODO  Information about the domains harvested by the crawler
    // without a domain configuration for this job is deleted!
    // Should this information be saved in some way (perhaps stored
    // in metadata.arc-files?)

    // Restrict the harvested domain names to those this job has a
    // configuration for.
    final Set<String> domainNames = new HashSet<String>();
    domainNames.addAll(getDomainNames());
    domainNames.retainAll(configurationMap.keySet());
    final DomainDAO dao = DomainDAO.getInstance();
    for (String domainName : domainNames) {
        Domain domain = dao.read(domainName);

        // Retrieve crawl data from log and add it to HarvestInfo
        StopReason stopReason = getStopReason(domainName);
        if (stopReason == null) {
            LOG.warn("No stopreason found for domain '" + domainName + "'");
        }
        // Missing counts are recorded as -1 rather than aborting post-processing.
        Long countObjectRetrieved = getObjectCount(domainName);
        if (countObjectRetrieved == null) {
            LOG.warn("No count for objects retrieved found for domain '" + domainName + "'");
            countObjectRetrieved = -1L;
        }
        Long bytesReceived = getByteCount(domainName);
        if (bytesReceived == null) {
            LOG.warn("No count for bytes received found for domain '" + domainName + "'");
            bytesReceived = -1L;
        }
        //If StopReason is SIZE_LIMIT, we check if it's the harvests' size
        //limit, or rather a configuration size limit.

        //A harvest is considered to have hit the configuration limit if
        //1) The limit is lowest, or
        //2) The number of harvested bytes is greater than the limit

        // Note: Even though the per-config-byte-limit might have changed
        // between the time we calculated the job and now, it's okay we
        // compare with the new limit, since it gives us the most accurate
        // result for whether we want to harvest any more.
        if (stopReason == StopReason.SIZE_LIMIT) {
            long maxBytesPerDomain = job.getMaxBytesPerDomain();
            long configMaxBytes = domain.getConfiguration(configurationMap.get(domainName)).getMaxBytes();
            if (NumberUtils.compareInf(configMaxBytes, maxBytesPerDomain) <= 0
                    || NumberUtils.compareInf(configMaxBytes, bytesReceived) <= 0) {
                stopReason = StopReason.CONFIG_SIZE_LIMIT;
            }
        } else if (stopReason == StopReason.OBJECT_LIMIT) {
            // Same reclassification for object limits: if the config's limit is
            // the tighter one, the config (not the harvest) caused the stop.
            long maxObjectsPerDomain = job.getMaxObjectsPerDomain();
            long configMaxObjects = domain.getConfiguration(configurationMap.get(domainName)).getMaxObjects();
            if (NumberUtils.compareInf(configMaxObjects, maxObjectsPerDomain) <= 0) {
                stopReason = StopReason.CONFIG_OBJECT_LIMIT;
            }
        }
        // Create the HarvestInfo object
        HarvestInfo hi = new HarvestInfo(job.getOrigHarvestDefinitionID(), job.getJobID(), domain.getName(),
                configurationMap.get(domain.getName()), new Date(), bytesReceived, countObjectRetrieved,
                stopReason);

        // Add HarvestInfo to Domain and make data persistent
        // by updating DAO
        domain.getHistory().addHarvestInfo(hi);
        dao.update(domain);
    }

    if (LOG.isInfoEnabled()) {
        long time = System.currentTimeMillis() - startTime;
        LOG.info("Finished post-processing of harvest report for job " + job.getJobID() + ", operation took "
                + StringUtils.formatDuration(time / TimeUtils.SECOND_IN_MILLIS));
    }

}

From source file:org.biopax.psidev.ontology_manager.impl.OntologyManagerImpl.java

/**
 * Finds all ontology terms whose preferred name or any name synonym equals
 * {@code name}, case-insensitively.
 *
 * @param name       the term name to search for; must not be null
 * @param ontologies ontology IDs to restrict the search to; when null or
 *                   empty, all known ontologies are searched
 * @return the matching terms (possibly empty, never null)
 */
public Set<OntologyTermI> searchTermByName(String name, Set<String> ontologies) {
    Set<OntologyTermI> found = new HashSet<OntologyTermI>();
    assert name != null : "searchTermByName: null arg.";

    // Restrict the search to the requested ontologies (unknown IDs are ignored).
    Set<String> ontologyIDs = new HashSet<String>(getOntologyIDs());
    if (ontologies != null && !ontologies.isEmpty())
        ontologyIDs.retainAll(ontologies);

    for (String ontologyId : ontologyIDs) {
        OntologyAccess oa = getOntology(ontologyId);
        for (OntologyTermI term : oa.getOntologyTerms()) {
            String prefName = term.getPreferredName();
            if (prefName == null) {
                // Fixed typo in the error message ("preffered" -> "preferred").
                log.error("searchTermByName: NULL preferred name for term " + term.getTermAccession() + " in "
                        + ontologyId + "; report to authors.");
            } else if (name.equalsIgnoreCase(prefName)) {
                found.add(term);
            } else {
                for (String syn : term.getNameSynonyms()) {
                    if (syn.equalsIgnoreCase(name)) {
                        found.add(term);
                        break; // same term either way; no need to scan further synonyms
                    }
                }
            }
        }
    }

    return found;
}

From source file:edu.wisc.my.portlets.dmp.web.ViewMenuController.java

/**
 * Renders the dynamic menu for the current user: resolves the user's groups,
 * loads the menu named by the MENU_NAME portlet preference, and builds the
 * model (including a content cache key derived from the menu groups the user
 * actually belongs to).
 *
 * @see org.springframework.web.portlet.mvc.AbstractController#handleRenderRequestInternal(javax.portlet.RenderRequest, javax.portlet.RenderResponse)
 */
@Override
protected ModelAndView handleRenderRequestInternal(RenderRequest request, RenderResponse response)
        throws Exception {
    // find the username
    final String userName = request.getRemoteUser();
    if (this.logger.isDebugEnabled()) {
        this.logger.debug("remoteUser='" + userName + "'");
    }

    // get the menu name from the portlet properties
    final PortletPreferences pp = request.getPreferences();
    final String menuName = pp.getValue(MENU_NAME, null);

    // find all the groups this person is a member of (the DAO may return null)
    final String[] userGroups = this.groupsDao.getContainingGroups(userName);

    if (this.logger.isInfoEnabled()) {
        // Arrays.toString is null-safe; Arrays.asList(userGroups) threw an NPE
        // here whenever the groups lookup returned null.
        this.logger.info("Rendering Dynamic Menu '" + menuName + "' with group list '"
                + Arrays.toString(userGroups) + "' for user '" + userName + "'");
    }

    final MenuItem menuRoot;
    if (userGroups != null) {
        //get the root MenuItem for the name and list of groups
        menuRoot = this.menuDao.getMenu(menuName, userGroups);
    } else {
        //Get the root MenuItem for the name
        menuRoot = this.menuDao.getMenu(menuName);
    }

    final Map<String, Object> model = new HashMap<String, Object>();
    model.put(MODEL_MENU_NAME, menuName);

    if (menuRoot != null) {
        //All the groups the menu supports
        final Set<String> menuGroups = this.getAllMenuGroups(menuRoot);
        //Reduce menu groups to just those that overlap with the user's group list.
        //Guarded: with a null group list there is no overlap; the unguarded
        //Arrays.asList(userGroups) previously threw an NPE in that case.
        if (userGroups != null) {
            menuGroups.retainAll(Arrays.asList(userGroups));
        } else {
            menuGroups.clear();
        }

        final Serializable cacheKey = this.getCacheKey(menuName, menuGroups);
        model.put("contentCacheKey", cacheKey);
        model.put(MODEL_ROOT_ITEM, menuRoot);

        return new ModelAndView(VIEW_MENU, model);
    }

    return new ModelAndView(VIEW_NO_MENU, model);
}

From source file:io.minio.policy.Statement.java

/**
 * Returns whether given statement is valid to process for given bucket name.
 */
public boolean isValid(String bucketName) {
    // The statement must name at least one recognized action.
    Set<String> recognizedActions = new HashSet<String>(this.actions);
    recognizedActions.retainAll(Constants.VALID_ACTIONS);
    if (recognizedActions.isEmpty()) {
        return false;
    }

    // Only "Allow" statements are processed.
    if (!this.effect.equals("Allow")) {
        return false;
    }

    // The statement must grant access to any AWS principal ("*").
    Set<String> awsPrincipals = this.principal.aws();
    if (awsPrincipals == null || !awsPrincipals.contains("*")) {
        return false;
    }

    // Finally, the statement must target either the bucket resource itself
    // or at least one resource under it ("<bucket>/...").
    String bucketResource = Constants.AWS_RESOURCE_PREFIX + bucketName;
    if (this.resources.contains(bucketResource)) {
        return true;
    }
    return !this.resources.startsWith(bucketResource + "/").isEmpty();
}

From source file:main.java.spelementex.Evaluator.java

/**
 * given result file, gets the true and false positive entity counts.
 * please note: this function is mainly intended for development purposes; 
 * it will not work if the result file does not have gold annotation labels 
 * in the second to the last column.
 * 
 * @param result lines of a result file: token lines whose last two
 *               whitespace-separated columns appear to be the gold and
 *               predicted BIO tags, with 3-column lines (e.g. "# 0 0.076469")
 *               separating documents — TODO confirm exact format with caller
 * @return double[] of size 2 with true positive count at index 0 and
 * false positive count at index 1.
 */
public static double[] getTpFp(String[] result) {
    double[] tpFp = new double[2];

    // Annotations are keyed as "start-end-tag"; the token-number sets guard
    // against registering the same span twice.
    Set<String> goldAnnotations = new HashSet<>();
    Set<Integer> visitedTokens = new HashSet<>();
    Set<String> predictedAnnotations = new HashSet<>();
    Set<Integer> predictedTokens = new HashSet<>();
    int tokenNum = 0;

    int length = result.length;
    for (int i = 0; i < length; i++) {
        String line = result[i].trim();
        if (line.equals(""))
            continue;

        String[] lineTokens = line.split("\\s+");

        // A 3-column line marks a document boundary.
        if (lineTokens.length == 3) {
            tokenNum = 0;
            //# 0 0.076469
            if (lineTokens[1].equals("0") && !goldAnnotations.isEmpty()) {
                // Score the finished document: gold annotations also predicted
                // are true positives; remaining predictions are false positives.
                goldAnnotations.retainAll(predictedAnnotations);
                tpFp[0] += goldAnnotations.size();
                predictedAnnotations.removeAll(goldAnnotations);
                tpFp[1] += predictedAnnotations.size();

                goldAnnotations = new HashSet<>();
                visitedTokens = new HashSet<>();
                predictedAnnotations = new HashSet<>();
                predictedTokens = new HashSet<>();
            }
            continue;
        }

        // Gold tag is the second-to-last column; record each gold entity once
        // per starting token.
        String goldTag = lineTokens[lineTokens.length - 2];
        if (goldTag.matches("B\\-.*") && !visitedTokens.contains(tokenNum)) {
            int[] tokenOffsets = getTokenOffset(result, i, tokenNum);
            goldAnnotations.add(tokenOffsets[0] + "-" + tokenOffsets[1] + "-" + goldTag);
            visitedTokens.add(tokenNum);
        }

        // Predicted tag is the last column.
        String predictedTag = lineTokens[lineTokens.length - 1];
        if (predictedTag.matches("B\\-.*") && !predictedTokens.contains(tokenNum)) {
            // A "B-" prediction starting on punctuation (or -lrb-/-rrb-) is
            // skipped unless the entity clearly continues on the next line.
            if (lineTokens[0].matches("[^a-zA-Z0-9]+") || lineTokens[0].equals("-lrb-")
                    || lineTokens[0].equals("-rrb-")) {
                if (i + 1 == result.length) {
                    tokenNum++;
                    continue;
                }
                line = result[i + 1].trim();
                lineTokens = line.split("\\s+");
                if (line.equals("") || lineTokens.length == 3 || lineTokens[lineTokens.length - 1].equals("O")
                        || lineTokens[lineTokens.length - 1].matches("B\\-.*")) {
                    tokenNum++;
                    continue;
                }
            }
            int[] tokenOffsets = getTokenOffset(result, i, tokenNum);

            // Only accept the predicted span if none of its tokens were already
            // claimed by an earlier predicted annotation.
            Set<Integer> tempPredictedTokens = new HashSet<>();
            for (int j = tokenOffsets[0]; j <= tokenOffsets[1]; j++)
                tempPredictedTokens.add(j);

            int before = tempPredictedTokens.size();
            tempPredictedTokens.removeAll(predictedTokens);
            int after = tempPredictedTokens.size();
            if (before == after) {
                predictedTokens.addAll(tempPredictedTokens);
                predictedAnnotations.add(tokenOffsets[0] + "-" + tokenOffsets[1] + "-" + predictedTag);
            }
        }

        tokenNum++;
    }

    // Score the final document (no trailing boundary line triggers it above).
    goldAnnotations.retainAll(predictedAnnotations);
    tpFp[0] += goldAnnotations.size();
    predictedAnnotations.removeAll(goldAnnotations);
    tpFp[1] += predictedAnnotations.size();

    return tpFp;
}

From source file:main.java.spelementex.Evaluator.java

/**
 * given result file, gets the true and false positive entity counts.
 * please note: this function is mainly intended for development purposes; 
 * it will not work if the result file does not have gold annotation labels 
 * in the second to the last column.
 *
 * Same scoring procedure as the single-argument overload, but only entities
 * of the given tag type are counted.
 * 
 * @param result lines of a result file: token lines whose last two
 *               whitespace-separated columns appear to be the gold and
 *               predicted BIO tags, with 3-column lines separating documents
 *               — TODO confirm exact format with caller
 * @param tagType the entity type to score; only "B-<tagType>" spans count
 * @return double[] of size 2 with true positive count at index 0 and
 * false positive count at index 1.
 */
public static double[] getTpFp(String[] result, String tagType) {
    double[] tpFp = new double[2];

    // Annotations are keyed as "start-end-tag"; the token-number sets guard
    // against registering the same span twice.
    Set<String> goldAnnotations = new HashSet<>();
    Set<Integer> visitedTokens = new HashSet<>();
    Set<String> predictedAnnotations = new HashSet<>();
    Set<Integer> predictedTokens = new HashSet<>();
    int tokenNum = 0;

    int length = result.length;
    for (int i = 0; i < length; i++) {
        String line = result[i].trim();
        if (line.equals(""))
            continue;

        String[] lineTokens = line.split("\\s+");

        // A 3-column line marks a document boundary.
        if (lineTokens.length == 3) {
            tokenNum = 0;
            //# 0 0.076469
            if (lineTokens[1].equals("0") && !goldAnnotations.isEmpty()) {
                // Score the finished document: gold annotations also predicted
                // are true positives; remaining predictions are false positives.
                goldAnnotations.retainAll(predictedAnnotations);
                tpFp[0] += goldAnnotations.size();
                predictedAnnotations.removeAll(goldAnnotations);
                tpFp[1] += predictedAnnotations.size();

                goldAnnotations = new HashSet<>();
                visitedTokens = new HashSet<>();
                predictedAnnotations = new HashSet<>();
                predictedTokens = new HashSet<>();
            }
            continue;
        }

        // Gold tag is the second-to-last column; only spans of tagType count.
        String goldTag = lineTokens[lineTokens.length - 2];
        if (goldTag.matches("B\\-" + tagType) && !visitedTokens.contains(tokenNum)) {
            int[] tokenOffsets = getTokenOffset(result, i, tokenNum);
            goldAnnotations.add(tokenOffsets[0] + "-" + tokenOffsets[1] + "-" + goldTag);
            visitedTokens.add(tokenNum);
        }

        // Predicted tag is the last column.
        String predictedTag = lineTokens[lineTokens.length - 1];
        if (predictedTag.matches("B\\-" + tagType) && !predictedTokens.contains(tokenNum)) {
            // A "B-" prediction starting on punctuation (or -lrb-/-rrb-) is
            // skipped unless the entity clearly continues on the next line.
            if (lineTokens[0].matches("[^a-zA-Z0-9]+") || lineTokens[0].equals("-lrb-")
                    || lineTokens[0].equals("-rrb-")) {
                if (i + 1 == result.length) {
                    tokenNum++;
                    continue;
                }
                line = result[i + 1].trim();
                lineTokens = line.split("\\s+");
                if (line.equals("") || lineTokens.length == 3 || lineTokens[lineTokens.length - 1].equals("O")
                        || lineTokens[lineTokens.length - 1].matches("B\\-.*")) {
                    tokenNum++;
                    continue;
                }
            }
            int[] tokenOffsets = getTokenOffset(result, i, tokenNum);

            // Only accept the predicted span if none of its tokens were already
            // claimed by an earlier predicted annotation.
            Set<Integer> tempPredictedTokens = new HashSet<>();
            for (int j = tokenOffsets[0]; j <= tokenOffsets[1]; j++)
                tempPredictedTokens.add(j);

            int before = tempPredictedTokens.size();
            tempPredictedTokens.removeAll(predictedTokens);
            int after = tempPredictedTokens.size();
            if (before == after) {
                predictedTokens.addAll(tempPredictedTokens);
                predictedAnnotations.add(tokenOffsets[0] + "-" + tokenOffsets[1] + "-" + predictedTag);
            }
        }

        tokenNum++;
    }

    // Score the final document (no trailing boundary line triggers it above).
    goldAnnotations.retainAll(predictedAnnotations);
    tpFp[0] += goldAnnotations.size();
    predictedAnnotations.removeAll(goldAnnotations);
    tpFp[1] += predictedAnnotations.size();

    return tpFp;
}

From source file:com.act.biointerpretation.cofactorremoval.CofactorRemover.java

/**
 * The function removes similar chemicals from the substrates and products (conenzymes) and remove duplicates
 * within each category./* w  w  w.j  av  a2  s . c om*/
 * @param reaction The reaction being updated.
 */
private void findAndIsolateCoenzymesFromReaction(Reaction reaction) {
    // Build ordered sets of the substrates/products.
    LinkedHashSet<Long> substrates = new LinkedHashSet<>(Arrays.asList(reaction.getSubstrates()));
    LinkedHashSet<Long> products = new LinkedHashSet<>(Arrays.asList(reaction.getProducts()));

    // Compute the intersection between the sets.
    Set<Long> intersection = new HashSet<>(substrates);
    intersection.retainAll(products);

    // A - int(A, B) = A / B
    substrates.removeAll(intersection);
    products.removeAll(intersection);

    // Update the reaction with the new (ordered) substrates/products + coenzymes.
    reaction.setSubstrates(substrates.toArray(new Long[substrates.size()]));
    reaction.setProducts(products.toArray(new Long[products.size()]));

    // Keep any existing coenzymes, but don't use them when computing the difference--they might be there for a reason.
    intersection.addAll(Arrays.asList(reaction.getCoenzymes()));
    reaction.setCoenzymes(intersection.toArray(new Long[intersection.size()]));
}