Example usage for java.util LinkedHashSet removeAll

List of usage examples for java.util LinkedHashSet removeAll

Introduction

On this page you can find example usage for java.util LinkedHashSet removeAll.

Prototype

boolean removeAll(Collection<?> c);

Document

Removes from this set all of its elements that are contained in the specified collection (optional operation).
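
A minimal, self-contained sketch (class name and values are made up for illustration): removing one collection's elements leaves the remaining elements in their original insertion order, which is the property most of the examples below rely on.

import java.util.Arrays;
import java.util.LinkedHashSet;

public class RemoveAllDemo {
    public static void main(String[] args) {
        LinkedHashSet<String> names = new LinkedHashSet<>(Arrays.asList("alpha", "beta", "gamma", "delta"));
        // removeAll returns true if the set changed as a result of the call
        boolean changed = names.removeAll(Arrays.asList("beta", "delta", "omega"));
        System.out.println(changed); // true
        System.out.println(names);   // [alpha, gamma], insertion order preserved
    }
}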

Usage

From source file:com.ery.ertc.estorm.util.DNS.java

/**
 * Returns all the IPs associated with the provided interface, if any, in textual form.
 * @param strInterface
 *            The name of the network interface or subinterface to query (e.g. eth0 or eth0:0) or the string "default"
 * @param returnSubinterfaces
 *            Whether to return IPs associated with subinterfaces of the given interface
 * @return A string vector of all the IPs associated with the provided interface
 * @throws UnknownHostException
 *             If an UnknownHostException is encountered in querying the default interface or the given interface cannot be found
 * 
 */
public static String[] getIPs(String strInterface, boolean returnSubinterfaces) throws UnknownHostException {
    if ("default".equals(strInterface)) {
        return new String[] { InetAddress.getLocalHost().getHostAddress() };
    }
    NetworkInterface netIf;
    try {
        netIf = NetworkInterface.getByName(strInterface);
        if (netIf == null) {
            netIf = getSubinterface(strInterface);
            if (netIf == null) {
                throw new UnknownHostException("Unknown interface " + strInterface);
            }
        }
    } catch (SocketException e) {
        LOG.warn("Unable to get IP for interface " + strInterface, e);
        return new String[] { InetAddress.getLocalHost().getHostAddress() };
    }

    // NB: Using a LinkedHashSet to preserve the order for callers
    // that depend on a particular element being 1st in the array.
    // For example, getDefaultIP always returns the first element.
    LinkedHashSet<InetAddress> allAddrs = new LinkedHashSet<InetAddress>();
    allAddrs.addAll(Collections.list(netIf.getInetAddresses()));
    if (!returnSubinterfaces) {
        allAddrs.removeAll(getSubinterfaceInetAddrs(netIf));
    }

    String ips[] = new String[allAddrs.size()];
    int i = 0;
    for (InetAddress addr : allAddrs) {
        ips[i++] = addr.getHostAddress();
    }
    return ips;
}
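
A hypothetical call to the method above, assuming the host actually exposes an interface named eth0 (otherwise the method throws UnknownHostException, which the caller must handle):

String[] addrs = DNS.getIPs("eth0", false); // subinterface addresses stripped via removeAll
for (String addr : addrs) {
    System.out.println(addr);
}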

From source file:com.buaa.cfs.net.DNS.java

/**
 * Returns all the IPs associated with the provided interface, if any, in textual form.
 *
 * @param strInterface        The name of the network interface or sub-interface to query (e.g. eth0 or eth0:0) or the
 *                            string "default"
 * @param returnSubinterfaces Whether to return IPs associated with subinterfaces of the given interface
 *
 * @return A string vector of all the IPs associated with the provided interface. The local host IP is returned if
 * the interface name "default" is specified or there is an I/O error looking for the given interface.
 *
 * @throws UnknownHostException If the given interface is invalid
 */
public static String[] getIPs(String strInterface, boolean returnSubinterfaces) throws UnknownHostException {
    if ("default".equals(strInterface)) {
        return new String[] { cachedHostAddress };
    }
    NetworkInterface netIf;
    try {
        netIf = NetworkInterface.getByName(strInterface);
        if (netIf == null) {
            netIf = getSubinterface(strInterface);
        }
    } catch (SocketException e) {
        LOG.warn("I/O error finding interface " + strInterface + ": " + e.getMessage());
        return new String[] { cachedHostAddress };
    }
    if (netIf == null) {
        throw new UnknownHostException("No such interface " + strInterface);
    }

    // NB: Using a LinkedHashSet to preserve the order for callers
    // that depend on a particular element being 1st in the array.
    // For example, getDefaultIP always returns the first element.
    LinkedHashSet<InetAddress> allAddrs = new LinkedHashSet<InetAddress>();
    allAddrs.addAll(Collections.list(netIf.getInetAddresses()));
    if (!returnSubinterfaces) {
        allAddrs.removeAll(getSubinterfaceInetAddrs(netIf));
    }

    String ips[] = new String[allAddrs.size()];
    int i = 0;
    for (InetAddress addr : allAddrs) {
        ips[i++] = addr.getHostAddress();
    }
    return ips;
}

From source file:com.adobe.cq.wcm.core.components.internal.models.v2.PageImpl.java

protected void populateClientLibCategoriesJs() {
    if (currentStyle != null) {
        clientLibCategoriesJsHead = currentStyle.get(PN_CLIENTLIBS_JS_HEAD, ArrayUtils.EMPTY_STRING_ARRAY);
        LinkedHashSet<String> categories = new LinkedHashSet<>(Arrays.asList(clientLibCategories));
        categories.removeAll(Arrays.asList(clientLibCategoriesJsHead));
        clientLibCategoriesJsBody = categories.toArray(new String[0]);
    }
}

From source file:com.act.biointerpretation.cofactorremoval.CofactorRemover.java

/**
 * This function removes chemicals shared between the substrates and products (coenzymes) and removes duplicates
 * within each category.
 * @param reaction The reaction being updated.
 */
private void findAndIsolateCoenzymesFromReaction(Reaction reaction) {
    // Build ordered sets of the substrates/products.
    LinkedHashSet<Long> substrates = new LinkedHashSet<>(Arrays.asList(reaction.getSubstrates()));
    LinkedHashSet<Long> products = new LinkedHashSet<>(Arrays.asList(reaction.getProducts()));

    // Compute the intersection between the sets.
    Set<Long> intersection = new HashSet<>(substrates);
    intersection.retainAll(products);

    // A - intersection(A, B) = A \ B (set difference)
    substrates.removeAll(intersection);
    products.removeAll(intersection);

    // Update the reaction with the new (ordered) substrates/products + coenzymes.
    reaction.setSubstrates(substrates.toArray(new Long[substrates.size()]));
    reaction.setProducts(products.toArray(new Long[products.size()]));

    // Keep any existing coenzymes, but don't use them when computing the difference--they might be there for a reason.
    intersection.addAll(Arrays.asList(reaction.getCoenzymes()));
    reaction.setCoenzymes(intersection.toArray(new Long[intersection.size()]));
}
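
A standalone sketch of the same set-difference pattern with made-up chemical ids, to make the effect of removeAll in this method concrete (the Reaction class itself is omitted):

LinkedHashSet<Long> substrates = new LinkedHashSet<>(Arrays.asList(10L, 20L, 30L));
LinkedHashSet<Long> products   = new LinkedHashSet<>(Arrays.asList(30L, 40L));
Set<Long> intersection = new HashSet<>(substrates);
intersection.retainAll(products);   // {30}: shared between both sides
substrates.removeAll(intersection); // [10, 20], original order kept
products.removeAll(intersection);   // [40]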

From source file:com.odoko.solrcli.actions.CrawlPostAction.java

/**
 * A very simple crawler that pulls URLs to fetch from a backlog and then
 * recurses N levels deep if recursive > 0. Links are parsed from HTML by
 * first getting an XHTML version using SolrCell with extractOnly, and are
 * followed if they are local. The crawler pauses for a default delay of
 * 10 seconds between fetches; this can be configured via the delay
 * variable. This is only meant for test purposes, as it does not respect
 * robots or anything else fancy :)
 * @param level which level to crawl
 * @param out output stream to write to
 * @return number of pages crawled on this level and below
 */
protected int webCrawl(int level, OutputStream out) {
  int numPages = 0;
  LinkedHashSet<URL> stack = backlog.get(level);
  int rawStackSize = stack.size();
  stack.removeAll(visited);
  int stackSize = stack.size();
  LinkedHashSet<URL> subStack = new LinkedHashSet<URL>();
  info("Entering crawl at level "+level+" ("+rawStackSize+" links total, "+stackSize+" new)");
  for(URL u : stack) {
    try {
      visited.add(u);
      PageFetcherResult result = pageFetcher.readPageFromUrl(u);
      if(result.httpStatus == 200) {
        u = (result.redirectUrl != null) ? result.redirectUrl : u;
        URL postUrl = new URL(appendParam(solrUrl.toString(), 
            "literal.id="+URLEncoder.encode(u.toString(),"UTF-8") +
            "&literal.url="+URLEncoder.encode(u.toString(),"UTF-8")));
        boolean success = postData(new ByteArrayInputStream(result.content), null, out, result.contentType, postUrl);
        if (success) {
          info("POSTed web resource "+u+" (depth: "+level+")");
          Thread.sleep(delay * 1000);
          numPages++;
          // Pull links from HTML pages only
          if(recursive > level && result.contentType.equals("text/html")) {
            Set<URL> children = pageFetcher.getLinksFromWebPage(u, new ByteArrayInputStream(result.content), result.contentType, postUrl);
            subStack.addAll(children);
          }
        } else {
          warn("An error occurred while posting "+u);
        }
      } else {
        warn("The URL "+u+" returned a HTTP result status of "+result.httpStatus);
      }
    } catch (IOException e) {
      warn("Caught exception when trying to open connection to "+u+": "+e.getMessage());
    } catch (InterruptedException e) {
      throw new RuntimeException(e);
    }
  }
  if(!subStack.isEmpty()) {
    backlog.add(subStack);
    numPages += webCrawl(level+1, out);
  }
  return numPages;    
}

From source file:com.ehsy.solr.util.SimplePostTool.java

/**
 * A very simple crawler that pulls URLs to fetch from a backlog and then
 * recurses N levels deep if recursive > 0. Links are parsed from HTML by
 * first getting an XHTML version using SolrCell with extractOnly, and are
 * followed if they are local. The crawler pauses for a default delay of
 * 10 seconds between fetches; this can be configured via the delay
 * variable. This is only meant for test purposes, as it does not respect
 * robots or anything else fancy :)
 * @param level which level to crawl
 * @param out output stream to write to
 * @return number of pages crawled on this level and below
 */
protected int webCrawl(int level, OutputStream out) {
    int numPages = 0;
    LinkedHashSet<URL> stack = backlog.get(level);
    int rawStackSize = stack.size();
    stack.removeAll(visited);
    int stackSize = stack.size();
    LinkedHashSet<URL> subStack = new LinkedHashSet<>();
    info("Entering crawl at level " + level + " (" + rawStackSize + " links total, " + stackSize + " new)");
    for (URL u : stack) {
        try {
            visited.add(u);
            PageFetcherResult result = pageFetcher.readPageFromUrl(u);
            if (result.httpStatus == 200) {
                u = (result.redirectUrl != null) ? result.redirectUrl : u;
                URL postUrl = new URL(
                        appendParam(solrUrl.toString(), "literal.id=" + URLEncoder.encode(u.toString(), "UTF-8")
                                + "&literal.url=" + URLEncoder.encode(u.toString(), "UTF-8")));
                boolean success = postData(new ByteArrayInputStream(result.content), null, out,
                        result.contentType, postUrl);
                if (success) {
                    info("POSTed web resource " + u + " (depth: " + level + ")");
                    Thread.sleep(delay * 1000);
                    numPages++;
                    // Pull links from HTML pages only
                    if (recursive > level && result.contentType.equals("text/html")) {
                        Set<URL> children = pageFetcher.getLinksFromWebPage(u,
                                new ByteArrayInputStream(result.content), result.contentType, postUrl);
                        subStack.addAll(children);
                    }
                } else {
                    warn("An error occurred while posting " + u);
                }
            } else {
                warn("The URL " + u + " returned a HTTP result status of " + result.httpStatus);
            }
        } catch (IOException e) {
            warn("Caught exception when trying to open connection to " + u + ": " + e.getMessage());
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
    }
    if (!subStack.isEmpty()) {
        backlog.add(subStack);
        numPages += webCrawl(level + 1, out);
    }
    return numPages;
}

From source file:com.streamsets.pipeline.stage.processor.fieldfilter.FieldFilterProcessor.java

@Override
protected void process(Record record, SingleLaneBatchMaker batchMaker) throws StageException {
    // use List to preserve the order of list fieldPaths - need to watch out for duplicates though
    List<String> allFieldPaths = record.getEscapedFieldPathsOrdered();
    // use LinkedHashSet to preserve order and dedupe as we go
    LinkedHashSet<String> fieldsToRemove;
    switch (filterOperation) {
    case REMOVE:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            fieldsToRemove.addAll(matchingFieldPaths);
        }
        break;
    case REMOVE_NULL:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() == null) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_EMPTY:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() != null
                        && record.get(fieldPath).getValue().equals("")) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_NULL_EMPTY:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && (record.get(fieldPath).getValue() == null
                        || record.get(fieldPath).getValue().equals(""))) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case REMOVE_CONSTANT:
        fieldsToRemove = new LinkedHashSet<>();
        for (String field : fields) {
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            for (String fieldPath : matchingFieldPaths) {
                if (record.has(fieldPath) && record.get(fieldPath).getValue() != null
                        && record.get(fieldPath).getValue().equals(constant)) {
                    fieldsToRemove.add(fieldPath);
                }
            }
        }
        break;
    case KEEP:
        //Algorithm:
        // - Get all possible field paths in the record
        //
        // - Remove the argument fields that must be retained, along with their parent fields and child fields,
        //   from the above set (accounting for wildcard characters while doing so). The remaining set of fields
        //   is what must be removed from the record.
        //
        // - Keep fieldsToRemove in order - sorting is too costly
        //List all the possible field paths in this record
        fieldsToRemove = new LinkedHashSet<>(allFieldPaths);
        for (String field : fields) {
            //Keep parent fields
            //get the parent fieldPaths for each of the fields to keep
            List<String> parentFieldPaths = getParentFields(field);
            //remove parent paths from the fieldsToRemove set
            //Note that parent names could contain wild card characters
            for (String parentField : parentFieldPaths) {
                List<String> matchingFieldPaths = FieldRegexUtil.getMatchingFieldPaths(parentField,
                        allFieldPaths);
                fieldsToRemove.removeAll(matchingFieldPaths);
            }

            //Keep the field itself
            //remove the field path itself from the fieldsToRemove set
            //Consider wild card characters
            List<String> matchingFieldPaths = FieldPathExpressionUtil.evaluateMatchingFieldPaths(field,
                    fieldPathEval, fieldPathVars, record, allFieldPaths);
            fieldsToRemove.removeAll(matchingFieldPaths);

            //Keep the children of the field
            //For each of the fieldPaths that match the argument field path, remove all the child paths
            // Remove children at the end to avoid ConcurrentModificationException
            Set<String> childrenToKeep = new HashSet<>();
            for (String matchingFieldPath : matchingFieldPaths) {
                for (String fieldToRemove : fieldsToRemove) {
                    // for the old way, startsWith is appropriate when we have
                    // different path structures, or "nested" (multiple dimensioned) index structures.
                    //  eg: /USA[0]/SanFrancisco/folsom/streets[0] must still match:
                    //      /USA[0]/SanFrancisco/folsom/streets[0][0]   hence: startsWith.
                    if (StringUtils.countMatches(fieldToRemove, "/") == StringUtils
                            .countMatches(matchingFieldPath, "/")
                            && StringUtils.countMatches(fieldToRemove, "[") == StringUtils
                                    .countMatches(matchingFieldPath, "[")) {
                        if (fieldToRemove.equals(matchingFieldPath)) {
                            childrenToKeep.add(fieldToRemove);
                        }
                    } else {
                        if (fieldToRemove.startsWith(matchingFieldPath)) {
                            childrenToKeep.add(fieldToRemove);
                        }
                    }
                }
            }
            fieldsToRemove.removeAll(childrenToKeep);
        }
        break;
    default:
        throw new IllegalStateException(
                Utils.format("Unexpected Filter Operation '{}'", filterOperation.name()));
    }
    // We don't sort because we maintained list fields in ascending order (but not a full ordering)
    // Instead we just iterate in reverse to delete
    Iterator<String> itr = (new LinkedList<>(fieldsToRemove)).descendingIterator();
    while (itr.hasNext()) {
        record.delete(itr.next());
    }
    batchMaker.addRecord(record);
}

From source file:org.apache.hadoop.net.DNS.java

/**
 * Returns all the IPs associated with the provided interface, if any, as
 * a list of InetAddress objects.
 *
 * @param strInterface
 *            The name of the network interface or sub-interface to query
 *            (e.g. eth0 or eth0:0) or the string "default"
 * @param returnSubinterfaces
 *            Whether to return IPs associated with subinterfaces of
 *            the given interface
 * @return A list of all the IPs associated with the provided
 *         interface. The local host IP is returned if the interface
 *         name "default" is specified or there is an I/O error looking
 *         for the given interface.
 * @throws UnknownHostException
 *             If the given interface is invalid
 *
 */
public static List<InetAddress> getIPsAsInetAddressList(String strInterface, boolean returnSubinterfaces)
        throws UnknownHostException {
    if ("default".equals(strInterface)) {
        return Arrays.asList(InetAddress.getByName(cachedHostAddress));
    }
    NetworkInterface netIf;
    try {
        netIf = NetworkInterface.getByName(strInterface);
        if (netIf == null) {
            netIf = getSubinterface(strInterface);
        }
    } catch (SocketException e) {
        LOG.warn("I/O error finding interface " + strInterface + ": " + e.getMessage());
        return Arrays.asList(InetAddress.getByName(cachedHostAddress));
    }
    if (netIf == null) {
        throw new UnknownHostException("No such interface " + strInterface);
    }

    // NB: Using a LinkedHashSet to preserve the order for callers
    // that depend on a particular element being 1st in the array.
    // For example, getDefaultIP always returns the first element.
    LinkedHashSet<InetAddress> allAddrs = new LinkedHashSet<InetAddress>();
    allAddrs.addAll(Collections.list(netIf.getInetAddresses()));
    if (!returnSubinterfaces) {
        allAddrs.removeAll(getSubinterfaceInetAddrs(netIf));
    }
    return new Vector<InetAddress>(allAddrs);
}

From source file:org.mskcc.cbio.oncokb.quest.VariantAnnotationXML.java

public static String annotate(Alteration alt, String tumorType) {
        //        GeneBo geneBo = ApplicationContextSingleton.getGeneBo();

        StringBuilder sb = new StringBuilder();

        Gene gene = alt.getGene();

        //        Set<Gene> genes = new HashSet<Gene>();
        //        if (gene.getEntrezGeneId() > 0) {
        //            genes.add(gene);
        //        } else {
        // fake gene... could be a fusion gene
        //            Set<String> aliases = gene.getGeneAliases();
        //            for (String alias : aliases) {
        //                Gene g = geneBo.findGeneByHugoSymbol(alias);
        //                if (g != null) {
        //                    genes.add(g);
        //                }
        //            }
        //        }

        List<TumorType> relevantTumorTypes = TumorTypeUtils.getMappedOncoTreeTypesBySource(tumorType, "quest");

        AlterationUtils.annotateAlteration(alt, alt.getAlteration());

        AlterationBo alterationBo = ApplicationContextSingleton.getAlterationBo();
        LinkedHashSet<Alteration> alterations = alterationBo.findRelevantAlterations(alt, true);

        // To match the previous logic, we do not include alternative alleles
        List<Alteration> alternativeAlleles = AlterationUtils.getAlleleAlterations(alt);
        alterations.removeAll(alternativeAlleles);

        EvidenceBo evidenceBo = ApplicationContextSingleton.getEvidenceBo();

        // find all drugs
        //List<Drug> drugs = evidenceBo.findDrugsByAlterations(alterations);

        // find tumor types
        Set<String> tumorTypes = new HashSet<>();

        if (alterations != null && alterations.size() > 0) {
            List<Object> tumorTypesEvidence = evidenceBo
                    .findTumorTypesWithEvidencesForAlterations(new ArrayList<>(alterations));
            for (Object evidence : tumorTypesEvidence) {
                if (evidence != null) {
                    Object[] evidences = (Object[]) evidence;
                    if (evidences.length > 0 && evidences[0] != null) {
                        tumorTypes.add((String) evidences[0]);
                    }
                }
            }
        }

        //        sortTumorType(tumorTypes, tumorType);
        Query query = new Query(alt);
        query.setTumorType(tumorType);
        // summary
        sb.append("<annotation_summary>");
        sb.append(SummaryUtils.fullSummary(gene, alt,
                alterations.isEmpty() ? Collections.singletonList(alt) : new ArrayList<>(alterations), query,
                relevantTumorTypes));
        sb.append("</annotation_summary>\n");

        // gene background
        List<Evidence> geneBgEvs = evidenceBo.findEvidencesByGene(Collections.singleton(gene),
                Collections.singleton(EvidenceType.GENE_BACKGROUND));
        if (!geneBgEvs.isEmpty()) {
            Evidence ev = geneBgEvs.get(0);
            sb.append("<gene_annotation>\n");
            sb.append("    <description>");
            sb.append(StringEscapeUtils.escapeXml10(ev.getDescription()).trim());
            sb.append("</description>\n");
            exportRefereces(ev, sb, "    ");
            sb.append("</gene_annotation>\n");
        }

        if (alterations.isEmpty()) {
            sb.append("<!-- There is no information about the function of this variant in the MSKCC OncoKB. -->");
            return sb.toString();
        }

        List<Evidence> mutationEffectEbs = evidenceBo.findEvidencesByAlteration(alterations,
                Collections.singleton(EvidenceType.MUTATION_EFFECT));
        for (Evidence ev : mutationEffectEbs) {
            sb.append("<variant_effect>\n");
            sb.append("    <effect>");
            if (ev != null) {
                sb.append(ev.getKnownEffect());
            }
            sb.append("</effect>\n");
            if (ev.getDescription() != null) {
                sb.append("    <description>");
                sb.append(StringEscapeUtils.escapeXml10(ev.getDescription()).trim());
                sb.append("</description>\n");
            }
            if (ev != null) {
                exportRefereces(ev, sb, "    ");
            }

            sb.append("</variant_effect>\n");
        }

        for (String tt : tumorTypes) {
            TumorType oncoTreeType = TumorTypeUtils.getMappedOncoTreeTypesBySource(tt, "quest").get(0);
            boolean isRelevant = relevantTumorTypes.contains(oncoTreeType);

            StringBuilder sbTumorType = new StringBuilder();
            sbTumorType.append("<cancer_type type=\"").append(tt).append("\" relevant_to_patient_disease=\"")
                    .append(isRelevant ? "Yes" : "No").append("\">\n");
            int nEmp = sbTumorType.length();

            // find prognostic implication evidence blob
            Set<Evidence> prognosticEbs = new HashSet<Evidence>(evidenceBo.findEvidencesByAlteration(alterations,
                    Collections.singleton(EvidenceType.PROGNOSTIC_IMPLICATION),
                    Collections.singleton(oncoTreeType)));
            if (!prognosticEbs.isEmpty()) {
                sbTumorType.append("    <prognostic_implications>\n");
                sbTumorType.append("        <description>\n");
                for (Evidence ev : prognosticEbs) {
                    String description = ev.getDescription();
                    if (description != null) {
                        sbTumorType.append("        ").append(StringEscapeUtils.escapeXml10(description).trim())
                                .append("\n");
                    }
                }
                sbTumorType.append("</description>\n");

                for (Evidence ev : prognosticEbs) {
                    exportRefereces(ev, sbTumorType, "        ");
                }
                sbTumorType.append("    </prognostic_implications>\n");
            }

            // STANDARD_THERAPEUTIC_IMPLICATIONS
            List<Evidence> stdImpEbsSensitivity = evidenceBo.findEvidencesByAlteration(alterations,
                    Collections.singleton(EvidenceType.STANDARD_THERAPEUTIC_IMPLICATIONS_FOR_DRUG_SENSITIVITY),
                    Collections.singleton(oncoTreeType));
            List<Evidence> stdImpEbsResisitance = evidenceBo.findEvidencesByAlteration(alterations,
                    Collections.singleton(EvidenceType.STANDARD_THERAPEUTIC_IMPLICATIONS_FOR_DRUG_RESISTANCE),
                    Collections.singleton(oncoTreeType));

            //Remove level_0
            stdImpEbsSensitivity = filterLevelZeroEvidence(stdImpEbsSensitivity);

            //Remove level_R3
            stdImpEbsResisitance = filterResistanceEvidence(stdImpEbsResisitance);

            exportTherapeuticImplications(relevantTumorTypes, stdImpEbsSensitivity, stdImpEbsResisitance,
                    "standard_therapeutic_implications", sbTumorType, "    ", isRelevant);

            // INVESTIGATIONAL_THERAPEUTIC_IMPLICATIONS
            List<Evidence> invImpEbsSensitivity = evidenceBo.findEvidencesByAlteration(alterations,
                    Collections.singleton(EvidenceType.INVESTIGATIONAL_THERAPEUTIC_IMPLICATIONS_DRUG_SENSITIVITY),
                    Collections.singleton(oncoTreeType));
            List<Evidence> invImpEbsResisitance = evidenceBo.findEvidencesByAlteration(alterations,
                    Collections.singleton(EvidenceType.INVESTIGATIONAL_THERAPEUTIC_IMPLICATIONS_DRUG_RESISTANCE),
                    Collections.singleton(oncoTreeType));

            //Remove level_R3
            invImpEbsResisitance = filterResistanceEvidence(invImpEbsResisitance);

            exportTherapeuticImplications(relevantTumorTypes, invImpEbsSensitivity, invImpEbsResisitance,
                    "investigational_therapeutic_implications", sbTumorType, "    ", isRelevant);

            if (sbTumorType.length() > nEmp) {
                sbTumorType.append("</cancer_type>\n");
                sb.append(sbTumorType);
            }
        }

        return sb.toString();
    }

From source file:org.topazproject.otm.impl.SessionFactoryImpl.java

public ClassMetadata getSubClassMetadata(ClassMetadata clazz, EntityMode mode, Collection<String> typeUris,
        TripleStore.Result result) {
    Set<ClassMetadata> candidates = new HashSet<ClassMetadata>();

    if (typeUris == null)
        typeUris = Collections.emptySet();

    buildCandidates(candidates, clazz, clazz, mode, typeUris);

    Set<ClassMetadata> solutions = new HashSet<ClassMetadata>(candidates);

    // Eliminate super classes from an rdf:type perspective
    for (ClassMetadata sup : candidates) {
        for (ClassMetadata cm : candidates) {
            if ((sup != cm) && cm.getAllTypes().size() > sup.getAllTypes().size()
                    && cm.getAllTypes().containsAll(sup.getAllTypes())) {
                solutions.remove(sup);
                break;
            }
        }
    }

    if (solutions.isEmpty())
        return null;

    if (solutions.size() == 1)
        return solutions.iterator().next();

    // narrow down based on other rdf statements
    if (result != null) {
        LinkedHashSet<SubClassResolver> resolvers = new LinkedHashSet<SubClassResolver>();

        // resolvers for sub-classes of the solutions (excluding the solutions)
        for (ClassMetadata cl : solutions)
            gatherSub(cl.getName(), resolvers);
        for (ClassMetadata cl : solutions)
            resolvers.removeAll(listRegisteredSubClassResolvers(cl.getName()));

        // resolvers for the solutions
        for (ClassMetadata cl : solutions)
            resolvers.addAll(listRegisteredSubClassResolvers(cl.getName()));

        // resolvers for the super-classes
        for (ClassMetadata cl : solutions)
            gatherSup(cl.getName(), resolvers);

        // add the root as the last
        Set<SubClassResolver> rs = subClassResolvers.get(null);
        if (rs != null)
            resolvers.addAll(rs);

        for (SubClassResolver r : resolvers) {
            ClassMetadata cm = r.resolve(clazz, mode, this, typeUris, result);
            if ((cm != null) && isAcceptable(cm, clazz, mode, typeUris))
                return cm;
        }
    }

    // That didn't help. Eliminate super classes from an EntityBinder perspective
    if (mode != null) {
        candidates = new HashSet<ClassMetadata>(solutions);
        for (ClassMetadata sup : candidates) {
            EntityBinder supBinder = sup.getEntityBinder(mode);
            for (ClassMetadata cm : candidates) {
                EntityBinder binder = cm.getEntityBinder(mode);
                if ((sup != cm) && supBinder.isAssignableFrom(binder)) {
                    solutions.remove(sup);
                    break;
                }
            }
        }
    }

    ClassMetadata solution = solutions.iterator().next();

    if (solutions.size() > 1) {
        // That didn't help either. Pick the first in the solutions set
        log.warn("Randomly chose " + solution + " as a subclass for " + clazz + " from the set " + solutions);
    }

    return solution;
}