Example usage for java.util SortedSet remove

Introduction

On this page you can find example usage for java.util.SortedSet.remove.

Prototype

boolean remove(Object o);

Document

Removes the specified element from this set if it is present (optional operation).
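Before the longer, real-world examples below, here is a minimal self-contained sketch of the call itself; the set contents are illustrative only.

import java.util.Arrays;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetRemoveDemo {
    public static void main(String[] args) {
        SortedSet<Integer> set = new TreeSet<>(Arrays.asList(1, 2, 3));

        // remove returns true when the element was present and has been removed.
        System.out.println(set.remove(2)); // true
        System.out.println(set);           // [1, 3]

        // Removing an absent element returns false and leaves the set unchanged.
        System.out.println(set.remove(42)); // false
    }
}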

Usage

From source file:spade.storage.CompressedTextFile.java

private static SortedSet<Integer> uncompressReference(Integer id, String ancestorOrSuccessorList,
        boolean ancestorOrSuccessor) throws UnsupportedEncodingException {
    SortedSet<Integer> list = new TreeSet<Integer>();
    StringTokenizer st = new StringTokenizer(ancestorOrSuccessorList);
    String token = st.nextToken();
    Integer referenceID = id + Integer.parseInt(token);
    LinkedList<String> previousLayers = new LinkedList<String>();
    previousLayers.addFirst(id + " " + ancestorOrSuccessorList);
    String toUncompress;
    boolean hasReference = true;
    // Follow the chain of references, collecting each layer until the last one is reached.
    while (hasReference) {
        String currentLine = get(scaffoldWriter, referenceID);
        if (currentLine.length() > 0) {
            if (ancestorOrSuccessor) { // we want to uncompress ancestors
                toUncompress = currentLine.substring(currentLine.indexOf(' ') + 1,
                        currentLine.indexOf("/") - 1);
            } else { // we want to uncompress successors
                toUncompress = currentLine.substring(currentLine.indexOf("/") + 2);
                toUncompress = toUncompress.substring(toUncompress.indexOf(' ') + 1);
            }
            toUncompress = referenceID + " " + toUncompress;
            previousLayers.addFirst(toUncompress);
            if (toUncompress.contains(" _ ")) { // this is the last layer
                hasReference = false;
            } else { // we need to go one layer further to uncompress the successors
                String aux = toUncompress.substring(toUncompress.indexOf(" ") + 1);
                referenceID = referenceID + Integer.parseInt(aux.substring(0, aux.indexOf(" ")));
            }
        } else {
            System.out.println("Data missing.");
            hasReference = false;
        }
    }

    String bitListLayer;
    String remainingNodesLayer;
    Integer layerID;
    // Find the successors of the first layer, then those of the second layer, and so on.
    for (String layer : previousLayers) {
        layerID = Integer.parseInt(layer.substring(0, layer.indexOf(" ")));
        if (layer.contains("_ ")) { // this is the case for the first layer only
            remainingNodesLayer = layer.substring(layer.indexOf("_ ") + 2);
        } else {
            // uncompress the bit list
            remainingNodesLayer = layer.substring(layer.indexOf(" ") + 1);
            remainingNodesLayer = remainingNodesLayer.substring(remainingNodesLayer.indexOf(" ") + 1);
            if (remainingNodesLayer.contains(" ")) {
                bitListLayer = remainingNodesLayer.substring(0, remainingNodesLayer.indexOf(" "));
                remainingNodesLayer = remainingNodesLayer.substring(remainingNodesLayer.indexOf(" ") + 1);
            } else {
                bitListLayer = remainingNodesLayer;
                remainingNodesLayer = "";
            }
            // Iterate over a copy of the set so that elements whose bit is '0'
            // can be removed from the original without a ConcurrentModificationException.
            int count = 0;
            SortedSet<Integer> list2 = new TreeSet<Integer>();
            list2.addAll(list);
            for (Integer successor : list2) {
                if (bitListLayer.charAt(count) == '0') {
                    list.remove(successor);
                }
                count++;
            }
        }
        // uncompress remaining nodes
        list.addAll(uncompressRemainingNodes(layerID, remainingNodesLayer));
    }
    return list;
}
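Note how the snippet copies list into list2 before iterating: calling list.remove(...) inside a for-each loop over list itself would throw a ConcurrentModificationException. Below is a minimal sketch of the same filtering step using Iterator.remove instead of a copy; the class and method names are assumptions for illustration.

import java.util.Iterator;
import java.util.SortedSet;

public class BitListFilter {
    // Hypothetical standalone version of the bit-list filtering step above:
    // drop every element whose position in iteration order carries a '0' bit.
    static void filterByBitList(SortedSet<Integer> set, String bitListLayer) {
        int count = 0;
        for (Iterator<Integer> it = set.iterator(); it.hasNext(); ) {
            it.next();
            if (bitListLayer.charAt(count) == '0') {
                it.remove(); // Iterator.remove is safe during iteration
            }
            count++;
        }
    }
}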

From source file:uk.ac.ebi.fg.jobs.OntologySimilarityJob.java

public void doExecute(JobExecutionContext jobExecutionContext)
        throws JobExecutionException, InterruptedException {
    JobDataMap dataMap = jobExecutionContext.getJobDetail().getJobDataMap();
    Map<ExperimentId, SortedSet<EfoTerm>> smallMap = (Map<ExperimentId, SortedSet<EfoTerm>>) dataMap
            .get("smallMap");
    OntologyDistanceCalculator distanceCalculator = (OntologyDistanceCalculator) dataMap
            .get("distanceCalculator");
    Map<String, SortedSet<ExperimentId>> uriToExpMap = (ConcurrentHashMap<String, SortedSet<ExperimentId>>) dataMap
            .get("uriToExpMap");
    Map<ExperimentId, SortedSet<EfoTerm>> expToURIMap = (ConcurrentHashMap<ExperimentId, SortedSet<EfoTerm>>) dataMap
            .get("expToURIMap");
    Map<ExperimentId, SortedSet<ExperimentId>> ontologyResults = (ConcurrentHashMap<ExperimentId, SortedSet<ExperimentId>>) dataMap
            .get("ontologyResults");
    lowPriorityURIs = (SortedSet<String>) dataMap.get("lowPriorityOntologyURIs");
    int counter = (Integer) dataMap.get("counter");
    Configuration properties = (Configuration) dataMap.get("properties");

    final int maxOWLSimilarityCount = properties.getInt("max_displayed_OWL_similarities");
    final int smallExpAssayCountLimit = properties.getInt("small_experiment_assay_count_limit");
    final float minCalculatedOntologyDistance = properties.getFloat("minimal_calculated_ontology_distance");

    logger.info("Started " + (counter - smallMap.size()) + " - " + counter + " ontology similarity jobs");

    for (Map.Entry<ExperimentId, SortedSet<EfoTerm>> entry : smallMap.entrySet()) {
        ExperimentId experiment = entry.getKey();
        SortedSet<ExperimentId> resultExpSimilaritySet = new TreeSet<ExperimentId>();

        for (EfoTerm efoTerm : entry.getValue()) {
            Set<OntologySimilarityResult> similars = distanceCalculator.getSimilarNodes(efoTerm.getUri());

            if (null != similars) {
                for (OntologySimilarityResult ontologySimilarityResult : similars) {
                    int distance = ontologySimilarityResult.getDistance();
                    SortedSet<ExperimentId> similarExperiments = uriToExpMap
                            .get(ontologySimilarityResult.getURI());

                    if (similarExperiments != null) {
                        for (ExperimentId exp : similarExperiments) {
                            if (experiment.getSpecies().equals(exp.getSpecies()) && !experiment.equals(exp)) {
                                if (resultExpSimilaritySet.contains(exp)) {
                                    // The set already holds an earlier clone of this experiment:
                                    // fetch it, remove it, and re-add it with the new distance,
                                    // since SortedSet offers no in-place replacement.
                                    ExperimentId expClone = resultExpSimilaritySet.tailSet(exp).first().clone();
                                    resultExpSimilaritySet.remove(exp);
                                    resultExpSimilaritySet.add(
                                            setDistance(expClone, ontologySimilarityResult.getURI(), distance));
                                } else {
                                    ExperimentId expClone = exp.clone();
                                    resultExpSimilaritySet.add(
                                            setDistance(expClone, ontologySimilarityResult.getURI(), distance));
                                }
                            }
                        }
                    }
                }
            }
        }

        // store information for maximal score calculation
        ExperimentId experimentClone = experiment.clone();
        for (EfoTerm efoTerm : expToURIMap.get(experimentClone)) {
            if (lowPriorityURIs.contains(efoTerm.getUri()))
                experimentClone.setLowPriorityMatchCount(experimentClone.getLowPriorityMatchCount() + 1);
            else
                experimentClone.setDist0Count(experimentClone.getDist0Count() + 1);

            experimentClone.setNumbOfMatches(experimentClone.getNumbOfMatches() + 1);
        }

        ontologyResults.put(experimentClone, cleanResults(experimentClone, resultExpSimilaritySet,
                smallExpAssayCountLimit, maxOWLSimilarityCount, minCalculatedOntologyDistance, expToURIMap));

        Thread.sleep(1); // brief pause; wait() without owning the monitor would throw IllegalMonitorStateException
    }

    logger.info("Finished " + (counter - smallMap.size()) + " - " + counter + " ontology similarity jobs");

    smallMap.clear();
}
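The second example relies on a remove-then-add sequence to update an element already stored in the set, because SortedSet has no in-place replacement operation. A minimal sketch of that pattern with plain strings; the values are illustrative only.

import java.util.SortedSet;
import java.util.TreeSet;

public class ReplaceInSortedSet {
    public static void main(String[] args) {
        SortedSet<String> names = new TreeSet<>();
        names.add("alice");
        names.add("bob");

        // "Replacing" an element means removing the old value
        // and adding the new one.
        if (names.remove("bob")) {
            names.add("bobby");
        }
        System.out.println(names); // [alice, bobby]
    }
}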