Example usage for java.util SortedSet size

Introduction

On this page you can find usage examples for java.util.SortedSet.size().

Prototype

int size();

Document

Returns the number of elements in this set (its cardinality).
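
For a quick illustration before the longer examples, here is a minimal, self-contained sketch of calling size() on a SortedSet (the class name and element values are chosen for demonstration only):

import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetSizeExample {
    public static void main(String[] args) {
        SortedSet<String> set = new TreeSet<>();
        set.add("b");
        set.add("c");
        set.add("a");
        set.add("a"); // duplicate element, not counted again

        // size() returns the cardinality of the set: 3, not 4
        System.out.println("size = " + set.size());
    }
}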

Usage

From source file:Main.java

public static void main(String[] argv) throws Exception {
    SortedSet<String> set = new TreeSet<String>();
    set.add("b");
    set.add("c");
    set.add("a");

    Iterator<String> it = set.iterator();
    while (it.hasNext()) {
        String element = it.next();
        System.out.println(element);
    }
    // size() provides an exactly-sized destination array for toArray
    String[] array = set.toArray(new String[set.size()]);
}

From source file:playground.dgrether.signalsystems.cottbus.scripts.DgCottbusSignalPlanChartGenerator.java

/**
 * @param args
 */
public static void main(String[] args) {
    //parameters
    //cottbus
    String runId = "1224";
    String baseDir = DgPaths.REPOS + "runs-svn/";
    int iteration = 500;
    Id<SignalSystem> signalSystemId = Id.create(18, SignalSystem.class);
    double startSecond = 23135.0;
    double endSecond = startSecond + 3600.0;
    startSecond = 8.0 * 3600.0;
    endSecond = startSecond + 3600.0;
    baseDir = baseDir + "run" + runId;

    //cottbus football
    runId = "1222_100_football_fans";
    baseDir = DgPaths.REPOS + "runs-svn/run1222/100_football_fans";
    iteration = 500;
    startSecond = 17.0 * 3600.0;
    endSecond = startSecond + 3600.0;
    signalSystemId = Id.create(1, SignalSystem.class);

    //      koehler strehler 2010
    //      runId = null;
    //      baseDir = "/media/data/work/matsimOutput/koehlerStrehler2010Scenario5SelectBest/";
    //      iteration = 20;
    //      signalSystemId = Id.create(5);
    //      startSecond = 0.0;
    //      endSecond = 900.0;
    //      
    //skript
    OutputDirectoryHierarchy io = null;
    if (runId != null)
        io = new OutputDirectoryHierarchy(baseDir, runId.toString(),
                false ? OutputDirectoryHierarchy.OverwriteFileSetting.overwriteExistingFiles
                        : OutputDirectoryHierarchy.OverwriteFileSetting.failIfDirectoryExists);
    else
        io = new OutputDirectoryHierarchy(baseDir,
                false ? OutputDirectoryHierarchy.OverwriteFileSetting.overwriteExistingFiles
                        : OutputDirectoryHierarchy.OverwriteFileSetting.failIfDirectoryExists);

    String eventsFilename = io.getIterationFilename(iteration, "events.xml.gz");

    DgSignalEventsCollector eventsCollector = new DgSignalEventsCollector();
    EventsManager events = EventsUtils.createEventsManager();
    events.addHandler(eventsCollector);
    MatsimEventsReader eventsReader = new MatsimEventsReader(events);
    eventsReader.readFile(eventsFilename);

    SortedSet<SignalGroupStateChangedEvent> systemEvents = eventsCollector.getSignalGroupEventsBySystemIdMap()
            .get(signalSystemId);
    log.info("Number of events for system " + signalSystemId + " is: " + systemEvents.size());
    for (SignalGroupStateChangedEvent e : systemEvents) {
        if (e.getTime() >= startSecond && e.getTime() <= endSecond)
            log.debug(e);
    }
    //         
    DgSignalPlanChart chart = new DgSignalPlanChart(startSecond, endSecond);
    chart.addData(systemEvents);
    JFreeChart jfChart = chart.createSignalPlanChart("System plan", "group", "time");

    String chartFileName = io.getIterationFilename(iteration,
            "signal_plan_system_" + signalSystemId + "_from_" + startSecond + "_to_" + endSecond);

    int rowCount = jfChart.getCategoryPlot().getDataset().getRowCount();

    int width = (int) (endSecond - startSecond) * 3;
    writeToPng(chartFileName, jfChart, width, rowCount * 3 + 50);

}

From source file:de.tudarmstadt.ukp.experiments.argumentation.convincingness.sampling.Step2ArgumentPairsSampling.java

public static void main(String[] args) throws Exception {
    String inputDir = args[0];

    // /tmp
    File outputDir = new File(args[1]);
    if (!outputDir.exists()) {
        outputDir.mkdirs();
    }

    // pseudo-random
    final Random random = new Random(1);

    int totalPairsCount = 0;

    // read all debates
    for (File file : IOHelper.listXmlFiles(new File(inputDir))) {
        Debate debate = DebateSerializer.deserializeFromXML(FileUtils.readFileToString(file, "utf-8"));

        // get two stances
        SortedSet<String> originalStances = debate.getStances();

        // cleaning: some debates have three or more stances (the data are inconsistent);
        // remove stances with only one argument
        SortedSet<String> stances = new TreeSet<>();
        for (String stance : originalStances) {
            if (debate.getArgumentsForStance(stance).size() > 1) {
                stances.add(stance);
            }
        }

        if (stances.size() != 2) {
            throw new IllegalStateException(
                    "2 stances per debate expected, was " + stances.size() + ", " + stances);
        }

        // for each stance, get pseudo-random N arguments
        for (String stance : stances) {
            List<Argument> argumentsForStance = debate.getArgumentsForStance(stance);

            // shuffle
            Collections.shuffle(argumentsForStance, random);

            // and take at most the first N arguments
            List<Argument> selectedArguments = argumentsForStance.subList(0,
                    argumentsForStance.size() < MAX_SELECTED_ARGUMENTS_PRO_SIDE ? argumentsForStance.size()
                            : MAX_SELECTED_ARGUMENTS_PRO_SIDE);

            List<ArgumentPair> argumentPairs = new ArrayList<>();

            // now create pairs
            for (int i = 0; i < selectedArguments.size(); i++) {
                for (int j = (i + 1); j < selectedArguments.size(); j++) {
                    Argument arg1 = selectedArguments.get(i);
                    Argument arg2 = selectedArguments.get(j);

                    ArgumentPair argumentPair = new ArgumentPair();
                    argumentPair.setDebateMetaData(debate.getDebateMetaData());

                    // assign arg1 and arg2 pseudo-randomly
                    // (not to have the same argument as arg1 all the time)
                    if (random.nextBoolean()) {
                        argumentPair.setArg1(arg1);
                        argumentPair.setArg2(arg2);
                    } else {
                        argumentPair.setArg1(arg2);
                        argumentPair.setArg2(arg1);
                    }

                    // set unique id
                    argumentPair.setId(argumentPair.getArg1().getId() + "_" + argumentPair.getArg2().getId());

                    argumentPairs.add(argumentPair);
                }
            }

            String fileName = IOHelper.createFileName(debate.getDebateMetaData(), stance);

            File outputFile = new File(outputDir, fileName);

            // and save all sampled pairs into an XML file
            XStreamTools.toXML(argumentPairs, outputFile);

            System.out.println("Saved " + argumentPairs.size() + " pairs to " + outputFile);

            totalPairsCount += argumentPairs.size();
        }

    }

    System.out.println("Total pairs generated: " + totalPairsCount);
}

From source file:de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step7CollectMTurkResults.java

public static void main(String[] args) throws Exception {
    // input dir - list of xml query containers
    // /home/user-ukp/research/data/dip/wp1-documents/step4-boiler-plate/
    File inputDir = new File(args[0] + "/");

    // MTurk result file

    // output dir
    File outputDir = new File(args[2]);
    if (!outputDir.exists()) {
        outputDir.mkdirs();
    }

    // Folder with success files
    File mturkSuccessDir = new File(args[1]);

    Collection<File> files = FileUtils.listFiles(mturkSuccessDir, new String[] { "result" }, false);
    if (files.isEmpty()) {
        throw new IllegalArgumentException("Input folder is empty. " + mturkSuccessDir);
    }

    HashMap<String, List<MTurkAnnotation>> mturkAnnotations = new HashMap<>();

    // parsing all CSV files
    for (File mturkCSVResultFile : files) {
        System.out.println("Parsing " + mturkCSVResultFile.getName());

        MTurkOutputReader outputReader = new MTurkOutputReader(
                new HashSet<>(Arrays.asList("annotation", "workerid")), mturkCSVResultFile);

        // for fixing broken data input: for each hit, collect all sentence IDs
        Map<String, SortedSet<String>> hitSentences = new HashMap<>();

        // first iteration: collect the sentences
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            if (!hitSentences.containsKey(hitID)) {
                hitSentences.put(hitID, new TreeSet<>());
            }

            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");

            if (relevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(relevantSentences.split(",")));
            }

            if (irrelevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(irrelevantSentences.split(",")));
            }
        }

        // and now second iteration
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            String annotatorID = record.get("workerid");
            String acceptTime = record.get("assignmentaccepttime");
            String submitTime = record.get("assignmentsubmittime");
            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");
            String reject = record.get("reject");
            String[] filename;
            String comment;
            String clueWeb;
            String[] relevant = {};
            String[] irrelevant = {};

            filename = record.get("annotation").split("_");
            String fileXml = filename[0];
            clueWeb = filename[1].trim();
            comment = record.get("Answer.comment");

            if (relevantSentences != null) {
                relevant = relevantSentences.split(",");
            }

            if (irrelevantSentences != null) {
                irrelevant = irrelevantSentences.split(",");
            }

            // sanitizing data: if both relevant and irrelevant are empty, that's a bug;
            // we look up all sentences from this HIT and treat this assignment
            // as if there were only irrelevant ones
            if (relevant.length == 0 && irrelevant.length == 0) {
                SortedSet<String> strings = hitSentences.get(hitID);
                irrelevant = new String[strings.size()];
                strings.toArray(irrelevant);
            }

            if (reject != null) {
                System.out.println(" HIT " + hitID + " annotated by " + annotatorID + " was rejected ");
            } else {
                /*
                // relevant sentences is a comma-delimited string,
                // this regular expression is rather strange
                // it must contain digits, it might be that there is only one space or a comma or some other char
                // digits are the sentence ids. if relevant sentences do not contain digits then it is wrong
                if (relevantSentences.matches("^\\D*$") &&
                    irrelevantSentences.matches("^\\D*$")) {
                try {
                    throw new IllegalStateException(
                            "No annotations found for HIT " + hitID + " in " +
                                    fileXml + " for document " + clueWeb);
                }
                catch (IllegalStateException ex) {
                    ex.printStackTrace();
                }
                        
                }
                */
                MTurkAnnotation mturkAnnotation;
                try {
                    mturkAnnotation = new MTurkAnnotation(hitID, annotatorID, acceptTime, submitTime, comment,
                            clueWeb, relevant, irrelevant);
                } catch (IllegalArgumentException ex) {
                    throw new IllegalArgumentException("Record: " + record, ex);
                }

                List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileXml);

                if (listOfAnnotations == null) {
                    listOfAnnotations = new ArrayList<>();
                }
                listOfAnnotations.add(mturkAnnotation);
                mturkAnnotations.put(fileXml, listOfAnnotations);
            }

        }
        //            parser.close();
    }

    // Debugging: output the number of accepted HITs per query
    System.out.println("Accepted HITs for a query:");
    for (Map.Entry<String, List<MTurkAnnotation>> e : mturkAnnotations.entrySet()) {
        List<MTurkAnnotation> a = e.getValue();
        System.out.println(e.getKey() + " " + a.size());
    }
    }

    for (File f : FileUtils.listFiles(inputDir, new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));
        String fileName = f.getName();
        List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileName);

        if (listOfAnnotations == null || listOfAnnotations.isEmpty()) {
            throw new IllegalStateException("No annotations for " + f.getName());
        }

        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            for (MTurkAnnotation mtAnnotation : listOfAnnotations) {
                String clueWeb = mtAnnotation.clueWeb;
                if (rankedResults.clueWebID.equals(clueWeb)) {
                    List<QueryResultContainer.MTurkRelevanceVote> mTurkRelevanceVotes = rankedResults.mTurkRelevanceVotes;
                    QueryResultContainer.MTurkRelevanceVote relevanceVote = new QueryResultContainer.MTurkRelevanceVote();
                    String annotatorID = mtAnnotation.annotatorID;
                    String hitID = mtAnnotation.hitID;
                    String acceptTime = mtAnnotation.acceptTime;
                    String submitTime = mtAnnotation.submitTime;
                    String comment = mtAnnotation.comment;
                    String[] relevant = mtAnnotation.relevant;
                    String[] irrelevant = mtAnnotation.irrelevant;
                    relevanceVote.turkID = annotatorID.trim();
                    relevanceVote.hitID = hitID.trim();
                    relevanceVote.acceptTime = acceptTime.trim();
                    relevanceVote.submitTime = submitTime.trim();
                    relevanceVote.comment = comment != null ? comment.trim() : null;
                    if (relevant.length == 0 && irrelevant.length == 0) {
                        try {
                            throw new IllegalStateException("the length of the annotations is 0"
                                    + rankedResults.clueWebID + " for HIT " + relevanceVote.hitID);
                        } catch (IllegalStateException e) {
                            e.printStackTrace();
                        }
                    }
                    for (String r : relevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "true";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    for (String r : irrelevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "false";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    mTurkRelevanceVotes.add(relevanceVote);
                }
            }

        }
        File outputFile = new File(outputDir, f.getName());
        FileUtils.writeStringToFile(outputFile, queryResultContainer.toXML(), "utf-8");
        System.out.println("Finished " + outputFile);
    }

}

From source file:Main.java

public static List sortedUnion(List args1, List args2) {
    SortedSet set = new TreeSet();
    set.addAll(args1);
    set.addAll(args2);

    // size() gives the exact initial capacity for the result list
    List lst = new ArrayList(set.size());
    for (Iterator it = set.iterator(); it.hasNext();) {
        Object o = it.next();
        lst.add(o);
    }

    return lst;
}

From source file:com.rapleaf.hank.coordinator.Domains.java

public static DomainVersion getLatestVersion(Domain domain) throws IOException {
    SortedSet<DomainVersion> versions = domain.getVersions();
    if (versions == null || versions.size() == 0) {
        return null;
    } else {
        return versions.last();
    }
}

From source file:co.rsk.peg.BridgeSerializationUtils.java

public static byte[] serializeSet(SortedSet<Sha256Hash> set) {
    int nhashes = set.size();

    byte[][] bytes = new byte[nhashes][];
    int n = 0;

    for (Sha256Hash hash : set)
        bytes[n++] = RLP.encodeElement(hash.getBytes());

    return RLP.encodeList(bytes);
}

From source file:com.rapleaf.hank.storage.curly.CurlyReader.java

public static CurlyFilePath getLatestBase(String partitionRoot) throws IOException {
    SortedSet<CurlyFilePath> bases = Curly.getBases(partitionRoot);
    if (bases == null || bases.size() == 0) {
        throw new IOException("Could not detect any Curly base in " + partitionRoot);
    }
    return bases.last();
}

From source file:org.eclipse.sw360.licenseinfo.outputGenerators.OutputGenerator.java

/**
 * Helper function to sort a set by the given key extractor. Falls back to the
 * unsorted set if sorting the set would squash values.
 *
 * @param unsorted
 *            set to be sorted
 * @param keyExtractor
 *            function to extract the key to use for sorting
 *
 * @return the sorted set
 */
private static <U, K extends Comparable<K>> SortedSet<U> sortSet(Set<U> unsorted, Function<U, K> keyExtractor) {
    if (unsorted == null || unsorted.isEmpty()) {
        return Collections.emptySortedSet();
    }
    SortedSet<U> sorted = new TreeSet<>(Comparator.comparing(keyExtractor));
    sorted.addAll(unsorted);
    if (sorted.size() != unsorted.size()) {
        // there were key collisions and some data was lost -> throw away the sorted set
        // and sort by U's natural order
        sorted = new TreeSet<>();
        sorted.addAll(unsorted);
    }
    return sorted;
}
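
The size() comparison above is how the helper detects key collisions: if keyExtractor maps two distinct elements to the same key, the comparator-based TreeSet silently drops one of them, so the sorted set ends up smaller than the input and the method falls back to natural ordering. A standalone sketch of that effect (the strings and the length-based key are illustrative only, not taken from the project):

import java.util.Comparator;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortSetCollisionDemo {
    public static void main(String[] args) {
        Set<String> unsorted = Set.of("pear", "plum", "fig");

        // sorting by length squashes "pear" and "plum" into a single slot...
        SortedSet<String> byLength = new TreeSet<>(Comparator.comparing(String::length));
        byLength.addAll(unsorted);
        System.out.println(byLength.size()); // 2, smaller than the input, so data was lost

        // ...so falling back to natural (alphabetical) order keeps every element
        SortedSet<String> natural = new TreeSet<>(unsorted);
        System.out.println(natural.size()); // 3
    }
}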

From source file:ai.susi.json.JsonRepository.java

private static SortedSet<File> tailSet(SortedSet<File> set, int count) {
    if (count >= set.size())
        return set;
    TreeSet<File> t = new TreeSet<File>();
    Iterator<File> fi = set.iterator();
    // skip the first (set.size() - count) files so that only the last count remain
    for (int i = 0; i < set.size() - count; i++)
        fi.next();
    while (fi.hasNext())
        t.add(fi.next());
    return t;
}