Example usage for java.util TreeMap put

List of usage examples for java.util TreeMap put

Introduction

On this page you can find usage examples for java.util.TreeMap.put.

Prototype

public V put(K key, V value) 

Document

Associates the specified value with the specified key in this map.
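
put returns the value previously associated with the key, or null if there was no mapping for it, and the map keeps its keys in natural sorted order. The following minimal sketch (not taken from any of the projects below; the scores variable and printed values are purely illustrative) demonstrates this behavior:

import java.util.TreeMap;

public static void main(String[] args) {
    TreeMap<String, Integer> scores = new TreeMap<String, Integer>();

    // put returns null when the key was not previously present
    Integer previous = scores.put("alice", 90);
    System.out.println(previous); // prints: null

    // putting the same key again replaces the value and returns the old one
    previous = scores.put("alice", 95);
    System.out.println(previous); // prints: 90

    // keys are iterated in sorted (natural) order regardless of insertion order
    scores.put("carol", 88);
    scores.put("bob", 75);
    System.out.println(scores); // prints: {alice=95, bob=75, carol=88}
}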

Usage

From source file:edu.csun.ecs.cs.multitouchj.application.touchpong.TouchPong.java

public static void main(String[] args) {
    LinkedList<String> arguments = new LinkedList<String>();
    for (String argument : args) {
        arguments.add(argument);
    }

    TreeMap<String, String> parameters = new TreeMap<String, String>();
    if (arguments.contains("-ix")) {
        parameters.put(ObjectObserverMoteJ.Parameter.InverseX.toString(), "");
    }
    if (arguments.contains("-iy")) {
        parameters.put(ObjectObserverMoteJ.Parameter.InverseY.toString(), "");
    }

    TouchPong touchPong = new TouchPong();
    touchPong.run(parameters);
}

From source file:TwitterClustering.java

public static void main(String[] args) throws FileNotFoundException, IOException {
    // TODO code application logic here

    File outFile = new File(args[3]);
    Scanner s = new Scanner(new File(args[1])).useDelimiter(",");
    JSONParser parser = new JSONParser();
    Set<Cluster> clusterSet = new HashSet<Cluster>();
    HashMap<String, Tweet> tweets = new HashMap<>();
    FileWriter fw = new FileWriter(outFile.getAbsoluteFile());
    BufferedWriter bw = new BufferedWriter(fw);

    // init
    try {

        Object obj = parser.parse(new FileReader(args[2]));

        JSONArray jsonArray = (JSONArray) obj;

        for (int i = 0; i < jsonArray.size(); i++) {

            Tweet twt = new Tweet();
            JSONObject jObj = (JSONObject) jsonArray.get(i);
            String text = jObj.get("text").toString();

            long sum = 0;
            for (int y = 0; y < text.toCharArray().length; y++) {

                sum += (int) text.toCharArray()[y];
            }

            String[] token = text.split(" ");
            String tID = jObj.get("id").toString();

            Set<String> mySet = new HashSet<String>(Arrays.asList(token));
            twt.setAttributeValue(sum);
            twt.setText(mySet);
            twt.setTweetID(tID);
            tweets.put(tID, twt);

        }

        // preparing initial clusters
        int i = 0;
        while (s.hasNext()) {
            String id = s.next();// id
            Tweet t = tweets.get(id.trim());
            clusterSet.add(new Cluster(i + 1, t, new LinkedList()));
            i++;
        }

        Iterator it = tweets.entrySet().iterator();

        for (int l = 0; l < 2; l++) { // run a fixed number of clustering iterations

            while (it.hasNext()) {
                Map.Entry me = (Map.Entry) it.next();

                // calculate distance to each centroid
                Tweet p = (Tweet) me.getValue();
                HashMap<Cluster, Float> distMap = new HashMap<>();

                for (Cluster clust : clusterSet) {

                    distMap.put(clust, jaccardDistance(p.getText(), clust.getCentroid().getText()));
                }

                HashMap<Cluster, Float> sorted = (HashMap<Cluster, Float>) sortByValue(distMap);

                sorted.keySet().iterator().next().getMembers().add(p);

            }

            // calculate new centroid and update Clusterset
            for (Cluster clust : clusterSet) {

                TreeMap<String, Long> tDistMap = new TreeMap<>();

                Tweet newCentroid = null;
                Long avgSumDist = 0L;
                for (int j = 0; j < clust.getMembers().size(); j++) {

                    avgSumDist += clust.getMembers().get(j).getAttributeValue();
                    tDistMap.put(clust.getMembers().get(j).getTweetID(),
                            clust.getMembers().get(j).getAttributeValue());
                }
                if (clust.getMembers().size() != 0) {
                    avgSumDist /= (clust.getMembers().size());
                }

                ArrayList<Long> listValues = new ArrayList<Long>(tDistMap.values());

                if (tDistMap.containsValue(findClosestNumber(listValues, avgSumDist))) {
                    // found closest
                    newCentroid = tweets
                            .get(getKeyByValue(tDistMap, findClosestNumber(listValues, avgSumDist)));
                    clust.setCentroid(newCentroid);
                }

            }

        }
        // create an iterator
        Iterator iterator = clusterSet.iterator();

        // check values
        while (iterator.hasNext()) {

            Cluster c = (Cluster) iterator.next();
            bw.write(c.getId() + "\t");
            System.out.print(c.getId() + "\t");

            for (Tweet t : c.getMembers()) {
                bw.write(t.getTweetID() + ", ");
                System.out.print(t.getTweetID() + ",");

            }
            bw.write("\n");
            System.out.println("");
        }

        System.out.println("");

        System.out.println("SSE " + sumSquaredErrror(clusterSet));

    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        bw.close();
        fw.close();
    }
}

From source file:cosmos.example.BuildingPermitsExample.java

public static void main(String[] args) throws Exception {
    BuildingPermitsExample example = new BuildingPermitsExample();
    new JCommander(example, args);

    File inputFile = new File(example.fileName);

    Preconditions.checkArgument(inputFile.exists() && inputFile.isFile() && inputFile.canRead(),
            "Expected " + example.fileName + " to be a readable file");

    String zookeepers;
    String instanceName;
    Connector connector;
    MiniAccumuloCluster mac = null;
    File macDir = null;

    // Use the MiniAccumuloCluster if requested
    if (example.useMiniAccumuloCluster) {
        macDir = Files.createTempDir();
        String password = "password";
        MiniAccumuloConfig config = new MiniAccumuloConfig(macDir, password);
        config.setNumTservers(1);

        mac = new MiniAccumuloCluster(config);
        mac.start();

        zookeepers = mac.getZooKeepers();
        instanceName = mac.getInstanceName();

        ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zookeepers);
        connector = instance.getConnector("root", new PasswordToken(password));
    } else {
        // Otherwise connect to a running instance
        zookeepers = example.zookeepers;
        instanceName = example.instanceName;

        ZooKeeperInstance instance = new ZooKeeperInstance(instanceName, zookeepers);
        connector = instance.getConnector(example.username, new PasswordToken(example.password));
    }

    // Instantiate an instance of Cosmos
    Cosmos cosmos = new CosmosImpl(zookeepers);

    // Create a definition for the data we want to load
    Store id = Store.create(connector, new Authorizations(), AscendingIndexIdentitySet.create());

    // Register the definition with Cosmos so it can track its progress.
    cosmos.register(id);

    // Load all of the data from our inputFile
    LoadBuildingPermits loader = new LoadBuildingPermits(cosmos, id, inputFile);
    loader.run();

    // Finalize the SortableResult which will prevent future writes to the data set
    cosmos.finalize(id);

    // Flush the ingest traces to the backend so we can see the results
    id.sendTraces();

    // Get back the Set of Columns that we've ingested.
    Set<Column> schema = Sets.newHashSet(cosmos.columns(id));

    log.debug("\nColumns: " + schema);

    Iterator<Column> iter = schema.iterator();
    while (iter.hasNext()) {
        Column c = iter.next();
        // Remove the internal ID field and columns that begin with CONTRACTOR_
        if (c.equals(LoadBuildingPermits.ID) || c.name().startsWith("CONTRACTOR_")) {
            iter.remove();
        }
    }

    Iterable<Index> indices = Iterables.transform(schema, new Function<Column, Index>() {

        @Override
        public Index apply(Column col) {
            return Index.define(col);
        }

    });

    // Ensure that we have locality groups set as we expect
    log.info("Ensure locality groups are set");
    id.optimizeIndices(indices);

    // Compact down the data for this SortableResult    
    log.info("Issuing compaction for relevant data");
    id.consolidate();

    final int numTopValues = 10;

    // Walk through each column in the result set
    for (Column c : schema) {
        Stopwatch sw = new Stopwatch();
        sw.start();

        // Get the number of times we've seen each value in a given column
        CloseableIterable<Entry<RecordValue<?>, Long>> groupingsInColumn = cosmos.groupResults(id, c);

        log.info(c.name() + ":");

        // Iterate over the counts, collecting the top N values in each column
        TreeMap<Long, RecordValue<?>> topValues = Maps.newTreeMap();

        for (Entry<RecordValue<?>, Long> entry : groupingsInColumn) {
            if (topValues.size() == numTopValues) {
                Entry<Long, RecordValue<?>> least = topValues.pollFirstEntry();

                if (least.getKey() < entry.getValue()) {
                    topValues.put(entry.getValue(), entry.getKey());
                } else {
                    topValues.put(least.getKey(), least.getValue());
                }
            } else if (topValues.size() < numTopValues) {
                topValues.put(entry.getValue(), entry.getKey());
            }
        }

        for (Long key : topValues.descendingKeySet()) {
            log.info(topValues.get(key).value() + " occurred " + key + " times");
        }

        sw.stop();

        log.info("Took " + sw.toString() + " to run query.\n");
    }

    log.info("Deleting records");

    // Delete the records we've ingested
    if (!example.useMiniAccumuloCluster) {
        // Because I'm lazy and don't want to wait around to run the BatchDeleter when we're just going
        // to rm -rf the directory in a few secs.
        cosmos.delete(id);
    }

    // And shut down Cosmos
    cosmos.close();

    log.info("Cosmos stopped");

    // If we were using MAC, also stop that
    if (example.useMiniAccumuloCluster && null != mac) {
        mac.stop();
        if (null != macDir) {
            FileUtils.deleteDirectory(macDir);
        }
    }
}

From source file:cc.twittertools.util.VerifySubcollection.java

@SuppressWarnings("static-access")
public static void main(String[] args) throws Exception {
    Options options = new Options();

    options.addOption(OptionBuilder.withArgName("dir").hasArg().withDescription("source collection directory")
            .create(COLLECTION_OPTION));
    options.addOption(
            OptionBuilder.withArgName("file").hasArg().withDescription("list of tweetids").create(ID_OPTION));

    CommandLine cmdline = null;
    CommandLineParser parser = new GnuParser();
    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        System.exit(-1);
    }

    if (!cmdline.hasOption(COLLECTION_OPTION) || !cmdline.hasOption(ID_OPTION)) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp(ExtractSubcollection.class.getName(), options);
        System.exit(-1);
    }

    String collectionPath = cmdline.getOptionValue(COLLECTION_OPTION);

    LongOpenHashSet tweetids = new LongOpenHashSet();
    File tweetidsFile = new File(cmdline.getOptionValue(ID_OPTION));
    if (!tweetidsFile.exists()) {
        System.err.println("Error: " + tweetidsFile + " does not exist!");
        System.exit(-1);
    }
    LOG.info("Reading tweetids from " + tweetidsFile);

    FileInputStream fin = new FileInputStream(tweetidsFile);
    BufferedReader br = new BufferedReader(new InputStreamReader(fin));

    String s;
    while ((s = br.readLine()) != null) {
        tweetids.add(Long.parseLong(s));
    }
    br.close();
    fin.close();
    LOG.info("Read " + tweetids.size() + " tweetids.");

    File file = new File(collectionPath);
    if (!file.exists()) {
        System.err.println("Error: " + file + " does not exist!");
        System.exit(-1);
    }

    LongOpenHashSet seen = new LongOpenHashSet();
    TreeMap<Long, String> tweets = Maps.newTreeMap();

    PrintStream out = new PrintStream(System.out, true, "UTF-8");
    StatusStream stream = new JsonStatusCorpusReader(file);
    Status status;
    int cnt = 0;
    while ((status = stream.next()) != null) {
        if (!tweetids.contains(status.getId())) {
            LOG.error("tweetid " + status.getId() + " doesn't belong in collection");
            continue;
        }
        if (seen.contains(status.getId())) {
            LOG.error("tweetid " + status.getId() + " already seen!");
            continue;
        }

        tweets.put(status.getId(), status.getJsonObject().toString());
        seen.add(status.getId());
        cnt++;
    }
    LOG.info("total of " + cnt + " tweets in subcollection.");

    for (Map.Entry<Long, String> entry : tweets.entrySet()) {
        out.println(entry.getValue());
    }

    stream.close();
    out.close();
}

From source file:module.entities.NameFinder.RegexNameFinder.java

/**
 * @param args the command line arguments
 */
public static void main(String[] args) throws SQLException, IOException {

    if (args.length == 1) {
        Config.configFile = args[0];
    }
    long lStartTime = System.currentTimeMillis();
    Timestamp startTime = new Timestamp(lStartTime);
    System.out.println("Regex Name Finder process started at: " + startTime);
    DB.initPostgres();
    regexerId = DB.LogRegexFinder(lStartTime);
    initLexicons();
    JSONObject obj = new JSONObject();
    TreeMap<Integer, String> consultations = DB.getDemocracitConsultationBody();
    Document doc;
    int count = 0;
    TreeMap<Integer, String> consFoundNames = new TreeMap<>();
    TreeMap<Integer, String> consFoundRoles = new TreeMap<>();
    for (int consId : consultations.keySet()) {
        String consBody = consultations.get(consId);
        String signName = "", roleName = "";
        doc = Jsoup.parse(consBody);
        Elements allPars = new Elements();
        Elements paragraphs = doc.select("p");
        for (Element par : paragraphs) {
            if (par.html().contains("<br>")) {
                String out = "<p>" + par.html().replaceAll("<br>", "</p><p>") + "</p>";
                Document internal_doc = Jsoup.parse(out);
                Elements subparagraphs = internal_doc.select("p");
                allPars.addAll(subparagraphs);
            } else {
                allPars.add(par);
            }
            //                System.out.println(formatedText);
        }
        String signature = getSignatureFromParagraphs(allPars);
        //            System.out.println(signature);
        if (signature.contains("#")) {
            String[] sign_tokens = signature.split("#");
            signName = sign_tokens[0];
            if (sign_tokens.length > 1) {
                roleName = sign_tokens[1];
            }
            consFoundNames.put(consId, signName.trim());
            consFoundRoles.put(consId, roleName.trim());
            count++;
        } else {
            System.err.println("--" + consId);
        }
    }
    DB.insertDemocracitConsultationMinister(consFoundNames, consFoundRoles);

    TreeMap<Integer, String> consultationsCompletedText = DB.getDemocracitCompletedConsultationBody();
    Document doc2;
    TreeMap<Integer, String> complConsFoundNames = new TreeMap<>();
    int count2 = 0;
    for (int consId : consultationsCompletedText.keySet()) {
        String consBody = consultationsCompletedText.get(consId);
        String signName = "", roleName = "";
        doc2 = Jsoup.parse(consBody);
        //            if (doc.text().contains("<br>")) {
        //                doc.text().replaceAll("(<[Bb][Rr]>)+", "<p>");
        //            }
        Elements allPars = new Elements();
        Elements paragraphs = doc2.select("p");
        for (Element par : paragraphs) {

            if (par.html().contains("<br>")) {
                String out = "<p>" + par.html().replaceAll("<br>", "</p><p>") + "</p>";
                Document internal_doc = Jsoup.parse(out);
                Elements subparagraphs = internal_doc.select("p");
                allPars.addAll(subparagraphs);
            } else {
                allPars.add(par);
            }
        }
        String signature = getSignatureFromParagraphs(allPars);
        if (signature.contains("#")) {
            String[] sign_tokens = signature.split("#");
            signName = sign_tokens[0];
            if (sign_tokens.length > 1) {
                roleName = sign_tokens[1];
            }
            complConsFoundNames.put(consId, signName.trim());
            consFoundRoles.put(consId, roleName.trim());
            //                System.out.println(consId);
            //                System.out.println(signName.trim());
            //                System.out.println("***************");
            count2++;
        } else {
            System.err.println("++" + consId);
        }
    }
    DB.insertDemocracitConsultationMinister(complConsFoundNames, consFoundRoles);
    long lEndTime = System.currentTimeMillis();
    System.out.println("Regex Name Finder process finished at: " + new Timestamp(lEndTime));
    obj.put("message", "Regex Name Finder finished with no errors");
    obj.put("details", "");
    DB.UpdateLogRegexFinder(lEndTime, regexerId, obj);
    DB.close();
}

From source file:cht.Parser.java

public static void main(String[] args) throws IOException {

    // TODO get from google drive
    boolean isUnicode = false;
    boolean isRemoveInputFileOnComplete = false;
    int rowNum;
    int colNum;
    Gson gson = new GsonBuilder().setPrettyPrinting().create();

    Properties prop = new Properties();

    try {
        prop.load(new FileInputStream("config.txt"));
    } catch (IOException ex) {
        ex.printStackTrace();
    }

    String inputFilePath = prop.getProperty("inputFile");
    String outputDirectory = prop.getProperty("outputDirectory");
    System.out.println(outputDirectory);
    // optional
    String unicode = prop.getProperty("unicode");
    String removeInputFileOnComplete = prop.getProperty("removeInputFileOnComplete");

    inputFilePath = inputFilePath.trim();
    outputDirectory = outputDirectory.trim();

    if (unicode != null) {
        isUnicode = Boolean.parseBoolean(unicode.trim());
    }
    if (removeInputFileOnComplete != null) {
        isRemoveInputFileOnComplete = Boolean.parseBoolean(removeInputFileOnComplete.trim());
    }

    Writer out = null;
    FileInputStream in = null;
    final String newLine = System.getProperty("line.separator").toString();
    final String separator = File.separator;
    try {
        in = new FileInputStream(inputFilePath);

        Workbook workbook = new XSSFWorkbook(in);

        Sheet sheet = workbook.getSheetAt(0);

        rowNum = sheet.getLastRowNum() + 1;
        colNum = sheet.getRow(0).getPhysicalNumberOfCells();

        for (int j = 1; j < colNum; ++j) {
            String outputFilename = sheet.getRow(0).getCell(j).getStringCellValue();
            // guess directory
            int slash = outputFilename.indexOf('/');
            if (slash != -1) { // has directory
                outputFilename = outputFilename.substring(0, slash) + separator
                        + outputFilename.substring(slash + 1);
            }

            String outputPath = FilenameUtils.concat(outputDirectory, outputFilename);
            System.out.println("--Writing " + outputPath);
            out = new OutputStreamWriter(new FileOutputStream(outputPath), "UTF-8");
            TreeMap<String, Object> map = new TreeMap<String, Object>();
            for (int i = 1; i < rowNum; i++) {
                try {
                    String key = sheet.getRow(i).getCell(0).getStringCellValue();
                    String value = "";
                    Cell tmp = sheet.getRow(i).getCell(j);
                    if (tmp != null) {
                        // not empty string!
                        value = sheet.getRow(i).getCell(j).getStringCellValue();
                    }
                    if (!key.equals("") && !key.startsWith("#") && !key.startsWith(".")) {
                        value = isUnicode ? StringEscapeUtils.escapeJava(value) : value;

                        int firstdot = key.indexOf(".");
                        String keyName, keyAttribute;
                        if (firstdot > 0) {// a.b.c.d 
                            keyName = key.substring(0, firstdot); // a
                            keyAttribute = key.substring(firstdot + 1); // b.c.d
                            TreeMap oldhash = null;
                            Object old = null;
                            if (map.get(keyName) != null) {
                                old = map.get(keyName);
                                if (old instanceof TreeMap == false) {
                                    System.out.println("different type of key:" + key);
                                    continue;
                                }
                                oldhash = (TreeMap) old;
                            } else {
                                oldhash = new TreeMap();
                            }

                            int firstdot2 = keyAttribute.indexOf(".");
                            String rootName, childName;
                            if (firstdot2 > 0) {// c, d.f --> d, f
                                rootName = keyAttribute.substring(0, firstdot2);
                                childName = keyAttribute.substring(firstdot2 + 1);
                            } else {// c, d  -> d, null
                                rootName = keyAttribute;
                                childName = null;
                            }

                            TreeMap<String, Object> object = myPut(oldhash, rootName, childName);
                            map.put(keyName, object);

                        } else { // key has no dot
                            keyName = key;
                            keyAttribute = null;
                            // simple string mode
                            map.put(key, value);
                        }

                    }

                } catch (Exception e) {
                    // just ignore empty rows
                }

            }
            String json = gson.toJson(map);
            // output json
            out.write(json + newLine);
            out.close();
        }
        in.close();

        System.out.println("\n---Complete!---");
        System.out.println("Read input file from " + inputFilePath);
        System.out.println(colNum - 1 + " output files are generated at " + outputDirectory);
        System.out.println(rowNum + " records are generated for each output file.");
        System.out.println("output file is encoded as unicode? " + (isUnicode ? "yes" : "no"));
        if (isRemoveInputFileOnComplete) {
            File input = new File(inputFilePath);
            input.deleteOnExit();
            System.out.println("Deleted " + inputFilePath);
        }

    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        if (in != null) {
            in.close();
        }
    }

}

From source file:de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step8GoldDataAggregator.java

public static void main(String[] args) throws Exception {
    String inputDir = args[0] + "/";
    // output dir
    File outputDir = new File(args[1]);
    File turkersConfidence = new File(args[2]);
    if (outputDir.exists()) {
        outputDir.delete();
    }
    outputDir.mkdir();

    List<String> annotatorsIDs = new ArrayList<>();
    //        for (File f : FileUtils.listFiles(new File(inputDir), new String[] { "xml" }, false)) {
    //            QueryResultContainer queryResultContainer = QueryResultContainer
    //                    .fromXML(FileUtils.readFileToString(f, "utf-8"));
    //            for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
    //                for (QueryResultContainer.MTurkRelevanceVote relevanceVote : rankedResults.mTurkRelevanceVotes) {
    //                    if (!annotatorsIDs.contains(relevanceVote.turkID))
    //                        annotatorsIDs.add(relevanceVote.turkID);
    //                }
    //            }
    //        }
    HashMap<String, Integer> countVotesForATurker = new HashMap<>();
    // Creates a temporary file in the format expected by MACE.
    // The annotations map: key is the id of a document and a sentence;
    // the value is an array votes[] of turkers' decisions: true or false (relevant or not).
    // The length of this array equals the number of annotators in List<String> annotatorsIDs.
    // If an annotator worked on the task, their decision is written in the array; otherwise the value is null.

    // key: queryID + clueWebID + sentenceID
    // value: true and false annotations
    TreeMap<String, Annotations> annotations = new TreeMap<>();

    for (File f : FileUtils.listFiles(new File(inputDir), new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));
        System.out.println("Reading " + f.getName());
        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            String documentID = rankedResults.clueWebID;
            for (QueryResultContainer.MTurkRelevanceVote relevanceVote : rankedResults.mTurkRelevanceVotes) {
                Integer turkerID;
                if (!annotatorsIDs.contains(relevanceVote.turkID)) {
                    annotatorsIDs.add(relevanceVote.turkID);
                    turkerID = annotatorsIDs.size() - 1;
                } else {
                    turkerID = annotatorsIDs.indexOf(relevanceVote.turkID);
                }
                Integer count = countVotesForATurker.get(relevanceVote.turkID);
                if (count == null) {
                    count = 0;
                }
                count++;
                countVotesForATurker.put(relevanceVote.turkID, count);

                String id;
                List<Integer> trueVotes;
                List<Integer> falseVotes;
                for (QueryResultContainer.SingleSentenceRelevanceVote singleSentenceRelevanceVote : relevanceVote.singleSentenceRelevanceVotes)
                    if (!"".equals(singleSentenceRelevanceVote.sentenceID)) {

                        id = f.getName() + "_" + documentID + "_" + singleSentenceRelevanceVote.sentenceID;
                        Annotations turkerVotes = annotations.get(id);
                        if (turkerVotes == null) {
                            trueVotes = new ArrayList<>();
                            falseVotes = new ArrayList<>();
                            turkerVotes = new Annotations(trueVotes, falseVotes);
                        }
                        trueVotes = turkerVotes.trueAnnotations;
                        falseVotes = turkerVotes.falseAnnotations;
                        if ("true".equals(singleSentenceRelevanceVote.relevant)) {
                            // votes[turkerID] = true;
                            trueVotes.add(turkerID);
                        } else if ("false".equals(singleSentenceRelevanceVote.relevant)) {
                            //   votes[turkerID] = false;
                            falseVotes.add(turkerID);
                        } else {
                            throw new IllegalStateException("Annotation value of sentence "
                                    + singleSentenceRelevanceVote.sentenceID + " in " + rankedResults.clueWebID
                                    + " equals " + singleSentenceRelevanceVote.relevant);
                        }
                        try {
                            int allVotesCount = trueVotes.size() + falseVotes.size();
                            if (allVotesCount > 5) {
                                System.err.println(id + " doesn't have 5 annotators: true: " + trueVotes.size()
                                        + " false: " + falseVotes.size());

                                // nasty hack, we're gonna strip some data; true votes first
                                /* we can't do that, it breaks something down the line
                                int toRemove = allVotesCount - 5;
                                if (trueVotes.size() >= toRemove) {
                                trueVotes = trueVotes
                                        .subList(0, trueVotes.size() - toRemove);
                                }
                                else if (
                                    falseVotes.size() >= toRemove) {
                                falseVotes = falseVotes
                                        .subList(0, trueVotes.size() - toRemove);
                                }
                                */
                                System.err.println("Adjusted: " + id + " doesn't have 5 annotators: true: "
                                        + trueVotes.size() + " false: " + falseVotes.size());
                            }
                        } catch (IllegalStateException e) {
                            e.printStackTrace();
                        }
                        turkerVotes.trueAnnotations = trueVotes;
                        turkerVotes.falseAnnotations = falseVotes;
                        annotations.put(id, turkerVotes);
                    } else {
                        throw new IllegalStateException(
                                "Empty Sentence ID in " + f.getName() + " for turker " + turkerID);
                    }

            }
        }

    }
    File tmp = printHashMap(annotations, annotatorsIDs.size());

    String file = TEMP_DIR + "/" + tmp.getName();
    MACE.main(new String[] { "--prefix", file });

    //gets the keys of the documents and sentences
    ArrayList<String> lines = (ArrayList<String>) FileUtils.readLines(new File(file + ".prediction"));
    int i = 0;
    TreeMap<String, TreeMap<String, ArrayList<HashMap<String, String>>>> ids = new TreeMap<>();
    ArrayList<HashMap<String, String>> sentences;
    if (lines.size() != annotations.size()) {
        throw new IllegalStateException(
                "The size of prediction file is " + lines.size() + "but expected " + annotations.size());
    }
    for (Map.Entry entry : annotations.entrySet()) { //1001.xml_clueweb12-1905wb-13-07360_8783
        String key = (String) entry.getKey();
        String[] IDs = key.split("_");
        if (IDs.length > 2) {
            String queryID = IDs[0];
            String clueWebID = IDs[1];
            String sentenceID = IDs[2];
            TreeMap<String, ArrayList<HashMap<String, String>>> clueWebIDs = ids.get(queryID);
            if (clueWebIDs == null) {
                clueWebIDs = new TreeMap<>();
            }
            sentences = clueWebIDs.get(clueWebID);
            if (sentences == null) {
                sentences = new ArrayList<>();
            }
            HashMap<String, String> sentence = new HashMap<>();
            sentence.put(sentenceID, lines.get(i));
            sentences.add(sentence);
            clueWebIDs.put(clueWebID, sentences);
            ids.put(queryID, clueWebIDs);
        } else {
            throw new IllegalStateException("Wrong ID " + key);
        }

        i++;
    }

    for (Map.Entry entry : ids.entrySet()) {
        TreeMap<String, ArrayList<HashMap<String, String>>> value = (TreeMap<String, ArrayList<HashMap<String, String>>>) entry
                .getValue();
        String queryID = (String) entry.getKey();
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(new File(inputDir, queryID), "utf-8"));
        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            for (Map.Entry val : value.entrySet()) {
                String clueWebID = (String) val.getKey();
                if (clueWebID.equals(rankedResults.clueWebID)) {
                    List<QueryResultContainer.SingleSentenceRelevanceVote> goldEstimatedLabels = new ArrayList<>();
                    List<QueryResultContainer.SingleSentenceRelevanceVote> turkersVotes = new ArrayList<>();
                    int size = 0;
                    int hitSize = 0;
                    String hitID = "";
                    for (QueryResultContainer.MTurkRelevanceVote vote : rankedResults.mTurkRelevanceVotes) {
                        if (!hitID.equals(vote.hitID)) {
                            hitID = vote.hitID;
                            hitSize = vote.singleSentenceRelevanceVotes.size();
                            size = size + hitSize;
                            turkersVotes.addAll(vote.singleSentenceRelevanceVotes);
                        } else {
                            if (vote.singleSentenceRelevanceVotes.size() != hitSize) {
                                hitSize = vote.singleSentenceRelevanceVotes.size();
                                size = size + hitSize;
                                turkersVotes.addAll(vote.singleSentenceRelevanceVotes);
                            }
                        }
                    }
                    ArrayList<HashMap<String, String>> sentenceList = (ArrayList<HashMap<String, String>>) val
                            .getValue();
                    if (sentenceList.size() != turkersVotes.size()) {
                        try {
                            throw new IllegalStateException("Expected size of annotations is "
                                    + turkersVotes.size() + "but found " + sentenceList.size()
                                    + " for document " + rankedResults.clueWebID + " in " + queryID);
                        } catch (IllegalStateException ex) {
                            ex.printStackTrace();
                        }
                    }
                    for (QueryResultContainer.SingleSentenceRelevanceVote s : turkersVotes) {
                        String valSentence = null;
                        for (HashMap<String, String> anno : sentenceList) {
                            if (anno.keySet().contains(s.sentenceID)) {
                                valSentence = anno.get(s.sentenceID);
                            }
                        }
                        QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                        singleSentenceVote.sentenceID = s.sentenceID;
                        if (("false").equals(valSentence)) {
                            singleSentenceVote.relevant = "false";
                        } else if (("true").equals(valSentence)) {
                            singleSentenceVote.relevant = "true";
                        } else {
                            throw new IllegalStateException("Annotation value of sentence "
                                    + singleSentenceVote.sentenceID + " equals " + val.getValue());
                        }
                        goldEstimatedLabels.add(singleSentenceVote);
                    }
                    rankedResults.goldEstimatedLabels = goldEstimatedLabels;
                }
            }
        }
        File outputFile = new File(outputDir, queryID);
        FileUtils.writeStringToFile(outputFile, queryResultContainer.toXML(), "utf-8");
        System.out.println("Finished " + outputFile);
    }

    ArrayList<String> annotators = (ArrayList<String>) FileUtils.readLines(new File(file + ".competence"));
    FileWriter fileWriter;
    StringBuilder sb = new StringBuilder();
    for (int j = 0; j < annotatorsIDs.size(); j++) {
        String[] s = annotators.get(0).split("\t");
        Float score = Float.parseFloat(s[j]);
        String turkerID = annotatorsIDs.get(j);
        System.out.println(turkerID + " " + score + " " + countVotesForATurker.get(turkerID));
        sb.append(turkerID).append(" ").append(score).append(" ").append(countVotesForATurker.get(turkerID))
                .append("\n");
    }
    fileWriter = new FileWriter(turkersConfidence);
    fileWriter.append(sb.toString());
    fileWriter.close();

}

From source file:cn.tata.t2s.ssm.util.AcmeCorpPhysicalNamingStrategy.java

private static Map<String, String> buildAbbreviationMap() {
    TreeMap<String, String> abbreviationMap = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
    abbreviationMap.put("account", "acct");
    abbreviationMap.put("number", "num");
    return abbreviationMap;
}

From source file:it.zero11.acme.utils.JWKUtils.java

public static TreeMap<String, Object> getWebKey(PublicKey publicKey) {
    TreeMap<String, Object> key = new TreeMap<>();
    if (publicKey instanceof RSAPublicKey) {
        key.put("kty", "RSA");
        key.put("e",
                TextCodec.BASE64URL.encode(toIntegerBytes(((RSAPublicKey) publicKey).getPublicExponent())));
        key.put("n", TextCodec.BASE64URL.encode(toIntegerBytes(((RSAPublicKey) publicKey).getModulus())));
        return key;
    } else {
        throw new IllegalArgumentException();
    }
}

From source file:com.kpb.other.AcmeCorpPhysicalNamingStrategy.java

private static Map<String, String> buildAbbreviationMap() {
    TreeMap<String, String> abbreviationMap = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER);
    abbreviationMap.put("account", "acct");
    abbreviationMap.put("number", "num");
    return abbreviationMap;
}