Example usage for java.util HashMap entrySet

List of usage examples for java.util HashMap entrySet

Introduction

On this page you can find usage examples for java.util HashMap entrySet.

Prototype

Set<Map.Entry<K, V>> entrySet()

Document

Holds cached entrySet().
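
Before the collected examples, here is a minimal sketch of the typical pattern (the class name and map contents are illustrative, not taken from any of the sources below): entrySet() returns a Set view of the map's key/value mappings, which is usually iterated with a for-each loop so each pair is visited without an extra lookup per key.

import java.util.HashMap;
import java.util.Map;

public class EntrySetExample {
    public static void main(String[] args) {
        HashMap<String, Integer> counts = new HashMap<String, Integer>();
        counts.put("alpha", 1);
        counts.put("beta", 2);

        // entrySet() exposes the mappings as a Set<Map.Entry<K, V>> view of the map
        for (Map.Entry<String, Integer> entry : counts.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}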

Usage

From source file:de.unileipzig.ub.scroller.Main.java

public static void main(String[] args) throws IOException {

    Options options = new Options();
    // add help option
    options.addOption("h", "help", false, "display this help");

    // elasticsearch options
    options.addOption("t", "host", true, "elasticsearch hostname (default: 0.0.0.0)");
    options.addOption("p", "port", true, "transport port (that's NOT the http port, default: 9300)");
    options.addOption("c", "cluster", true, "cluster name (default: elasticsearch_mdma)");

    options.addOption("i", "index", true, "index to use");

    options.addOption("f", "filter", true, "filter(s) - e.g. meta.kind=title");
    options.addOption("j", "junctor", true, "values: and, or (default: and)");
    options.addOption("n", "notice-every", true, "show speed after every N items");

    options.addOption("v", "verbose", false, "be verbose");
    // options.addOption("z", "end-of-message", true, "sentinel to print to stdout, once the regular input finished (default: EOM)");

    CommandLineParser parser = new PosixParser();
    CommandLine cmd = null;

    try {
        cmd = parser.parse(options, args);
    } catch (ParseException ex) {
        logger.error(ex);
        System.exit(1);
    }

    // process options
    if (cmd.hasOption("h")) {
        HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("scroller", options, true);
        System.exit(0);
    }

    String endOfMessage = "EOM";

    boolean verbose = false;
    if (cmd.hasOption("verbose")) {
        verbose = true;
    }

    if (!cmd.hasOption("i")) {
        System.err.println("error: no index specified");
        System.exit(1);
    }

    long noticeEvery = 10000;
    if (cmd.hasOption("n")) {
        noticeEvery = Long.parseLong(cmd.getOptionValue("n"));
    }

    // ES options
    String[] hosts = new String[] { "0.0.0.0" };
    int port = 9300;
    String clusterName = "elasticsearch_mdma";
    int bulkSize = 3000;

    if (cmd.hasOption("host")) {
        hosts = cmd.getOptionValues("host");
    }
    if (cmd.hasOption("port")) {
        port = Integer.parseInt(cmd.getOptionValue("port"));
    }
    if (cmd.hasOption("cluster")) {
        clusterName = cmd.getOptionValue("cluster");
    }

    // Index
    String indexName = cmd.getOptionValue("index");

    Map<String, String> filterMap = new HashMap<String, String>();
    if (cmd.hasOption("filter")) {
        try {
            filterMap = getMapForKeys(cmd.getOptionValues("filter"));
        } catch (ParseException pe) {
            System.err.println(pe);
            System.exit(1);
        }
    }

    Collection<HashMap> filterList = new ArrayList<HashMap>();
    if (cmd.hasOption("filter")) {
        try {
            filterList = getFilterList(cmd.getOptionValues("filter"));
        } catch (ParseException pe) {
            System.err.println(pe);
            System.exit(1);
        }
    }

    // ES Client
    final Settings settings = ImmutableSettings.settingsBuilder().put("cluster.name", clusterName)
            .put("client.transport.ping_timeout", "60s").build();
    final TransportClient client = new TransportClient(settings);
    for (String host : hosts) {
        client.addTransportAddress(new InetSocketTransportAddress(host, port));
    }

    // build the query
    String junctor = "and";
    if (cmd.hasOption("j")) {
        junctor = cmd.getOptionValue("j");
    }

    //        ArrayList<TermFilterBuilder> filters = new ArrayList<TermFilterBuilder>();
    //        if (filterMap.size() > 0) {
    //            for (Map.Entry<String, String> entry : filterMap.entrySet()) {
    //                filters.add(new TermFilterBuilder(entry.getKey(), entry.getValue()));
    //            }
    //        }

    ArrayList<TermFilterBuilder> filters = new ArrayList<TermFilterBuilder>();
    if (filterList.size() > 0) {
        for (HashMap map : filterList) {
            for (Object obj : map.entrySet()) {
                Map.Entry entry = (Map.Entry) obj;
                filters.add(new TermFilterBuilder(entry.getKey().toString(), entry.getValue().toString()));
            }
        }
    }

    FilterBuilder fb = null;
    if (junctor.equals("and")) {
        AndFilterBuilder afb = new AndFilterBuilder();
        for (TermFilterBuilder tfb : filters) {
            afb.add(tfb);
        }
        fb = afb;
    }

    if (junctor.equals("or")) {
        OrFilterBuilder ofb = new OrFilterBuilder();
        for (TermFilterBuilder tfb : filters) {
            ofb.add(tfb);
        }
        fb = ofb;
    }

    //        TermFilterBuilder tfb0 = new TermFilterBuilder("meta.kind", "title");
    //        TermFilterBuilder tfb1 = new TermFilterBuilder("meta.timestamp", "201112081240");
    //
    //        AndFilterBuilder afb0 = new AndFilterBuilder(tfb0, tfb1);

    QueryBuilder qb0 = null;
    if (filterMap.isEmpty()) {
        qb0 = matchAllQuery();
    } else {
        qb0 = filteredQuery(matchAllQuery(), fb);
    }

    // sorting
    // FieldSortBuilder sortBuilder = new FieldSortBuilder("meta.timestamp");
    // sortBuilder.order(SortOrder.DESC);

    // FilteredQueryBuilder fqb0 = filteredQuery(matchAllQuery(), tfb0);

    final CountResponse countResponse = client.prepareCount(indexName).setQuery(qb0).execute().actionGet();
    final long total = countResponse.getCount();

    SearchResponse scrollResp = client.prepareSearch(indexName).setSearchType(SearchType.SCAN)
            .setScroll(new TimeValue(60000)).setQuery(qb0)
            // .addSort(sortBuilder) // sort has no effect on scroll type (see: https://github.com/CPAN-API/cpan-api/issues/172)
            .setSize(1000) //1000 hits per shard will be returned for each scroll
            .execute().actionGet();

    //Scroll until no hits are returned

    System.err.println("[Scroller] query: " + qb0.toString());
    System.err.println("[Scroller] took: " + scrollResp.getTookInMillis() + "ms");
    System.err.println("[Scroller] docs found: " + total);

    long counter = 0;
    long start = System.currentTimeMillis();

    while (true) {
        scrollResp = client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(600000))
                .execute().actionGet();

        if (scrollResp.getHits().hits().length == 0) {
            break;
        }

        for (SearchHit hit : scrollResp.getHits()) {

            System.out.println(hit.sourceAsString());
            counter += 1;
            if (counter % noticeEvery == 0) {
                final double elapsed = (System.currentTimeMillis() - start) / 1000.0;
                final double speed = counter / elapsed;
                final long eta = (long) ((elapsed / counter) * (total - counter) * 1000);
                System.err.println(
                        counter + "/" + total + " records recvd @ speed " + String.format("%1$,.1f", speed)
                                + " r/s eta: " + DurationFormatUtils.formatDurationWords(eta, false, false));
            }
        }
    }
    System.out.close();
    // System.out.println(endOfMessage);
}

From source file:at.tlphotography.jAbuseReport.Reporter.java

/**
 * The main method.
 *
 * @param args
 *          the arguments
 */
public static void main(String[] args) {
    parseArguments(args);

    File[] directory = new File(logDir).listFiles(); // get the files in the dir

    for (File file : directory) // iterate over the files
    {
        if (!file.isDirectory() && file.getName().contains(logNames)) // if the file is not a dir and the name contains the logName string
        {
            if (file.getName().endsWith(".gz")) // is it zipped?
            {
                content.putAll(readGZFile(file));
            } else {
                content.putAll(readLogFile(file));
            }
        }
    }

    // save the mails to the log lines
    HashMap<String, ArrayList<LogObject>> finalContent = new HashMap<>();

    Iterator<Entry<String, String>> it = content.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<String, String> pair = it.next();
        String mail = whoIsLookUp(pair.getKey());

        if (finalContent.containsKey(mail)) {
            finalContent.get(mail).add(new LogObject(pair.getValue()));
        } else {
            ArrayList<LogObject> temp = new ArrayList<LogObject>();
            temp.add(new LogObject(pair.getValue()));
            finalContent.put(mail, temp);
        }

        it.remove();
    }

    // sort them
    Iterator<Entry<String, ArrayList<LogObject>>> it2 = finalContent.entrySet().iterator();
    while (it2.hasNext()) {
        Entry<String, ArrayList<LogObject>> pair = it2.next();
        Collections.sort(pair.getValue());
        println(pair.getKey() + " =");
        for (LogObject obj : pair.getValue()) {
            println(obj.logContent);
        }

        println("\n");
        it2.remove();
    }

}

From source file:akori.AKORI.java

public static void main(String[] args) throws IOException, InterruptedException {
    System.out.println("esto es AKORI");

    URL = "http://www.mbauchile.cl";
    PATH = "E:\\NetBeansProjects\\AKORI\\";
    NAME = "mbauchile.png";
    // Extract the DOM tree

    Document doc = Jsoup.connect(URL).timeout(0).get();

    // The Firefox driver supports javascript 
    WebDriver driver = new FirefoxDriver();
    driver.manage().window().maximize();
    System.out.println(driver.manage().window().getSize().toString());
    System.out.println(driver.manage().window().getPosition().toString());
    int xmax = driver.manage().window().getSize().width;
    int ymax = driver.manage().window().getSize().height;

    // Go to the URL page
    driver.get(URL);

    File screen = ((TakesScreenshot) driver).getScreenshotAs(OutputType.FILE);
    FileUtils.copyFile(screen, new File(PATH + NAME));

    BufferedImage img = ImageIO.read(new File(PATH + NAME));
    //Graphics2D graph = img.createGraphics();

    BufferedImage img1 = new BufferedImage(xmax, ymax, BufferedImage.TYPE_INT_ARGB);
    Graphics2D graph1 = img.createGraphics();
    double[][] matrix = new double[ymax][xmax];
    BufferedReader in = new BufferedReader(new FileReader("et.txt"));
    String linea;
    double max = 0;
    graph1.drawImage(img, 0, 0, null);
    HashMap<String, Integer> lista = new HashMap<String, Integer>();
    int count = 0;
    for (int i = 0; (linea = in.readLine()) != null && i < 10000; ++i) {
        String[] datos = linea.split(",");
        int x = (int) Double.parseDouble(datos[0]);
        int y = (int) Double.parseDouble(datos[2]);
        long time = Double.valueOf(datos[4]).longValue();
        if (x >= xmax || y >= ymax)
            continue;
        if (time < 691215)
            continue;
        if (time > 705648)
            break;
        if (lista.containsKey(x + "," + y))
            lista.put(x + "," + y, lista.get(x + "," + y) + 1);
        else
            lista.put(x + "," + y, 1);
        ++count;
    }
    System.out.println(count);
    in.close();
    Iterator iter = lista.entrySet().iterator();
    Map.Entry e;
    for (String key : lista.keySet()) {
        Integer i = lista.get(key);
        if (max < i)
            max = i;
    }
    System.out.println(max);
    max = 0;
    while (iter.hasNext()) {
        e = (Map.Entry) iter.next();
        String xy = (String) e.getKey();
        String[] datos = xy.split(",");
        int x = Integer.parseInt(datos[0]);
        int y = Integer.parseInt(datos[1]);
        matrix[y][x] += (int) e.getValue();
        double aux;
        if ((aux = normalMatrix(matrix, y, x, ((int) e.getValue()) * 4)) > max) {
            max = aux;
        }
        //normalMatrix(matrix,x,y,20);
        if (matrix[y][x] > max)
            max = matrix[y][x];
    }
    int A, R, G, B, n;
    for (int i = 0; i < xmax; ++i) {
        for (int j = 0; j < ymax; ++j) {
            if (matrix[j][i] != 0) {
                n = (int) Math.round(matrix[j][i] * 100 / max);
                R = Math.round((255 * n) / 100);
                G = Math.round((255 * (100 - n)) / 100);
                B = 0;
                A = Math.round((255 * n) / 100);
                if (R > 255)
                    R = 255;
                if (R < 0)
                    R = 0;
                if (G > 255)
                    G = 255;
                if (G < 0)
                    G = 0;
                if (R < 50)
                    A = 0;
                graph1.setColor(new Color(R, G, B, A));
                graph1.fillOval(i, j, 1, 1);
            }
        }
    }
    //graph1.dispose();

    ImageIO.write(img, "png", new File("example.png"));
    System.out.println(max);

    graph1.setColor(Color.RED);
    // Extract elements
    Elements e1 = doc.body().getAllElements();
    int i = 1;
    ArrayList<String> tags = new ArrayList<String>();
    for (Element temp : e1) {

        if (tags.indexOf(temp.tagName()) == -1) {
            tags.add(temp.tagName());

            List<WebElement> query = driver.findElements(By.tagName(temp.tagName()));
            for (WebElement temp1 : query) {
                Point po = temp1.getLocation();
                Dimension d = temp1.getSize();
                if (d.width <= 0 || d.height <= 0 || po.x < 0 || po.y < 0)
                    continue;
                System.out.println(i + " " + temp.nodeName());
                System.out.println("  x: " + po.x + " y: " + po.y);
                System.out.println("  width: " + d.width + " height: " + d.height);
                graph1.draw(new Rectangle(po.x, po.y, d.width, d.height));
                ++i;
            }
        }
    }

    graph1.dispose();
    ImageIO.write(img, "png", new File(PATH + NAME));

    driver.quit();

}

From source file:TwitterClustering.java

public static void main(String[] args) throws FileNotFoundException, IOException {
    // TODO code application logic here

    File outFile = new File(args[3]);
    Scanner s = new Scanner(new File(args[1])).useDelimiter(",");
    JSONParser parser = new JSONParser();
    Set<Cluster> clusterSet = new HashSet<Cluster>();
    HashMap<String, Tweet> tweets = new HashMap();
    FileWriter fw = new FileWriter(outFile.getAbsoluteFile());
    BufferedWriter bw = new BufferedWriter(fw);

    // init
    try {

        Object obj = parser.parse(new FileReader(args[2]));

        JSONArray jsonArray = (JSONArray) obj;

        for (int i = 0; i < jsonArray.size(); i++) {

            Tweet twt = new Tweet();
            JSONObject jObj = (JSONObject) jsonArray.get(i);
            String text = jObj.get("text").toString();

            long sum = 0;
            for (int y = 0; y < text.toCharArray().length; y++) {

                sum += (int) text.toCharArray()[y];
            }

            String[] token = text.split(" ");
            String tID = jObj.get("id").toString();

            Set<String> mySet = new HashSet<String>(Arrays.asList(token));
            twt.setAttributeValue(sum);
            twt.setText(mySet);
            twt.setTweetID(tID);
            tweets.put(tID, twt);

        }

        // preparing initial clusters
        int i = 0;
        while (s.hasNext()) {
            String id = s.next();// id
            Tweet t = tweets.get(id.trim());
            clusterSet.add(new Cluster(i + 1, t, new LinkedList()));
            i++;
        }

        // note: this iterator is consumed during the first pass of the loop below
        Iterator it = tweets.entrySet().iterator();

        for (int l = 0; l < 2; l++) { // two clustering iterations

            while (it.hasNext()) {
                Map.Entry me = (Map.Entry) it.next();

                // calculate distance to each centroid
                Tweet p = (Tweet) me.getValue();
                HashMap<Cluster, Float> distMap = new HashMap();

                for (Cluster clust : clusterSet) {

                    distMap.put(clust, jaccardDistance(p.getText(), clust.getCentroid().getText()));
                }

                HashMap<Cluster, Float> sorted = (HashMap<Cluster, Float>) sortByValue(distMap);

                sorted.keySet().iterator().next().getMembers().add(p);

            }

            // calculate new centroid and update Clusterset
            for (Cluster clust : clusterSet) {

                TreeMap<String, Long> tDistMap = new TreeMap();

                Tweet newCentroid = null;
                Long avgSumDist = new Long(0);
                for (int j = 0; j < clust.getMembers().size(); j++) {

                    avgSumDist += clust.getMembers().get(j).getAttributeValue();
                    tDistMap.put(clust.getMembers().get(j).getTweetID(),
                            clust.getMembers().get(j).getAttributeValue());
                }
                if (clust.getMembers().size() != 0) {
                    avgSumDist /= (clust.getMembers().size());
                }

                ArrayList<Long> listValues = new ArrayList<Long>(tDistMap.values());

                if (tDistMap.containsValue(findClosestNumber(listValues, avgSumDist))) {
                    // found closest
                    newCentroid = tweets
                            .get(getKeyByValue(tDistMap, findClosestNumber(listValues, avgSumDist)));
                    clust.setCentroid(newCentroid);
                }

            }

        }
        // create an iterator
        Iterator iterator = clusterSet.iterator();

        // check values
        while (iterator.hasNext()) {

            Cluster c = (Cluster) iterator.next();
            bw.write(c.getId() + "\t");
            System.out.print(c.getId() + "\t");

            for (Tweet t : c.getMembers()) {
                bw.write(t.getTweetID() + ", ");
                System.out.print(t.getTweetID() + ",");

            }
            bw.write("\n");
            System.out.println("");
        }

        System.out.println("");

        System.out.println("SSE " + sumSquaredErrror(clusterSet));

    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        bw.close();
        fw.close();
    }
}

From source file:main.DOORS_Service.java

/**
 * Login to the DWA server and perform some OSLC actions
 * @param args
 * @throws ParseException 
 */
public static void main(String[] args) throws ParseException {

    Options options = new Options();

    options.addOption("url", true, "url");
    options.addOption("user", true, "user ID");
    options.addOption("password", true, "password");
    options.addOption("project", true, "project area");

    CommandLineParser cliParser = new GnuParser();

    //Parse the command line
    CommandLine cmd = cliParser.parse(options, args);

    if (!validateOptions(cmd)) {
        logger.severe(
                "Syntax:  java <class_name> -url https://<server>:port/<context>/ -user <user> -password <password> -project \"<project_area>\"");
        logger.severe(
                "Example: java DoorsOauthSample -url https://exmple.com:9443/dwa -user ADMIN -password ADMIN -project \"JKE Banking (Requirements Management)\"");
        return;
    }

    String webContextUrl = cmd.getOptionValue("url");
    String user = cmd.getOptionValue("user");
    String passwd = cmd.getOptionValue("password");
    String projectArea = cmd.getOptionValue("project");

    try {
        //STEP 1: Initialize a Jazz rootservices helper and indicate we're looking for the RequirementManagement catalog
        // The root services for DOORs is found at /public level
        JazzRootServicesHelper helper = new JazzRootServicesHelper(webContextUrl + "/public",
                OSLCConstants.OSLC_RM);

        //STEP 2: Create a new OSLC OAuth capable client
        OslcOAuthClient client = helper.initOAuthClient("JIRA", "JIRA");

        if (client != null) {
            //STEP 3: Try to access the context URL to trigger the OAuth dance and login
            try {
                client.getResource(webContextUrl, OSLCConstants.CT_RDF);
            } catch (OAuthRedirectException oauthE) {
                validateTokens(client,
                        oauthE.getRedirectURL() + "?oauth_token=" + oauthE.getAccessor().requestToken, user,
                        passwd, webContextUrl + "/j_acegi_security_check");
                // Try to access again
                ClientResponse response = client.getResource(webContextUrl, OSLCConstants.CT_RDF);
                response.getEntity(InputStream.class).close();
            }

            //STEP 4: Get our requirements collection that we want
            //TODO: Replace with option from startup
            String serviceProviderUrl = "http://usnx47:8080/dwa/rm/urn:rational::1-4d2b67b464226e12-M-0000048a";
            ClientResponse response = client.getResource(serviceProviderUrl,
                    "application/x-oslc-rm-requirement-collection-1.0+xml");
            //build the rdf
            Model rdfModel = ModelFactory.createDefaultModel();
            rdfModel.read(response.getEntity(InputStream.class), serviceProviderUrl);
            response.consumeContent();

            //get the statements
            List<Statement> reqs = rdfModel.getResource(serviceProviderUrl).listProperties().toList();
            HashMap<String, String> requirements = new HashMap<String, String>();
            for (Statement s : reqs) {

                String reqURI = s.getObject().toString();
                if (reqURI.contains("http")) {
                    response = client.getResource(reqURI, "application/x-oslc-rm-requirement-1.0+xml");
                    if (response.getStatusCode() == 200) {
                        InputStream in = response.getEntity(InputStream.class);
                        Model model = ModelFactory.createDefaultModel();

                        try {
                            model.read(in, reqURI);
                        } catch (Exception sa) {
                            System.out.println(reqURI);
                        }

                        //Properties to traverse on
                        Property attrDef = model
                                .createProperty("http://jazz.net/doors/xmlns/prod/jazz/doors/1.0/attrDef");
                        Property name = model
                                .createProperty("http://jazz.net/doors/xmlns/prod/jazz/doors/1.0/name");

                        //Flags we use for parsing
                        int count = 0;
                        boolean isText = false;
                        boolean isID = false;
                        boolean done = false;
                        //Text of the DOORS Object and its ID are what we are going to extract
                        String text = "";
                        String id = "";

                        //Look through all of the possible fields
                        StmtIterator statementIter = model.listStatements();
                        while (statementIter.hasNext() && done != true) {
                            Statement field = statementIter.next();
                            //Get the attrDef property to find out what kind of value we have
                            StmtIterator props = field.getSubject().listProperties(attrDef);
                            while (props.hasNext() && done != true) {
                                Statement kind = props.next();
                                RDFNode propertyNode = kind.getObject();
                                StmtIterator propIt = propertyNode.asResource().listProperties(name);
                                //Check all of the properties for our desired fields
                                while (propIt.hasNext()) {
                                    Statement node = propIt.next();
                                    if (node.getObject().isLiteral()) {
                                        if (node.getObject().toString().contains("Object+Text")
                                                && field.getObject().isLiteral()) {
                                            text = field.getLiteral().toString();
                                            text = text.substring(0, text.indexOf("^"));
                                            count++;

                                        }
                                        if (node.getObject().toString().contains("Absolute+Number")
                                                && field.getObject().isLiteral()) {
                                            id = field.getLiteral().toString();
                                            id = id.substring(0, id.indexOf("^"));
                                            count++;
                                        }

                                    }
                                }
                                if (count == 2) {
                                    if (!text.isEmpty()) {
                                        //System.out.println( "Req: " + id );
                                        //System.out.println( text );
                                        requirements.put(id, text);
                                        count = 0;
                                        done = true;
                                        break;
                                    }

                                }
                            }

                        }

                    }

                }
                response.consumeContent();
            }
            //check if already in JIRA
            //post to jira
            for (Entry<String, String> e : requirements.entrySet()) {

            }
        }

    } catch (Exception e) {
        logger.log(Level.SEVERE, e.getMessage(), e);
    }

}

From source file:de.tudarmstadt.ukp.experiments.dip.wp1.documents.Step7CollectMTurkResults.java

public static void main(String[] args) throws Exception {
    // input dir - list of xml query containers
    // /home/user-ukp/research/data/dip/wp1-documents/step4-boiler-plate/
    File inputDir = new File(args[0] + "/");

    // MTurk result file

    // output dir
    File outputDir = new File(args[2]);
    if (!outputDir.exists()) {
        outputDir.mkdirs();

    }

    // Folder with success files
    File mturkSuccessDir = new File(args[1]);

    Collection<File> files = FileUtils.listFiles(mturkSuccessDir, new String[] { "result" }, false);
    if (files.isEmpty()) {
        throw new IllegalArgumentException("Input folder is empty. " + mturkSuccessDir);
    }

    HashMap<String, List<MTurkAnnotation>> mturkAnnotations = new HashMap<>();

    // parsing all CSV files
    for (File mturkCSVResultFile : files) {
        System.out.println("Parsing " + mturkCSVResultFile.getName());

        MTurkOutputReader outputReader = new MTurkOutputReader(
                new HashSet<>(Arrays.asList("annotation", "workerid")), mturkCSVResultFile);

        // for fixing broken data input: for each hit, collect all sentence IDs
        Map<String, SortedSet<String>> hitSentences = new HashMap<>();

        // first iteration: collect the sentences
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            if (!hitSentences.containsKey(hitID)) {
                hitSentences.put(hitID, new TreeSet<>());
            }

            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");

            if (relevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(relevantSentences.split(",")));
            }

            if (irrelevantSentences != null) {
                hitSentences.get(hitID).addAll(Arrays.asList(irrelevantSentences.split(",")));
            }
        }

        // and now second iteration
        for (Map<String, String> record : outputReader) {
            String hitID = record.get("hitid");
            String annotatorID = record.get("workerid");
            String acceptTime = record.get("assignmentaccepttime");
            String submitTime = record.get("assignmentsubmittime");
            String relevantSentences = record.get("Answer.relevant_sentences");
            String irrelevantSentences = record.get("Answer.irrelevant_sentences");
            String reject = record.get("reject");
            String filename[];
            String comment;
            String clueWeb;
            String[] relevant = {};
            String[] irrelevant = {};

            filename = record.get("annotation").split("_");
            String fileXml = filename[0];
            clueWeb = filename[1].trim();
            comment = record.get("Answer.comment");

            if (relevantSentences != null) {
                relevant = relevantSentences.split(",");
            }

            if (irrelevantSentences != null) {
                irrelevant = irrelevantSentences.split(",");
            }

            // sanitizing data: if both relevant and irrelevant are empty, that's a bug
            // we're gonna look up all sentences from this HIT and treat this assignment
            // as if there were only irrelevant ones
            if (relevant.length == 0 && irrelevant.length == 0) {
                SortedSet<String> strings = hitSentences.get(hitID);
                irrelevant = new String[strings.size()];
                strings.toArray(irrelevant);
            }

            if (reject != null) {
                System.out.println(" HIT " + hitID + " annotated by " + annotatorID + " was rejected ");
            } else {
                /*
                // relevant sentences is a comma-delimited string,
                // this regular expression is rather strange
                // it must contain digits, it might be that there is only one space or a comma or some other char
                // digits are the sentence ids. if relevant sentences do not contain digits then it is wrong
                if (relevantSentences.matches("^\\D*$") &&
                    irrelevantSentences.matches("^\\D*$")) {
                try {
                    throw new IllegalStateException(
                            "No annotations found for HIT " + hitID + " in " +
                                    fileXml + " for document " + clueWeb);
                }
                catch (IllegalStateException ex) {
                    ex.printStackTrace();
                }
                        
                }
                */
                MTurkAnnotation mturkAnnotation;
                try {
                    mturkAnnotation = new MTurkAnnotation(hitID, annotatorID, acceptTime, submitTime, comment,
                            clueWeb, relevant, irrelevant);
                } catch (IllegalArgumentException ex) {
                    throw new IllegalArgumentException("Record: " + record, ex);
                }

                List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileXml);

                if (listOfAnnotations == null) {
                    listOfAnnotations = new ArrayList<>();
                }
                listOfAnnotations.add(mturkAnnotation);
                mturkAnnotations.put(fileXml, listOfAnnotations);
            }

        }
        //            parser.close();
    }

    // Debugging: output number of HITs of a query
    System.out.println("Accepted HITs for a query:");
    for (Map.Entry e : mturkAnnotations.entrySet()) {
        ArrayList<MTurkAnnotation> a = (ArrayList<MTurkAnnotation>) e.getValue();
        System.out.println(e.getKey() + " " + a.size());
    }

    for (File f : FileUtils.listFiles(inputDir, new String[] { "xml" }, false)) {
        QueryResultContainer queryResultContainer = QueryResultContainer
                .fromXML(FileUtils.readFileToString(f, "utf-8"));
        String fileName = f.getName();
        List<MTurkAnnotation> listOfAnnotations = mturkAnnotations.get(fileName);

        if (listOfAnnotations == null || listOfAnnotations.isEmpty()) {
            throw new IllegalStateException("No annotations for " + f.getName());
        }

        for (QueryResultContainer.SingleRankedResult rankedResults : queryResultContainer.rankedResults) {
            for (MTurkAnnotation mtAnnotation : listOfAnnotations) {
                String clueWeb = mtAnnotation.clueWeb;
                if (rankedResults.clueWebID.equals(clueWeb)) {
                    List<QueryResultContainer.MTurkRelevanceVote> mTurkRelevanceVotes = rankedResults.mTurkRelevanceVotes;
                    QueryResultContainer.MTurkRelevanceVote relevanceVote = new QueryResultContainer.MTurkRelevanceVote();
                    String annotatorID = mtAnnotation.annotatorID;
                    String hitID = mtAnnotation.hitID;
                    String acceptTime = mtAnnotation.acceptTime;
                    String submitTime = mtAnnotation.submitTime;
                    String comment = mtAnnotation.comment;
                    String[] relevant = mtAnnotation.relevant;
                    String[] irrelevant = mtAnnotation.irrelevant;
                    relevanceVote.turkID = annotatorID.trim();
                    relevanceVote.hitID = hitID.trim();
                    relevanceVote.acceptTime = acceptTime.trim();
                    relevanceVote.submitTime = submitTime.trim();
                    relevanceVote.comment = comment != null ? comment.trim() : null;
                    if (relevant.length == 0 && irrelevant.length == 0) {
                        try {
                            throw new IllegalStateException("the length of the annotations is 0"
                                    + rankedResults.clueWebID + " for HIT " + relevanceVote.hitID);
                        } catch (IllegalStateException e) {
                            e.printStackTrace();
                        }
                    }
                    for (String r : relevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "true";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    for (String r : irrelevant) {
                        String sentenceId = r.trim();
                        if (!sentenceId.isEmpty() && sentenceId.matches("\\d+")) {
                            QueryResultContainer.SingleSentenceRelevanceVote singleSentenceVote = new QueryResultContainer.SingleSentenceRelevanceVote();
                            singleSentenceVote.sentenceID = sentenceId;
                            singleSentenceVote.relevant = "false";
                            relevanceVote.singleSentenceRelevanceVotes.add(singleSentenceVote);
                        }
                    }
                    mTurkRelevanceVotes.add(relevanceVote);
                }
            }

        }
        File outputFile = new File(outputDir, f.getName());
        FileUtils.writeStringToFile(outputFile, queryResultContainer.toXML(), "utf-8");
        System.out.println("Finished " + outputFile);
    }

}

From source file:Main.java

public static double getSumValue(HashMap<String, Double> map) {
    double count = 0.0D;
    // iterate the entry set directly; each entry already carries its value,
    // so there is no need to copy the entries into a list or look the key up again
    for (Map.Entry<String, Double> entry : map.entrySet()) {
        count += entry.getValue();
    }
    return count;
}

From source file:Main.java

public static ArrayList<String> getHashMap(HashMap<String, String> hm) {
    ArrayList<String> a = new ArrayList<String>();
    Set s = hm.entrySet();
    Iterator it = s.iterator();
    while (it.hasNext()) {
        Map.Entry m = (Map.Entry) it.next();
        a.add(m.getKey() + "\t" + m.getValue());
    }
    return a;
}

From source file:Main.java

public static <K, V> List<Map.Entry<K, V>> sortMapByValue(HashMap<K, V> map, final int sort) {
    List<Map.Entry<K, V>> orderList = new ArrayList<Map.Entry<K, V>>(map.entrySet());
    Collections.sort(orderList, new Comparator<Map.Entry<K, V>>() {
        @Override
        @SuppressWarnings("unchecked")
        public int compare(Map.Entry<K, V> o1, Map.Entry<K, V> o2) {
            return (((Comparable<V>) o2.getValue()).compareTo(o1.getValue())) * sort;
        }
    });
    return orderList;
}
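
A short usage sketch for the helper above (not a complete program: it assumes sortMapByValue is in scope, and the map contents are illustrative). With the sort argument set to 1 the comparator orders entries by descending value; with -1 it orders them ascending.

    HashMap<String, Integer> scores = new HashMap<String, Integer>();
    scores.put("a", 3);
    scores.put("b", 1);
    scores.put("c", 2);

    // descending by value: a=3, c=2, b=1
    List<Map.Entry<String, Integer>> desc = sortMapByValue(scores, 1);
    // ascending by value: b=1, c=2, a=3
    List<Map.Entry<String, Integer>> asc = sortMapByValue(scores, -1);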

From source file:Main.java

/**
 * Converts a HashMap to a 2d array for JSON Reading
 *
 * @param map
 *
 * @return an array that can be read in JSON as an array
 */
public static Object[][] HashMapToArray(HashMap map) {
    Object[][] map2D = new Object[map.size()][2];
    Set entries = map.entrySet();
    Iterator entriesIterator = entries.iterator();
    int i = 0;
    while (entriesIterator.hasNext()) {

        Map.Entry mapping = (Map.Entry) entriesIterator.next();
        map2D[i][0] = mapping.getKey();
        map2D[i][1] = mapping.getValue();
        i++;
    }

    return map2D;
}