Example usage for java.util HashMap keySet

List of usage examples for java.util HashMap keySet

Introduction

This page collects usage examples for java.util.HashMap.keySet().

Prototype

public Set<K> keySet() 

Document

Returns a Set view of the keys contained in this map.
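
As the description says, the returned set is a view backed by the map: new mappings show up in the set, and removing a key through the set removes the corresponding mapping from the map. A minimal standalone sketch of this behavior (class and variable names are illustrative):

import java.util.HashMap;
import java.util.Set;

public class KeySetViewExample {
    public static void main(String[] args) {
        HashMap<String, Integer> counts = new HashMap<>();
        counts.put("alpha", 1);
        counts.put("beta", 2);

        // keySet() returns a live view, not a copy
        Set<String> keys = counts.keySet();

        // Removing a key through the view also removes the mapping from the map
        keys.remove("alpha");
        System.out.println(counts.containsKey("alpha")); // false

        // New mappings are reflected in the previously obtained view
        counts.put("gamma", 3);
        System.out.println(keys.contains("gamma")); // true
    }
}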

Usage

From source file:it.cnr.icar.eric.server.cms.CanonicalXMLFilteringService.java

/**
 * Runs XSLT based upon specified inputs and returns the output file.
 *
 * TODO: Need some refactoring to make this reusable throughout OMAR
 * particularly in CanonicalXMLCatalogingService.
 */
protected static File runXSLT(StreamSource input, StreamSource xslt, URIResolver resolver,
        HashMap<String, String> params) throws RegistryException {

    File outputFile = null;

    try {
        //dumpStream(xslt);

        TransformerFactory tFactory = TransformerFactory.newInstance();
        Transformer transformer = initTransformer(tFactory, xslt);
        // Use FilteringService URIResolver to resolve RIs submitted in the
        // ServiceInput object
        transformer.setURIResolver(resolver);
        //Set repository item as parameter

        //Create the output file with the filtered RegistryObject Metadata
        outputFile = File.createTempFile("CanonicalXMLFilteringService_Output", ".xml");
        outputFile.deleteOnExit();

        log.debug("outputFile= " + outputFile.getAbsolutePath());

        for (String key : params.keySet()) {
            transformer.setParameter(key, params.get(key));
        }

        StreamResult sr = new StreamResult(outputFile);
        transformer.transform(input, sr);

    } catch (Exception e) {
        throw new RegistryException(e);
    }

    return outputFile;

}
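
A small note on the parameter loop above: because both the key and the value are needed, iterating entrySet() avoids the extra params.get(key) lookup per key. A minimal sketch of that variant (it assumes the same transformer and params variables plus a java.util.Map import):

for (Map.Entry<String, String> entry : params.entrySet()) {
    // Each entry already carries the key and its value, so no second lookup is needed
    transformer.setParameter(entry.getKey(), entry.getValue());
}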

From source file:edu.illinois.cs.cogcomp.transliteration.WikiTransliteration.java

public static HashMap<String, Double> GetSourceSubstringMax(HashMap<Pair<String, String>, Double> counts) {
    HashMap<String, Double> result = new HashMap<>(counts.size());
    for (Pair<String, String> key : counts.keySet()) {
        Double value = counts.get(key);
        if (result.containsKey(key.getFirst()))
            result.put(key.getFirst(), Math.max(value, result.get(key.getFirst())));
        else
            result.put(key.getFirst(), value);
    }

    return result;
}

From source file:edu.illinois.cs.cogcomp.transliteration.WikiTransliteration.java

public static HashMap<String, Double> GetFixedSizeNgramProbs(int n, Iterable<String> examples) {
    HashMap<String, Integer> ngramCounts = GetNgramCounts(n, examples, true);
    HashMap<String, Integer> ngramTotals = new HashMap<>();
    for (String key : ngramCounts.keySet()) {
        int v = ngramCounts.get(key);
        Dictionaries.IncrementOrSet(ngramTotals, key.substring(0, n - 1), v, v);
    }

    HashMap<String, Double> result = new HashMap<>(ngramCounts.size());
    for (String key : ngramCounts.keySet()) {
        int v = ngramCounts.get(key);
        result.put(key, ((double) v) / ngramTotals.get(key.substring(0, n - 1)));
    }

    return result;
}
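
The method above divides each n-gram count by the count of its (n - 1)-character prefix, so each value is the conditional probability of the last character given the preceding characters. A hypothetical call, purely for illustration (the example words and the printing loop are assumptions, not part of the original source):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;

import edu.illinois.cs.cogcomp.transliteration.WikiTransliteration;

public class NgramProbsDemo {
    public static void main(String[] args) {
        // A few illustrative example words
        List<String> examples = Arrays.asList("paris", "parma", "park");

        // n = 2: character-level bigram probabilities
        HashMap<String, Double> probs = WikiTransliteration.GetFixedSizeNgramProbs(2, examples);

        // Each key is an n-gram; its value is P(last char | preceding n-1 chars)
        for (String ngram : probs.keySet()) {
            System.out.println(ngram + " -> " + probs.get(ngram));
        }
    }
}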

From source file:pfg.graphic.Chart.java

/**
 * Adds data to be displayed.
 * @param values
 */
public void addData(HashMap<String, Double> values) {
    for (String name : values.keySet())
        addData(name, values.get(name));
}

From source file:amie.keys.CombinationsExplorationNew.java

/**
 *
 * @param ruleToExtendWith
 * @param ruleToGraphNewFirstLevel
 * @param ruleToGraphNewLastLevel
 */
private static void discoverConditionalKeysPerLevel(HashMap<Rule, HashSet<String>> ruleToExtendWith,
        HashMap<Rule, GraphNew> ruleToGraphNewFirstLevel, HashMap<Rule, GraphNew> ruleToGraphNewLastLevel) {
    HashMap<Rule, GraphNew> ruleToGraphNewThisLevel = new HashMap<>();
    for (Rule currentRule : ruleToExtendWith.keySet()) {
        for (String conditionProperty : ruleToExtendWith.get(currentRule)) {
            if (Utilities.getRelationIds(currentRule, property2Id).last() > property2Id
                    .get(conditionProperty)) {
                GraphNew graph = ruleToGraphNewLastLevel.get(currentRule);
                GraphNew currentGraphNew = (GraphNew) graph.clone();
                Integer propertyId = property2Id.get(conditionProperty);
                HashSet<Integer> propertiesSet = new HashSet<>();
                propertiesSet.add(propertyId);
                Node node = currentGraphNew.createNode(propertiesSet);
                node.toExplore = false;
                Iterable<Rule> conditions = Utilities.getConditions(currentRule, conditionProperty,
                        (int) support, kb);
                for (Rule conditionRule : conditions) {
                    Rule complementaryRule = getComplementaryRule(conditionRule);
                    if (!ruleToGraphNewFirstLevel.containsKey(complementaryRule)) {
                        // We should never fall into this case
                        for (Rule r : ruleToGraphNewFirstLevel.keySet()) {
                            System.out.println(r.getDatalogBasicRuleString());
                        }
                        System.out.println(complementaryRule.getDatalogBasicRuleString());
                        System.out.println(complementaryRule + " not found in the first level graph");
                    }
                    GraphNew complementaryGraphNew = ruleToGraphNewFirstLevel.get(complementaryRule);
                    GraphNew newGraphNew = (GraphNew) currentGraphNew.clone();
                    HashSet<Integer> conditionProperties = new HashSet<>();
                    conditionProperties.addAll(getRelations(conditionRule, property2Id));
                    conditionProperties.addAll(getRelations(currentRule, property2Id));
                    newGraphNew = mergeGraphNews(newGraphNew, complementaryGraphNew,
                            newGraphNew.topGraphNodes(), conditionProperties);

                    discoverConditionalKeysForComplexConditions(newGraphNew, newGraphNew.topGraphNodes(),
                            conditionRule);
                    ruleToGraphNewThisLevel.put(conditionRule, newGraphNew);
                }
            }
        }
    }
    HashMap<Rule, HashSet<String>> newRuleToExtendWith = new HashMap<>();
    for (Rule conRule : ruleToGraphNewThisLevel.keySet()) {
        GraphNew newGraphNew = ruleToGraphNewThisLevel.get(conRule);
        for (Node node : newGraphNew.topGraphNodes()) {
            HashSet<String> properties = new HashSet<>();
            if (node.toExplore) {
                Iterator<Integer> it = node.set.iterator();
                int prop = it.next();
                String propertyStr = id2Property.get(prop);
                properties.add(propertyStr);
            }
            if (properties.size() != 0) {
                newRuleToExtendWith.put(conRule, properties);
            }
        }
    }

    if (newRuleToExtendWith.size() != 0) {
        discoverConditionalKeysPerLevel(newRuleToExtendWith, ruleToGraphNewFirstLevel, ruleToGraphNewThisLevel);
    }
}

From source file:com.glluch.profilesparser.Writer.java

/**
 * Prepares the terms for Solr's XML.
 * @param field_name The name of the Solr field.
 * @param terms A hash map of terms -> counts to be transformed into XML.
 * @return An XML string representing the terms.
 */
protected String terms2xml(String field_name, HashMap<String, Double> terms) {
    String text = "";
    Set pterms = terms.keySet();
    for (Object t0 : pterms) {
        String t = (String) t0;
        text += "<field name=\"" + field_name + "\" " + " boost=\"" + term_boost * terms.get(t) + "\"" + ">" + t
                + "</field>" + System.lineSeparator();
    }
    return text;
}
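
Because the loop above builds the result by repeated string concatenation, a StringBuilder variant may be preferable for large term maps; a minimal sketch of the same body (same field_name, terms and term_boost assumed):

StringBuilder text = new StringBuilder();
for (String t : terms.keySet()) {
    text.append("<field name=\"").append(field_name)
            .append("\" boost=\"").append(term_boost * terms.get(t)).append("\">")
            .append(t).append("</field>").append(System.lineSeparator());
}
return text.toString();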

From source file:com.bah.applefox.main.plugins.fulltextindex.FTLoader.java

/**
 * Adds all information parsed by Tika for the given page into the Accumulo
 * table.
 * 
 * @param url
 *            the URL of the page that has been parsed
 * @return true if the page was written successfully, false otherwise
 */
private static boolean addToDataBaseTable(String url) {
    try {
        // Connect to the data table
        BatchWriter writer = AccumuloUtils.connectBatchWrite(dTable);

        // Let the user know the url is being added
        System.out.println("Adding " + url + " with prefix " + longSuffix);

        // Get the input stream (in case it is not an HTML document)
        InputStream urlInput = new URL(url).openStream();

        // Set the page contents (used for filtering if it is an html
        // document)
        String pageContents = getPageContents(new URL(url));

        // If the document is HTML
        if (exDivs.size() != 0 && pageContents.toLowerCase().contains("<html>")) {
            // Filter out some divs (especially generic headers/footers,
            // etc.)
            pageContents = DivsFilter.filterDivs(pageContents, exDivs);
            urlInput = new ByteArrayInputStream(pageContents.getBytes());
        }

        // Parse with tika
        Parser parser = new AutoDetectParser();
        Metadata metadata = new Metadata();
        ParseContext context = new ParseContext();
        ContentHandler handler = new BodyContentHandler();

        parser.parse(urlInput, handler, metadata, context);

        // Get the keywords of the page and its title
        String keywords = metadata.get("keywords");
        String title = metadata.get("title");
        if (title == null) {
            WebPageCrawl p;
            try {
                p = new WebPageCrawl(url, "", Collections.<String>emptySet());
            } catch (PageCrawlException e) {
                log.info(e);
                return false;
            }
            title = p.getTitle();
        }

        // If there are keywords, replace the commas with the "[ ]" delimiter;
        // otherwise make it an empty string (not null)
        if (keywords != null) {
            keywords = keywords.replaceAll(",", "[ ]");
        } else {
            keywords = "";
        }

        // Make everything lower case for ease of search
        String plainText = handler.toString().toLowerCase();

        // Split it into <Key,Value> pairs of NGrams, with the Value being
        // the count of the NGram on the page
        HashMap<String, Integer> tikaParsed = IngestUtils
                .collectTerms(IngestUtils.createNGrams(plainText, maxNGrams));

        // A counter for the final number of words
        Integer totalWords = 0;

        // A HashMap for the final NGrams
        HashMap<String, Integer> finalParsed = new HashMap<String, Integer>();

        for (String i : tikaParsed.keySet()) {
            int freq = tikaParsed.get(i);
            totalWords += freq;
            // Skip stop words (when a stop-word list is configured)
            if (stopWords == null || !stopWords.contains(i)) {
                finalParsed.put(i, freq);
            }
        }

        System.out.println("Tika Parsed: " + finalParsed.keySet().size());
        System.out.println("Starting");
        int counter = 0;

        String namedURL = url + "[ ]" + title + "[ ]" + keywords;

        for (String row : finalParsed.keySet()) {
            row = row + " " + longSuffix;
            for (String CQ : finalParsed.keySet()) {
                Integer wc = finalParsed.get(CQ);
                double freq = wc.doubleValue() / totalWords.doubleValue();
                String groupedVal = wc + "," + freq;
                Value val = new Value(IngestUtils.serialize(groupedVal));

                Mutation m = new Mutation(row);
                m.put(namedURL, CQ, new Date().getTime(), val);
                writer.addMutation(m);
                counter++;
            }

        }

        System.out.println("Wrote " + counter + " Key-Value pairs to Accumulo.");

        writer.close();
        System.out.println("Stopped writing");
    } catch (AccumuloException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        return false;
    } catch (AccumuloSecurityException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        return false;
    } catch (TableNotFoundException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        return false;
    } catch (TableExistsException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        return false;
    } catch (MalformedURLException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
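        // Note: unlike the other handlers, this catch block does not return false,
        // so after a MalformedURLException the method still falls through and returns true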
    } catch (IOException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        return false;
    } catch (SAXException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        return false;
    } catch (TikaException e) {
        if (e.getMessage() != null) {
            log.error(e.getMessage());
        } else {
            log.error(e.getStackTrace());
        }
        return false;
    }
    return true;
}
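
Since most of the catch blocks above log and return in exactly the same way, on Java 7+ they could be collapsed into a single multi-catch. A sketch of the tail of the try statement (note that this folds MalformedURLException into IOException and would therefore return false for it, whereas the original handler lets the method return true):

} catch (AccumuloException | AccumuloSecurityException | TableNotFoundException
        | TableExistsException | IOException | SAXException | TikaException e) {
    if (e.getMessage() != null) {
        log.error(e.getMessage());
    } else {
        log.error(e.getStackTrace());
    }
    return false;
}
return true;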

From source file:edu.illinois.cs.cogcomp.transliteration.WikiTransliteration.java

/**
 * Given a map of productions and corresponding counts, get the counts of the source word in each
 * production.
 * @param counts production counts
 * @return a map from source strings to counts.
 */
public static HashMap<String, Double> GetAlignmentTotals1(HashMap<Production, Double> counts) {
    // the string in this map is the source string.
    HashMap<String, Double> result = new HashMap<>();
    for (Production key : counts.keySet()) {
        Double value = counts.get(key);

        String source = key.getFirst();

        // Increment or set
        if (result.containsKey(source)) {
            result.put(source, result.get(source) + value);
        } else {
            result.put(source, value);
        }
    }

    return result;
}
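
The increment-or-set branch above can also be expressed with Map.merge (Java 8+); a minimal sketch of the same accumulation, iterating entrySet() instead of keySet():

for (Map.Entry<Production, Double> e : counts.entrySet()) {
    // merge() adds the value to any existing total for this source string,
    // or inserts the value if the source has not been seen yet
    result.merge(e.getKey().getFirst(), e.getValue(), Double::sum);
}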

From source file:edu.illinois.cs.cogcomp.transliteration.WikiTransliteration.java

/**
 * This does no normalization, but interns the string in each production.
 * @param counts
 * @param internTable
 * @return
 */
public static HashMap<Production, Double> InternProductions(HashMap<Production, Double> counts,
        InternDictionary<String> internTable) {
    HashMap<Production, Double> result = new HashMap<>(counts.size());

    for (Production key : counts.keySet()) {
        Double value = counts.get(key);
        result.put(new Production(internTable.Intern(key.getFirst()), internTable.Intern(key.getSecond()),
                key.getOrigin()), value);
    }

    return result;
}

From source file:org.ontosoft.server.repository.plugins.GithubPlugin.java

private Collection<String> addLanguageMetadata(String userid, String repoid, PluginResponse response) {
    String ontns = KBConstants.ONTNS();
    String resource = "/repos/" + userid + "/" + repoid + "/languages";

    HashMap<String, Object> vals = getResource(resource);
    for (String language : vals.keySet()) {
        response.addSuggestedMetadata(ontns + "hasImplementationLanguage", language);
    }
    return vals.keySet();
}