Example usage for java.util TreeMap keySet

Introduction

This page collects example usages of java.util.TreeMap.keySet() from real projects.

Prototype

public Set<K> keySet() 

Document

Returns a Set view of the keys contained in this map.
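
Below is a minimal, self-contained sketch of the view semantics described above (class and key names are illustrative). The returned set is backed by the map: removing a key from the set removes the corresponding entry, while adding through the set is unsupported.

import java.util.Set;
import java.util.TreeMap;

public class KeySetDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        // For a TreeMap, keySet() iterates keys in ascending order
        Set<String> keys = map.keySet();
        System.out.println(keys); // [a, b, c]

        // The set is a live view: removing a key also removes its mapping
        keys.remove("b");
        System.out.println(map); // {a=1, c=3}

        // Adding through the view is not supported:
        // keys.add("d"); // would throw UnsupportedOperationException
    }
}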

Usage

From source file:com.linkedin.pinot.integration.tests.HybridClusterScanComparisonIntegrationTest.java

protected void runTestLoop(Callable<Object> testMethod, boolean useMultipleThreads) throws Exception {
    // Clean up the Kafka topic
    // TODO jfim: Re-enable this once PINOT-2598 is fixed
    // purgeKafkaTopicAndResetRealtimeTable();

    List<Pair<File, File>> enabledRealtimeSegments = new ArrayList<>();

    // Sort the realtime segments based on their segment name so they get added from earliest to latest
    TreeMap<File, File> sortedRealtimeSegments = new TreeMap<File, File>(new Comparator<File>() {
        @Override
        public int compare(File o1, File o2) {
            return _realtimeAvroToSegmentMap.get(o1).getName()
                    .compareTo(_realtimeAvroToSegmentMap.get(o2).getName());
        }
    });
    sortedRealtimeSegments.putAll(_realtimeAvroToSegmentMap);

    for (File avroFile : sortedRealtimeSegments.keySet()) {
        enabledRealtimeSegments.add(Pair.of(avroFile, sortedRealtimeSegments.get(avroFile)));

        if (useMultipleThreads) {
            _queryExecutor = new ThreadPoolExecutor(4, 4, 5, TimeUnit.SECONDS,
                    new ArrayBlockingQueue<Runnable>(50), new ThreadPoolExecutor.CallerRunsPolicy());
        }

        // Push avro for the new segment
        LOGGER.info("Pushing Avro file {} into Kafka", avroFile);
        pushAvroIntoKafka(Collections.singletonList(avroFile), KafkaStarterUtils.DEFAULT_KAFKA_BROKER,
                KAFKA_TOPIC);

        // Configure the scan based comparator to use the distinct union of the offline and realtime segments
        configureScanBasedComparator(enabledRealtimeSegments);

        QueryResponse queryResponse = _scanBasedQueryProcessor.processQuery("select count(*) from mytable");

        int expectedRecordCount = queryResponse.getNumDocsScanned();
        waitForRecordCountToStabilizeToExpectedCount(expectedRecordCount,
                System.currentTimeMillis() + getStabilizationTimeMs());

        // Run the actual tests
        LOGGER.info("Running queries");
        testMethod.call();

        if (useMultipleThreads) {
            if (_nQueriesRead == -1) {
                _queryExecutor.shutdown();
                _queryExecutor.awaitTermination(5, TimeUnit.MINUTES);
            } else {
                int totalQueries = _failedQueries.get() + _successfulQueries.get();
                while (totalQueries < _nQueriesRead) {
                    LOGGER.info("Completed " + totalQueries + " out of " + _nQueriesRead + " - waiting");
                    Uninterruptibles.sleepUninterruptibly(20, TimeUnit.SECONDS);
                    totalQueries = _failedQueries.get() + _successfulQueries.get();
                }
                if (totalQueries > _nQueriesRead) {
                    throw new RuntimeException("Executed " + totalQueries + " more than " + _nQueriesRead);
                }
                _queryExecutor.shutdown();
            }
        }
        int totalQueries = _failedQueries.get() + _successfulQueries.get();
        doDisplayStatus(totalQueries);

        // Release resources
        _scanBasedQueryProcessor.close();
        _compareStatusFileWriter.write("Status after push of " + avroFile + ":" + System.currentTimeMillis()
                + ":Executed " + _nQueriesRead + " queries, " + _failedQueries + " failures,"
                + _emptyResults.get() + " empty results\n");
    }
}
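
An aside on the loop above: it iterates keySet() and calls get() for each key. When both the key and the value are needed, entrySet() yields both in one pass and avoids the extra tree lookup. A minimal standalone sketch of that alternative (map contents are illustrative):

import java.util.Map;
import java.util.TreeMap;

public class EntrySetSketch {
    public static void main(String[] args) {
        TreeMap<String, String> sortedSegments = new TreeMap<>();
        sortedSegments.put("segment-1", "avro-1");
        sortedSegments.put("segment-2", "avro-2");

        // Each entry carries key and value together, so no get() per key is needed
        for (Map.Entry<String, String> entry : sortedSegments.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}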

From source file:com.sfs.DataFilter.java

/**
 * Parses the text data.
 *
 * @param text the text
 *
 * @return the parsed data as a TreeMap<Integer, TreeMap<Integer, String>> (row number to column values)
 */
public static TreeMap<Integer, TreeMap<Integer, String>> parseTextData(final String text) {

    TreeMap<Integer, TreeMap<Integer, String>> parsedData = new TreeMap<Integer, TreeMap<Integer, String>>();

    // This counter holds the maximum number of columns provided
    int maxNumberOfTokens = 0;

    if (text != null) {
        StringTokenizer tokenizer = new StringTokenizer(text, "\n");

        int lineCounter = 1;

        while (tokenizer.hasMoreTokens()) {
            String line = tokenizer.nextToken();
            TreeMap<Integer, String> parsedLine = new TreeMap<Integer, String>();

            final StringTokenizer tabTokenizer = new StringTokenizer(line, "\t");
            if (tabTokenizer.countTokens() > 1) {
                parsedLine = tokenizerToMap(tabTokenizer);
            } else {
                final StringTokenizer commaTokenizer = new StringTokenizer(line, ",");
                parsedLine = tokenizerToMap(commaTokenizer);
            }
            if (parsedLine.size() > maxNumberOfTokens) {
                maxNumberOfTokens = parsedLine.size();
            }

            parsedData.put(lineCounter, parsedLine);
            lineCounter++;
        }
    }

    // Now cycle through all the parsed data
    // Ensure that each row has the same (max) number of tokens
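    // (Overwriting values while iterating keySet() is safe here: put() on an
    // existing key is not a structural modification, so no ConcurrentModificationException.)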
    for (int rowIndex : parsedData.keySet()) {
        TreeMap<Integer, String> parsedLine = parsedData.get(rowIndex);

        // This map holds the final values
        TreeMap<Integer, String> columnTokens = new TreeMap<Integer, String>();

        for (int i = 0; i < maxNumberOfTokens; i++) {
            int columnIndex = i + 1;
            if (parsedLine.containsKey(columnIndex)) {
                String value = parsedLine.get(columnIndex);
                columnTokens.put(columnIndex, value);
            } else {
                columnTokens.put(columnIndex, "");
            }
        }
        parsedData.put(rowIndex, columnTokens);
    }

    return parsedData;
}

From source file:edu.nyu.vida.data_polygamy.standard_techniques.CorrelationTechniquesReducer.java

private double[] computeCorrelationTechniques(ArrayList<TreeMap<Integer, Float>>[] timeSeries, int index1,
        int index2, boolean temporalPermutation) {
    double[] values = { 0.0, 0.0, 0.0 };

    TreeMap<Integer, Float> map1 = timeSeries[index1].get(dataset1Key);
    TreeMap<Integer, Float> map2 = timeSeries[index2].get(dataset2Key);

    ArrayList<Double> array1 = new ArrayList<Double>();
    ArrayList<Double> array2 = new ArrayList<Double>();

    for (int temp : map1.keySet()) {
        if (map2.containsKey(temp)) {
            array1.add((double) map1.get(temp));
            array2.add((double) map2.get(temp));
        }
    }

    double[] completeTempArray1 = new double[map1.keySet().size()];
    int index = 0;
    for (int temp : map1.keySet()) {
        completeTempArray1[index] = map1.get(temp);
        index++;
    }
    double[] completeTempArray2 = new double[map2.keySet().size()];
    index = 0;
    for (int temp : map2.keySet()) {
        completeTempArray2[index] = map2.get(temp);
        index++;
    }

    map1.clear();
    map2.clear();

    if (array1.size() < 2)
        return null;

    // Pearson's Correlation

    double[] tempDoubleArray1 = new double[array1.size()];
    double[] tempDoubleArray2 = new double[array2.size()];

    int indexD1 = (temporalPermutation) ? new Random().nextInt(array1.size()) : 0;
    int indexD2 = (temporalPermutation) ? new Random().nextInt(array2.size()) : 0;
    for (int i = 0; i < array1.size(); i++) {
        int j = (indexD1 + i) % array1.size();
        int k = (indexD2 + i) % array2.size();
        tempDoubleArray1[i] = array1.get(j);
        tempDoubleArray2[i] = array2.get(k);
    }

    array1 = null;
    array2 = null;

    PearsonsCorrelation pearsonsCorr = new PearsonsCorrelation();
    values[0] = pearsonsCorr.correlation(tempDoubleArray1, tempDoubleArray2);

    // Mutual Information

    try {
        values[1] = getMIScore(tempDoubleArray1, tempDoubleArray2);
    } catch (Exception e) {
        e.printStackTrace();
        /*String data1 = "";
        for (double d : tempDoubleArray1)
        data1 += d + ", ";
        String data2 = "";
        for (double d : tempDoubleArray2)
        data2 += d + ", ";
        System.out.println(data1);
        System.out.println(data2);*/
        System.exit(-1);
    }

    tempDoubleArray1 = null;
    tempDoubleArray2 = null;

    // DTW

    double[] completeTempDoubleArray1 = new double[completeTempArray1.length];
    double[] completeTempDoubleArray2 = new double[completeTempArray2.length];

    if (temporalPermutation) {
        indexD1 = new Random().nextInt(completeTempArray1.length);
        for (int i = 0; i < completeTempArray1.length; i++) {
            int j = (indexD1 + i) % completeTempArray1.length;
            completeTempDoubleArray1[i] = completeTempArray1[j];
        }

        indexD2 = new Random().nextInt(completeTempArray2.length);
        for (int i = 0; i < completeTempArray2.length; i++) {
            int j = (indexD2 + i) % completeTempArray2.length;
            completeTempDoubleArray2[i] = completeTempArray2[j];
        }
    } else {
        System.arraycopy(completeTempArray1, 0, completeTempDoubleArray1, 0, completeTempArray1.length);
        System.arraycopy(completeTempArray2, 0, completeTempDoubleArray2, 0, completeTempArray2.length);
    }

    completeTempArray1 = null;
    completeTempArray2 = null;

    completeTempDoubleArray1 = normalize(completeTempDoubleArray1);
    completeTempDoubleArray2 = normalize(completeTempDoubleArray2);

    values[2] = getDTWScore(completeTempDoubleArray1, completeTempDoubleArray2);

    return values;
}

From source file:org.rhwlab.BHC.BHCTree.java

public TreeMap<Integer, Double> allPosteriorProb(int maxProbs) {
    TreeMap<Integer, TreeSet<NucleusLogNode>> allCuts = allTreeCuts(maxProbs);
    TreeMap<Integer, Double> ret = new TreeMap<>();
    for (Integer i : allCuts.keySet()) {
        TreeSet<NucleusLogNode> nodes = allCuts.get(i);
        double p = Math.exp(nodes.first().getLogPosterior());
        ret.put(i, p);
    }

    return ret;
}

From source file:com.sfs.whichdoctor.analysis.RevenueAnalysisDAOImpl.java

/**
 * Process the GST rates for a summary.
 *
 * @param summary the summary
 * @return the collection
 */
private final Collection<RevenueBean> processGSTRates(final Collection<RevenueBean> summary) {

    final Collection<RevenueBean> results = new ArrayList<RevenueBean>();
    final TreeMap<Double, Double> gstRates = new TreeMap<Double, Double>();

    // Construct a map with all of the GST rates that are used
    for (RevenueBean revenue : summary) {
        for (Double gstRate : revenue.getGSTValues().keySet()) {
            gstRates.put(gstRate, 0.0);
        }
    }

    // Build a new results collection with the correct number of GST rates
    for (RevenueBean revenue : summary) {
        for (Double gstRate : gstRates.keySet()) {
            // If the GST rate is not part of the revenue analysis add it with 0 value
            if (!revenue.getGSTValues().containsKey(gstRate)) {
                revenue.setGSTValue(gstRate, 0.0);
            }
        }
        results.add(revenue);
    }
    return results;
}

From source file:gda.scan.ConcurrentScan.java

public String reportDevicesByLevel() {
    TreeMap<Integer, Scannable[]> devicesToMoveByLevel = generateDevicesToMoveByLevel(scannableLevels,
            allDetectors);
    // e.g. "| lev4 | lev4a lev4b | *det9 || mon1, mon2
    String sMoved = "";
    List<String> toMonitor = new ArrayList<String>();
    for (Integer level : devicesToMoveByLevel.keySet()) {
        sMoved += " | ";
        List<String> toMove = new ArrayList<String>();
        for (Scannable scn : devicesToMoveByLevel.get(level)) {
            if (scn instanceof Detector) {
                toMove.add("*" + scn.getName());
            } else if (isScannableActuallyToBeMoved(scn)) {
                toMove.add(scn.getName());
            } else {
                // Scannable is acting like a monitor
                toMonitor.add(scn.getName());
            }
        }
        sMoved += StringUtils.join(toMove, ", ");
    }
    String sMonitor = StringUtils.join(toMonitor, ", ");
    return (sMoved + ((sMonitor.equals("")) ? " |" : (" || " + sMonitor + " |"))).trim();
}

From source file:eu.edisonproject.training.wsd.DisambiguatorImpl.java

private Set<Term> tf_idf_Disambiguation(Set<Term> possibleTerms, Set<String> nGrams, String lemma,
        double confidence, boolean matchTitle) throws IOException, ParseException {
    LOGGER.log(Level.FINE, "Loaded {0} for {1}", new Object[] { nGrams.size(), lemma });
    if (nGrams.size() < 7) {
        LOGGER.log(Level.WARNING, "Found only {0} n-grams for {1}. Not enough for disambiguation.",
                new Object[] { nGrams.size(), lemma });
        return null;
    }

    List<List<String>> allDocs = new ArrayList<>();
    Map<CharSequence, List<String>> docs = new HashMap<>();

    for (Term tv : possibleTerms) {
        Set<String> doc = getDocument(tv);
        allDocs.add(new ArrayList<>(doc));
        docs.put(tv.getUid(), new ArrayList<>(doc));
    }

    Set<String> contextDoc = new HashSet<>();
    StringBuilder ngString = new StringBuilder();
    for (String s : nGrams) {
        if (s.contains("_")) {
            String[] parts = s.split("_");
            for (String token : parts) {
                if (token.length() >= 1 && !token.contains(lemma)) {
                    //                        contextDoc.add(token);
                    ngString.append(token).append(" ");
                }
            }
        } else if (s.length() >= 1 && !s.contains(lemma)) {
            ngString.append(s).append(" ");
            //                contextDoc.add(s);
        }
    }
    tokenizer.setDescription(ngString.toString());
    String cleanText = tokenizer.execute();
    lematizer.setDescription(cleanText);
    String lematizedText = lematizer.execute();
    List<String> ngList = Arrays.asList(lematizedText.split(" "));
    contextDoc.addAll(ngList);

    docs.put("context", new ArrayList<>(contextDoc));

    Map<CharSequence, Map<String, Double>> featureVectors = new HashMap<>();
    for (CharSequence k : docs.keySet()) {
        List<String> doc = docs.get(k);
        Map<String, Double> featureVector = new TreeMap<>();
        for (String term : doc) {
            if (!featureVector.containsKey(term)) {
                double tfidf = tfIdf(doc, allDocs, term);
                featureVector.put(term, tfidf);
            }
        }
        featureVectors.put(k, featureVector);
    }

    Map<String, Double> contextVector = featureVectors.remove("context");
    Map<CharSequence, Double> scoreMap = new HashMap<>();
    for (CharSequence key : featureVectors.keySet()) {
        Double similarity = cosineSimilarity(contextVector, featureVectors.get(key));

        for (Term t : possibleTerms) {
            if (t.getUid().equals(key) && matchTitle) {
                stemer.setDescription(t.getLemma().toString());
                String stemTitle = stemer.execute();
                stemer.setDescription(lemma);
                String stemLema = stemer.execute();
                //                    List<String> subTokens = new ArrayList<>();
                //                    if (!t.getLemma().toString().toLowerCase().startsWith("(") && t.getLemma().toString().toLowerCase().contains("(") && t.getLemma().toLowerCase().contains(")")) {
                //                        int index1 = t.getLemma().toString().toLowerCase().indexOf("(") + 1;
                //                        int index2 = t.getLemma().toString().toLowerCase().indexOf(")");
                //                        String sub = t.getLemma().toString().toLowerCase().substring(index1, index2);
                //                        subTokens.addAll(tokenize(sub, true));
                //                    }
                double factor = 0.15;
                if (stemTitle.length() > stemLema.length()) {
                    if (stemTitle.contains(stemLema)) {
                        factor = 0.075;
                    }
                } else if (stemLema.length() > stemTitle.length()) {
                    if (stemLema.contains(stemTitle)) {
                        factor = 0.075;
                    }
                }
                int dist = edu.stanford.nlp.util.StringUtils.editDistance(stemTitle, stemLema);
                similarity = similarity - (dist * factor);
                t.setConfidence(similarity);
            }
        }
        scoreMap.put(key, similarity);
    }

    if (scoreMap.isEmpty()) {
        return null;
    }

    ValueComparator bvc = new ValueComparator(scoreMap);
    TreeMap<CharSequence, Double> sorted_map = new TreeMap(bvc);
    sorted_map.putAll(scoreMap);
    //        System.err.println(sorted_map);

    Iterator<CharSequence> it = sorted_map.keySet().iterator();
    CharSequence winner = it.next();

    Double s1 = scoreMap.get(winner);
    if (s1 < confidence) {
        return null;
    }

    Set<Term> terms = new HashSet<>();
    for (Term t : possibleTerms) {
        if (t.getUid().equals(winner)) {
            terms.add(t);
        }
    }
    if (!terms.isEmpty()) {
        return terms;
    } else {
        LOGGER.log(Level.INFO, "No winner");
        return null;
    }
}
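
A note on the ranking pattern near the end of this example: constructing a TreeMap whose comparator orders keys by their scores in another map is a common sorting trick, but such a comparator is inconsistent with equals, so keys with equal scores can silently collide. When only the top-scoring entry is needed, selecting the maximum directly sidesteps that pitfall. A minimal sketch, assuming a descending ranking as the winner/confidence check implies (names are illustrative):

import java.util.HashMap;
import java.util.Map;

public class MaxEntrySketch {
    public static void main(String[] args) {
        Map<CharSequence, Double> scoreMap = new HashMap<>();
        scoreMap.put("term-a", 0.42);
        scoreMap.put("term-b", 0.87);

        // Pick the highest-scoring key without sorting the whole map
        CharSequence winner = scoreMap.entrySet().stream()
                .max(Map.Entry.comparingByValue())
                .map(Map.Entry::getKey)
                .orElse(null);
        System.out.println(winner); // term-b
    }
}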

From source file:web.diva.server.unused.PCAGenerator.java

/**
 *
 *
 * @return dataset.
 */
private XYDataset createDataset(TreeMap<Integer, PCAPoint> points, int[] subSelectionData, int[] selection,
        boolean zoom, DivaDataset divaDataset) {

    final XYSeriesCollection dataset = new XYSeriesCollection();
    seriesList = new TreeMap<String, XYSeries>();
    seriesList.put("#000000", new XYSeries("#000000"));
    seriesList.put("unGrouped", new XYSeries("LIGHT_GRAY"));

    for (Group g : divaDataset.getRowGroups()) {
        if (g.isActive() && !g.getName().equalsIgnoreCase("all")) {
            seriesList.put(g.getHashColor(), new XYSeries(g.getHashColor()));
        }
    }

    if (!zoom && (selection == null || selection.length == 0) && subSelectionData == null) {
        for (int key : points.keySet()) {
            PCAPoint point = points.get(key);
            if (seriesList.containsKey(point.getColor())) {
                //                    seriesList.get(divaDataset.getGeneColorArr()[point.getGeneIndex()]).add(point.getX(), point.getY());
            } else {
                seriesList.get("unGrouped").add(point.getX(), point.getY());
            }

        }

    } else if (zoom) {
        selectionSet.clear();
        for (int i : selection) {
            selectionSet.add(i);
        }

        for (int x : subSelectionData) {
            PCAPoint point = points.get(x);
            if (selectionSet.contains(point.getGeneIndex())) {
                if (seriesList.containsKey(point.getColor())) {
                    //                        seriesList.get(divaDataset.getGeneColorArr()[point.getGeneIndex()]).add(point.getX(), point.getY());

                } else {

                    seriesList.get("#000000").add(point.getX(), point.getY());
                }

            } else {
                seriesList.get("unGrouped").add(point.getX(), point.getY());
            }
        }

    } else if (subSelectionData != null) {
        selectionSet.clear();
        for (int i : selection) {
            selectionSet.add(i);
        }
        //            for (int key : subSelectionData) {
        //                PCAPoint point = points.get(key);
        //                if (selectionSet.contains(point.getGeneIndex())) {
        //                    if (seriesList.containsKey(divaDataset.getGeneColorArr()[point.getGeneIndex()])) {
        //                        seriesList.get(divaDataset.getGeneColorArr()[point.getGeneIndex()]).add(point.getX(), point.getY());
        //
        //                    } else {
        //
        //                        seriesList.get("#000000").add(point.getX(), point.getY());
        //                    }
        //
        //                } else {
        //
        //                    seriesList.get("unGrouped").add(point.getX(), point.getY());
        //                }
        //
        //            }

    } else //selection without zoom
    {
        selectionSet.clear();
        for (int i : selection) {
            selectionSet.add(i);
        }
        //            for (int key : points.keySet()) {
        //                PCAPoint point = points.get(key);
        //
        //                if (selectionSet.contains(point.getGeneIndex())) {
        //                    if (seriesList.containsKey(divaDataset.getGeneColorArr()[point.getGeneIndex()])) {
        //                        seriesList.get(divaDataset.getGeneColorArr()[point.getGeneIndex()]).add(point.getX(), point.getY());
        //
        //                    } else {
        //
        //                        seriesList.get("#000000").add(point.getX(), point.getY());
        //                    }
        //
        //                } else {
        //
        //                    seriesList.get("unGrouped").add(point.getX(), point.getY());
        //                }
        //
        //            }

    }
    for (XYSeries ser : seriesList.values()) {
        dataset.addSeries(ser);
    }

    return dataset;

}

From source file:com.bombardier.plugin.scheduling.TestScheduler.java

/**
 * Used to get a sub map from a whole map based on the closest number of
 * bytes.
 * 
 * @param sortedBySize
 *            the whole map
 * @param bytes
 *            the number of bytes
 * @return the sub map
 * @since 1.0
 */
private SortedMap<Long, Double> getSubMapForSize(TreeMap<Long, Double> sortedBySize, long bytes) {
    SortedMap<Long, Double> subMap = new TreeMap<Long, Double>();

    List<Long> keyList = new ArrayList<Long>(sortedBySize.keySet());

    Long closest = getClosestBySizeNum(keyList, bytes);

    List<List<Long>> list = splitToFiveLists(keyList);
    for (List<Long> subList : list) {
        if (subList.contains(closest)) {
            subMap = sortedBySize.subMap(subList.get(0), subList.get(subList.size() - 1));
            break;
        }
    }
    return subMap;
}
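
A closing note on this example: it copies keySet() into a list in order to search for the key closest to a byte count. TreeMap itself provides floorKey() and ceilingKey(), which return the nearest keys at or below and at or above a value directly. A minimal sketch of that alternative (map contents are illustrative):

import java.util.TreeMap;

public class ClosestKeySketch {
    public static void main(String[] args) {
        TreeMap<Long, Double> sortedBySize = new TreeMap<>();
        sortedBySize.put(100L, 0.1);
        sortedBySize.put(500L, 0.5);
        sortedBySize.put(900L, 0.9);

        long bytes = 600L;
        Long below = sortedBySize.floorKey(bytes);   // 500
        Long above = sortedBySize.ceilingKey(bytes); // 900
        // Choose whichever existing key is nearer (null checks cover out-of-range targets)
        Long closest = (below == null) ? above
                : (above == null) ? below
                : (bytes - below <= above - bytes) ? below : above;
        System.out.println(closest); // 500
    }
}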