Example usage for java.util TreeMap entrySet

List of usage examples for java.util TreeMap entrySet

Introduction

In this page you can find the example usage for java.util TreeMap entrySet.

Prototype

private transient EntrySet entrySet;

To view the source code for java.util TreeMap entrySet, click the Source Link below.

Click Source Link

Document

Fields initialized to contain an instance of the entry set view the first time this view is requested.

Usage

From source file:org.apache.geode.BundledJarsJUnitTest.java

@Test
public void verifyBundledJarsHaveNotChanged() throws IOException {
    // Strip version suffixes from every bundled jar name, collecting them sorted.
    TreeMap<String, String> sortedJars = getBundledJars();
    Set<String> bundledJarNames = sortedJars.entrySet().stream()
            .map(entry -> removeVersion(entry.getKey()))
            .collect(Collectors.toCollection(TreeSet::new));

    // Persist the current jar list so a failing run can be diffed by hand.
    Files.write(Paths.get("bundled_jars.txt"), bundledJarNames);

    // Jars present now but absent from the expected list.
    TreeSet<String> newJars = new TreeSet<>(bundledJarNames);
    newJars.removeAll(expectedJars);
    // Jars expected but no longer bundled.
    TreeSet<String> missingJars = new TreeSet<>(expectedJars);
    missingJars.removeAll(bundledJarNames);

    String message = "The bundled jars have changed. Please make sure you update the licence and notice"
            + "\nas described in https://cwiki.apache.org/confluence/display/GEODE/License+Guide+for+Contributors"
            + "\nWhen fixed, copy geode-assembly/build/test/bundled_jars.txt"
            + "\nto src/test/resources/expected_jars.txt" + "\nRemoved Jars\n--------------\n"
            + String.join("\n", missingJars) + "\n\nAdded Jars\n--------------\n" + String.join("\n", newJars)
            + "\n\n";

    assertTrue(message, expectedJars.equals(bundledJarNames));

}

From source file:ANNFileDetect.GraphingClass.java

/**
 * Builds an XY line chart from a nested map of series data.
 *
 * @param finalMap raw map; assumes keys are String series names and values are
 *                 {@code TreeMap<Double, Integer>} of x -> y points — TODO confirm with callers
 * @return a configured JFreeChart line chart with square point markers
 */
public JFreeChart RenderFiles(TreeMap finalMap) {
    XYSeriesCollection ds = new XYSeriesCollection();
    // Enhanced for-loop replaces the raw manual Iterator; one XY series per outer entry.
    for (Object rawEntry : finalMap.entrySet()) {
        Entry thisentry = (Entry) rawEntry;

        String key = (String) thisentry.getKey();
        @SuppressWarnings("unchecked") // raw signature kept for source compatibility with callers
        TreeMap<Double, Integer> tm = (TreeMap<Double, Integer>) thisentry.getValue();
        XYSeries series = new XYSeries(key);
        for (Map.Entry<Double, Integer> entry : tm.entrySet()) {
            series.add(entry.getKey(), entry.getValue());
        }
        ds.addSeries(series);
    }
    JFreeChart chart = ChartFactory.createXYLineChart("Test graph", "Value", "Times", ds,
            PlotOrientation.VERTICAL, true, true, false);

    // Render each series with small square markers in addition to the line.
    XYLineAndShapeRenderer rend = (XYLineAndShapeRenderer) chart.getXYPlot().getRenderer();
    rend.setBaseShape(new Rectangle(-2, -2, 4, 4));
    rend.setBaseShapesVisible(true);
    rend.setBaseSeriesVisible(true);
    chart.getXYPlot().setRenderer(rend);
    return chart;
}

From source file:main.java.workload.WorkloadExecutor.java

/**
 * Streams one transaction into the workload: either births a brand-new transaction
 * or repeats/retains an existing one, then updates the sliding-window impact metrics.
 *
 * @param db      database the transaction's tuples are drawn from
 * @param cluster cluster used for span/incidence bookkeeping
 * @param wrl     workload definition supplying tuple sets per transaction type
 * @param wb      workload batch holding the live transaction map
 * @return the transaction that was created or repeated
 */
public static Transaction streamOneTransaction(Database db, Cluster cluster, Workload wrl, WorkloadBatch wb) {

    Set<Integer> trTupleSet = null;
    Set<Integer> trDataSet = null;

    int min = 0, i = 0, n = 0, tr_id = 0;
    // Sample which transaction type to stream next.
    int type = trDistribution.sample();

    Transaction tr = null;

    if (!wb.getTrMap().containsKey(type))
        wb.getTrMap().put(type, new TreeMap<Integer, Transaction>());

    // Dice roll deciding between a new transaction and repeating an old one.
    double rand_val = Global.rand.nextDouble();
    int toBeRemovedKey = -1;

    /**
     *  Implementing the new Workload Generation model 
     *  (Finalised as per November 20, 2014 and later improved on February 13-14, 2015)      
     */
    ++Global.global_trCount;

    // Transaction birth: no transaction of this type exists yet, or the
    // workload-change probability fired.
    if (wb.getTrMap().get(type).isEmpty() || rand_val <= Global.percentageChangeInWorkload) {

        trTupleSet = wrl.getTrTupleSet(db, type);
        trDataSet = Workload.getTrDataSet(db, cluster, wb, trTupleSet);

        ++Global.global_trSeq;
        tr = new Transaction(Global.global_trSeq, type, trDataSet, Sim.time());

        // Add the incident transaction id
        wb.addIncidentTrId(cluster, trDataSet, Global.global_trSeq);

        // Add the newly created Transaction in the Workload Transaction map   
        wb.getTrMap().get(type).put(tr.getTr_id(), tr);

        // New improvements------------------------------------------------------------------------------
        double initial_period = (double) WorkloadExecutor.uNmax; // initialisation         
        tr.setTr_period(initial_period);

        perfm.Period.put(tr.getTr_id(), initial_period);
        Time.put(tr.getTr_id(), Sim.time());

        // Transaction repetition and retention of old transaction
    } else {

        ArrayList<Integer> idx2_id = new ArrayList<Integer>();
        ArrayList<Integer> idx_value = new ArrayList<Integer>();
        ArrayList<Integer> uT = new ArrayList<Integer>();

        // idx2 re-sorts the index map by value via the ValueComparator.
        TreeMap<Integer, Integer> idx2 = new TreeMap<Integer, Integer>(new ValueComparator<Integer>(idx));
        idx2.putAll(idx);

        min = Math.min(idx.size(), uNmax); // uNmax or uNmaxT

        // Collect the first 'min' keys in value-sorted order.
        i = 0;
        Iterator<Entry<Integer, Integer>> itr = idx2.entrySet().iterator();
        while (i < min) {
            idx2_id.add(itr.next().getKey());
            ++i;
        }

        // Deleting old Transactions
        if (idx2.size() > min) {
            toBeRemovedKey = idx2.lastKey();

            Transaction tr_old = wb.getTransaction(toBeRemovedKey);
            tr_old.calculateSpans(cluster);

            wb.removeTransaction(cluster, tr_old);
            idx.remove(toBeRemovedKey);
        }

        i = 0;
        while (i < idx2_id.size()) {
            idx_value.add(idx.get(idx2_id.get(i)));
            ++i;
        }

        // Map index values back to transaction ids via T (values are 1-based).
        i = 0;
        while (i < idx_value.size()) {
            uT.add(T.get(idx_value.get(i) - 1));
            ++i;
        }

        // Pick one candidate uniformly at random (or the only one).
        if (uT.size() == 1)
            n = 0;
        else
            n = Global.rand.nextInt(uT.size());

        tr_id = uT.get(n);

        tr = wb.getTransaction(tr_id);
        tr.setProcessed(false);

        // New improvements------------------------------------------------------------------------------
        // Exponentially weighted moving average of the repetition period.
        double prev_period = perfm.Period.get(tr.getTr_id());
        double prev_time = Time.get(tr.getTr_id());

        double new_period = Global.expAvgWt * prev_period + (1 - Global.expAvgWt) * (Sim.time() - prev_time);

        tr.setTr_period(new_period);

        perfm.Period.remove(tr.getTr_id());
        perfm.Period.put(tr.getTr_id(), new_period);

        Time.remove(tr.getTr_id());
        Time.put(tr.getTr_id(), Sim.time());

    } // end-if-else()

    // Calculate latest Span
    tr.calculateSpans(cluster);

    // Update Idt
    tr.calculateIdt();

    if (perfm.Span.containsKey(tr.getTr_id()))
        perfm.Span.remove(tr.getTr_id());

    perfm.Span.put(tr.getTr_id(), tr.getTr_serverSpanCost());

    // Create an index entry for each newly created Transaction      
    idx.put(tr.getTr_id(), Global.global_trCount);
    T.add(tr.getTr_id());

    // New improvements------------------------------------------------------------------------------
    if (Global.global_trCount > Global.observationWindow) {

        _i = Global.global_trCount; // _i ~ Sim.time() 
        _W = Global.observationWindow; // _W ~ time 

        // Unique transaction ids; seeded with the full history in T, so the
        // window loop below only re-confirms ids already present.
        HashSet<Integer> unq = new HashSet<Integer>(T);
        // Bug fix: the original loop read "for (int _n = (_i - _W); n <= _i; n++)",
        // testing and incrementing the unrelated local 'n' so '_n' never advanced.
        // Iterate with '_n', exclusive of _i, since T holds _i elements (indices 0.._i-1).
        for (int _n = (_i - _W); _n < _i; _n++) {
            unq.add(T.get(_n));
        }

        // Captures the number of total unique transaction for this observation window
        perfm.Unqlen.put((_i - _W), unq.size());

        // Calculate the impact of distributed transaction per transaction basis               
        double sum_of_span_by_period = 0.0;
        sum_of_one_by_period = 0.0;

        Iterator<Integer> unq_itr = unq.iterator();
        while (unq_itr.hasNext()) {
            int unq_T = unq_itr.next();

            int span = perfm.Span.get(unq_T);
            double period = perfm.Period.get(unq_T);

            double span_by_period = span / period; // Frequency = 1/Period (f=1/t) per unit time (i.e. 1 second)
            double one_by_period = 1 / period; // Frequency = 1/Period (f=1/t) per unit time (i.e. 1 second)

            sum_of_span_by_period += span_by_period;
            sum_of_one_by_period += one_by_period;
        }

        double i_dt = (sum_of_span_by_period) / (Global.servers * sum_of_one_by_period);
        perfm.I_Dt.put((_i - _W), i_dt);

        if (Double.isNaN(i_dt))
            currentIDt = 0;
        else
            currentIDt = i_dt;

        // Reset repartitioning cooling off period
        if (WorkloadExecutor.repartitioningCoolingOff
                && Sim.time() >= WorkloadExecutor.RepartitioningCoolingOffPeriod) {

            WorkloadExecutor.repartitioningCoolingOff = false;

            Global.LOGGER.info("-----------------------------------------------------------------------------");
            Global.LOGGER.info("Simulation time: " + Sim.time() / (double) Global.observationWindow + " hrs");
            Global.LOGGER.info("Repartitioning cooling off period ends.");
            Global.LOGGER
                    .info("System will now check whether another repartitioning is required at this moment.");
            Global.LOGGER.info("Current IDt: " + currentIDt);
            Global.LOGGER.info("User defined IDt threshold: " + Global.userDefinedIDtThreshold);

            if (currentIDt < Global.userDefinedIDtThreshold) {
                Global.LOGGER.info("Repartitioning is not required at this moment.");

                //This is to disable on-demand atomic repartitioning for A-ARHC only
                if (Global.adaptive) {
                    Global.LOGGER.info("Disabling on-demand atomic repartitioning for A-ARHC ...");
                    WorkloadExecutor.isAdaptive = false;
                }

                Global.LOGGER.info("Continuing transaction processing ...");
            }
        }

        perfm.time.put((_i - _W), Sim.time());
    }

    // Add a hyperedge to workload hypergraph
    wb.addHGraphEdge(cluster, tr);

    // Collect transactional streams if data stream mining is enabled
    if (Global.streamCollection)
        Global.dsm.collectStream(cluster, tr);

    return tr;
}

From source file:com.alibaba.rocketmq.tools.command.broker.BrokerStatsSubCommand.java

@Override
public void execute(CommandLine commandLine, Options options) {
    // Prints the runtime stats of the broker given by the -b option, sorted by key.
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt();

    // Unique instance name so concurrent tool invocations don't collide.
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));

    try {
        defaultMQAdminExt.start();

        String brokerAddr = commandLine.getOptionValue('b').trim();

        KVTable kvTable = defaultMQAdminExt.fetchBrokerRuntimeStats(brokerAddr);

        // Copy into a TreeMap so stats print in key order.
        TreeMap<String, String> tmp = new TreeMap<String, String>();
        tmp.putAll(kvTable.getTable());

        // Enhanced for-loop replaces the manual Iterator; %n replaces \n for a
        // platform-independent newline (consistent with BrokerStatusSubCommand).
        for (Entry<String, String> next : tmp.entrySet()) {
            System.out.printf("%-32s: %s%n", next.getKey(), next.getValue());
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        defaultMQAdminExt.shutdown();
    }
}

From source file:com.alibaba.rocketmq.tools.command.broker.BrokerStatusSubCommand.java

@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) {
    // Prints the runtime stats of the broker given by the -b option, sorted by key.
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);

    // Unique instance name so concurrent tool invocations don't collide.
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));

    try {
        defaultMQAdminExt.start();

        String brokerAddr = commandLine.getOptionValue('b').trim();

        KVTable kvTable = defaultMQAdminExt.fetchBrokerRuntimeStats(brokerAddr);

        // Copy into a TreeMap so stats print in key order.
        TreeMap<String, String> tmp = new TreeMap<String, String>();
        tmp.putAll(kvTable.getTable());

        // Enhanced for-loop replaces the manual Iterator; %n replaces \n for a
        // platform-independent newline (consistent with printBrokerRuntimeStats).
        for (Entry<String, String> next : tmp.entrySet()) {
            System.out.printf("%-32s: %s%n", next.getKey(), next.getValue());
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        defaultMQAdminExt.shutdown();
    }
}

From source file:mrmc.chart.BarGraph.java

/**
 * Converts an x-y data mapping into the CategoryDataset format the chart consumes.
 *
 * @param treeMap Mapping of x-y data (category label -> value)
 * @param xaxis Label for x-axis
 * @return Chart data in CategoryDataset format
 */
private CategoryDataset createDataset(TreeMap<String, Double> treeMap, String xaxis) {
    final DefaultCategoryDataset dataset = new DefaultCategoryDataset();
    // One category row per map entry, iterated in key order.
    treeMap.forEach((label, count) -> dataset.addValue(count.doubleValue(), label + "", xaxis));
    return dataset;
}

From source file:script.manager.Printer.java

/** Logs every candidate script as "<right-aligned number>| <name>", one line each. */
private void printScriptNo() {
    TreeMap<Integer, String> candidate = CandidateIO.readCandidate();
    candidate.forEach((no, name) -> LOG.log(Level.INFO, "{0}| {1}",
            new Object[] { StringUtils.leftPad(Integer.toString(no), 3), name }));
}

From source file:ark.util.CounterTable.java

/**
 * Serialises the counter table as a JSON object mapping each item's string form
 * to its count, emitted in sorted-count order.
 */
public JSONObject toJSON() {
    JSONObject json = new JSONObject();

    TreeMap<Integer, List<T>> sortedCounts = getSortedCounts();

    try {
        for (Entry<Integer, List<T>> countEntry : sortedCounts.entrySet()) {
            Integer count = countEntry.getKey();
            // Every item sharing this count gets its own key in the JSON object.
            for (T item : countEntry.getValue()) {
                json.put(item.toString(), count);
            }
        }
    } catch (JSONException e) {
        e.printStackTrace();
    }

    return json;
}

From source file:org.apache.rocketmq.tools.command.broker.BrokerStatusSubCommand.java

/**
 * Fetches and prints the runtime stats of one broker, one "key: value" line per
 * stat in key order; when printBroker is true each line is prefixed with the
 * broker address.
 */
public void printBrokerRuntimeStats(final DefaultMQAdminExt defaultMQAdminExt, final String brokerAddr,
        final boolean printBroker) throws InterruptedException, MQBrokerException, RemotingTimeoutException,
        RemotingSendRequestException, RemotingConnectException {
    KVTable kvTable = defaultMQAdminExt.fetchBrokerRuntimeStats(brokerAddr);

    // Copy into a TreeMap so the stats print sorted by key.
    TreeMap<String, String> sortedStats = new TreeMap<String, String>(kvTable.getTable());

    for (Entry<String, String> stat : sortedStats.entrySet()) {
        if (printBroker) {
            System.out.printf("%-24s %-32s: %s%n", brokerAddr, stat.getKey(), stat.getValue());
        } else {
            System.out.printf("%-32s: %s%n", stat.getKey(), stat.getValue());
        }
    }
}

From source file:com.ibm.g11n.pipeline.example.MultiBundleCSVFilter.java

@Override
public void write(OutputStream outStream, Map<String, LanguageBundle> languageBundles, FilterOptions options)
        throws IOException, ResourceFilterException {
    // Emit all bundles as one RFC 4180 CSV with a module/key/value header row.
    BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(outStream, StandardCharsets.UTF_8));
    CSVPrinter printer = CSVFormat.RFC4180.withHeader("module", "key", "value").print(writer);

    // TreeMap copy makes the bundles iterate in module-name order.
    TreeMap<String, LanguageBundle> sortedBundles = new TreeMap<>(languageBundles);

    for (Entry<String, LanguageBundle> bundleEntry : sortedBundles.entrySet()) {
        String module = bundleEntry.getKey();
        for (ResourceString resString : bundleEntry.getValue().getSortedResourceStrings()) {
            printer.printRecord(module, resString.getKey(), resString.getValue());
        }
    }
    printer.flush();
}