Example usage for java.util TreeMap get

List of usage examples for java.util TreeMap get

Introduction

On this page you can find usage examples for java.util TreeMap get.

Prototype

public V get(Object key) 

Document

Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.
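
A minimal, self-contained sketch of the behavior described above (class and key names are illustrative, not taken from any of the projects below):

import java.util.TreeMap;

public class TreeMapGetExample {
    public static void main(String[] args) {
        TreeMap<String, Integer> ages = new TreeMap<String, Integer>();
        ages.put("alice", 30);
        ages.put("bob", 25);

        Integer knownKey = ages.get("alice");   // returns 30
        Integer missingKey = ages.get("carol"); // returns null: no mapping for this key

        System.out.println(knownKey + ", " + missingKey);
    }
}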

Usage

From source file:net.spfbl.core.Analise.java

protected static void dumpClusterCIDR(StringBuilder builder) {
    TreeMap<String, Short[]> map = getClusterMap();
    for (String token : map.keySet()) {
        Short[] dist = map.get(token);
        int spam = dist[1];
        if (spam > 512) {
            int ham = dist[0];
            float total = ham + spam;
            float reputation = spam / total;
            if (reputation > CLUSTER_RED) {
                if (Subnet.isValidCIDR(token)) {
                    if (!Block.contains(token)) {
                        builder.append(token);
                        builder.append(' ');
                        builder.append(ham);
                        builder.append(' ');
                        builder.append(spam);
                        builder.append('\n');
                    }
                }
            }
        }
    }
}
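
The loop above looks each key up again with map.get(token); when both the key and its value are needed, iterating entrySet() yields the same value without the second tree lookup. A minimal sketch of that alternative, assuming the same TreeMap<String, Short[]> shape (class and method names are illustrative):

import java.util.Map;
import java.util.TreeMap;

class ClusterDump {
    // entry.getValue() is exactly what map.get(entry.getKey()) would return.
    static void dumpDistributions(TreeMap<String, Short[]> map, StringBuilder builder) {
        for (Map.Entry<String, Short[]> entry : map.entrySet()) {
            Short[] dist = entry.getValue();
            builder.append(entry.getKey()).append(' ')
                   .append(dist[0]).append(' ')
                   .append(dist[1]).append('\n');
        }
    }
}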

From source file:ch.icclab.cyclops.support.database.influxdb.client.InfluxDBClient.java

/**
 * This method gets the data from the database for a parametrized query, formats it and sends it back as a TSDBData.
 *
 * @param parameterQuery
 * @return
 */
public TSDBData getData(String parameterQuery) {
    //TODO: check the sense of the TSDBData[] and simplify/split the code
    logger.debug("Attempting to get Data");
    InfluxDB influxDB = InfluxDBFactory.connect(this.url, this.username, this.password);
    JSONArray resultArray;
    TSDBData[] dataObj = null;
    ObjectMapper mapper = new ObjectMapper();
    int timeIndex = -1;
    int usageIndex = -1;
    Query query = new Query(parameterQuery, dbName);
    try {
        logger.debug("Attempting to execute the query: " + parameterQuery + " into the db: " + dbName);
        resultArray = new JSONArray(influxDB.query(query).getResults());
        logger.debug("Obtained results: " + resultArray.toString());
        if (!resultArray.isNull(0)) {
            if (resultArray.toString().equals("[{}]")) {
                TSDBData data = new TSDBData();
                data.setColumns(new ArrayList<String>());
                data.setPoints(new ArrayList<ArrayList<Object>>());
                data.setTags(new HashMap());
                return data;
            } else {
                JSONObject obj = (JSONObject) resultArray.get(0);
                JSONArray series = (JSONArray) obj.get("series");
                for (int i = 0; i < series.length(); i++) {
                    String response = series.get(i).toString();
                    response = response.split("values")[0] + "points" + response.split("values")[1];
                    series.put(i, new JSONObject(response));
                }
                dataObj = mapper.readValue(series.toString(), TSDBData[].class);

                //Filter the points for repeated timestamps and add their usage/avg value
                for (int i = 0; i < dataObj.length; i++) {
                    for (int o = 0; o < dataObj[i].getColumns().size(); o++) {
                        if (dataObj[i].getColumns().get(o).equalsIgnoreCase("time"))
                            timeIndex = o;
                        if (dataObj[i].getColumns().get(o).equalsIgnoreCase("usage")
                                || dataObj[i].getColumns().get(o).equalsIgnoreCase("avg"))
                            usageIndex = o;
                    }
                    if (usageIndex > -1) {
                        //If the json belongs to a meter point, filter and add to another if necessary.
                        TreeMap<String, ArrayList> points = new TreeMap<String, ArrayList>();
                        for (ArrayList point : dataObj[i].getPoints()) {
                            if (points.containsKey(point.get(timeIndex))) {
                                String time = (String) point.get(timeIndex);
                                Double usage = Double.parseDouble(points.get(time).get(usageIndex).toString());
                                usage = Double.parseDouble(point.get(usageIndex).toString()) + usage;
                                point.set(usageIndex, usage);
                            }
                            points.put((String) point.get(timeIndex), point);
                        }
                        ArrayList<ArrayList<Object>> result = new ArrayList<ArrayList<Object>>();
                        for (String key : points.keySet()) {
                            result.add(points.get(key));
                        }
                        dataObj[i].setPoints(result);
                    }
                }

            }
        }
    } catch (JSONException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return dataObj[0];
}

From source file:com.eucalyptus.objectstorage.pipeline.handlers.S3Authentication.java

/**
 * Query params are included in cases of Query-String/Presigned-url auth where they are considered just like headers
 *
 * @param httpRequest
 * @param includeQueryParams
 * @return
 */
private static String getCanonicalizedAmzHeaders(MappingHttpRequest httpRequest, boolean includeQueryParams) {
    String result = "";
    Set<String> headerNames = httpRequest.getHeaderNames();
    TreeMap<String, String> amzHeaders = new TreeMap<String, String>();
    for (String headerName : headerNames) {
        String headerNameString = headerName.toLowerCase().trim();
        if (headerNameString.startsWith("x-amz-")) {
            String value = httpRequest.getHeader(headerName).trim();
            String[] parts = value.split("\n");
            value = "";
            for (String part : parts) {
                part = part.trim();
                value += part + " ";
            }
            value = value.trim();
            if (amzHeaders.containsKey(headerNameString)) {
                String oldValue = (String) amzHeaders.remove(headerNameString);
                oldValue += "," + value;
                amzHeaders.put(headerNameString, oldValue);
            } else {
                amzHeaders.put(headerNameString, value);
            }
        }
    }

    if (includeQueryParams) {
        // For query-string auth, header values may include 'x-amz-*' that need to be signed
        for (String paramName : httpRequest.getParameters().keySet()) {
            processHeaderValue(paramName, httpRequest.getParameters().get(paramName), amzHeaders);
        }
    }

    // Build the canonical string
    Iterator<String> iterator = amzHeaders.keySet().iterator();
    while (iterator.hasNext()) {
        String key = iterator.next();
        String value = (String) amzHeaders.get(key);
        result += key + ":" + value + "\n";
    }
    return result;
}
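
The containsKey/remove/put sequence above can also be written with Map.merge (available since Java 8), which appends to an existing value or inserts the new one in a single call. A minimal sketch under the same TreeMap<String, String> assumption (class and method names are illustrative):

import java.util.TreeMap;

class AmzHeaderAccumulator {
    private final TreeMap<String, String> amzHeaders = new TreeMap<String, String>();

    // Append with a comma if the header was already seen, otherwise insert it.
    void add(String headerName, String value) {
        amzHeaders.merge(headerName, value, (oldValue, newValue) -> oldValue + "," + newValue);
    }
}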

From source file:edu.lternet.pasta.dml.database.DatabaseAdapter.java

/**
 * Assigns database field names to all Attribute objects in the AttributeList.
 * The assigned field names comply with the following criteria:
 *   (1) each is a legal database field name
 *   (2) each is unique within this attribute list
 *
 * @param  attributeList  the AttributeList object containing the Attributes
 *                        that correspond to the fields in the database 
 *                        table
 */
public void assignDbFieldNames(AttributeList attributeList) {
    Attribute[] list = attributeList.getAttributes();
    TreeMap<String, String> usedNames = new TreeMap<String, String>();

    int size = list.length;

    for (int i = 0; i < size; i++) {
        Attribute attribute = list[i];
        String attributeName = attribute.getName();
        String legalDbFieldName = getLegalDbFieldName(attributeName);
        String foundName = usedNames.get(legalDbFieldName);

        while (foundName != null) {
            String mangledName = mangleFieldName(legalDbFieldName);
            legalDbFieldName = mangledName;
            foundName = usedNames.get(legalDbFieldName);
        }

        usedNames.put(legalDbFieldName, legalDbFieldName);

        /*
         * Ensure that the field names are surrounded by quotes.
         * (See Bug #2737: 
         *   http://bugzilla.ecoinformatics.org/show_bug.cgi?id=2737
         * )
         */
        String quotedName = DOUBLEQUOTE + legalDbFieldName + DOUBLEQUOTE;

        attribute.setDBFieldName(quotedName);
    }
}
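
Since usedNames stores each name as both key and value, the get()-returns-null check above is effectively a membership test; a set expresses the same uniqueness rule more directly. A sketch of just that step, where mangle() is an illustrative stand-in for the mangleFieldName helper used above:

import java.util.TreeSet;

class FieldNameUniquifier {
    // Keep mangling the candidate until it is unique, then record it as used.
    static String uniqueName(String candidate, TreeSet<String> usedNames) {
        while (usedNames.contains(candidate)) {
            candidate = mangle(candidate);
        }
        usedNames.add(candidate);
        return candidate;
    }

    private static String mangle(String name) {
        return name + "_1"; // placeholder mangling strategy, not the original helper
    }
}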

From source file:edu.umm.radonc.ca_dash.controllers.PieChartController.java

public void updateChart(String dataSet) {
    TreeMap<String, Long> mtxcounts;
    TreeMap<String, Long> dptcounts;
    TreeMap<String, SynchronizedDescriptiveStatistics> mptstats;
    TreeMap<String, SynchronizedDescriptiveStatistics> ptstats;
    pieChart.clear();
    dstats.clear();
    dstatsPerDoc.clear();
    dstatsPerRTM.clear();

    JSONArray labels = new JSONArray();

    if (dataSet.equals("DR")) {
        dptcounts = getFacade().doctorPtCounts(startDate, endDate, selectedFacility, selectedFilters);
        ptstats = getFacade().doctorStats(startDate, endDate, selectedFacility, selectedFilters);
        for (String doctor : dptcounts.keySet()) {
            Long count = dptcounts.get(doctor);
            DoctorStats newItem = new DoctorStats();
            newItem.setTotalPatients(count);
            newItem.setAverageDailyPatients(ptstats.get(doctor));
            dstatsPerDoc.put(doctor, newItem);
            pieChart.set(doctor, newItem.getAverageDailyPatients().getMean());
            dstats.addValue(count);
            try {
                String item = doctor + "<br/>( mean: " + Math.round(newItem.getAverageDailyPatients().getMean())
                        + ", &#963;: " + decf.format(newItem.getAverageDailyPatients().getStandardDeviation())
                        + " )";
                labels.put(item);
            } catch (Exception e) {
                //FIXME
            }
        }

        pieChart.setTitle("Physician Workload: " + df.format(startDate) + " - " + df.format(endDate));
    } else {
        mtxcounts = getFacade().machineTxCounts(startDate, endDate, selectedFacility, selectedFilters);
        mptstats = getFacade().machineStats(startDate, endDate, selectedFacility, selectedFilters);
        pieChart.setTitle("Tx per Machine: " + df.format(startDate) + " - " + df.format(endDate));

        for (String machine : mtxcounts.keySet()) {
            Long count = mtxcounts.get(machine);
            DoctorStats newItem = new DoctorStats();
            newItem.setTotalPatients(count);
            newItem.setAverageDailyPatients(mptstats.get(machine));
            dstatsPerRTM.put(machine, newItem);
            pieChart.set(machine, newItem.getAverageDailyPatients().getMean());
            dstats.addValue(count);
            try {
                String item = machine + "<br/>( mean: "
                        + Math.round(newItem.getAverageDailyPatients().getMean()) + ", &#963;: "
                        + decf.format(newItem.getAverageDailyPatients().getStandardDeviation()) + " )";
                labels.put(item);
            } catch (Exception e) {
                //FIXME
            }
        }
    }

    //pieChart.setLegendPosition("ne");
    pieChart.setShowDataLabels(true);
    pieChart.setShadow(false);
    //pieChart.setDataFormat("value");
    pieChart.setSeriesColors("8C3130, E0AB5D, 4984D0, 2C2A29, A2B85C, BBBEC3, D8C9B6, BD8A79, 3C857A, CD3935");
    pieChart.setExtender("function(){ this.cfg.seriesDefaults.rendererOptions.dataLabels = " + labels.toString()
            + "; " + "this.cfg.seriesDefaults.rendererOptions.dataLabelPositionFactor = 1.21; "
            + "this.cfg.seriesDefaults.rendererOptions.diameter = 600; "
            + "this.cfg.seriesDefaults.rendererOptions.dataLabelThreshold = 0.5;" + "this.cfg.sliceMargin = 3; "
            + "this.legend = {show:false} }");
}

From source file:com.sfs.whichdoctor.webservice.RotationXmlOutputImpl.java

/**
 * Gets the tool count XML.
 *
 * @param type the type
 * @param toolCounts the tool counts
 * @return the tool count xml
 */
private Element getToolCountXml(final String type, final TreeMap<String, ToolCount> toolCounts) {

    Element tctXml = new Element("ToolCountType");
    tctXml.setAttribute("name", type);

    for (String id : toolCounts.keySet()) {
        ToolCount tc = toolCounts.get(id);

        Element tcXml = new Element("ToolCount");
        tcXml.setAttribute("name", tc.getName());
        tcXml.setAttribute("subName", tc.getSubName());
        tcXml.setAttribute("STP", tc.getTrainingProgramShortName());
        tcXml.addContent(String.valueOf(tc.getCount()));

        tctXml.addContent(tcXml);
    }
    return tctXml;
}

From source file:org.alfresco.web.forms.xforms.SchemaUtil.java

public static TreeMap<String, TreeSet<XSTypeDefinition>> buildTypeTree(final XSModel schema) {
    final TreeMap<String, TreeSet<XSTypeDefinition>> result = new TreeMap<String, TreeSet<XSTypeDefinition>>();
    if (LOGGER.isDebugEnabled())
        LOGGER.debug("buildTypeTree " + schema);
    // build the type tree for complex types
    final XSNamedMap types = schema.getComponents(XSConstants.TYPE_DEFINITION);
    for (int i = 0; i < types.getLength(); i++) {
        final XSTypeDefinition t = (XSTypeDefinition) types.item(i);
        if (t.getTypeCategory() == XSTypeDefinition.COMPLEX_TYPE) {
            final XSComplexTypeDefinition type = (XSComplexTypeDefinition) t;
            SchemaUtil.buildTypeTree(type, new TreeSet<XSTypeDefinition>(TYPE_EXTENSION_SORTER), result);
        }
    }

    // build the type tree for simple types
    for (int i = 0; i < types.getLength(); i++) {
        final XSTypeDefinition t = (XSTypeDefinition) types.item(i);
        if (t.getTypeCategory() == XSTypeDefinition.SIMPLE_TYPE) {
            SchemaUtil.buildTypeTree((XSSimpleTypeDefinition) t,
                    new TreeSet<XSTypeDefinition>(TYPE_EXTENSION_SORTER), result);
        }
    }

    // print out type hierarchy for debugging purposes
    if (LOGGER.isDebugEnabled()) {
        for (String typeName : result.keySet()) {
            TreeSet<XSTypeDefinition> descendents = result.get(typeName);
            LOGGER.debug(">>>> for " + typeName + " Descendants=\n ");
            Iterator<XSTypeDefinition> it = descendents.iterator();
            while (it.hasNext()) {
                XSTypeDefinition desc = it.next();
                LOGGER.debug("      " + desc.getName());
            }
        }
    }
    return result;
}

From source file:org.loklak.geo.GeoNames.java

public GeoNames(final File cities1000_zip, final File iso3166json, long minPopulation) throws IOException {

    // load iso3166 info
    this.iso3166toCountry = new HashMap<>();
    try {
        //String jsonString = new String(Files.readAllBytes(iso3166json.toPath()), StandardCharsets.UTF_8);
        ObjectMapper jsonMapper = new ObjectMapper(DAO.jsonFactory);
        JsonNode j = jsonMapper.readTree(iso3166json);
        for (JsonNode n : j) {
            // contains name,alpha-2,alpha-3,country-code,iso_3166-2,region-code,sub-region-code
            String name = n.get("name").textValue();
            String cc = n.get("alpha-2").textValue();
            this.iso3166toCountry.put(cc, name);
        }
    } catch (IOException e) {
        this.iso3166toCountry = new HashMap<String, String>();
    }

    // this is a processing of the cities1000.zip file from http://download.geonames.org/export/dump/

    this.id2loc = new HashMap<>();
    this.hash2ids = new HashMap<>();
    this.stopwordHashes = new HashSet<>();
    this.countryCenter = new HashMap<>();
    Map<String, CountryBounds> countryBounds = new HashMap<>();

    if (cities1000_zip == null || !cities1000_zip.exists()) {
        throw new IOException("GeoNames: file does not exist!");
    }
    ZipFile zf = null;
    BufferedReader reader = null;
    try {
        zf = new ZipFile(cities1000_zip);
        String entryName = cities1000_zip.getName();
        entryName = entryName.substring(0, entryName.length() - 3) + "txt";
        final ZipEntry ze = zf.getEntry(entryName);
        final InputStream is = zf.getInputStream(ze);
        reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8));
    } catch (final IOException e) {
        throw new IOException("GeoNames: Error when decompressing cities1000.zip!", e);
    }

    /* parse these fields:
    ---------------------------------------------------
    00 geonameid         : integer id of record in geonames database
    01 name              : name of geographical point (utf8) varchar(200)
    02 asciiname         : name of geographical point in plain ascii characters, varchar(200)
    03 alternatenames    : alternatenames, comma separated varchar(5000)
    04 latitude          : latitude in decimal degrees (wgs84)
    05 longitude         : longitude in decimal degrees (wgs84)
    06 feature class     : see http://www.geonames.org/export/codes.html, char(1)
    07 feature code      : see http://www.geonames.org/export/codes.html, varchar(10)
    08 country code      : ISO-3166 2-letter country code, 2 characters
    09 cc2               : alternate country codes, comma separated, ISO-3166 2-letter country code, 60 characters
    10 admin1 code       : fipscode (subject to change to iso code), see exceptions below, see file admin1Codes.txt for display names of this code; varchar(20)
    11 admin2 code       : code for the second administrative division, a county in the US, see file admin2Codes.txt; varchar(80)
    12 admin3 code       : code for third level administrative division, varchar(20)
    13 admin4 code       : code for fourth level administrative division, varchar(20)
    14 population        : bigint (8 byte int)
    15 elevation         : in meters, integer
    16 dem               : digital elevation model, srtm3 or gtopo30, average elevation of 3''x3'' (ca 90mx90m) or 30''x30'' (ca 900mx900m) area in meters, integer. srtm processed by cgiar/ciat.
    17 timezone          : the timezone id (see file timeZone.txt) varchar(40)
    18 modification date : date of last modification in yyyy-MM-dd format
    */
    try {
        String line;
        String[] fields;
        while ((line = reader.readLine()) != null) {
            if (line.isEmpty()) {
                continue;
            }
            fields = CommonPattern.TAB.split(line);
            final long population = Long.parseLong(fields[14]);
            if (minPopulation > 0 && population < minPopulation)
                continue;
            final int geonameid = Integer.parseInt(fields[0]);
            Set<String> locnames = new LinkedHashSet<>();
            locnames.add(fields[1]);
            locnames.add(fields[2]);
            for (final String s : CommonPattern.COMMA.split(fields[3]))
                locnames.add(s);
            ArrayList<String> locnamess = new ArrayList<>(locnames.size());
            locnamess.addAll(locnames);
            String cc = fields[8]; //ISO-3166

            final GeoLocation geoLocation = new GeoLocation(Float.parseFloat(fields[4]),
                    Float.parseFloat(fields[5]), locnamess, cc);
            geoLocation.setPopulation(population);
            this.id2loc.put(geonameid, geoLocation);
            for (final String name : locnames) {
                if (name.length() < 4)
                    continue;
                String normalized = normalize(name);
                int lochash = normalized.hashCode();
                List<Integer> locs = this.hash2ids.get(lochash);
                if (locs == null) {
                    locs = new ArrayList<Integer>(1);
                    this.hash2ids.put(lochash, locs);
                }
                if (!locs.contains(geonameid))
                    locs.add(geonameid);
            }

            // update the country bounds
            CountryBounds bounds = countryBounds.get(cc);
            if (bounds == null) {
                bounds = new CountryBounds();
                countryBounds.put(cc, bounds);
            }
            bounds.extend(geoLocation);
        }
        if (reader != null)
            reader.close();
        if (zf != null)
            zf.close();
    } catch (final IOException e) {
    }

    // calculate the center of the countries
    for (Map.Entry<String, CountryBounds> country : countryBounds.entrySet()) {
        this.countryCenter.put(country.getKey(),
                new double[] { (country.getValue().lon_west + country.getValue().lon_east) / 2.0,
                        (country.getValue().lat_north + country.getValue().lat_south) / 2.0 }); // [longitude, latitude]
    }

    // finally create a statistic which names appear very often to have fill-word heuristic
    TreeMap<Integer, Set<Integer>> stat = new TreeMap<>(); // a mapping from number of occurrences of location name hashes to a set of location name hashes
    for (Map.Entry<Integer, List<Integer>> entry : this.hash2ids.entrySet()) {
        int occurrences = entry.getValue().size();
        Set<Integer> hashes = stat.get(occurrences);
        if (hashes == null) {
            hashes = new HashSet<Integer>();
            stat.put(occurrences, hashes);
        }
        hashes.add(entry.getKey());
    }
    // we consider 3/4 of this list as fill-word (approx 300): those with the most occurrences
    int good = stat.size() / 4;
    Iterator<Map.Entry<Integer, Set<Integer>>> i = stat.entrySet().iterator();
    for (int j = 0; j < good; j++)
        i.next(); // 'eat away' the good entries.
    while (i.hasNext()) {
        Set<Integer> morehashes = i.next().getValue();
        this.stopwordHashes.addAll(morehashes);
    }
}
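
The get()/null-check/put() sequence used to build the stat map above can be written more compactly with computeIfAbsent (available since Java 8). A sketch of just that step, assuming the same TreeMap<Integer, Set<Integer>> shape (class and method names are illustrative):

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

class OccurrenceStat {
    // Group location-name hashes by how many locations share each hash.
    static TreeMap<Integer, Set<Integer>> build(Map<Integer, List<Integer>> hash2ids) {
        TreeMap<Integer, Set<Integer>> stat = new TreeMap<Integer, Set<Integer>>();
        for (Map.Entry<Integer, List<Integer>> entry : hash2ids.entrySet()) {
            int occurrences = entry.getValue().size();
            // Replaces the explicit get(), null check and put() from the example.
            stat.computeIfAbsent(occurrences, k -> new HashSet<Integer>()).add(entry.getKey());
        }
        return stat;
    }
}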

From source file:org.cloudata.core.client.Row.java

public boolean deepEquals(Row row) {
    if (row == null || row.key == null) {
        return false;
    }
    if (!key.equals(row.key)) {
        return false;
    }

    if (cells.size() != row.cells.size()) {
        return false;
    }

    for (Map.Entry<String, TreeMap<Cell.Key, Cell>> entry : cells.entrySet()) {
        String columnName = entry.getKey();
        TreeMap<Cell.Key, Cell> columnCells = entry.getValue();

        TreeMap<Cell.Key, Cell> targetColumnCells = row.getCellMap(columnName);

        int columnCellsSize = columnCells == null ? 0 : columnCells.size();
        int targetColumnCellsSize = targetColumnCells == null ? 0 : targetColumnCells.size();
        if (columnCellsSize != targetColumnCellsSize) {
            return false;
        }

        if (columnCellsSize > 0) {
            for (Cell eachCell : columnCells.values()) {
                Cell targetCell = targetColumnCells.get(eachCell.getKey());
                if (!eachCell.equals(targetCell)) {
                    return false;
                }

                List<Cell.Value> values = eachCell.getValues();
                List<Cell.Value> targetValues = targetCell.getValues();

                int valueSize = values == null ? 0 : values.size();
                int targetValueSize = targetValues == null ? 0 : targetValues.size();
                if (valueSize != targetValueSize) {
                    return false;
                }

                for (int i = 0; i < valueSize; i++) {
                    Cell.Value value = values.get(i);
                    Cell.Value targetValue = targetValues.get(i);

                    if (!StringUtils.equalsBytes(value.getBytes(), targetValue.getBytes())) {
                        return false;
                    }

                    if (value.isDeleted() != targetValue.isDeleted()) {
                        return false;
                    }
                }
            }
        }
    }

    return true;
}

From source file:com.ibm.bi.dml.debug.DMLDebuggerFunctions.java

/**
 * Print range of DML program lines interspersed with corresponding runtime instructions
 * @param lines DML script lines of code
 * @param DMLInstMap Mapping between source code line number and corresponding runtime instruction(s)
 * @param range Range of lines of DML code to be displayed
 * @param debug Flag for displaying instructions in debugger test integration
 */
public void printInstructions(String[] lines, TreeMap<Integer, ArrayList<Instruction>> DMLInstMap,
        IntRange range, boolean debug) {
    //Display instructions with corresponding DML line numbers
    for (int lineNumber = range.getMinimumInteger(); lineNumber <= range.getMaximumInteger(); lineNumber++) {
        System.out.format("line %4d: %s\n", lineNumber, lines[lineNumber - 1]);
        if (DMLInstMap.get(lineNumber) != null) {
            for (Instruction currInst : DMLInstMap.get(lineNumber)) {
                if (currInst instanceof CPInstruction) {
                    if (!debug)
                        System.out.format("\t\t id %4d: %s\n", currInst.getInstID(),
                                prepareInstruction(currInst.toString()));
                    else {
                        String[] instStr = prepareInstruction(currInst.toString()).split(" ");
                        System.out.format("\t\t id %4d: %s %s\n", currInst.getInstID(), instStr[0], instStr[1]);
                    }
                } else if (currInst instanceof MRJobInstruction) {
                    MRJobInstruction currMRInst = (MRJobInstruction) currInst;
                    System.out.format("\t\t id %4d: %s\n", currInst.getInstID(),
                            prepareInstruction(currMRInst.getMRString(debug)));
                } else if (currInst instanceof BreakPointInstruction) {
                    BreakPointInstruction currBPInst = (BreakPointInstruction) currInst;
                    System.out.format("\t\t id %4d: %s\n", currInst.getInstID(), currBPInst.toString());
                }
            }
        }
    }
}
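
The loop above calls DMLInstMap.get(lineNumber) twice per line, once for the null check and once to iterate; caching the result in a local variable avoids the second lookup. A minimal sketch of that step, with String standing in for the Instruction type (class and method names are illustrative):

import java.util.ArrayList;
import java.util.TreeMap;

class InstructionPrinter {
    // Look the line's instructions up once and reuse the result.
    static void printLine(TreeMap<Integer, ArrayList<String>> instMap, int lineNumber) {
        ArrayList<String> instructions = instMap.get(lineNumber);
        if (instructions != null) {
            for (String inst : instructions) {
                System.out.format("\t\t %s%n", inst);
            }
        }
    }
}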