List of usage examples for java.util.TreeMap keySet()
public Set<K> keySet()
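The returned key set iterates in the map's sort order (the keys' natural ordering, or the Comparator the TreeMap was built with), and it is a live view backed by the map: removing a key from the set removes the corresponding entry. A minimal sketch, not taken from the source files below:

import java.util.TreeMap;

public class TreeMapKeySetDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> ages = new TreeMap<String, Integer>();
        ages.put("carol", 41);
        ages.put("alice", 29);
        ages.put("bob", 35);
        // Keys come back in sorted (natural) order: alice, bob, carol
        for (String name : ages.keySet()) {
            System.out.println(name + " -> " + ages.get(name));
        }
        // The key set is a live view: removing from it removes the mapping
        ages.keySet().remove("bob");
        System.out.println(ages); // prints {alice=29, carol=41}
    }
}

The examples below follow the same pattern: keys are iterated in sorted order, and each key is used to look up its value.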
From source file:com.npower.wurfl.ListManager.java
/**
 * Return a TreeMap of device IDs to WurflDevices representing actual devices
 * (i.e. each device element represents a real device plus a group of
 * sub-devices with similar software sub-versions).
 */
public synchronized TreeMap<String, WurflDevice> getActualDeviceElementsList() {
    if (actualDeviceElementsList.isEmpty()) {
        CapabilityMatrix cm = this.getObjectsManager().getCapabilityMatrixInstance();
        TreeMap<String, Element> actualXOMDevices = wu.getActualDeviceElementsList();
        Iterator<String> keys = actualXOMDevices.keySet().iterator();
        while (keys.hasNext()) {
            String key = keys.next();
            Element el = actualXOMDevices.get(key);
            WurflDevice wd = new WurflDevice(el);
            String bn = cm.getCapabilityForDevice(key, "brand_name");
            String mn = cm.getCapabilityForDevice(key, "model_name");
            // Only keep devices with both brand and model defined in the WURFL
            if (!bn.equals("") && !mn.equals("")) {
                wd.setBrandName(bn);
                wd.setModelName(mn);
                actualDeviceElementsList.put(key, wd);
            }
            // else { // just for debugging purposes
            //     log.debug("Discarding actual device: " + wd.getId());
            // }
        }
    }
    return actualDeviceElementsList;
}
From source file:model.plate.ANATestResult.java
public boolean initPosCtrl2(double negControl) {
    // Order titers by id, rejecting ids outside the valid 0..6 range
    final Comparator<DiagnosisConstant.ANA_Titer> titerComparator = new Comparator<DiagnosisConstant.ANA_Titer>() {
        @Override
        public int compare(DiagnosisConstant.ANA_Titer t, DiagnosisConstant.ANA_Titer t1) {
            if (t.getId() < 0 || t.getId() > 6) {
                throw new RuntimeException("Titer: " + t.name());
            }
            if (t1.getId() < 0 || t1.getId() > 6) {
                throw new RuntimeException("Titer: " + t1.name());
            }
            return t.getId() < t1.getId() ? -1 : t.getId() == t1.getId() ? 0 : 1;
        }
    };
    TreeMap<DiagnosisConstant.ANA_Titer, Double> decreasingSignals = new TreeMap<>(titerComparator);
    decreasingSignals.putAll(signals);
    SimpleRegression regression = new SimpleRegression();
    Iterator<DiagnosisConstant.ANA_Titer> it = decreasingSignals.keySet().iterator();
    DiagnosisConstant.ANA_Titer t;
    double signal, posCtrl = getFirstPlateSignal();
    while (it.hasNext()) {
        t = it.next();
        signal = decreasingSignals.get(t);
        regression.addData((double) t.getId(), signal);
        // Record the highest titer whose signal clears either cut-off ratio
        if (signal > posCtrl * PlateConstants.PositiveCutOffRatio
                || signal > negControl * PlateConstants.NegativeCutOffRatio) {
            titer = t;
        }
    }
    if (titer != null && titer.getId() >= DiagnosisConstant.ANA_Titer.ANA_1_320.getId()) {
        positivity = DiagnosisConstant.ANA_Result.POSITIVE;
        System.out.println("found titer for " + plateID + " : " + titer);
    }
    r2 = regression.getRSquare();
    if (r2 < PlateConstants.R2_TH) {
        warningMessage.add(WarningMessage.PositiveControlLinearity.getId());
    }
    if (titer == null || titer.getId() < DiagnosisConstant.ANA_Titer.ANA_1_320.getId()) { // 1:40
        titer = DiagnosisConstant.ANA_Titer.ANA_LESS_1_40;
        for (DiagnosisConstant.ANA_Titer t1 : decreasingSignals.keySet()) {
            System.out.println(plateID + " Control Sample Compare");
            System.out.println(t1 + ": posCtrl=" + decreasingSignals.get(t1) + "\tv.s.\tnegCtrl=" + negControl
                    + " (" + decreasingSignals.get(t1) / negControl + ")");
        }
        positivity = DiagnosisConstant.ANA_Result.NEGATIVE;
        System.out.println("barcode " + this.julien_barcode);
        warningMessage.add(WarningMessage.PositiveNegativeControlComparison.getId());
    } else {
        positivity = DiagnosisConstant.ANA_Result.POSITIVE;
    }
    if (posCtrl < negControl * PlateConstants.CTRL_RATIO_TH) {
        this.warningMessage.add(WarningMessage.PosCtrlFailed.getId());
        return false;
    }
    return true;
}
From source file:com.sfs.whichdoctor.analysis.AgedDebtorsAnalysisDAOImpl.java
/**
 * Post process the aged debtors groupings.
 *
 * @param groupings the groupings
 * @param showZeroBalances the show zero balances
 * @return the tree map
 */
private TreeMap<String, AgedDebtorsGrouping> processGroups(final TreeMap<String, AgedDebtorsGrouping> groupings,
        final boolean showZeroBalances) {
    TreeMap<String, AgedDebtorsGrouping> processedGroupings = new TreeMap<String, AgedDebtorsGrouping>();
    for (String groupName : groupings.keySet()) {
        AgedDebtorsGrouping group = groupings.get(groupName);
        TreeMap<String, AgedDebtorsRecord> records = new TreeMap<String, AgedDebtorsRecord>();
        for (String orderKey : group.getRecords().keySet()) {
            AgedDebtorsRecord record = group.getRecords().get(orderKey);
            if (showZeroBalances || record.getTotal() != 0) {
                records.put(orderKey, record);
                for (int id : record.getPeriodBreakdown().keySet()) {
                    AgedDebtorsPeriod period = record.getPeriodBreakdown().get(id);
                    AgedDebtorsPeriod groupPeriod = group.getPeriodBreakdown(period);
                    // Update the running totals for the period of the grouping
                    groupPeriod.setOutstandingDebitValue(
                            groupPeriod.getOutstandingDebitValue() + period.getOutstandingDebitValue());
                    groupPeriod.setUnallocatedRefundValue(
                            groupPeriod.getUnallocatedRefundValue() + period.getUnallocatedRefundValue());
                    groupPeriod.setUnallocatedCreditValue(
                            groupPeriod.getUnallocatedCreditValue() + period.getUnallocatedCreditValue());
                    groupPeriod.setUnallocatedReceiptValue(
                            groupPeriod.getUnallocatedReceiptValue() + period.getUnallocatedReceiptValue());
                    group.getPeriodBreakdown().put(id, groupPeriod);
                }
                // Update the running totals for the grouping
                group.setOutstandingDebitValue(
                        group.getOutstandingDebitValue() + record.getOutstandingDebitValue());
                group.setUnallocatedRefundValue(
                        group.getUnallocatedRefundValue() + record.getUnallocatedRefundValue());
                group.setUnallocatedCreditValue(
                        group.getUnallocatedCreditValue() + record.getUnallocatedCreditValue());
                group.setUnallocatedReceiptValue(
                        group.getUnallocatedReceiptValue() + record.getUnallocatedReceiptValue());
            }
        }
        group.setRecords(records);
        processedGroupings.put(groupName, group);
    }
    return processedGroupings;
}
From source file:com.sfs.whichdoctor.importer.Importer.java
/**
 * Sets the column map.
 *
 * @param type the type
 * @param data the data
 * @param includeRowsVal the include rows
 */
public final void setColumnMap(final String type, final TreeMap<Integer, TreeMap<Integer, String>> data,
        final TreeMap<Integer, String> includeRowsVal) {
    TreeMap<Integer, String> columnMapVal = new TreeMap<Integer, String>();
    List<String> fields = new ArrayList<String>();
    if (StringUtils.equalsIgnoreCase(type, "exam")) {
        ExamImporter examImporter = new ExamImporter();
        fields = examImporter.getFields();
    }
    // Inspect the first row of data supplied
    Integer rowIndex = data.keySet().iterator().next();
    TreeMap<Integer, String> firstRow = data.get(rowIndex);
    int fieldMatches = 0;
    for (Integer columnNumber : firstRow.keySet()) {
        String dataField = firstRow.get(columnNumber);
        String fieldName = "";
        // Check each known field for a match. More than two matches
        // indicates that the first row holds column field names.
        for (int i = 0; i < fields.size(); i++) {
            String field = fields.get(i);
            if (StringUtils.equalsIgnoreCase(dataField, field)) {
                fieldName = dataField;
                fieldMatches++;
            }
        }
        columnMapVal.put(columnNumber, fieldName);
    }
    if (fieldMatches > 2) {
        // More than two field matches: the first row is a header row,
        // so deselect it from the list of imports
        if (includeRowsVal.containsKey(rowIndex)) {
            includeRowsVal.remove(rowIndex);
        }
    }
    setIncludeRows(includeRowsVal);
    setColumnMap(columnMapVal);
}
From source file:org.apdplat.superword.tools.SentenceScorer.java
public static void toTextFile(TreeMap<Float, Map<String, List<String>>> scores, String fileName) {
    LOGGER.debug("writing to file: " + fileName);
    AtomicInteger bookCount = new AtomicInteger();
    AtomicInteger sentenceCount = new AtomicInteger();
    try (BufferedWriter writer = new BufferedWriter(
            new OutputStreamWriter(new BufferedOutputStream(new FileOutputStream(fileName))))) {
        AtomicInteger i = new AtomicInteger();
        scores.entrySet().forEach(score -> {
            writeLine(writer, "score_(" + i.incrementAndGet() + "/" + scores.size() + "): " + score.getKey());
            Map<String, List<String>> books = score.getValue();
            AtomicInteger j = new AtomicInteger();
            books.entrySet().forEach(book -> {
                writeLine(writer, "\tbook_(" + j.incrementAndGet() + "/" + books.size() + "): " + book.getKey());
                bookCount.incrementAndGet();
                AtomicInteger k = new AtomicInteger();
                book.getValue().forEach(sentence -> {
                    writeLine(writer, "\t\tsentence_(" + k.incrementAndGet() + "/" + book.getValue().size()
                            + "): " + sentence);
                    sentenceCount.incrementAndGet();
                });
            });
        });
        writeLine(writer, "total sentences: " + sentenceCount.get());
    } catch (IOException e) {
        LOGGER.error(e.getMessage(), e);
    }
    LOGGER.debug("score count: " + scores.keySet().size());
    LOGGER.debug("sentence count: " + sentenceCount.get());
    LOGGER.debug("done");
}
From source file:com.npower.wurfl.ListManager.java
/**
 * Find a WurflDevice by brand and model.
 *
 * @param manufacturer
 * @param modelExtID
 * @return the matching device, or null if none is found
 */
public WurflDevice getDeviceByBrand(String manufacturer, String modelExtID) {
    if (StringUtils.isEmpty(manufacturer) || StringUtils.isEmpty(modelExtID)) {
        return null;
    }
    // Translate
    manufacturer = translateBrandName(manufacturer);
    modelExtID = translateModelName(manufacturer, modelExtID);
    // Initializing: lazily build the brand -> (model -> device) lookup map
    if (modelBrandMap.isEmpty()) {
        TreeMap<String, WurflDevice> load = this.getActualDeviceElementsList();
        Iterator<String> keys = load.keySet().iterator();
        while (keys.hasNext()) {
            String key = keys.next();
            WurflDevice wd = load.get(key);
            if (!modelBrandMap.containsKey(wd.getBrandName().toLowerCase())) {
                modelBrandMap.put(wd.getBrandName().toLowerCase(), new HashMap<String, WurflDevice>());
            }
            Map<String, WurflDevice> modelOfBrandMap = modelBrandMap.get(wd.getBrandName().toLowerCase());
            if (!modelOfBrandMap.containsKey(wd.getModelName().toLowerCase())) {
                modelOfBrandMap.put(wd.getModelName().toLowerCase(), wd);
            }
        }
    }
    // Retrieve
    if (modelBrandMap.containsKey(manufacturer.trim().toLowerCase())) {
        Map<String, WurflDevice> modelOfBrandMap = modelBrandMap.get(manufacturer.trim().toLowerCase());
        return modelOfBrandMap.get(modelExtID.trim().toLowerCase());
    }
    return null;
}
From source file:org.ala.spatial.analysis.layers.SitesBySpeciesTabulated.java
/**
 * Write the bioregion tabulation.
 * <p/>
 * Output filenames are name + ".csv" and name + ".json".
 *
 * @param name output filename
 * @param outputDirectory directory for output.
 * @param columns list of the bioregion names.
 * @param bioMap data to write.
 * @return map describing the rows and columns written
 */
private Map writeBioregions(String name, String outputDirectory, String[] columns,
        HashMap<Integer, Integer>[] bioMap) {
    Map map = new HashMap();
    ArrayList array = new ArrayList();
    try {
        FileWriter fw = new FileWriter(outputDirectory + File.separator + name + ".csv");
        // Identify column numbers; the TreeMap keeps them sorted
        TreeMap<Integer, Integer> tm = new TreeMap<Integer, Integer>();
        for (int i = 0; i < columns.length; i++) {
            tm.putAll(bioMap[i]);
        }
        Integer[] cols = new Integer[tm.size()];
        tm.keySet().toArray(cols);
        ArrayList<Integer> c = new ArrayList<Integer>();
        for (int j = 0; j < cols.length; j++) {
            c.add(cols[j]);
            fw.write(",\"" + cols[j] + "\"");
        }
        // Bioregion rows
        for (int i = 0; i < columns.length + 1; i++) {
            if (bioMap[i].size() > 0) {
                ArrayList array2 = new ArrayList();
                String rowname = "Undefined";
                if (i > 0) {
                    rowname = columns[i - 1];
                }
                fw.write("\n\"" + rowname + "\"");
                // Count columns
                for (int j = 0; j < cols.length; j++) {
                    Integer v = bioMap[i].get(cols[j]);
                    fw.write(",");
                    if (v != null) {
                        fw.write(v.toString());
                        array2.add(v.toString());
                    } else {
                        array2.add("");
                    }
                }
                Map m3 = new HashMap();
                m3.put("name", rowname);
                m3.put("row", array2);
                array.add(m3);
            }
        }
        Map m4 = new HashMap();
        m4.put("rows", array);
        m4.put("columns", c);
        map.put(name, m4);
        fw.close();

        fw = new FileWriter(outputDirectory + File.separator + name + ".json");
        JSONObject.writeJSONString(map, fw);
        fw.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return map;
}
From source file:com.opengamma.analytics.financial.interestrate.CashFlowEquivalentCalculator.java
@Override
public AnnuityPaymentFixed visitGenericAnnuity(final Annuity<? extends Payment> annuity,
        final YieldCurveBundle curves) {
    Validate.notNull(curves);
    Validate.notNull(annuity);
    // Aggregate all fixed cash flows by payment time; the TreeMap keeps times sorted
    TreeMap<Double, Double> flow = new TreeMap<Double, Double>();
    Currency ccy = annuity.getCurrency();
    for (final Payment p : annuity.getPayments()) {
        AnnuityPaymentFixed cfe = visit(p, curves);
        for (int loopcf = 0; loopcf < cfe.getNumberOfPayments(); loopcf++) {
            addcf(flow, cfe.getNthPayment(loopcf).getPaymentTime(), cfe.getNthPayment(loopcf).getAmount());
        }
    }
    PaymentFixed[] agregatedCfe = new PaymentFixed[flow.size()];
    int loopcf = 0;
    for (double time : flow.keySet()) {
        agregatedCfe[loopcf++] = new PaymentFixed(ccy, time, flow.get(time), annuity.getDiscountCurve());
    }
    return new AnnuityPaymentFixed(agregatedCfe);
}
From source file:org.ala.spatial.analysis.layers.SitesBySpeciesTabulated.java
/**
 * Write the decades tabulation.
 * <p/>
 * Output filenames are "decades.csv" and "decades.json".
 *
 * @param outputDirectory path to output directory.
 * @param decadeIdx array of decades.
 * @param decMap array of map of values to write.
 * @return map describing the rows and columns written
 */
private Map writeDecades(String outputDirectory, short[] decadeIdx, HashMap<Integer, Integer>[] decMap) {
    Map map = new HashMap();
    ArrayList array = new ArrayList();
    try {
        FileWriter fw = new FileWriter(outputDirectory + File.separator + "decades.csv");
        // Identify column numbers; the TreeMap keeps them sorted
        TreeMap<Integer, Integer> tm = new TreeMap<Integer, Integer>();
        for (int i = 0; i < decMap.length; i++) {
            tm.putAll(decMap[i]);
        }
        Integer[] cols = new Integer[tm.size()];
        tm.keySet().toArray(cols);
        ArrayList<Integer> c = new ArrayList<Integer>();
        for (int j = 0; j < cols.length; j++) {
            c.add(cols[j]);
            fw.write(",\"" + cols[j] + "\"");
        }
        // Decade rows
        for (int i = 0; i < decMap.length; i++) {
            if (decMap[i].size() > 0) {
                ArrayList array2 = new ArrayList();
                int pos = java.util.Arrays.binarySearch(decadeIdx, (short) i);
                // Seek to the first entry for this decade
                while (pos > 0 && decadeIdx[pos - 1] == i) {
                    pos--;
                }
                String rowname = "no year recorded";
                if (i > 0) {
                    rowname = pos + " to " + (pos + 9);
                }
                fw.write("\n\"" + rowname + "\"");
                // Count columns
                for (int j = 0; j < cols.length; j++) {
                    Integer v = decMap[i].get(cols[j]);
                    fw.write(",");
                    if (v != null) {
                        fw.write(v.toString());
                        array2.add(v.toString());
                    } else {
                        array2.add("");
                    }
                }
                Map m3 = new HashMap();
                m3.put("name", rowname);
                m3.put("row", array2);
                array.add(m3);
            }
        }
        Map m4 = new HashMap();
        m4.put("rows", array);
        m4.put("columns", c);
        map.put("decades", m4);
        fw.close();

        fw = new FileWriter(outputDirectory + File.separator + "decades.json");
        JSONObject.writeJSONString(map, fw);
        fw.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return map;
}
From source file:org.ala.spatial.analysis.layers.SitesBySpeciesTabulated.java
/**
 * Write the decade counts tabulation.
 * <p/>
 * Output filenames are "decadecounts.csv" and "decadecounts.json".
 *
 * @param outputDirectory path to output directory.
 * @param decCountMap array of map of values to write.
 * @return map describing the rows and columns written
 */
private Map writeDecadeCounts(String outputDirectory, HashMap<Integer, Integer>[] decCountMap) {
    Map map = new HashMap();
    ArrayList array = new ArrayList();
    try {
        FileWriter fw = new FileWriter(outputDirectory + File.separator + "decadecounts.csv");
        // Identify column numbers; the TreeMap keeps them sorted
        TreeMap<Integer, Integer> tm = new TreeMap<Integer, Integer>();
        for (int i = 1; i < decCountMap.length; i++) {
            tm.putAll(decCountMap[i]);
        }
        Integer[] cols = new Integer[tm.size()];
        tm.keySet().toArray(cols);
        ArrayList<Integer> c = new ArrayList<Integer>();
        for (int j = 0; j < cols.length; j++) {
            c.add(cols[j]);
            fw.write(",\"" + cols[j] + "\"");
        }
        // Decade count rows
        for (int i = 1; i < decCountMap.length; i++) {
            if (decCountMap[i].size() > 0) {
                ArrayList array2 = new ArrayList();
                String rowname = i + " Decades";
                fw.write("\n\"" + rowname + "\"");
                // Count columns
                for (int j = 0; j < cols.length; j++) {
                    Integer v = decCountMap[i].get(cols[j]);
                    fw.write(",");
                    if (v != null) {
                        fw.write(v.toString());
                        array2.add(v.toString());
                    } else {
                        array2.add("");
                    }
                }
                Map m3 = new HashMap();
                m3.put("name", rowname);
                m3.put("row", array2);
                array.add(m3);
            }
        }
        Map m4 = new HashMap();
        m4.put("rows", array);
        m4.put("columns", c);
        map.put("decadecounts", m4);
        fw.close();

        fw = new FileWriter(outputDirectory + File.separator + "decadecounts.json");
        JSONObject.writeJSONString(map, fw);
        fw.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
    return map;
}