List of usage examples for java.util.SortedMap.put
V put(K key, V value);
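As with any Map, put associates the given value with the given key and returns the previous value for that key, or null if there was no mapping. What SortedMap adds is the ordering guarantee: no matter what order entries are put in, iteration visits keys in sorted order. A minimal sketch (class and value names are illustrative):

import java.util.SortedMap;
import java.util.TreeMap;

public class SortedMapPutDemo {
    public static void main(String[] args) {
        SortedMap<String, Integer> scores = new TreeMap<>();
        scores.put("charlie", 3);                      // no previous mapping: returns null
        scores.put("alpha", 1);
        Integer previous = scores.put("charlie", 30);  // replaces the old value
        System.out.println(previous);                  // 3
        System.out.println(scores);                    // {alpha=1, charlie=30} -- key order
    }
}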
From source file: io.lavagna.service.CardLabelRepository.java
public SortedMap<Integer, LabelListValueWithMetadata> findLabeListValueAggregatedByCardLabelId(int projectId) {
    SortedMap<Integer, LabelListValueWithMetadata> m = new TreeMap<>();
    for (LabelListValueWithMetadata l : addMetadata(queries.findListValueByProjectId(projectId))) {
        m.put(l.getId(), l);
    }
    return m;
}
From source file: com.streamsets.pipeline.lib.jdbc.JdbcMultiRowRecordWriter.java
private SortedMap<String, String> getFilteredColumnsToParameters(Map<String, String> parameters, Record record) {
    SortedMap<String, String> filtered = new TreeMap<>();
    for (Map.Entry<String, String> entry : getColumnsToFields().entrySet()) {
        String columnName = entry.getKey();
        String fieldPath = entry.getValue();
        if (record.has(fieldPath)) {
            filtered.put(columnName, parameters.get(columnName));
        }
    }
    return filtered;
}
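A TreeMap is presumably used here so that the filtered column-to-parameter pairs come back in a deterministic order: a SortedMap iterates entries in key order regardless of the order they were put in. A small illustration (column names are made up):

SortedMap<String, String> cols = new TreeMap<>();
cols.put("zip", "?");
cols.put("name", "?");
cols.put("id", "?");
// Iterates as id, name, zip -- stable across runs, unlike a HashMap.
for (Map.Entry<String, String> e : cols.entrySet()) {
    System.out.println(e.getKey() + " -> " + e.getValue());
}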
From source file: de.tudarmstadt.ukp.experiments.argumentation.convincingness.sampling.Step6GraphTransitivityCleaner.java
@SuppressWarnings("unchecked")
public static void printResultStatistics(File xmlFile) throws IllegalAccessException {
    Map<String, Map<String, GraphCleaningResults>> results =
            (Map<String, Map<String, GraphCleaningResults>>) XStreamTools.getXStream().fromXML(xmlFile);

    SortedMap<String, List<GraphCleaningResults>> resultsGroupedByMethod = new TreeMap<>();
    for (Map.Entry<String, Map<String, GraphCleaningResults>> entry : results.entrySet()) {
        for (Map.Entry<String, GraphCleaningResults> e : entry.getValue().entrySet()) {
            if (!resultsGroupedByMethod.containsKey(e.getKey())) {
                resultsGroupedByMethod.put(e.getKey(), new ArrayList<GraphCleaningResults>());
            }
            resultsGroupedByMethod.get(e.getKey()).add(e.getValue());
        }
    }

    String header = null;

    // collect statistics
    for (Map.Entry<String, List<GraphCleaningResults>> entry : resultsGroupedByMethod.entrySet()) {
        List<GraphCleaningResults> value = entry.getValue();
        SortedMap<String, DescriptiveStatistics> stringDescriptiveStatisticsMap =
                collectStatisticsOverGraphCleaningResults(value);

        if (header == null) {
            header = StringUtils.join(stringDescriptiveStatisticsMap.keySet(), "\t");
            System.out.println("\t\t" + header);
        }

        List<Double> means = new ArrayList<>();
        List<Double> stdDevs = new ArrayList<>();
        for (DescriptiveStatistics statistics : stringDescriptiveStatisticsMap.values()) {
            means.add(statistics.getMean());
            stdDevs.add(statistics.getStandardDeviation());
        }

        List<String> meansString = new ArrayList<>();
        for (Double mean : means) {
            meansString.add(String.format(Locale.ENGLISH, "%.2f", mean));
        }
        List<String> stdDevString = new ArrayList<>();
        for (Double stdDev : stdDevs) {
            stdDevString.add(String.format(Locale.ENGLISH, "%.2f", stdDev));
        }

        System.out.println(entry.getKey() + "\tmean\t" + StringUtils.join(meansString, "\t"));
        // System.out.println(entry.getKey() + "\tstdDev\t" + StringUtils.join(stdDevString, "\t"));
    }
}
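The containsKey-then-put idiom above, which lazily creates the per-key list before adding to it, predates Java 8; on Java 8 and later the same grouping step can be written in one line with computeIfAbsent (a sketch, not part of the original code):

// Equivalent to the containsKey/put pair inside the inner loop:
resultsGroupedByMethod
        .computeIfAbsent(e.getKey(), k -> new ArrayList<GraphCleaningResults>())
        .add(e.getValue());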
From source file: com.linkedin.pinot.core.segment.store.SingleFileIndexDirectory.java
private void mapBufferEntries() throws IOException {
    SortedMap<Long, IndexEntry> indexStartMap = new TreeMap<>();
    for (Map.Entry<IndexKey, IndexEntry> columnEntry : columnEntries.entrySet()) {
        long startOffset = columnEntry.getValue().startOffset;
        indexStartMap.put(startOffset, columnEntry.getValue());
    }

    long runningSize = 0;
    List<Long> offsetAccum = new ArrayList<>();
    for (Map.Entry<Long, IndexEntry> offsetEntry : indexStartMap.entrySet()) {
        IndexEntry entry = offsetEntry.getValue();
        runningSize += entry.size;
        if (runningSize >= MAX_ALLOCATION_SIZE) {
            mapAndSliceFile(indexStartMap, offsetAccum, offsetEntry.getKey());
            runningSize = entry.size;
            offsetAccum.clear();
        }
        offsetAccum.add(offsetEntry.getKey());
    }
    if (offsetAccum.size() > 0) {
        mapAndSliceFile(indexStartMap, offsetAccum, offsetAccum.get(0) + runningSize);
    }
}
From source file: de.blizzy.documentr.web.system.SystemController.java
private SortedMap<String, SortedMap<String, String>> getMacroSettingsFromSystemSettings() {
    Set<IMacroDescriptor> descriptors = macroFactory.getDescriptors();
    SortedMap<String, SortedMap<String, String>> allMacroSettings = Maps.newTreeMap();
    for (IMacroDescriptor descriptor : descriptors) {
        Set<MacroSetting> settingDescriptors = descriptor.getSettings();
        if (!settingDescriptors.isEmpty()) {
            SortedMap<String, String> macroSettings = Maps.newTreeMap();
            String macroName = descriptor.getMacroName();
            for (MacroSetting settingDescriptor : settingDescriptors) {
                String key = settingDescriptor.value();
                String value = StringUtils.defaultString(systemSettingsStore.getMacroSetting(macroName, key));
                macroSettings.put(key, value);
            }
            allMacroSettings.put(macroName, macroSettings);
        }
    }
    return allMacroSettings;
}
From source file: com.github.dryangkun.hbase.tidx.hive.HiveHFileOutputFormat.java
@Override
public RecordWriter getHiveRecordWriter(final JobConf jc, final Path finalOutPath,
        Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
        final Progressable progressable) throws IOException {

    // Read configuration for the target path, first from jobconf, then from table properties
    String hfilePath = getFamilyPath(jc, tableProperties);
    if (hfilePath == null) {
        throw new RuntimeException("Please set " + HFILE_FAMILY_PATH + " to target location for HFiles");
    }

    // Target path's last component is also the column family name.
    final Path columnFamilyPath = new Path(hfilePath);
    final String columnFamilyName = columnFamilyPath.getName();
    final byte[] columnFamilyNameBytes = Bytes.toBytes(columnFamilyName);
    final Job job = new Job(jc);
    setCompressOutput(job, isCompressed);
    setOutputPath(job, finalOutPath);

    // Create the HFile writer
    final org.apache.hadoop.mapreduce.TaskAttemptContext tac = ShimLoader.getHadoopShims()
            .newTaskAttemptContext(job.getConfiguration(), progressable);

    final Path outputdir = FileOutputFormat.getOutputPath(tac);
    final org.apache.hadoop.mapreduce.RecordWriter<ImmutableBytesWritable, KeyValue> fileWriter =
            getFileWriter(tac);

    // Individual columns are going to be pivoted to HBase cells,
    // and for each row, they need to be written out in order
    // of column name, so sort the column names now, creating a
    // mapping to their column position. However, the first
    // column is interpreted as the row key.
    String columnList = tableProperties.getProperty("columns");
    String[] columnArray = columnList.split(",");
    final SortedMap<byte[], Integer> columnMap = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    int i = 0;
    for (String columnName : columnArray) {
        if (i != 0) {
            columnMap.put(Bytes.toBytes(columnName), i);
        }
        ++i;
    }

    return new RecordWriter() {
        @Override
        public void close(boolean abort) throws IOException {
            try {
                fileWriter.close(null);
                if (abort) {
                    return;
                }
                // Move the hfiles file(s) from the task output directory to the
                // location specified by the user.
                FileSystem fs = outputdir.getFileSystem(jc);
                fs.mkdirs(columnFamilyPath);
                Path srcDir = outputdir;
                for (;;) {
                    FileStatus[] files = fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER);
                    if ((files == null) || (files.length == 0)) {
                        throw new IOException("No family directories found in " + srcDir);
                    }
                    if (files.length != 1) {
                        throw new IOException("Multiple family directories found in " + srcDir);
                    }
                    srcDir = files[0].getPath();
                    if (srcDir.getName().equals(columnFamilyName)) {
                        break;
                    }
                }
                for (FileStatus regionFile : fs.listStatus(srcDir, FileUtils.STAGING_DIR_PATH_FILTER)) {
                    fs.rename(regionFile.getPath(), new Path(columnFamilyPath, regionFile.getPath().getName()));
                }
                // Hive actually wants a file as task output (not a directory), so
                // replace the empty directory with an empty file to keep it happy.
                fs.delete(outputdir, true);
                fs.createNewFile(outputdir);
            } catch (InterruptedException ex) {
                throw new IOException(ex);
            }
        }

        private void writeText(Text text) throws IOException {
            // Decompose the incoming text row into fields.
            String s = text.toString();
            String[] fields = s.split("\u0001");
            assert (fields.length <= (columnMap.size() + 1));
            // First field is the row key.
            byte[] rowKeyBytes = Bytes.toBytes(fields[0]);
            // Remaining fields are cells addressed by column name within row.
            for (Map.Entry<byte[], Integer> entry : columnMap.entrySet()) {
                byte[] columnNameBytes = entry.getKey();
                int iColumn = entry.getValue();
                String val;
                if (iColumn >= fields.length) {
                    // trailing blank field
                    val = "";
                } else {
                    val = fields[iColumn];
                    if ("\\N".equals(val)) {
                        // omit nulls
                        continue;
                    }
                }
                byte[] valBytes = Bytes.toBytes(val);
                KeyValue kv = new KeyValue(rowKeyBytes, columnFamilyNameBytes, columnNameBytes, valBytes);
                try {
                    fileWriter.write(null, kv);
                } catch (IOException e) {
                    LOG.error("Failed while writing row: " + s);
                    throw e;
                } catch (InterruptedException ex) {
                    throw new IOException(ex);
                }
            }
        }

        private void writePut(PutWritable put) throws IOException {
            ImmutableBytesWritable row = new ImmutableBytesWritable(put.getPut().getRow());
            SortedMap<byte[], List<Cell>> cells = put.getPut().getFamilyCellMap();
            for (Map.Entry<byte[], List<Cell>> entry : cells.entrySet()) {
                Collections.sort(entry.getValue(), new CellComparator());
                for (Cell c : entry.getValue()) {
                    try {
                        fileWriter.write(row, KeyValueUtil.copyToNewKeyValue(c));
                    } catch (InterruptedException e) {
                        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
                    }
                }
            }
        }

        @Override
        public void write(Writable w) throws IOException {
            if (w instanceof Text) {
                writeText((Text) w);
            } else if (w instanceof PutWritable) {
                writePut((PutWritable) w);
            } else {
                throw new IOException("Unexpected writable " + w);
            }
        }
    };
}
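Note the comparator handed to the TreeMap above: byte[] does not implement Comparable, so a TreeMap keyed by byte arrays needs an explicit Comparator (here HBase's Bytes.BYTES_COMPARATOR, which orders arrays lexicographically by unsigned byte value). Without one, the very first put throws. A stripped-down sketch:

SortedMap<byte[], Integer> broken = new TreeMap<byte[], Integer>();
// broken.put(Bytes.toBytes("a"), 1);  // ClassCastException: byte[] is not Comparable

SortedMap<byte[], Integer> ok = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
ok.put(Bytes.toBytes("a"), 1);         // fine: keys kept in lexicographic byte order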
From source file: de.micromata.genome.gwiki.umgmt.GWikiUserAuthorization.java
public boolean collectRights(Matcher<String> m, Map<String, GWikiRight> systemRights,
        SortedMap<String, GWikiRight> ret) {
    if (m instanceof EqualsMatcher) {
        EqualsMatcher<String> em = (EqualsMatcher<String>) m;
        ret.put(em.getOther(), getRightFromString(em.getOther(), systemRights));
        return true;
    }
    if (m instanceof BooleanListMatcher) {
        BooleanListMatcher<String> bm = (BooleanListMatcher<String>) m;
        List<Matcher<String>> ml = bm.getMatcherList();
        for (int i = 0; i < ml.size(); ++i) {
            if (collectRights(ml.get(i), systemRights, ret) == false) {
                return false;
            }
        }
        return true;
    }
    if (m instanceof TreeStateMatcher) {
        TreeStateMatcher<String> tm = (TreeStateMatcher<String>) m;
        if (tm.isValue() == false) {
            return false;
        }
        return collectRights(tm.getNested(), systemRights, ret);
    }
    return false;
}
From source file: playground.benjamin.scenarios.zurich.analysis.charts.BkDeltaUtilsQuantilesChart.java
/**
 * Depending on what is to be plotted, this method has to be adapted.
 *
 * @param populationInformation map from Id to Row (all desired information)
 * @return map from income to the chosen information (e.g. scoreDiff)
 */
private SortedMap<Double, Double> personalIncomeInQuantiles2Scores(
        SortedMap<Id, WinnerLoserAnalysisRow> populationInformation) {
    SortedMap<Double, Double> result = new TreeMap<Double, Double>();
    // Iterate over the map and pull the desired values out of each row
    // (personal income and score difference).
    for (Entry<Id, WinnerLoserAnalysisRow> entry : populationInformation.entrySet()) {
        WinnerLoserAnalysisRow winnerLoserAnalysisRow = entry.getValue();
        Double personalIncome = winnerLoserAnalysisRow.getPersonalIncome();
        Double scoreDiff = winnerLoserAnalysisRow.getScoreDiff();
        result.put(personalIncome, scoreDiff);
    }
    return result;
}
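One caveat with keying the result map by income: put replaces any existing mapping for an equal key, so if two persons have exactly the same personal income, the later entry silently overwrites the earlier score difference. A tiny sketch of the effect (values are made up):

SortedMap<Double, Double> result = new TreeMap<Double, Double>();
result.put(42000.0, 1.5);
result.put(42000.0, -0.3);   // replaces the previous mapping
System.out.println(result);  // {42000.0=-0.3} -- only one entry survives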
From source file: com.github.fge.jsonschema.processors.build.ValidatorBuilder.java
/**
 * Process the input
 *
 * @param report the report to use while processing
 * @param input the input for this processor
 * @return the output
 * @throws ProcessingException processing failed
 */
@Override
public ValidatorList process(final ProcessingReport report, final SchemaDigest input)
        throws ProcessingException {
    final SortedMap<String, KeywordValidator> map = Maps.newTreeMap();

    String keyword;
    JsonNode digest;
    KeywordValidator validator;
    Constructor<? extends KeywordValidator> constructor;

    for (final Map.Entry<String, JsonNode> entry : input.getDigests().entrySet()) {
        keyword = entry.getKey();
        digest = entry.getValue();
        constructor = constructors.get(keyword);
        validator = buildKeyword(constructor, digest);
        map.put(keyword, validator);
    }

    return new ValidatorList(input.getContext(), map.values());
}
From source file: playground.benjamin.scenarios.zurich.analysis.charts.BkDeltaUtilsChart.java
/**
 * Depending on what is to be plotted, this method has to be adapted.
 *
 * @param populationInformation map from Id to Row (all desired information)
 * @return map from income to the chosen information (e.g. scoreDiff)
 */
private SortedMap<Double, Double> personalIncome2Scores(
        SortedMap<Id, WinnerLoserAnalysisRow> populationInformation) {
    SortedMap<Double, Double> result = new TreeMap<Double, Double>();
    // Iterate over the map and pull the desired values out of each row
    // (personal income and score difference).
    for (Entry<Id, WinnerLoserAnalysisRow> entry : populationInformation.entrySet()) {
        WinnerLoserAnalysisRow winnerLoserAnalysisRow = entry.getValue();
        Double personalIncome = winnerLoserAnalysisRow.getPersonalIncome();
        Double scoreDiff = winnerLoserAnalysisRow.getScoreDiff();
        result.put(personalIncome, scoreDiff);
    }
    return result;
}