List of usage examples for java.util.TreeMap.get
public V get(Object key)
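TreeMap.get returns the value mapped to the key, or null if the key is absent; the lookup is O(log n) and follows the map's ordering (the keys' natural ordering or a supplied Comparator) rather than equals/hashCode. A minimal, self-contained sketch:

import java.util.TreeMap;

public class TreeMapGetDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> map = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
        map.put("Alpha", 1);
        map.put("beta", 2);

        // Lookup follows the map's comparator, so case is ignored here.
        System.out.println(map.get("ALPHA")); // 1
        // get returns null when no mapping exists (or the value is null).
        System.out.println(map.get("gamma")); // null
    }
}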
From source file:org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.java
private void verifyDir(DistributedFileSystem dfs, Path dir) throws IOException {
    FileStatus[] fileArr = dfs.listStatus(dir);
    TreeMap<Path, Boolean> fileMap = new TreeMap<Path, Boolean>();
    for (FileStatus file : fileArr) {
        fileMap.put(file.getPath(), Boolean.valueOf(file.isDir()));
    }
    for (Iterator<Path> it = fileMap.keySet().iterator(); it.hasNext();) {
        Path path = it.next();
        boolean isDir = fileMap.get(path);
        String pathName = path.toUri().getPath();
        overallChecksum.update(pathName.getBytes());
        if (isDir) {
            verifyDir(dfs, path);
        } else {
            // this is not a directory. Checksum the file data.
            CRC32 fileCRC = new CRC32();
            FSInputStream in = dfs.dfs.open(pathName);
            byte[] buf = new byte[4096];
            int nRead = 0;
            while ((nRead = in.read(buf, 0, buf.length)) > 0) {
                fileCRC.update(buf, 0, nRead);
            }
            verifyChecksum(pathName, fileCRC.getValue());
        }
    }
}
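Because TreeMap iterates its keys in sorted order, the paths feed overallChecksum in a deterministic sequence, so the aggregate checksum is reproducible across runs regardless of the order listStatus returned the files. A hypothetical sketch of the same order-sensitive hashing idea (the paths and CRC32 usage here are illustrative, not taken from the Hadoop test):

import java.util.TreeMap;
import java.util.zip.CRC32;

public class OrderedChecksum {
    public static void main(String[] args) {
        TreeMap<String, Boolean> files = new TreeMap<>();
        files.put("/b/data.txt", false);
        files.put("/a", true);

        // keySet() iterates in sorted key order, so the CRC is stable
        // no matter what order the entries were inserted in.
        CRC32 crc = new CRC32();
        for (String path : files.keySet()) {
            crc.update(path.getBytes());
        }
        System.out.println(crc.getValue());
    }
}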
From source file:de.micromata.genome.gwiki.plugin.rogmp3_1_0.CsvTable.java
public void createIndex(int column) {
    TreeMap<String, List<String[]>> index = new TreeMap<String, List<String[]>>();
    for (String[] rec : table) {
        if (rec.length <= column) {
            continue;
        }
        String n = rec[column];
        List<String[]> el = index.get(n);
        if (el == null) {
            el = new ArrayList<String[]>();
            index.put(n, el);
        }
        el.add(rec);
    }
    indices.put(column, index);
}
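The get-then-put sequence above is the classic get-or-create pattern. On Java 8+ the same grouping can be written with computeIfAbsent, which performs a single lookup per record. A sketch under that assumption (the in-memory table here is hypothetical):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeMap;

public class GroupByColumn {
    public static void main(String[] args) {
        List<String[]> table = Arrays.asList(
                new String[] { "rock", "song1" },
                new String[] { "rock", "song2" },
                new String[] { "jazz", "song3" });

        TreeMap<String, List<String[]>> index = new TreeMap<>();
        for (String[] rec : table) {
            // Creates the bucket on first sight of the key, then appends.
            index.computeIfAbsent(rec[0], k -> new ArrayList<>()).add(rec);
        }
        System.out.println(index.get("rock").size()); // 2
    }
}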
From source file:org.openiot.gsn.vsensor.ChartVirtualSensor.java
public boolean initialize() {
    /**
     * TODO: Check whether the user provides the arguments correctly.
     * TODO: This can currently plot only one input stream value.
     */
    TreeMap<String, String> params = getVirtualSensorConfiguration().getMainClassInitialParams();
    ChartInfo chartInfo = new ChartInfo();
    chartInfo.setInputStreamName(params.get("input-stream"));
    chartInfo.setPlotTitle(params.get("title"));
    chartInfo.setType(params.get("type"));
    chartInfo.setHeight(ParamParser.getInteger(params.get("height"), 480));
    chartInfo.setWidth(ParamParser.getInteger(params.get("width"), 640));
    chartInfo.setVerticalAxisTitle(params.get("vertical-axis"));
    chartInfo.setHistorySize(ParamParser.getInteger(params.get("history-size"), 10));
    input_stream_name_to_ChartInfo_map.put(chartInfo.getInputStreamName(), chartInfo);
    chartInfo.initialize();
    return true;
}
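Note that params.get simply returns null for any missing key, which is why the numeric settings are routed through ParamParser with an explicit default. A hedged equivalent using only the JDK (getOrDefault, Java 8+; the keys mirror the snippet above):

import java.util.TreeMap;

public class ConfigDefaults {
    public static void main(String[] args) {
        TreeMap<String, String> params = new TreeMap<>();
        params.put("title", "Temperature");

        // Missing keys fall back to a default instead of returning null.
        String title = params.getOrDefault("title", "Untitled");
        int height = Integer.parseInt(params.getOrDefault("height", "480"));
        System.out.println(title + " @ " + height + "px");
    }
}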
From source file:com.espertech.esper.epl.core.OrderByProcessorImpl.java
public EventBean[] sort(EventBean[] outgoingEvents, Object[] orderKeys, ExprEvaluatorContext exprEvaluatorContext) {
    TreeMap<Object, Object> sort = new TreeMap<Object, Object>(factory.getComparator());
    if (outgoingEvents == null || outgoingEvents.length < 2) {
        return outgoingEvents;
    }
    for (int i = 0; i < outgoingEvents.length; i++) {
        Object entry = sort.get(orderKeys[i]);
        if (entry == null) {
            sort.put(orderKeys[i], outgoingEvents[i]);
        } else if (entry instanceof EventBean) {
            // Second event with the same key: replace the single event with a list.
            List<EventBean> list = new ArrayList<EventBean>();
            list.add((EventBean) entry);
            list.add(outgoingEvents[i]);
            sort.put(orderKeys[i], list);
        } else {
            List<EventBean> list = (List<EventBean>) entry;
            list.add(outgoingEvents[i]);
        }
    }
    EventBean[] result = new EventBean[outgoingEvents.length];
    int count = 0;
    for (Object entry : sort.values()) {
        if (entry instanceof List) {
            List<EventBean> output = (List<EventBean>) entry;
            for (EventBean theEvent : output) {
                result[count++] = theEvent;
            }
        } else {
            result[count++] = (EventBean) entry;
        }
    }
    return result;
}
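The TreeMap is the sorter here: keys go in under the order-by comparator, and values() then walks them back out in sorted order; the EventBean-or-List trick preserves duplicate keys without allocating a list in the common single-event case. A simplified sketch of the same idea with plain strings (all names illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class TreeMapSorter {
    public static void main(String[] args) {
        String[] events = { "e1", "e2", "e3" };
        Integer[] keys = { 2, 1, 2 };

        // Duplicate keys collect into lists; values() yields key order.
        TreeMap<Integer, List<String>> sort = new TreeMap<>();
        for (int i = 0; i < events.length; i++) {
            sort.computeIfAbsent(keys[i], k -> new ArrayList<>()).add(events[i]);
        }

        List<String> result = new ArrayList<>();
        sort.values().forEach(result::addAll);
        System.out.println(result); // [e2, e1, e3]
    }
}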
From source file:agendavital.modelo.data.Noticia.java
public static TreeMap<LocalDate, ArrayList<Noticia>> buscar(String _parametro) throws ConexionBDIncorrecta, SQLException {
    final DateTimeFormatter dateFormatter = DateTimeFormatter.ofPattern("dd-MM-yyyy");
    ArrayList<String> _tags = UtilidadesBusqueda.separarPalabras(_parametro);
    TreeMap<LocalDate, ArrayList<Noticia>> busqueda = null;
    try (Connection conexion = ConfigBD.conectar()) {
        busqueda = new TreeMap<>();
        for (String _tag : _tags) {
            String tag = ConfigBD.String2Sql(_tag, true);
            String buscar = String.format("SELECT id_Noticia, fecha from noticias "
                    + "WHERE id_noticia IN (SELECT id_noticia from momentos_noticias_etiquetas "
                    + "WHERE id_etiqueta IN (SELECT id_etiqueta from etiquetas WHERE nombre LIKE %s)) "
                    + "OR titulo LIKE %s " + "OR cuerpo LIKE %s " + "OR categoria LIKE %s "
                    + "OR fecha LIKE %s; ", tag, tag, tag, tag, tag);
            ResultSet rs = conexion.createStatement().executeQuery(buscar);
            while (rs.next()) {
                LocalDate date = LocalDate.parse(rs.getString("fecha"), dateFormatter);
                Noticia insertarNoticia = new Noticia(rs.getInt("id_noticia"));
                if (busqueda.containsKey(date)) {
                    // Avoid inserting the same Noticia twice under one date.
                    boolean encontrado = false;
                    for (int i = 0; i < busqueda.get(date).size() && !encontrado; i++)
                        if (busqueda.get(date).get(i).getId() == insertarNoticia.getId())
                            encontrado = true;
                    if (!encontrado)
                        busqueda.get(date).add(insertarNoticia);
                } else {
                    busqueda.put(date, new ArrayList<>());
                    busqueda.get(date).add(insertarNoticia);
                }
            }
        }
    } catch (SQLException e) {
        e.printStackTrace();
    }
    return busqueda;
}
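Using LocalDate as the TreeMap key means the returned map iterates the search hits in chronological order for free, since LocalDate implements Comparable. A minimal illustration:

import java.time.LocalDate;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class DateOrderedResults {
    public static void main(String[] args) {
        TreeMap<LocalDate, List<String>> byDate = new TreeMap<>();
        byDate.computeIfAbsent(LocalDate.of(2020, 5, 1), d -> new ArrayList<>()).add("later");
        byDate.computeIfAbsent(LocalDate.of(2019, 1, 1), d -> new ArrayList<>()).add("earlier");

        // Iteration is chronological regardless of insertion order.
        byDate.forEach((date, items) -> System.out.println(date + " -> " + items));
    }
}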
From source file:org.apache.hadoop.hive.ql.exec.ComputationBalancerReducer.java
void flushHistogram(FSDataOutputStream out) throws Exception {
    out.writeBytes(HistogramOperator.HISTOGRAMTABLE + "\n");
    for (String _s : mcvList.keySet()) {
        out.writeBytes(_s + "\n");
        ToolBox _tb = new ToolBox();
        TreeMap<String, Integer> _tsi = mcvList.get(_s);
        for (String _s_inner_ : _tsi.keySet()) {
            _tb.push(_s_inner_, _tsi.get(_s_inner_));
        }
        ToolBox _copyBox = HistogramOperator.binning(_tb, 10);
        String _curString = null;
        String _preString = _copyBox.getStringAtIdx(0);
        int idx;
        for (idx = 1; idx < _copyBox.getCapacity(); idx++) {
            _curString = _copyBox.getStringAtIdx(idx);
            if (_curString.equals(_preString)) {
                continue;
            } else {
                out.writeBytes(_copyBox.getIntegeAtIdx(idx - 1) + ToolBox.hiveDelimiter + _s
                        + ToolBox.hiveDelimiter + _copyBox.getStringAtIdx(idx - 1) + "\n");
                _preString = _curString;
            }
        }
        out.writeBytes(_copyBox.getIntegeAtIdx(idx - 1) + ToolBox.hiveDelimiter + _s
                + ToolBox.hiveDelimiter + _copyBox.getStringAtIdx(idx - 1) + "\n");
    }
}
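The inner loop calls _tsi.get for every key it already has in hand; iterating entrySet() instead reads each key/value pair in one pass and skips the extra O(log n) lookup per key. A sketch of the cheaper traversal:

import java.util.Map;
import java.util.TreeMap;

public class EntrySetWalk {
    public static void main(String[] args) {
        TreeMap<String, Integer> counts = new TreeMap<>();
        counts.put("a", 3);
        counts.put("b", 7);

        // One tree walk, no per-key get() lookups.
        for (Map.Entry<String, Integer> e : counts.entrySet()) {
            System.out.println(e.getKey() + " = " + e.getValue());
        }
    }
}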
From source file:opendap.hai.BesControlApi.java
private String getValidLoggerName(BES bes, String loggerName) throws BesAdminFail {
    TreeMap<String, BES.BesLogger> validLoggers = bes.getBesLoggers();
    if (validLoggers.containsKey(loggerName)) {
        BES.BesLogger besLogger = validLoggers.get(loggerName);
        return besLogger.getName();
    }
    log.debug("User requested unknown BES logger: '{}'", loggerName);
    return null;
}
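Assuming this map never stores null values, the containsKey/get pair can collapse into a single get with a null check, halving the number of tree traversals. A hedged rewrite (the BES types are replaced with String to keep the sketch runnable):

import java.util.TreeMap;

public class SingleLookup {
    public static void main(String[] args) {
        TreeMap<String, String> validLoggers = new TreeMap<>();
        validLoggers.put("request", "request-logger");

        // One lookup instead of containsKey() followed by get().
        String logger = validLoggers.get("request");
        System.out.println(logger != null ? logger : "unknown logger");
    }
}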
From source file:org.apache.hadoop.hbase.util.RegionSplitter.java
static void rollingSplit(String tableName, SplitAlgorithm splitAlgo, Configuration conf)
        throws IOException, InterruptedException {
    final int minOS = conf.getInt("split.outstanding", 2);
    HTable table = new HTable(conf, tableName);
    // max outstanding splits. default == 50% of servers
    final int MAX_OUTSTANDING = Math.max(table.getConnection().getCurrentNrHRS() / 2, minOS);

    Path hbDir = FSUtils.getRootDir(conf);
    Path tableDir = FSUtils.getTableDir(hbDir, table.getName());
    Path splitFile = new Path(tableDir, "_balancedSplit");
    FileSystem fs = FileSystem.get(conf);

    // get a list of daughter regions to create
    LinkedList<Pair<byte[], byte[]>> tmpRegionSet = getSplits(table, splitAlgo);
    LinkedList<Pair<byte[], byte[]>> outstanding = Lists.newLinkedList();
    int splitCount = 0;
    final int origCount = tmpRegionSet.size();

    // All splits must compact & we have 1 compact thread, so 2 split
    // requests to the same RS can stall the outstanding split queue.
    // To fix, group the regions into an RS pool and round-robin through it.
    LOG.debug("Bucketing regions by regionserver...");
    TreeMap<String, LinkedList<Pair<byte[], byte[]>>> daughterRegions = Maps.newTreeMap();
    for (Pair<byte[], byte[]> dr : tmpRegionSet) {
        String rsLocation = table.getRegionLocation(dr.getSecond()).getHostnamePort();
        if (!daughterRegions.containsKey(rsLocation)) {
            LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
            daughterRegions.put(rsLocation, entry);
        }
        daughterRegions.get(rsLocation).add(dr);
    }
    LOG.debug("Done with bucketing. Split time!");
    long startTime = System.currentTimeMillis();

    // open the split file and modify it as splits finish
    FSDataInputStream tmpIn = fs.open(splitFile);
    byte[] rawData = new byte[tmpIn.available()];
    tmpIn.readFully(rawData);
    tmpIn.close();
    FSDataOutputStream splitOut = fs.create(splitFile);
    splitOut.write(rawData);

    try {
        // *** split code ***
        while (!daughterRegions.isEmpty()) {
            LOG.debug(daughterRegions.size() + " RS have regions to split.");

            // Get RegionServer : region count mapping, keyed by hostname:port
            // so lookups match the keys of daughterRegions.
            final TreeMap<String, Integer> rsSizes = Maps.newTreeMap();
            Map<HRegionInfo, ServerName> regionsInfo = table.getRegionLocations();
            for (ServerName rs : regionsInfo.values()) {
                String host = rs.getHostAndPort();
                if (rsSizes.containsKey(host)) {
                    rsSizes.put(host, rsSizes.get(host) + 1);
                } else {
                    rsSizes.put(host, 1);
                }
            }

            // sort the RS by the number of regions they have
            List<String> serversLeft = Lists.newArrayList(daughterRegions.keySet());
            Collections.sort(serversLeft, new Comparator<String>() {
                public int compare(String o1, String o2) {
                    return rsSizes.get(o1).compareTo(rsSizes.get(o2));
                }
            });

            // Round-robin through the RS list. Choose the lightest-loaded servers
            // first to keep the master from load-balancing regions as we split.
            for (String rsLoc : serversLeft) {
                Pair<byte[], byte[]> dr = null;

                // find a region in the RS list that hasn't been moved
                LOG.debug("Finding a region on " + rsLoc);
                LinkedList<Pair<byte[], byte[]>> regionList = daughterRegions.get(rsLoc);
                while (!regionList.isEmpty()) {
                    dr = regionList.pop();

                    // get current region info
                    byte[] split = dr.getSecond();
                    HRegionLocation regionLoc = table.getRegionLocation(split);

                    // if this region moved locations
                    String newRs = regionLoc.getHostnamePort();
                    if (newRs.compareTo(rsLoc) != 0) {
                        LOG.debug("Region with " + splitAlgo.rowToStr(split) + " moved to " + newRs
                                + ". Relocating...");
                        // relocate it, don't use it right now
                        if (!daughterRegions.containsKey(newRs)) {
                            LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
                            daughterRegions.put(newRs, entry);
                        }
                        daughterRegions.get(newRs).add(dr);
                        dr = null;
                        continue;
                    }

                    // make sure this region wasn't already split
                    byte[] sk = regionLoc.getRegionInfo().getStartKey();
                    if (sk.length != 0) {
                        if (Bytes.equals(split, sk)) {
                            LOG.debug("Region already split on " + splitAlgo.rowToStr(split)
                                    + ". Skipping this region...");
                            ++splitCount;
                            dr = null;
                            continue;
                        }
                        byte[] start = dr.getFirst();
                        Preconditions.checkArgument(Bytes.equals(start, sk),
                                splitAlgo.rowToStr(start) + " != " + splitAlgo.rowToStr(sk));
                    }

                    // passed all checks! found a good region
                    break;
                }
                if (regionList.isEmpty()) {
                    daughterRegions.remove(rsLoc);
                }
                if (dr == null)
                    continue;

                // we have a good region, time to split!
                byte[] split = dr.getSecond();
                LOG.debug("Splitting at " + splitAlgo.rowToStr(split));
                HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
                admin.split(table.getTableName(), split);

                LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
                if (conf.getBoolean("split.verify", true)) {
                    // we need to verify and rate-limit our splits
                    outstanding.addLast(dr);
                    // with too many outstanding splits, wait for some to finish
                    while (outstanding.size() >= MAX_OUTSTANDING) {
                        finished = splitScan(outstanding, table, splitAlgo);
                        if (finished.isEmpty()) {
                            Thread.sleep(30 * 1000);
                        } else {
                            outstanding.removeAll(finished);
                        }
                    }
                } else {
                    finished.add(dr);
                }

                // mark each finished region as successfully split.
                for (Pair<byte[], byte[]> region : finished) {
                    splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " "
                            + splitAlgo.rowToStr(region.getSecond()) + "\n");
                    splitCount++;
                    if (splitCount % 10 == 0) {
                        long tDiff = (System.currentTimeMillis() - startTime) / splitCount;
                        LOG.debug("STATUS UPDATE: " + splitCount + " / " + origCount
                                + ". Avg Time / Split = "
                                + org.apache.hadoop.util.StringUtils.formatTime(tDiff));
                    }
                }
            }
        }
        if (conf.getBoolean("split.verify", true)) {
            while (!outstanding.isEmpty()) {
                LinkedList<Pair<byte[], byte[]>> finished = splitScan(outstanding, table, splitAlgo);
                if (finished.isEmpty()) {
                    Thread.sleep(30 * 1000);
                } else {
                    outstanding.removeAll(finished);
                    for (Pair<byte[], byte[]> region : finished) {
                        splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst()) + " "
                                + splitAlgo.rowToStr(region.getSecond()) + "\n");
                    }
                }
            }
        }
        LOG.debug("All regions have been successfully split!");
    } finally {
        long tDiff = System.currentTimeMillis() - startTime;
        LOG.debug("TOTAL TIME = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff));
        LOG.debug("Splits = " + splitCount);
        LOG.debug("Avg Time / Split = " + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount));
        splitOut.close();
        if (table != null) {
            table.close();
        }
    }
    fs.delete(splitFile, false);
}
From source file:org.cloudata.core.client.Row.java
private void internalAddCell(TreeMap<Cell.Key, Cell> cellMap, Cell newCell) {
    Cell cell = null;
    if ((cell = cellMap.get(newCell.key)) == null) {
        cellMap.put(newCell.key, newCell);
    } else {
        cell.values.addAll(newCell.values);
    }
}
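This is the merge-on-collision idiom: the first cell under a key is stored as-is, and later cells fold their values into it. On Java 8+ the same intent can be expressed with putIfAbsent, which returns the existing value in one lookup (Cloudata's Cell types are swapped for JDK ones to keep the sketch runnable):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeMap;

public class MergeCells {
    static void addCell(TreeMap<String, List<String>> cellMap, String key, List<String> values) {
        // putIfAbsent returns the previous value, or null if it inserted ours.
        List<String> existing = cellMap.putIfAbsent(key, values);
        if (existing != null) {
            existing.addAll(values);
        }
    }

    public static void main(String[] args) {
        TreeMap<String, List<String>> cells = new TreeMap<>();
        addCell(cells, "row1", new ArrayList<>(Arrays.asList("v1")));
        addCell(cells, "row1", new ArrayList<>(Arrays.asList("v2")));
        System.out.println(cells.get("row1")); // [v1, v2]
    }
}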
From source file:edu.ucsb.eucalyptus.transport.query.WalrusQuerySecurityHandler.java
private String getCanonicalizedAmzHeaders(CaseInsensitiveMap headers) {
    String result = "";
    TreeMap amzHeaders = headers.removeSub("x-amz-");
    Iterator iterator = amzHeaders.keySet().iterator();
    while (iterator.hasNext()) {
        Object key = iterator.next();
        String trimmedKey = key.toString().trim();
        String value = (String) amzHeaders.get(key);
        String trimmedValue = value.trim();
        result += trimmedKey + ":" + trimmedValue + "\n";
    }
    return result;
}
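The raw TreeMap forces the cast to String; the same canonicalization reads more safely with a typed map and a StringBuilder, and TreeMap's sorted iteration is exactly the header ordering that AWS-style request signing requires. A hedged, generic rewrite (removeSub and the surrounding Eucalyptus types are assumed away):

import java.util.Map;
import java.util.TreeMap;

public class CanonicalAmzHeaders {
    static String canonicalize(Map<String, String> amzHeaders) {
        // TreeMap sorts header names, as canonical signing requires.
        TreeMap<String, String> sorted = new TreeMap<>(amzHeaders);
        StringBuilder result = new StringBuilder();
        for (Map.Entry<String, String> e : sorted.entrySet()) {
            result.append(e.getKey().trim()).append(':').append(e.getValue().trim()).append('\n');
        }
        return result.toString();
    }

    public static void main(String[] args) {
        TreeMap<String, String> headers = new TreeMap<>();
        headers.put("x-amz-date", " 20240101T000000Z ");
        headers.put("x-amz-acl", "private");
        System.out.println(canonicalize(headers));
    }
}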