Usage examples for java.util.TreeMap.containsKey
public boolean containsKey(Object key)
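containsKey returns true if the map contains a mapping for the specified key. Because a TreeMap keeps its keys sorted, the lookup costs O(log n) comparisons; it throws ClassCastException if the key cannot be compared with the keys currently in the map, and NullPointerException for a null key when the map uses natural ordering. A minimal, self-contained sketch of that behavior (class and variable names are illustrative):

import java.util.TreeMap;

public class ContainsKeyDemo {
    public static void main(String[] args) {
        TreeMap<String, Integer> ages = new TreeMap<String, Integer>();
        ages.put("alice", 30);
        ages.put("bob", 25);

        System.out.println(ages.containsKey("alice")); // true
        System.out.println(ages.containsKey("carol")); // false

        // With natural ordering, a null key cannot be compared:
        try {
            ages.containsKey(null);
        } catch (NullPointerException e) {
            System.out.println("null key rejected");
        }
    }
}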
From source file:com.wattzap.view.graphs.DistributionGraph.java
public void updateValues(int scale, boolean keepZeroes) {
    da.setBucketSize(scale);
    da.setKeepZeroes(keepZeroes);
    long totalTime = 0;
    TreeMap<Integer, Long> data = new TreeMap<Integer, Long>();
    for (int i = 0; i < telemetry.length; i++) {
        Telemetry last = null;
        for (Telemetry t : telemetry[i]) {
            if (last == null) {
                // first time through
                last = t;
            } else {
                int key = da.getKey(t);
                if (key != -1) {
                    if (data.containsKey(key)) {
                        // add time to current key
                        long time = data.get(key);
                        data.put(key, time + (t.getTime() - last.getTime()));
                    } else {
                        data.put(key, t.getTime() - last.getTime());
                    }
                    totalTime += t.getTime() - last.getTime();
                }
                last = t;
            }
        } // for
    } // for

    DefaultCategoryDataset dataset = new DefaultCategoryDataset();
    for (Entry<Integer, Long> entry : data.entrySet()) {
        int key = entry.getKey();
        double p = ((double) entry.getValue() * 100 / totalTime);
        if (p > 0.5) {
            dataset.addValue(p, "", da.getValueLabel(key));
        }
    } // for
    plot.setDataset(dataset);
    chartPanel.revalidate();
}
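The containsKey/get/put sequence above is the classic pre-Java-8 way to accumulate a total per key. Since Java 8, Map.merge collapses the check, read, and write into a single call; a minimal sketch of the same accumulation, with illustrative class and variable names:

import java.util.TreeMap;

public class MergeDemo {
    public static void main(String[] args) {
        TreeMap<Integer, Long> data = new TreeMap<Integer, Long>();
        long[][] samples = { { 5, 100L }, { 7, 250L }, { 5, 40L } }; // { key, elapsed }

        for (long[] s : samples) {
            // merge() adds the elapsed time to the existing total, or inserts it
            // when the key is new, replacing the containsKey/get/put sequence above.
            data.merge((int) s[0], s[1], Long::sum);
        }
        System.out.println(data); // {5=140, 7=250}
    }
}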
From source file:org.apache.hadoop.raid.TestRaidHistogram.java
/**
 * Runs three stages. Each stage spawns nPercents threads.
 * Each thread iterates $rounds rounds and sends a random number for
 * each monitor dir to the raidnode, including succeeded files and failed files.
 * Sets two windows: the first window covers stage 3 only;
 * the second window covers stages 2 and 3 only.
 * Calling getBlockFixStatus should filter out all stage 1 points.
 * The histogram counts for the second window should be double those of
 * the first window.
 */
public void testHistograms() throws Exception {
    int rounds = 10000;
    int range = 1000000;
    int dividedRange = range / 1000;
    float step = 1.0f / nPercents;
    try {
        mySetup();
        cnode = RaidNode.createRaidNode(null, conf);
        ArrayList<Float> percents = new ArrayList<Float>();
        for (int i = 0; i <= nPercents; i++) {
            percents.add(step * i);
        }
        Collections.shuffle(percents);
        // submit some old data
        sendRecoveryTimes(nPercents, range * (nPercents + 1), range, rounds);
        Thread.sleep(100);
        long ckpTime1 = System.currentTimeMillis();
        sendRecoveryTimes(nPercents, 0, range, rounds);
        Thread.sleep(100);
        long ckpTime2 = System.currentTimeMillis();
        sendRecoveryTimes(nPercents, 0, range, rounds);
        long endTime = System.currentTimeMillis();
        ArrayList<Long> newWindows = new ArrayList<Long>();
        newWindows.add(endTime - ckpTime2);
        newWindows.add(endTime - ckpTime1);
        HashMap<String, RaidHistogram> recoveryTimes =
                cnode.blockIntegrityMonitor.getRecoveryTimes();
        for (RaidHistogram histogram : recoveryTimes.values()) {
            histogram.setNewWindows(newWindows);
        }
        for (int i = 0; i <= monitorDirs.length; i++) {
            String monitorDir;
            if (i < monitorDirs.length) {
                monitorDir = monitorDirs[i];
            } else {
                monitorDir = BlockIntegrityMonitor.OTHERS;
            }
            assertEquals("Stale entries are not filtered",
                    rounds * nPercents * 3 * 2,
                    cnode.blockIntegrityMonitor.getNumberOfPoints(monitorDir));
            TreeMap<Long, BlockFixStatus> status =
                    cnode.blockIntegrityMonitor.getBlockFixStatus(monitorDir,
                            nPercents, percents, endTime);
            assertTrue(status.containsKey(newWindows.get(0)));
            assertTrue(status.containsKey(newWindows.get(1)));
            BlockFixStatus bfs = status.get(newWindows.get(0));
            assertEquals("Stale entries are not filtered",
                    rounds * nPercents * 2 * 2,
                    cnode.blockIntegrityMonitor.getNumberOfPoints(monitorDir));
            // Verify failed recovered files for the first window
            assertEquals("The number of failed recovery files should match",
                    rounds * nPercents, bfs.failedPaths);
            // Verify histogram for the first window
            assertEquals(nPercents, bfs.counters.length);
            for (int j = 0; j < nPercents; j++) {
                assertEquals(rounds, bfs.counters[j]);
            }
            // Verify percent values for the first window
            assertEquals(nPercents + 1, bfs.percentValues.length);
            assertEquals(0, bfs.percentValues[0]);
            for (int j = 1; j <= nPercents; j++) {
                assertEquals(dividedRange * j - 1, bfs.percentValues[j]);
            }
            bfs = status.get(newWindows.get(1));
            // Verify failed recovered files for the second window
            assertEquals("The number of failed recovery files should match",
                    rounds * nPercents, bfs.failedPaths);
            // Verify histogram for the second window
            assertEquals(nPercents, bfs.counters.length);
            for (int j = 0; j < nPercents; j++) {
                assertEquals(rounds * 2, bfs.counters[j]);
            }
            // Verify percent values for the second window
            assertEquals(nPercents + 1, bfs.percentValues.length);
            assertEquals(0, bfs.percentValues[0]);
            for (int j = 1; j <= nPercents; j++) {
                assertEquals(dividedRange * j - 1, bfs.percentValues[j]);
            }
        }
    } finally {
        myTearDown();
    }
}
From source file:com.sshtools.common.vomanagementtool.common.VOHelper.java
private static TreeMap readVomsesFile(File file) {
    TreeMap vosInfo = new TreeMap<String, List>();
    BufferedReader br;
    try {
        br = new BufferedReader(new FileReader(file));
        String line;
        while ((line = br.readLine()) != null) {
            if (!line.trim().equals("")) {
                String[] info = line.split("\" \"");
                TreeMap temp = null;
                String voname = "";
                for (int i = 0; i < info.length; i++) {
                    if (i == 0) {
                        temp = new TreeMap<String, String>();
                        voname = info[i].substring(1);
                    } else if (i == 4) {
                        temp.put("servervoname", info[i].substring(0, info[i].length() - 1));
                        // Find out whether the same voname already exists
                        if (vosInfo.containsKey(voname)) {
                            List multiValue = (List) vosInfo.get(voname);
                            multiValue.add(temp);
                            vosInfo.put(voname, multiValue);
                        } else {
                            List singleValue = new ArrayList();
                            singleValue.add(temp);
                            vosInfo.put(voname, singleValue);
                        }
                    } else if (i == 1) {
                        temp.put("server", info[i]);
                    } else if (i == 2) {
                        temp.put("port", info[i]);
                    } else if (i == 3) {
                        temp.put("dn", info[i]);
                    }
                }
            }
        }
        br.close();
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return vosInfo;
}
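The containsKey branch above hand-rolls a multimap: fetch the list for a key, creating and inserting it when absent. Since Java 8, Map.computeIfAbsent expresses the same pattern in one call; a minimal sketch with hypothetical names:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

public class MultimapDemo {
    public static void main(String[] args) {
        TreeMap<String, List<String>> vosInfo = new TreeMap<String, List<String>>();

        // computeIfAbsent creates and stores the list on first use of a key,
        // replacing the containsKey check in the method above.
        vosInfo.computeIfAbsent("atlas", k -> new ArrayList<String>()).add("server1");
        vosInfo.computeIfAbsent("atlas", k -> new ArrayList<String>()).add("server2");

        System.out.println(vosInfo); // {atlas=[server1, server2]}
    }
}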
From source file:org.openiot.gsn.wrappers.general.CSVHandler.java
public ArrayList<TreeMap<String, Serializable>> parseValues(Reader datainput, long previousCheckPoint,
        int samplingCountPerPeriod) throws IOException {
    ArrayList<TreeMap<String, Serializable>> toReturn = new ArrayList<TreeMap<String, Serializable>>();
    CSVReader reader = new CSVReader(datainput, getSeparator(), getStringSeparator(), getSkipFirstXLines());
    String[] values;
    long currentLine = 0;
    Serializable currentTimeStamp = null;
    boolean quit = false;
    while ((values = reader.readNext()) != null) {
        TreeMap<String, Serializable> se = convertTo(formats, fields, getNulls(), values, getSeparator());
        if (isEmpty(se)) {
            continue;
        }
        if (se.containsKey(TIMESTAMP)) {
            if (((Long) se.get(TIMESTAMP)) <= previousCheckPoint) {
                continue;
            }
        } else { // assuming useCounterForCheckPoint = true
            if (logger.isDebugEnabled()) {
                String symbol = (currentLine < previousCheckPoint) ? " < " : " >= ";
                logger.debug("currentLine=" + currentLine + symbol + "checkpoint=" + previousCheckPoint);
            }
            if (currentLine < previousCheckPoint) { // skip already read lines, based on line count
                logger.debug("skipping");
                currentLine++;
                continue;
            }
        }
        if (quit) {
            if (se.containsKey(TIMESTAMP)) {
                if (currentTimeStamp == null || !currentTimeStamp.equals(se.get(TIMESTAMP))) {
                    break;
                }
            } else {
                break;
            }
        }
        toReturn.add(se);
        currentLine++;
        loggedNoChange = false;
        if (toReturn.size() >= samplingCountPerPeriod) {
            // We only read x values per call, so it is time to quit.
            // But in timestamp mode, still check the next value: if its
            // timestamp is the same we have to return it too, or data
            // would be lost.
            logger.trace("Time to quit.");
            quit = true;
            if (se.containsKey(TIMESTAMP)) {
                currentTimeStamp = se.get(TIMESTAMP);
            } else {
                break;
            }
        }
    }
    if (logger.isDebugEnabled() && toReturn.isEmpty() && loggedNoChange == false) {
        logger.debug("There is no new item after the most recent checkpoint (previousCheckPoint:"
                + new DateTime(previousCheckPoint) + ").");
        loggedNoChange = true;
    }
    reader.close();
    return toReturn;
}
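Note that the parser tests se.containsKey(TIMESTAMP) rather than se.get(TIMESTAMP) != null. A TreeMap may map a key to a null value, and containsKey is the only check that distinguishes an absent key from a key that is present but mapped to null.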
From source file:org.openiot.gsn.beans.StreamElement.java
public StreamElement(TreeMap<String, Serializable> output, DataField[] fields) {
    int nbFields = output.keySet().size();
    if (output.containsKey("timed"))
        nbFields--;
    String fieldNames[] = new String[nbFields];
    Byte fieldTypes[] = new Byte[nbFields];
    Serializable fieldValues[] = new Serializable[nbFields];
    TreeMap<String, Integer> indexedFieldNames = new TreeMap<String, Integer>(new CaseInsensitiveComparator());
    int idx = 0;
    long timestamp = System.currentTimeMillis();
    for (String key : output.keySet()) {
        Serializable value = output.get(key);
        if (key.equalsIgnoreCase("timed"))
            timestamp = (Long) value;
        else {
            fieldNames[idx] = key;
            fieldValues[idx] = value;
            for (int i = 0; i < fields.length; i++) {
                if (fields[i].getName().equalsIgnoreCase(key))
                    fieldTypes[idx] = fields[i].getDataTypeID();
            }
            indexedFieldNames.put(key, idx);
            idx++;
        }
    }
    this.fieldNames = fieldNames;
    this.fieldTypes = fieldTypes;
    this.fieldValues = fieldValues;
    this.indexedFieldNames = indexedFieldNames;
    this.timeStamp = timestamp;
}
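A small efficiency note: the constructor iterates output.keySet() and then calls output.get(key) for each key, which costs an extra O(log n) lookup per entry in a TreeMap; iterating output.entrySet() would visit each key/value pair in a single pass.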
From source file:emily.command.administrative.GuildStatsCommand.java
@Override
public String execute(DiscordBot bot, String[] args, MessageChannel channel, User author, Message inputMessage) {
    if (!bot.getContainer().allShardsReady()) {
        return "Not fully loaded yet!";
    }
    if (args.length == 0) {
        return "Statistics! \n" + getTotalTable(bot, false) + "\nYou are on shard # " + bot.getShardId();
    }
    SimpleRank userrank = bot.security.getSimpleRank(author, channel);
    switch (args[0].toLowerCase()) {
    case "mini":
        return "Statistics! \n" + getTotalTable(bot, true);
    case "music":
        return DebugUtil.sendToHastebin(getPlayingOn(bot.getContainer(),
                userrank.isAtLeast(SimpleRank.BOT_ADMIN)
                        || (args.length >= 2 && args[1].equalsIgnoreCase("guilds"))));
    case "activity":
        return lastShardActivity(bot.getContainer());
    case "users":
        if (!(channel instanceof TextChannel)) {
            return Templates.invalid_use.formatGuild(channel);
        }
        TreeMap<Date, Integer> map = new TreeMap<>();
        Guild guild = ((TextChannel) channel).getGuild();
        List<Member> joins = new ArrayList<>(guild.getMembers());
        for (Member join : joins) {
            Date time = DateUtils.round(new Date(join.getJoinDate().toInstant().toEpochMilli()),
                    Calendar.DAY_OF_MONTH);
            if (!map.containsKey(time)) {
                map.put(time, 0);
            }
            map.put(time, map.get(time) + 1);
        }
        List<Date> xData = new ArrayList<>();
        List<Integer> yData = new ArrayList<>();
        int total = 0;
        for (Map.Entry<Date, Integer> entry : map.entrySet()) {
            total += entry.getValue();
            xData.add(entry.getKey());
            yData.add(total);
        }
        XYChart chart = new XYChart(1024, 600);
        chart.setTitle("Users over time for " + guild.getName());
        chart.setXAxisTitle("Date");
        chart.setYAxisTitle("Users");
        chart.getStyler().setTheme(new GGPlot2Theme());
        XYSeries series = chart.addSeries("Users", xData, yData);
        series.setMarker(SeriesMarkers.CIRCLE);
        try {
            File f = new File("./Sample_Chart.png");
            BitmapEncoder.saveBitmap(chart, f.getAbsolutePath(), BitmapEncoder.BitmapFormat.PNG);
            bot.queue.add(channel.sendFile(f), message -> f.delete());
        } catch (IOException e) {
            e.printStackTrace();
        }
        return "";
    }
    return "Statistics! \n" + getTotalTable(bot, false);
}
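The containsKey initialization in the users branch (put 0 when the date bucket is missing, then put get + 1) is the same counting idiom as in the first example; with Java 8 it reduces to map.merge(time, 1, Integer::sum), as in the merge sketch shown earlier.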
From source file:gsn.beans.StreamElement.java
public StreamElement(TreeMap<String, Serializable> output, DataField[] fields) {
    int nbFields = output.keySet().size();
    if (output.containsKey("timed"))
        nbFields--;
    String fieldNames[] = new String[nbFields];
    Byte fieldTypes[] = new Byte[nbFields];
    Serializable fieldValues[] = new Serializable[nbFields];
    TreeMap<String, Integer> indexedFieldNames = new TreeMap<String, Integer>(new CaseInsensitiveComparator());
    int idx = 0;
    long timestamp = System.currentTimeMillis();
    for (String key : output.keySet()) {
        Serializable value = output.get(key);
        if (key.equalsIgnoreCase("timed")) {
            timestamp = (Long) value;
            timestampProvided = true;
        } else {
            fieldNames[idx] = key;
            fieldValues[idx] = value;
            for (int i = 0; i < fields.length; i++) {
                if (fields[i].getName().equalsIgnoreCase(key))
                    fieldTypes[idx] = fields[i].getDataTypeID();
            }
            indexedFieldNames.put(key, idx);
            idx++;
        }
    }
    this.fieldNames = fieldNames;
    this.fieldTypes = fieldTypes;
    this.fieldValues = fieldValues;
    this.indexedFieldNames = indexedFieldNames;
    this.timeStamp = timestamp;
}
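This variant differs from the org.openiot.gsn version above only in setting timestampProvided = true when the caller supplies a "timed" entry, letting downstream code distinguish an explicit timestamp from the System.currentTimeMillis() fallback.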
From source file:org.apache.hadoop.raid.TestRaidHistogram.java
public void testRepeatSendingRecoveryTime() throws Exception {
    int rounds = 4;
    int nPercents = 2;
    int range = 1000000;
    int dividedRange = range / 1000;
    float step = 1.0f / nPercents;
    long gapTime = 3000L;
    ArrayList<Long> windows = new ArrayList<Long>();
    windows.add(gapTime);
    windows.add(3600000L);
    int sendRound = 2;
    try {
        mySetup();
        Configuration localConf = new Configuration(conf);
        localConf.set(BlockIntegrityMonitor.MONITOR_SECONDS_KEY, gapTime / 1000 + ",3600");
        cnode = RaidNode.createRaidNode(null, localConf);
        ArrayList<Float> percents = new ArrayList<Float>();
        for (int i = 0; i <= 2; i++) {
            percents.add(step * i);
        }
        Collections.shuffle(percents);
        for (int r = 0; r < rounds; r++) {
            // submit some data
            long sTime = System.currentTimeMillis();
            sendRecoveryTimes(2, 0, range, sendRound);
            LOG.info("Get blockFixStatus");
            String monitorDir = monitorDirs[0];
            TreeMap<Long, BlockFixStatus> status = cnode.blockIntegrityMonitor.getBlockFixStatus(monitorDir,
                    nPercents, percents, sTime + gapTime - 1000);
            printBlockFixStatus(status);
            assertTrue(status.containsKey(windows.get(0)));
            assertTrue(status.containsKey(windows.get(1)));
            BlockFixStatus bfs = status.get(windows.get(0));
            // Verify failed recovered files for the first window
            assertEquals("The number of failed recovery files should match", sendRound * nPercents,
                    bfs.failedPaths);
            // Verify percent values for the first window
            assertEquals(nPercents + 1, bfs.percentValues.length);
            assertEquals(0, bfs.percentValues[0]);
            for (int j = 1; j <= nPercents; j++) {
                assertEquals(dividedRange * j - 1, bfs.percentValues[j]);
            }
            bfs = status.get(windows.get(1));
            // Verify failed recovered files for the second window
            assertEquals("The number of failed recovery files should match", sendRound * nPercents,
                    bfs.failedPaths);
            // Verify percent values for the second window
            assertEquals(nPercents + 1, bfs.percentValues.length);
            assertEquals(0, bfs.percentValues[0]);
            for (int j = 1; j <= nPercents; j++) {
                assertEquals(dividedRange * j - 1, bfs.percentValues[j]);
            }
            Thread.sleep(gapTime + 1000);
            status = cnode.blockIntegrityMonitor.getBlockFixStatus(monitorDir, nPercents, percents,
                    System.currentTimeMillis());
            printBlockFixStatus(status);
            assertTrue(status.containsKey(windows.get(0)));
            assertTrue(status.containsKey(windows.get(1)));
            bfs = status.get(windows.get(0));
            // Verify failed recovered files for the first window
            assertEquals("The number of failed recovery files should be 0", 0, bfs.failedPaths);
            // Verify percent values for the first window; they should all be -1
            assertEquals(nPercents + 1, bfs.percentValues.length);
            assertEquals(-1, bfs.percentValues[0]);
            for (int j = 1; j <= nPercents; j++) {
                assertEquals(-1, bfs.percentValues[j]);
            }
        }
    } finally {
        myTearDown();
    }
}
From source file:com.sfs.whichdoctor.dao.VoteDAOImpl.java
/**
 * Deletes a cast vote based on its GUID.
 *
 * @param vote the vote
 * @param checkUser the check user
 * @param privileges the privileges
 *
 * @return true, if deleted
 *
 * @throws WhichDoctorDaoException the which doctor dao exception
 */
public final boolean delete(final VoteBean vote, final UserBean checkUser, final PrivilegesBean privileges)
        throws WhichDoctorDaoException {

    /* Check that the vote belongs to a valid election */
    GroupBean group = new GroupBean();
    try {
        group = this.groupDAO.loadGUID(vote.getGroupGUID());
    } catch (Exception e) {
        dataLogger.error("Error loading election: " + e.getMessage());
        throw new WhichDoctorDaoException("Error loading election: " + e.getMessage());
    }
    if (group.getType().compareTo("Election") != 0) {
        throw new WhichDoctorDaoException("Sorry a valid election relating to this vote could not be found");
    }

    // Check whether this vote has been recorded
    TreeMap<Integer, VoteBean> castVotes = new TreeMap<Integer, VoteBean>();
    try {
        castVotes = this.load(vote.getGroupGUID());
    } catch (Exception e) {
        dataLogger.error("Error loading cast votes: " + e.getMessage());
    }
    if (!castVotes.containsKey(vote.getVoteNumber())) {
        throw new WhichDoctorDaoException("Sorry, this vote has not been recorded");
    }

    ItemBean item = new ItemBean();
    item.setId(vote.getId());
    item.setGUID(vote.getGUID());
    item.setObject1GUID(vote.getGroupGUID());
    item.setWeighting(vote.getCandidateGUID());
    item.setObject2GUID(PersonBean.getVoteNumber(vote.getVoteNumber(), group.getYear()));
    item.setItemType("Vote");
    item.setPermission("groups");
    return this.itemDAO.delete(item, checkUser, privileges, null);
}
From source file:org.lockss.servlet.AddContent.java
private void deletePublisher(String publisher) throws IOException {
    TreeMap<String, TreeMap<String, TreeSet<ArchivalUnit>>> auMap =
            DisplayContentTab.getAusByPublisherName();
    ArrayList<String> auIds = new ArrayList<String>();
    if (auMap.containsKey(publisher)) {
        for (Map.Entry<String, TreeMap<String, TreeSet<ArchivalUnit>>> entry : auMap.entrySet()) {
            String publisherString = entry.getKey();
            log.error("Publisher: " + publisher);
            log.error("Publisher string: " + publisherString);
            if (publisher.equals(publisherString)) {
                TreeMap<String, TreeSet<ArchivalUnit>> titleMap = entry.getValue();
                for (Map.Entry<String, TreeSet<ArchivalUnit>> stringTreeSetEntry : titleMap.entrySet()) {
                    TreeSet<ArchivalUnit> auSet = stringTreeSetEntry.getValue();
                    for (ArchivalUnit au : auSet) {
                        auIds.add(au.getAuId());
                    }
                }
            }
        }
        doRemoveAus(auIds);
        session.setAttribute("actionMessage",
                "All AUs associated with publisher " + publisher + " were deleted");
    } else {
        log.error("Could not find publisher");
    }
}
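Since the containsKey guard already confirms the key is present, the loop that compares every key against publisher is redundant: auMap.get(publisher) would return the matching title map directly in O(log n) time.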