List of usage examples for java.util.TreeMap.remove
public V remove(Object key)

Removes the mapping for this key from this TreeMap if present. Returns the value previously associated with the key, or null if there was no mapping for it.
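Before the source-file examples, a minimal self-contained sketch of these semantics (the keys and values are hypothetical, not taken from any example below):

    import java.util.TreeMap;

    public class TreeMapRemoveDemo {
        public static void main(String[] args) {
            TreeMap<String, Integer> map = new TreeMap<>();
            map.put("a", 1);
            map.put("b", 2);

            Integer removed = map.remove("a");   // 1: the previous value
            Integer missing = map.remove("zzz"); // null: no such key, map unchanged

            System.out.println(removed + " " + missing + " " + map); // 1 null {b=2}
        }
    }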
From source file:org.apache.tajo.storage.thirdparty.orc.ByteBufferAllocatorPool.java
    public ByteBuffer getBuffer(boolean direct, int length) {
        TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);
        Map.Entry<Key, ByteBuffer> entry = tree.ceilingEntry(new Key(length, 0));
        if (entry == null) {
            return direct ? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length);
        }
        tree.remove(entry.getKey());
        return entry.getValue();
    }
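The ceilingEntry/remove pair implements best-fit allocation: Key orders pooled buffers by capacity, ceilingEntry(new Key(length, 0)) locates the smallest buffer of at least length bytes, and remove takes it out of the pool before it is handed to the caller. Only when no pooled buffer is large enough does the method allocate a fresh one.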
From source file:com.edmunds.etm.runtime.api.ApplicationSeries.java
    /**
     * Removes an application version from this series.
     *
     * @param application the application to remove
     * @return null if the series became empty, this if the version was not present,
     *         or a new ApplicationSeries with the version removed
     */
    public ApplicationSeries remove(Application application) {
        final TreeMap<ApplicationVersion, Application> temp = Maps.newTreeMap(applicationsByVersion);
        Application previous = temp.remove(application.getVersion());

        // There is no such thing as an empty series (just return null).
        if (temp.isEmpty()) {
            return null;
        }

        // If the previous version didn't exist there is no point making a copy.
        if (previous == null) {
            return this;
        }

        updateActiveVersion(temp);
        return new ApplicationSeries(name, temp);
    }
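Note the copy-on-write style: the method never mutates applicationsByVersion. It removes from a copy of the backing map and returns null if the series became empty, this if the version was not present, or a new ApplicationSeries wrapping the modified copy.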
From source file:org.apache.hadoop.io.ElasticByteBufferPool.java
    @Override
    public synchronized ByteBuffer getBuffer(boolean direct, int length) {
        TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);
        Map.Entry<Key, ByteBuffer> entry = tree.ceilingEntry(new Key(length, 0));
        if (entry == null) {
            return direct ? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length);
        }
        tree.remove(entry.getKey());
        return entry.getValue();
    }
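This is the same best-fit pattern as the Tajo example above. For completeness, a sketch of the complementary putBuffer, assuming Key orders by (capacity, insertion time) as getBuffer's Key(length, 0) probe suggests; treat it as illustrative rather than Hadoop's exact source:

    public synchronized void putBuffer(ByteBuffer buffer) {
        buffer.clear();
        TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
        while (true) {
            // The nanoTime component keeps keys unique, so two pooled buffers
            // of the same capacity don't overwrite each other in the tree.
            Key key = new Key(buffer.capacity(), System.nanoTime());
            if (!tree.containsKey(key)) {
                tree.put(key, buffer);
                return;
            }
            // Timestamp collision: loop and try again with a fresh nanoTime.
        }
    }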
From source file:hermes.ext.ems.TibcoEMSAdmin.java
    public Map getStatistics(final DestinationConfig destination) throws JMSException {
        try {
            final DestinationInfo info = getDestinationInfo(destination);
            final TreeMap rval = new TreeMap();
            rval.putAll(PropertyUtils.describe(info));

            // Drop the nested statistics beans; their counters are re-added below as flat entries.
            rval.remove("inboundStatistics");
            rval.remove("outboundStatistics");

            rval.put("inboundByteRate", Long.valueOf(info.getInboundStatistics().getByteRate()));
            rval.put("inboundMessageRate", Long.valueOf(info.getInboundStatistics().getMessageRate()));
            rval.put("inboundTotalBytes", Long.valueOf(info.getInboundStatistics().getTotalBytes()));
            rval.put("inboundTotalMessages", Long.valueOf(info.getInboundStatistics().getTotalMessages()));
            rval.put("outboundByteRate", Long.valueOf(info.getOutboundStatistics().getByteRate()));
            rval.put("outboundMessageRate", Long.valueOf(info.getOutboundStatistics().getMessageRate()));
            rval.put("outboundTotalBytes", Long.valueOf(info.getOutboundStatistics().getTotalBytes()));
            rval.put("outboundTotalMessages", Long.valueOf(info.getOutboundStatistics().getTotalMessages()));

            return rval;
        } catch (IllegalAccessException e) {
            throw new HermesException(e);
        } catch (InvocationTargetException e) {
            throw new HermesException(e);
        } catch (NoSuchMethodException e) {
            throw new HermesException(e);
        }
    }
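PropertyUtils.describe copies every bean property of DestinationInfo into the map, including the two nested statistics beans; the remove calls drop those so their individual counters can be re-added as flat, scalar entries.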
From source file:com.hp.mqm.atrf.core.configuration.FetchConfiguration.java
    public void logProperties() {
        // Put in a TreeMap for sorting.
        TreeMap<String, String> props = new TreeMap<>(this.properties);
        props.remove(ALM_PASSWORD_PARAM);
        props.remove(OCTANE_PASSWORD_PARAM);
        if (Integer.toString(SYNC_BULK_SIZE_DEFAULT).equals(getSyncBulkSize())) {
            props.remove(SYNC_BULK_SIZE_PARAM);
        }
        if (Integer.toString(SYNC_SLEEP_BETWEEN_POSTS_DEFAULT).equals(getSyncSleepBetweenPosts())) {
            props.remove(SYNC_SLEEP_BETWEEN_POSTS_PARAM);
        }
        if (Integer.toString(ALM_RUN_FILTER_FETCH_LIMIT_DEFAULT).equals(getRunFilterFetchLimit())) {
            props.remove(ALM_RUN_FILTER_FETCH_LIMIT_PARAM);
        }
        logger.info("Loaded configuration : " + props.entrySet());
    }
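Here remove serves two purposes: it keeps the two password parameters out of the log entirely, and it suppresses parameters still at their default values so the logged configuration stays short. TreeMap.remove on an absent key simply returns null, so none of these calls need a containsKey guard.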
From source file:org.apache.storm.scheduler.IsolationScheduler.java
    private Map<Integer, Integer> machineDistribution(TopologyDetails topology) {
        int machineNum = isoMachines.get(topology.getName()).intValue();
        int workerNum = topology.getNumWorkers();
        TreeMap<Integer, Integer> distribution = Utils.integerDivided(workerNum, machineNum);
        if (distribution.containsKey(0)) {
            distribution.remove(0);
        }
        return distribution;
    }
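For context, a sketch of what Utils.integerDivided plausibly computes, inferred from its use here rather than copied from Storm: it splits workerNum into machineNum near-equal pieces and maps each piece size to the number of machines receiving that many workers.

    // Hypothetical reconstruction: integerDivided(7, 3) -> {2=2, 3=1},
    // integerDivided(2, 4) -> {0=2, 1=2}.
    static TreeMap<Integer, Integer> integerDivided(int sum, int numPieces) {
        int base = sum / numPieces;
        int numWithExtra = sum % numPieces;
        TreeMap<Integer, Integer> result = new TreeMap<>();
        result.put(base, numPieces - numWithExtra); // machines with the base count
        if (numWithExtra != 0) {
            result.put(base + 1, numWithExtra);     // machines with one extra worker
        }
        return result;
    }

A zero piece size appears whenever workerNum < machineNum, which is why the caller strips the 0 key: a machine assigned zero workers is not a real assignment.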
From source file:org.cloudata.core.client.TabletLocationCache.java
    private void removeFromCache(TreeMap<Row.Key, TabletInfo> cache, Row.Key cacheRowKey, TabletInfo removeTablet) {
        if (cache.containsKey(cacheRowKey)) {
            cache.remove(cacheRowKey);
        }

        SortedMap<Row.Key, TabletInfo> tailMap = cache.tailMap(cacheRowKey);
        if (tailMap.isEmpty()) {
            return;
        }

        Row.Key tailFirst = tailMap.firstKey();
        TabletInfo tabletInfo = tailMap.get(tailFirst);
        if (tabletInfo.equals(removeTablet)) {
            cache.remove(tailFirst);
        }
    }
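Two removals may happen here: the exact entry at cacheRowKey, and then, via tailMap (a live view of the keys at or above cacheRowKey, consulted after the first removal), the next higher entry, but only if it still points at the tablet being evicted. The containsKey guard is defensive; cache.remove would be a harmless no-op on a missing key.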
From source file:de.suse.swamp.core.container.WorkflowManager.java
    public synchronized List reloadWorkflowDefinition(String name, String version) throws Exception {
        SWAMP swamp = SWAMP.getInstance();
        String workflowLoc = swamp.getWorkflowLocation();
        File workflowDir = new File(workflowLoc);

        // Evict the cached template for this version so it is re-read from disk.
        TreeMap versions = (TreeMap) workflowTempls.get(name);
        if (versions != null) {
            versions.remove(version);
        }

        WorkflowReader reader = new WorkflowReader(workflowDir);
        List results = new ArrayList();
        results.add(reader.readWorkflow(name, version, results));
        installValidTemplates(results);
        return results;
    }
From source file:org.apache.hadoop.net.unix.DomainSocketWatcher.java
    /**
     * Send callback, and if the domain socket was closed as a result of
     * processing, then also remove the entry for the file descriptor.
     *
     * @param caller  reason for call
     * @param entries mapping of file descriptor to entry
     * @param fdSet   set of file descriptors
     * @param fd      file descriptor
     */
    private void sendCallbackAndRemove(String caller, TreeMap<Integer, Entry> entries, FdSet fdSet, int fd) {
        if (sendCallback(caller, entries, fdSet, fd)) {
            entries.remove(fd);
        }
    }
From source file:gov.usgs.anss.query.MultiplexedMSOutputer.java
    /**
     * This does the hard work of sorting - called as a shutdown hook.
     * TODO: consider recursion.
     *
     * @param outputName name for the output file.
     * @param files list of MiniSEED files to multiplex.
     * @param cleanup flag indicating whether to cleanup after ourselves or not.
     * @throws IOException
     */
    public static void multiplexFiles(String outputName, List<File> files, boolean cleanup, boolean allowEmpty)
            throws IOException {
        ArrayList<File> cleanupFiles = new ArrayList<File>(files);
        ArrayList<File> moreFiles = new ArrayList<File>();
        File outputFile = new File(outputName);
        File tempOutputFile = new File(outputName + ".tmp");

        do {
            // On a subsequent pass (files were deferred), fold the partial output
            // back in as one of the inputs and merge again.
            if (!moreFiles.isEmpty()) {
                logger.info("more files left to multiplex...");
                FileUtils.deleteQuietly(tempOutputFile);
                FileUtils.moveFile(outputFile, tempOutputFile);
                cleanupFiles.add(tempOutputFile);
                moreFiles.add(tempOutputFile);
                files = moreFiles;
                moreFiles = new ArrayList<File>();
            }

            logger.log(Level.FINE, "Multiplexing blocks from {0} temp files to {1}",
                    new Object[] { files.size(), outputName });

            BufferedOutputStream out = new BufferedOutputStream(FileUtils.openOutputStream(outputFile));

            // The hard part, sorting the temp files...
            TreeMap<MiniSeed, FileInputStream> blks =
                    new TreeMap<MiniSeed, FileInputStream>(new MiniSeedTimeOnlyComparator());

            // Prime the TreeMap with the first valid block from each file.
            logger.log(Level.FINEST, "Priming the TreeMap with files: {0}", files);
            for (File file : files) {
                logger.log(Level.INFO, "Reading first block from {0}", file.toString());
                try {
                    FileInputStream fs = FileUtils.openInputStream(file);
                    MiniSeed ms = getNextValidMiniSeed(fs, allowEmpty);
                    if (ms != null) {
                        blks.put(ms, fs);
                    } else {
                        logger.log(Level.WARNING, "Failed to read valid MiniSEED block from {0}", file.toString());
                    }
                } catch (IOException ex) {
                    // Catch "Too many open files" i.e. hitting ulimit, throw anything else.
                    if (ex.getMessage().contains("Too many open files")) {
                        logger.log(Level.INFO, "Too many open files - {0} deferred.", file.toString());
                        moreFiles.add(file);
                    } else {
                        throw ex;
                    }
                }
            }

            while (!blks.isEmpty()) {
                MiniSeed next = blks.firstKey();
                out.write(next.getBuf(), 0, next.getBlockSize());
                FileInputStream fs = blks.remove(next);
                next = getNextValidMiniSeed(fs, allowEmpty);
                if (next != null) {
                    blks.put(next, fs);
                } else {
                    fs.close();
                }
            }
            out.close();
        } while (!moreFiles.isEmpty());

        if (cleanup) {
            logger.log(Level.INFO, "Cleaning up...");
            for (File file : cleanupFiles) {
                FileUtils.deleteQuietly(file);
            }
        }
    }
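The inner while loop is a classic k-way merge with the TreeMap standing in for a priority queue: ordered by MiniSeedTimeOnlyComparator, it holds at most one block per open file. Each pass writes the earliest block, removes it (recovering the stream it came from), and refills from that stream, so the output is globally time-sorted while only one block per input file is ever held in memory. One caveat of this pattern: the comparator must never declare two live blocks equal, or TreeMap.put would silently replace one stream's pending block with another's.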