List of usage examples for the java.util.PriorityQueue no-argument constructor, PriorityQueue()
public PriorityQueue()

Creates a PriorityQueue with the default initial capacity (11) that orders its elements according to their natural ordering.
From source file:org.xwiki.velocity.tools.CollectionsTool.java
/**
 * Create and return a new {@link Queue}, which instead of the FIFO ordering uses the natural order of the items
 * added to the queue, so that the retrieved item is always the lowest one. All the items added to this queue must
 * be non-null and be comparable with the other items in the queue.
 *
 * @param <E> the type of the elements in the queue
 * @return a new, empty {@link PriorityQueue}
 */
public <E extends Comparable<E>> Queue<E> getPriorityQueue() {
    return new PriorityQueue<E>();
}
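For context, a short standalone sketch (not part of the XWiki source) of how a queue returned by a method like getPriorityQueue() behaves: regardless of insertion order, poll() always removes the smallest remaining element.

import java.util.PriorityQueue;
import java.util.Queue;

public class NaturalOrderDemo {
    public static void main(String[] args) {
        // The no-arg constructor orders elements by their natural ordering.
        Queue<Integer> queue = new PriorityQueue<Integer>();
        queue.add(42);
        queue.add(7);
        queue.add(19);
        // poll() removes the smallest remaining element: prints 7, 19, 42.
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }
    }
}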
From source file:eu.stratosphere.nephele.jobmanager.splitassigner.file.FileInputSplitList.java
/**
 * Returns a list of file input splits specifically ordered for the given {@link AbstractInstance}. When the list is
 * initially created, it contains all the unconsumed file input splits at that point in time, ascendingly ordered by
 * the minimum distance between the input splits' storage locations and the given {@link AbstractInstance}.
 *
 * @param instance
 *        the instance for which the file input split list has been computed
 * @return the list of file input splits ordered specifically for the given instance
 */
private Queue<QueueElem> getInstanceSplitList(final AbstractInstance instance) {

    Queue<QueueElem> instanceSplitList = this.instanceMap.get(instance);
    if (instanceSplitList == null) {

        // Create and populate instance specific split list
        instanceSplitList = new PriorityQueue<FileInputSplitList.QueueElem>();
        final Iterator<FileInputSplit> it = this.masterSet.iterator();
        while (it.hasNext()) {

            final FileInputSplit split = it.next();
            final String[] hostNames = split.getHostNames();
            if (hostNames == null) {
                instanceSplitList.add(new QueueElem(split, Integer.MAX_VALUE));
            } else {

                int minDistance = Integer.MAX_VALUE;
                for (int i = 0; i < hostNames.length; ++i) {
                    final int distance = instance.getDistance(hostNames[i]);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Distance between " + instance + " and " + hostNames[i] + " is " + distance);
                    }
                    if (distance < minDistance) {
                        minDistance = distance;
                    }
                }

                instanceSplitList.add(new QueueElem(split, minDistance));
            }
        }

        this.instanceMap.put(instance, instanceSplitList);
    }

    return instanceSplitList;
}
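The ordering here comes entirely from QueueElem, which must implement Comparable so that the split with the smallest distance is polled first. The QueueElem class is not part of this excerpt; the following is a hypothetical, self-contained sketch of such a wrapper (the class and field names are assumptions, and the payload type is kept generic). The same pattern applies to the Flink and LocatableInputSplitList variants below.

// Hypothetical sketch of the Comparable wrapper such a queue needs;
// the real QueueElem nested in FileInputSplitList may differ.
final class DistanceOrdered<S> implements Comparable<DistanceOrdered<S>> {

    private final S split;      // the wrapped input split
    private final int distance; // minimum distance to the instance

    DistanceOrdered(S split, int distance) {
        this.split = split;
        this.distance = distance;
    }

    S getSplit() {
        return this.split;
    }

    @Override
    public int compareTo(DistanceOrdered<S> other) {
        // Smaller distances compare as "less", so the min-heap polls the
        // closest split first; unknown locations (Integer.MAX_VALUE) come last.
        return Integer.compare(this.distance, other.distance);
    }
}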
From source file:org.apache.flink.runtime.jobmanager.splitassigner.file.FileInputSplitList.java
/**
 * Returns a list of file input splits specifically ordered for the given {@link org.apache.flink.runtime.instance.Instance}.
 * When the list is initially created, it contains all the unconsumed file input splits at that point in time, ascendingly
 * ordered by the minimum distance between the input splits' storage locations and the given
 * {@link org.apache.flink.runtime.instance.Instance}.
 *
 * @param instance
 *        the instance for which the file input split list has been computed
 * @return the list of file input splits ordered specifically for the given instance
 */
private Queue<QueueElem> getInstanceSplitList(final Instance instance) {

    Queue<QueueElem> instanceSplitList = this.instanceMap.get(instance);
    if (instanceSplitList == null) {

        // Create and populate instance specific split list
        instanceSplitList = new PriorityQueue<FileInputSplitList.QueueElem>();
        final Iterator<FileInputSplit> it = this.masterSet.iterator();
        while (it.hasNext()) {

            final FileInputSplit split = it.next();
            final String[] hostNames = split.getHostNames();
            if (hostNames == null) {
                instanceSplitList.add(new QueueElem(split, Integer.MAX_VALUE));
            } else {

                int minDistance = Integer.MAX_VALUE;
                for (int i = 0; i < hostNames.length; ++i) {
                    final int distance = instance.getDistance(hostNames[i]);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Distance between " + instance + " and " + hostNames[i] + " is " + distance);
                    }
                    if (distance < minDistance) {
                        minDistance = distance;
                    }
                }

                instanceSplitList.add(new QueueElem(split, minDistance));
            }
        }

        this.instanceMap.put(instance, instanceSplitList);
    }

    return instanceSplitList;
}
From source file:eu.stratosphere.nephele.jobmanager.splitassigner.LocatableInputSplitList.java
/**
 * Returns a list of locatable input splits specifically ordered for the given {@link AbstractInstance}. When the
 * list is initially created, it contains all the unconsumed located input splits at that point in time, ascendingly
 * ordered by the minimum distance between the input splits' storage locations and the given {@link AbstractInstance}.
 *
 * @param instance
 *        the instance for which the locatable input split list has been computed
 * @return the list of file input splits ordered specifically for the given instance
 */
private Queue<QueueElem> getInstanceSplitList(final AbstractInstance instance) {

    Queue<QueueElem> instanceSplitList = this.instanceMap.get(instance);
    if (instanceSplitList == null) {

        // Create and populate instance specific split list
        instanceSplitList = new PriorityQueue<LocatableInputSplitList.QueueElem>();
        final Iterator<LocatableInputSplit> it = this.masterSet.iterator();
        while (it.hasNext()) {

            final LocatableInputSplit split = it.next();
            final String[] hostnames = split.getHostnames();
            if (hostnames == null) {
                instanceSplitList.add(new QueueElem(split, Integer.MAX_VALUE));
            } else {

                int minDistance = Integer.MAX_VALUE;
                for (int i = 0; i < hostnames.length; ++i) {
                    final int distance = instance.getDistance(hostnames[i]);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Distance between " + instance + " and " + hostnames[i] + " is " + distance);
                    }
                    if (distance < minDistance) {
                        minDistance = distance;
                    }
                }

                instanceSplitList.add(new QueueElem(split, minDistance));
            }
        }

        this.instanceMap.put(instance, instanceSplitList);
    }

    return instanceSplitList;
}
From source file:org.apache.flink.runtime.jobmanager.splitassigner.LocatableInputSplitList.java
/**
 * Returns a list of locatable input splits specifically ordered for the given {@link org.apache.flink.runtime.instance.Instance}.
 * When the list is initially created, it contains all the unconsumed located input splits at that point in time, ascendingly
 * ordered by the minimum distance between the input splits' storage locations and the given
 * {@link org.apache.flink.runtime.instance.Instance}.
 *
 * @param instance
 *        the instance for which the locatable input split list has been computed
 * @return the list of file input splits ordered specifically for the given instance
 */
private Queue<QueueElem> getInstanceSplitList(final Instance instance) {

    Queue<QueueElem> instanceSplitList = this.instanceMap.get(instance);
    if (instanceSplitList == null) {

        // Create and populate instance specific split list
        instanceSplitList = new PriorityQueue<LocatableInputSplitList.QueueElem>();
        final Iterator<LocatableInputSplit> it = this.masterSet.iterator();
        while (it.hasNext()) {

            final LocatableInputSplit split = it.next();
            final String[] hostnames = split.getHostnames();
            if (hostnames == null) {
                instanceSplitList.add(new QueueElem(split, Integer.MAX_VALUE));
            } else {

                int minDistance = Integer.MAX_VALUE;
                for (int i = 0; i < hostnames.length; ++i) {
                    final int distance = instance.getDistance(hostnames[i]);
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Distance between " + instance + " and " + hostnames[i] + " is " + distance);
                    }
                    if (distance < minDistance) {
                        minDistance = distance;
                    }
                }

                instanceSplitList.add(new QueueElem(split, minDistance));
            }
        }

        this.instanceMap.put(instance, instanceSplitList);
    }

    return instanceSplitList;
}
From source file:com.facebook.FileLruCache.java
private void trim() {
    Logger.log(LoggingBehaviors.CACHE, TAG, "trim started");

    PriorityQueue<ModifiedFile> heap = new PriorityQueue<ModifiedFile>();
    long size = 0;
    long count = 0;
    for (File file : this.directory.listFiles(BufferFile.excludeBufferFiles())) {
        ModifiedFile modified = new ModifiedFile(file);
        heap.add(modified);
        Logger.log(LoggingBehaviors.CACHE, TAG, "  trim considering time="
                + Long.valueOf(modified.getModified()) + " name=" + modified.getFile().getName());

        size += file.length();
        count++;
    }

    while ((size > limits.getByteCount()) || (count > limits.getFileCount())) {
        File file = heap.remove().getFile();
        Logger.log(LoggingBehaviors.CACHE, TAG, "  trim removing " + file.getName());
        size -= file.length();
        count--;
        file.delete();
    }
}
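The eviction order comes from ModifiedFile's natural ordering, which is not shown in this excerpt. For heap.remove() to evict the least recently modified file first, the oldest modification time must compare as smallest. A hedged, self-contained sketch of such a wrapper (names inferred from the calls above; the real nested class in FileLruCache may differ):

import java.io.File;

// Hypothetical sketch; the real ModifiedFile nested in FileLruCache may differ.
final class ModifiedFile implements Comparable<ModifiedFile> {

    private final File file;
    private final long modified;

    ModifiedFile(File file) {
        this.file = file;
        this.modified = file.lastModified(); // capture once, in case it changes later
    }

    File getFile() {
        return this.file;
    }

    long getModified() {
        return this.modified;
    }

    @Override
    public int compareTo(ModifiedFile another) {
        // Older files compare as "smaller", so the min-heap evicts them first;
        // ties fall back to the file path for a stable, consistent ordering.
        if (this.modified < another.modified) {
            return -1;
        } else if (this.modified > another.modified) {
            return 1;
        }
        return this.file.compareTo(another.file);
    }
}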
From source file:com.feilong.commons.core.util.CollectionsUtilTest.java
/**
 * TestCollectionsUtilTest.
 */
@Test
public void testCollectionsUtilTest33() {
    Queue<Object> queue = new PriorityQueue<Object>();

    queue.add(1);
    queue.add(2);
    queue.add(3);
    queue.add(4);
    queue.add(5);
    queue.add(6);

    if (log.isDebugEnabled()) {
        log.debug(JsonUtil.format(queue));
        log.debug("" + queue.peek());
    }
}
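One caveat this test brushes against: serializing or printing the queue traverses its iterator, and a PriorityQueue's iterator does not promise sorted order; only peek()/poll() respect the ordering. A short standalone sketch illustrating the difference:

import java.util.PriorityQueue;
import java.util.Queue;

public class IterationOrderDemo {
    public static void main(String[] args) {
        Queue<Integer> queue = new PriorityQueue<Integer>();
        queue.add(5);
        queue.add(1);
        queue.add(3);

        // toString() uses the iterator, whose order is unspecified beyond
        // the head coming first, e.g. [1, 5, 3].
        System.out.println(queue);

        // peek() returns the head (the smallest element) without removing it.
        System.out.println(queue.peek()); // 1

        // Draining with poll() is the way to see sorted order: 1 3 5.
        while (!queue.isEmpty()) {
            System.out.print(queue.poll() + " ");
        }
    }
}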
From source file:org.apache.hama.computemodel.mapreduce.Mapper.java
@Override
protected void compute(
        BSPPeer<K1, V1, K2, V2, WritableKeyValues<? extends WritableComparable<?>, ? extends Writable>> peer)
        throws IOException {

    this.memoryQueue = new PriorityQueue<WritableKeyValues<K2, V2>>();
    this.globalKeyDistribution = new long[peer.getNumPeers()][peer.getNumPeers()];

    int myId = peer.getPeerId();
    OutputCollector<K2, V2> collector = new BSPMapperOutputCollector<K1, V1, K2, V2>(peer, memoryQueue,
            globalKeyDistribution[myId]);

    KeyValuePair<K1, V1> record = null;
    while ((record = peer.readNext()) != null) {
        map(record.getKey(), record.getValue(), collector);
    }

    Comparator<V2> valComparator = null;
    Configuration conf = peer.getConfiguration();
    Class<?> comparatorClass = conf.getClass(VALUE_COMPARATOR_CLASS, null);
    if (comparatorClass != null) {
        valComparator = (Comparator<V2>) ReflectionUtils.newInstance(comparatorClass, conf);
    }

    Reducer<K2, V2, K2, V2> combiner = null;
    Class<?> combinerClass = conf.getClass(COMBINER_CLASS, null);
    if (combinerClass != null) {
        combiner = (Reducer<K2, V2, K2, V2>) ReflectionUtils.newInstance(combinerClass, conf);
    }

    ExecutorService service = Executors.newFixedThreadPool(1);
    Future<Integer> future = service.submit(new CombineAndSortThread<K2, V2>(peer.getConfiguration(),
            this.memoryQueue, valComparator, combiner));

    String[] peers = peer.getAllPeerNames();
    IntWritable keyPartition = new IntWritable();
    LongWritable value = new LongWritable();

    WritableKeyValues<IntWritable, IntWritable> myIdTuple = new WritableKeyValues<IntWritable, IntWritable>(
            new IntWritable(peer.getPeerId()), new IntWritable(-1));

    int peerId = peer.getPeerId();
    for (int keyNumber = 0; keyNumber < globalKeyDistribution[0].length; ++keyNumber) {
        keyPartition.set(keyNumber);
        value.set(globalKeyDistribution[peerId][keyNumber]);
        myIdTuple.setValue(keyPartition);
        for (String peerName : peers) {
            peer.send(peerName,
                    new WritableKeyValues<WritableKeyValues<IntWritable, IntWritable>, LongWritable>(myIdTuple, value));
        }
    }

    peer.save(KEY_DIST, this.globalKeyDistribution);
    peer.save(COMBINER_FUTURE, future);
    peer.save(MESSAGE_QUEUE, this.memoryQueue);
}
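Here memoryQueue acts as an in-memory sort buffer: map outputs accumulate in the heap, and a background thread drains them in key order to apply the combiner, mirroring the sort-and-combine phase of MapReduce. A stripped-down, hypothetical sketch of that drain pattern (plain JDK types standing in for the Hama classes; all names are illustrative):

import java.util.AbstractMap.SimpleEntry;
import java.util.Map;
import java.util.PriorityQueue;

public class SortAndCombineDemo {
    public static void main(String[] args) {
        // A heap of (key, value) pairs ordered by key, standing in for
        // the map-output buffer.
        PriorityQueue<Map.Entry<String, Integer>> buffer =
                new PriorityQueue<Map.Entry<String, Integer>>(11, Map.Entry.comparingByKey());
        buffer.add(new SimpleEntry<String, Integer>("b", 1));
        buffer.add(new SimpleEntry<String, Integer>("a", 1));
        buffer.add(new SimpleEntry<String, Integer>("a", 2));

        // Drain in key order, summing runs of equal keys (a toy "combiner").
        // Prints "a -> 3" then "b -> 1".
        String currentKey = null;
        int sum = 0;
        while (!buffer.isEmpty()) {
            Map.Entry<String, Integer> e = buffer.poll();
            if (currentKey != null && !currentKey.equals(e.getKey())) {
                System.out.println(currentKey + " -> " + sum);
                sum = 0;
            }
            currentKey = e.getKey();
            sum += e.getValue();
        }
        if (currentKey != null) {
            System.out.println(currentKey + " -> " + sum);
        }
    }
}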
From source file:mulavito.algorithms.shortestpath.ksp.Eppstein.java
@Override
protected List<List<E>> getShortestPathsIntern(V source, V target, int k) {
    PriorityQueue<WeightedPath> prioQ = new PriorityQueue<WeightedPath>();
    List<List<E>> found_paths = new LinkedList<List<E>>();

    Transformer<E, Double> delta = prepareTransformations(target);

    // Initialize with start vertex.
    prioQ.add(new WeightedPath(source));

    while (!prioQ.isEmpty() && found_paths.size() < k) {
        WeightedPath curPath = prioQ.poll(); // get & remove next shortest
        V curV = curPath.getLast();

        if (curV.equals(target)) {
            found_paths.add(curPath.getPath());
            continue;
        }

        // Create new paths for every expanded vertex ...
        for (V nextV : graph.getSuccessors(curV)) {
            if (curPath.contains(nextV))
                continue; // Prevent looping!

            // ... and every possible edge.
            for (E e : graph.findEdgeSet(curV, nextV)) {
                if (Double.isInfinite(delta.transform(e)))
                    continue; // Skip unreachable vertices.

                WeightedPath tmpPath = new WeightedPath(curPath); // clone
                tmpPath.addHop(e, delta.transform(e), nextV);

                prioQ.add(tmpPath);
            }
        }
    }

    return found_paths;
}
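The loop above is a best-first search: the queue must always yield the cheapest partial path discovered so far, which requires WeightedPath (not shown in this excerpt) to compare by accumulated weight. A plausible minimal sketch of that comparison (class and field names are assumptions, not the MuLaViTo source):

// Hypothetical sketch of a path wrapper ordered by total weight;
// the real WeightedPath may carry vertices and edges as well.
final class WeightedPathSketch implements Comparable<WeightedPathSketch> {

    private double totalWeight; // sum of the deltas of all hops so far

    void addWeight(double delta) {
        this.totalWeight += delta;
    }

    @Override
    public int compareTo(WeightedPathSketch other) {
        // Cheaper paths compare as "smaller", so poll() returns the next
        // shortest candidate, exactly as the k-shortest-paths loop requires.
        return Double.compare(this.totalWeight, other.totalWeight);
    }
}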
From source file:org.apache.hadoop.hbase.master.balancer.LocalityAwareLoadBalancer.java
/**
 * This implements the Locality Aware Load Balancer.
 * Information for the algorithm can be found here: https://issues.apache.org/jira/browse/HBASE-10075
 *
 * @param clusterMap Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    long startTime = System.currentTimeMillis();

    ClusterLoadState cs = new ClusterLoadState(clusterMap);

    float average = cs.getLoadAverage(); // for logging
    int ceiling = (int) Math.ceil(average * (1 + slop));

    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();
    if (!this.needsBalance(cs)) {
        /*LOG.info("Skipping load balancing because balanced cluster; " + "servers=" + cs.getNumServers() + " " +
            "regions=" + cs.getNumRegions() + " average=" + average + " " + "mostloaded=" +
            serversByLoad.lastKey().getLoad() + " leastloaded=" + serversByLoad.firstKey().getLoad());*/
        return null;
    }

    // Additional check for the locality-aware load balancer, as it considers
    // only the most loaded servers.
    if (!(cs.getMaxLoad() > ceiling)) {
        return null;
    }

    Cluster cluster = new Cluster(clusterMap, new HashMap<String, Deque<RegionLoad>>(), regionLocationFinder);
    int numRegions = cs.getNumRegions();

    LOG.info(" ####################################################################################");
    LOG.info(" Before Locality-aware Balancing");
    LOG.info(" Average=" + average + " Ceiling=" + ceiling + " slop=" + slop);
    /* for (ServerAndLoad server : serversByLoad.keySet()) {
        LOG.info("---------------" + "Server Name: " + server.getServerName() + "---------------");
        List<HRegionInfo> hRegionInfos = serversByLoad.get(server);
        LOG.info("Number of Regions:" + hRegionInfos.size());
        for (HRegionInfo hRegionInfo : hRegionInfos) {
            LOG.info(String.format("Name of Region: %s ", hRegionInfo.getRegionNameAsString()));
            //LOG.info(String.format("Size of Region in number of rows"+(Bytes.toInt(hRegionInfo.getStartKey())-Bytes.toInt(hRegionInfo.getEndKey()))));
            LOG.info("Start Key: " + Bytes.toString(hRegionInfo.getStartKey()));
            LOG.info("End Key: " + Bytes.toString(hRegionInfo.getEndKey()));
        }
        LOG.info("------------------------------------------------------------------------------");
    } */

    // Calculate allTableRegionNumber = total number of regions per table.
    Map<Integer, Integer> allTableRegionNumberMap = new HashMap<Integer, Integer>();
    for (int i = 0; i < cluster.numServers; ++i) {
        for (int j = 0; j < cluster.numTables; ++j) {
            if (allTableRegionNumberMap.containsKey(j)) {
                Integer integer = allTableRegionNumberMap.get(j);
                integer = integer + cluster.numRegionsPerServerPerTable[i][j];
                allTableRegionNumberMap.put(j, integer);
            } else {
                allTableRegionNumberMap.put(j, cluster.numRegionsPerServerPerTable[i][j]);
            }
        }
    }

    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    for (ServerAndLoad server : serversByLoad.keySet()) {
        List<HRegionInfo> hRegionInfos = serversByLoad.get(server);
        // Check if the number of regions on the current server is greater than
        // the ceiling. Continue only if it is.
        if (hRegionInfos.size() <= ceiling) {
            LOG.debug("Number of HRegions <= ceiling (" + hRegionInfos.size() + " <= " + ceiling + ")");
            continue;
        }
        PriorityQueue<RegionServerRegionAffinity> queue = new PriorityQueue<RegionServerRegionAffinity>();
        int numberOfRegionsToMove = hRegionInfos.size() - ceiling;
        double regionAffinityNumber = (1 - hRegionInfos.size() / numRegions) * SERVER_BALANCER_WEIGHT;
        double tableRegionAffinityNumber = 0;
        // Calculate the affinity of each region to this server.
        for (HRegionInfo hRegionInfo : hRegionInfos) {
            // Do not move the meta region.
            if (hRegionInfo.isMetaRegion()) {
                continue;
            }
            TableName table = hRegionInfo.getTable();
            String tableName = table.getNameAsString();
            int tableIndex = cluster.tablesToIndex.get(tableName);
            int serverIndex = cluster.serversToIndex.get(server.getServerName().getHostAndPort());
            tableRegionAffinityNumber = (1 - cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                    / allTableRegionNumberMap.get(tableIndex)) * TABLE_BALANCER_WEIGHT;
            float localityIndex = getLocalityIndex(hRegionInfo, server) * LOCALITY_WEIGHT;
            LOG.info("tableRegionAffinity: " + tableRegionAffinityNumber);
            LOG.info("regionAffinityNumber: " + regionAffinityNumber);
            LOG.info("localityIndex: " + localityIndex);
            double finalAffinity = regionAffinityNumber + tableRegionAffinityNumber + localityIndex
                    + getStickinessWeight(hRegionInfo);
            queue.add(new RegionServerRegionAffinity(server, hRegionInfo, finalAffinity));
            LOG.info("Affinity between server=" + server.getServerName() + " and region="
                    + hRegionInfo.getRegionNameAsString() + " is " + finalAffinity);
        }
        LOG.info("Number of regions to move=" + numberOfRegionsToMove
                + " All server and region affinities: " + queue);

        // Get the top numberOfRegionsToMove.
        List<RegionServerRegionAffinity> listOfRegionsToMove = new ArrayList<RegionServerRegionAffinity>();
        for (int i = 0; i < numberOfRegionsToMove; ++i) {
            if (queue.isEmpty()) {
                continue;
            }
            listOfRegionsToMove.add(queue.poll());
        }

        // Search for the most affine servers for these listOfRegionsToMove.
        for (RegionServerRegionAffinity regionServerRegionAffinity : listOfRegionsToMove) {
            HRegionInfo hRegionInfoToMove = regionServerRegionAffinity.getHRegionInfo();
            ServerAndLoad serverToMove = null;
            double maxAffinity = Double.MIN_VALUE;
            // Get the most affine server for hRegionInfoToMove.
            for (ServerAndLoad activeServer : serversByLoad.keySet()) {
                hRegionInfos = serversByLoad.get(activeServer);
                if (activeServer.equals(regionServerRegionAffinity.getServer())) {
                    continue;
                }
                if (hRegionInfos.size() >= ceiling) {
                    LOG.debug("Number of HRegions >= ceiling (" + hRegionInfos.size() + " >= " + ceiling + ")");
                    continue;
                }
                regionAffinityNumber = (1 - hRegionInfos.size() / numRegions) * SERVER_BALANCER_WEIGHT;
                TableName table = hRegionInfoToMove.getTable();
                String tableNameAsString = table.getNameAsString();
                int serverIndex = cluster.serversToIndex.get(activeServer.getServerName().getHostAndPort());
                tableRegionAffinityNumber = 0;
                if (cluster.tablesToIndex.containsKey(tableNameAsString)) {
                    Integer tableIndex = cluster.tablesToIndex.get(tableNameAsString);
                    tableRegionAffinityNumber = (1 - cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                            / allTableRegionNumberMap.get(tableIndex)) * TABLE_BALANCER_WEIGHT;
                } else {
                    LOG.error("Table " + tableNameAsString + " not present in cluster.tablesToIndex");
                }
                double finalAffinity = regionAffinityNumber + tableRegionAffinityNumber
                        + getLocalityIndex(hRegionInfoToMove, activeServer) * LOCALITY_WEIGHT
                        + getStickinessWeight(hRegionInfoToMove);
                if (finalAffinity > maxAffinity) {
                    maxAffinity = finalAffinity;
                    serverToMove = activeServer;
                }
            }
            regionsToReturn.add(new RegionPlan(hRegionInfoToMove,
                    regionServerRegionAffinity.getServer().getServerName(), serverToMove.getServerName()));
        }
    }

    LOG.info("Returning plan: " + regionsToReturn);

    // Reset previously moved regions and add the new regions.
    previouslyMovedRegions.clear();
    for (RegionPlan regionPlan : regionsToReturn) {
        previouslyMovedRegions.add(regionPlan.getRegionInfo());
    }

    long endTime = System.currentTimeMillis();
    LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. "
            + "Moving " + regionsToReturn.size() + " regions");
    return regionsToReturn;
}
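Note that poll() returns whichever element RegionServerRegionAffinity's compareTo ranks first; that class is not shown in this excerpt. If "get the top" means highest affinity first, its comparison would have to invert PriorityQueue's natural min-heap behavior. A hedged, self-contained sketch of that inversion pattern (the class is hypothetical; the real RegionServerRegionAffinity may order differently):

import java.util.PriorityQueue;

// Hypothetical sketch: PriorityQueue is a min-heap, so to poll the
// *largest* affinity first the compareTo must be reversed.
final class AffinitySketch implements Comparable<AffinitySketch> {

    private final double affinity;

    AffinitySketch(double affinity) {
        this.affinity = affinity;
    }

    @Override
    public int compareTo(AffinitySketch other) {
        // Reversed comparison: larger affinities sort "first".
        return Double.compare(other.affinity, this.affinity);
    }

    public static void main(String[] args) {
        PriorityQueue<AffinitySketch> queue = new PriorityQueue<AffinitySketch>();
        queue.add(new AffinitySketch(0.2));
        queue.add(new AffinitySketch(0.9));
        queue.add(new AffinitySketch(0.5));
        // Polls 0.9, then 0.5, then 0.2.
        while (!queue.isEmpty()) {
            System.out.println(queue.poll().affinity);
        }
    }
}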