List of usage examples for java.util.PriorityQueue.offer
public boolean offer(E e)
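Inserts the specified element into this priority queue. Because PriorityQueue is unbounded, offer never returns false; it can only fail by throwing NullPointerException (for a null element) or ClassCastException (for an element that cannot be compared with those already in the queue). Before the collected examples, here is a minimal self-contained sketch of that contract; the class name OfferDemo is ours, not from any of the source files below.

import java.util.PriorityQueue;

public class OfferDemo {
    public static void main(String[] args) {
        // A PriorityQueue is unbounded, so offer() never returns false here;
        // it behaves like add() and can only fail by throwing an exception.
        PriorityQueue<String> queue = new PriorityQueue<>();
        System.out.println(queue.offer("banana")); // true
        System.out.println(queue.offer("apple")); // true

        // poll() removes the head, i.e. the least element under the natural
        // ordering (or the Comparator supplied at construction).
        System.out.println(queue.poll()); // apple
    }
}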
From source file:Main.java
public static void main(String args[]) {
    PriorityQueue<Integer> prq = new PriorityQueue<Integer>();
    for (int i = 0; i < 10; i++) {
        prq.add(i);
    }
    System.out.println(prq);

    // add using offer() method call
    prq.offer(122);
    System.out.println(prq);
}
From source file:PriorityQueueTester.java
public static void main(String[] args) {
    PriorityQueue<Integer> pq = new PriorityQueue<Integer>(20, new Comparator<Integer>() {
        public int compare(Integer i, Integer j) {
            int result = i % 2 - j % 2;
            if (result == 0)
                result = i - j;
            return result;
        }
    });

    // Fill up with data, in an odd order
    for (int i = 0; i < 20; i++) {
        pq.offer(20 - i);
    }

    // Print out and check ordering
    for (int i = 0; i < 20; i++) {
        System.out.println(pq.poll());
    }
}
From source file:android.support.v7.graphics.ColorCutQuantizer.java
private List<Swatch> quantizePixels(int maxColors) {
    // Create the priority queue which is sorted by volume descending. This means we always
    // split the largest box in the queue
    final PriorityQueue<Vbox> pq = new PriorityQueue<>(maxColors, VBOX_COMPARATOR_VOLUME);

    // To start, offer a box which contains all of the colors
    pq.offer(new Vbox(0, mColors.length - 1));

    // Now go through the boxes, splitting them until we have reached maxColors or there are no
    // more boxes to split
    splitBoxes(pq, maxColors);

    // Finally, return the average colors of the color boxes
    return generateAverageColors(pq);
}
From source file:android.support.v7.graphics.ColorCutQuantizer.java
/**
 * Iterate through the {@link java.util.Queue}, popping
 * {@link ColorCutQuantizer.Vbox} objects from the queue
 * and splitting them. Once split, the new box and the remaining box are offered back to the
 * queue.
 *
 * @param queue {@link java.util.PriorityQueue} to poll for boxes
 * @param maxSize Maximum number of boxes to split
 */
private void splitBoxes(final PriorityQueue<Vbox> queue, final int maxSize) {
    while (queue.size() < maxSize) {
        final Vbox vbox = queue.poll();
        if (vbox != null && vbox.canSplit()) {
            // First split the box, and offer the result
            queue.offer(vbox.splitBox());
            if (LOG_TIMINGS) {
                mTimingLogger.addSplit("Box split");
            }
            // Then offer the box back
            queue.offer(vbox);
        } else {
            if (LOG_TIMINGS) {
                mTimingLogger.addSplit("All boxes split");
            }
            // If we get here then there are no more boxes to split, so return
            return;
        }
    }
}
From source file:hivemall.knn.lsh.MinHashUDTF.java
private void computeAndForwardSignatures(List<FeatureValue> features, Object[] forwardObjs)
        throws HiveException {
    final PriorityQueue<Integer> minhashes = new PriorityQueue<Integer>();
    // Compute N sets of K minhash values
    for (int i = 0; i < num_hashes; i++) {
        float weightedMinHashValues = Float.MAX_VALUE;
        for (FeatureValue fv : features) {
            Object f = fv.getFeature();
            int hashIndex = Math.abs(hashFuncs[i].hash(f));
            float w = fv.getValueAsFloat();
            float hashValue = calcWeightedHashValue(hashIndex, w);
            if (hashValue < weightedMinHashValues) {
                weightedMinHashValues = hashValue;
                minhashes.offer(hashIndex);
            }
        }
        forwardObjs[0] = getSignature(minhashes, num_keygroups);
        forward(forwardObjs);
        minhashes.clear();
    }
}
From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java
@Override
public void applyAssignments(List<DrillbitEndpoint> endpoints) throws PhysicalOperatorSetupException {
    logger.debug("Incoming endpoints: " + endpoints);
    watch.reset();
    watch.start();

    final int numSlots = endpoints.size();
    int totalAssignmentsTobeDone = chunksMapping.size();
    Preconditions.checkArgument(numSlots <= totalAssignmentsTobeDone, String.format(
            "Incoming endpoints %d is greater than number of chunks %d", numSlots, totalAssignmentsTobeDone));
    final int minPerEndpointSlot = (int) Math.floor((double) totalAssignmentsTobeDone / numSlots);
    final int maxPerEndpointSlot = (int) Math.ceil((double) totalAssignmentsTobeDone / numSlots);

    endpointFragmentMapping = Maps.newHashMapWithExpectedSize(numSlots);
    Map<String, Queue<Integer>> endpointHostIndexListMap = Maps.newHashMap();

    for (int i = 0; i < numSlots; ++i) {
        endpointFragmentMapping.put(i, new ArrayList<MongoSubScanSpec>(maxPerEndpointSlot));
        String hostname = endpoints.get(i).getAddress();
        Queue<Integer> hostIndexQueue = endpointHostIndexListMap.get(hostname);
        if (hostIndexQueue == null) {
            hostIndexQueue = Lists.newLinkedList();
            endpointHostIndexListMap.put(hostname, hostIndexQueue);
        }
        hostIndexQueue.add(i);
    }

    Set<Entry<String, List<ChunkInfo>>> chunksToAssignSet = Sets.newHashSet(chunksInverseMapping.entrySet());

    for (Iterator<Entry<String, List<ChunkInfo>>> chunksIterator = chunksToAssignSet.iterator();
            chunksIterator.hasNext();) {
        Entry<String, List<ChunkInfo>> chunkEntry = chunksIterator.next();
        Queue<Integer> slots = endpointHostIndexListMap.get(chunkEntry.getKey());
        if (slots != null) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                Integer slotIndex = slots.poll();
                List<MongoSubScanSpec> subScanSpecList = endpointFragmentMapping.get(slotIndex);
                subScanSpecList.add(buildSubScanSpecAndGet(chunkInfo));
                slots.offer(slotIndex);
            }
            chunksIterator.remove();
        }
    }

    PriorityQueue<List<MongoSubScanSpec>> minHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR);
    PriorityQueue<List<MongoSubScanSpec>> maxHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR_REV);
    for (List<MongoSubScanSpec> listOfScan : endpointFragmentMapping.values()) {
        if (listOfScan.size() < minPerEndpointSlot) {
            minHeap.offer(listOfScan);
        } else if (listOfScan.size() > minPerEndpointSlot) {
            maxHeap.offer(listOfScan);
        }
    }

    if (chunksToAssignSet.size() > 0) {
        for (Entry<String, List<ChunkInfo>> chunkEntry : chunksToAssignSet) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                List<MongoSubScanSpec> smallestList = minHeap.poll();
                smallestList.add(buildSubScanSpecAndGet(chunkInfo));
                minHeap.offer(smallestList);
            }
        }
    }

    while (minHeap.peek() != null && minHeap.peek().size() < minPerEndpointSlot) {
        List<MongoSubScanSpec> smallestList = minHeap.poll();
        List<MongoSubScanSpec> largestList = maxHeap.poll();
        smallestList.add(largestList.remove(largestList.size() - 1));
        if (largestList.size() > minPerEndpointSlot) {
            maxHeap.offer(largestList);
        }
        if (smallestList.size() < minPerEndpointSlot) {
            minHeap.offer(smallestList);
        }
    }

    // watch.elapsed(NANOSECONDS) / 1000 yields microseconds, not seconds
    logger.debug("Built assignment map in {} µs.\nEndpoints: {}.\nAssignment Map: {}",
            watch.elapsed(TimeUnit.NANOSECONDS) / 1000, endpoints, endpointFragmentMapping.toString());
}
From source file:org.apache.storm.daemon.logviewer.utils.DirectoryCleaner.java
/**
 * If totalSize of files exceeds either the per-worker quota or the global quota,
 * Logviewer deletes the oldest inactive log files in a worker directory or in all worker dirs.
 * We use the parameter forPerDir to switch between the two deletion modes.
 *
 * @param dirs the list of directories to be scanned for deletion
 * @param quota the per-dir quota or the total quota for all directories
 * @param forPerDir if true, deletion happens for a single dir; otherwise, for all directories globally
 * @param activeDirs only for global deletion, we want to skip the active logs in activeDirs
 * @return the size and number of files deleted
 */
public DeletionMeta deleteOldestWhileTooLarge(List<Path> dirs, long quota, boolean forPerDir,
        Set<Path> activeDirs) throws IOException {
    long totalSize = 0;
    for (Path dir : dirs) {
        try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
            for (Path path : stream) {
                totalSize += Files.size(path);
            }
        }
    }
    LOG.debug("totalSize: {} quota: {}", totalSize, quota);
    long toDeleteSize = totalSize - quota;
    if (toDeleteSize <= 0) {
        return DeletionMeta.EMPTY;
    }

    int deletedFiles = 0;
    long deletedSize = 0;
    // the oldest PQ_SIZE files in this directory will be placed in the PQ, with the newest at the root
    PriorityQueue<Pair<Path, FileTime>> pq = new PriorityQueue<>(PQ_SIZE,
            Comparator.comparing((Pair<Path, FileTime> p) -> p.getRight()).reversed());
    int round = 0;
    final Set<Path> excluded = new HashSet<>();
    while (toDeleteSize > 0) {
        LOG.debug("To delete size is {}, start a new round of deletion, round: {}", toDeleteSize, round);
        for (Path dir : dirs) {
            try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
                for (Path path : stream) {
                    if (!excluded.contains(path)) {
                        if (isFileEligibleToSkipDelete(forPerDir, activeDirs, dir, path)) {
                            excluded.add(path);
                        } else {
                            Pair<Path, FileTime> p = Pair.of(path, Files.getLastModifiedTime(path));
                            if (pq.size() < PQ_SIZE) {
                                pq.offer(p);
                            } else if (p.getRight().toMillis() < pq.peek().getRight().toMillis()) {
                                pq.poll();
                                pq.offer(p);
                            }
                        }
                    }
                }
            }
        }
        if (!pq.isEmpty()) {
            // need to reverse the order of elements in the PQ to delete files from oldest to newest
            Stack<Pair<Path, FileTime>> stack = new Stack<>();
            while (!pq.isEmpty()) {
                stack.push(pq.poll());
            }
            while (!stack.isEmpty() && toDeleteSize > 0) {
                Pair<Path, FileTime> pair = stack.pop();
                Path file = pair.getLeft();
                final String canonicalPath = file.toAbsolutePath().normalize().toString();
                final long fileSize = Files.size(file);
                final long lastModified = pair.getRight().toMillis();
                // Original implementation doesn't actually check if delete succeeded or not.
                try {
                    Utils.forceDelete(file.toString());
                    LOG.info("Delete file: {}, size: {}, lastModified: {}", canonicalPath, fileSize,
                            lastModified);
                    toDeleteSize -= fileSize;
                    deletedSize += fileSize;
                    deletedFiles++;
                } catch (IOException e) {
                    excluded.add(file);
                }
            }
            pq.clear();
            round++;
            if (round >= MAX_ROUNDS) {
                if (forPerDir) {
                    LOG.warn("Reached MAX_ROUNDS: {} during per-dir deletion, you may have too many files in "
                            + "a single directory : {}, will delete the rest of the files in the next interval.",
                            MAX_ROUNDS, dirs.get(0).toAbsolutePath().normalize());
                } else {
                    LOG.warn("Reached MAX_ROUNDS: {} during global deletion, you may have too many files, "
                            + "will delete the rest of the files in the next interval.", MAX_ROUNDS);
                }
                break;
            }
        } else {
            LOG.warn("No more files able to delete this round, but {} is over quota by {} MB",
                    forPerDir ? "this directory" : "root directory", toDeleteSize * 1e-6);
            break; // nothing left that can be deleted; stop instead of looping forever
        }
    }
    return new DeletionMeta(deletedSize, deletedFiles);
}
From source file:org.mule.util.store.MonitoredObjectStoreWrapper.java
public void expire() {
    try {
        final long now = System.nanoTime();
        List<Serializable> keys = allKeys();
        int excess = (allKeys().size() - maxEntries);
        if (maxEntries > 0 && excess > 0) {
            PriorityQueue<StoredObject<T>> q = new PriorityQueue<StoredObject<T>>(excess,
                    new Comparator<StoredObject<T>>() {
                        @Override
                        public int compare(StoredObject<T> paramT1, StoredObject<T> paramT2) {
                            return paramT2.timestamp.compareTo(paramT1.timestamp);
                        }
                    });
            long youngest = Long.MAX_VALUE;
            for (Serializable key : keys) {
                StoredObject<T> obj = getStore().retrieve(key);
                // TODO extract the entryTTL > 0 check outside of the loop
                if (entryTTL > 0 && TimeUnit.NANOSECONDS.toMillis(now - obj.getTimestamp()) >= entryTTL) {
                    remove(key);
                    excess--;
                    if (excess > 0 && q.size() > excess) {
                        q.poll();
                        youngest = q.peek().timestamp;
                    }
                } else {
                    if (excess > 0 && (q.size() < excess || obj.timestamp < youngest)) {
                        q.offer(obj);
                        youngest = q.peek().timestamp;
                    }
                    if (excess > 0 && q.size() > excess) {
                        q.poll();
                        youngest = q.peek().timestamp;
                    }
                }
            }
            for (int i = 0; i < excess; i++) {
                Serializable key = q.poll().key;
                remove(key);
            }
        } else {
            if (entryTTL > 0) {
                for (Serializable key : keys) {
                    StoredObject<T> obj = getStore().retrieve(key);
                    if (TimeUnit.NANOSECONDS.toMillis(now - obj.getTimestamp()) >= entryTTL) {
                        remove(key);
                    }
                }
            }
        }
    } catch (Exception e) {
        logger.warn("Running expiry on " + baseStore + " threw " + e + ": " + e.getMessage());
    }
}
From source file:de.tudarmstadt.lt.n2n.annotators.RelationAnnotator.java
protected List<Dependency> find_path_dijkstra(Token start, Token dest, Collection<Token> nodes,
        Map<Token, List<Dependency>> edges) throws IllegalStateException {
    List<Dependency> shortest_path = new ArrayList<Dependency>();

    final Map<Token, Integer> dist = new HashMap<Token, Integer>();
    final Map<Token, Dependency> prev = new HashMap<Token, Dependency>();
    for (Token t : nodes)
        dist.put(t, Integer.MAX_VALUE);
    dist.put(start, 0);

    PriorityQueue<Token> Q = new PriorityQueue<Token>(edges.size(), new Comparator<Token>() {
        @Override
        public int compare(Token o1, Token o2) {
            return dist.get(o1).compareTo(dist.get(o2));
        }
    });
    Q.addAll(nodes);

    while (!Q.isEmpty()) {
        Token u = Q.poll(); // initially the source node
        if (u.equals(dest)) // stop if dest is reached
            break;
        if (dist.get(u) == Integer.MAX_VALUE)
            throw new IllegalStateException(String.format(
                    "Could not find path from token '%s' to token '%s'. Perhaps start or dest is part of a preposition? (%s)",
                    start.getCoveredText(), dest.getCoveredText(),
                    DocumentMetaData.get(u.getCAS()).getDocumentId()));
        List<Dependency> connected_edges = edges.get(u);
        if (connected_edges == null)
            continue;
        for (Dependency d : connected_edges) {
            Token v = null;
            if (u.equals(d.getGovernor()))
                v = d.getDependent();
            else
                v = d.getGovernor();
            if (!Q.contains(v))
                continue;
            int alt = dist.get(u) + 1; // dist(u,v) = 1
            if (alt < dist.get(v)) {
                dist.put(v, alt);
                prev.put(v, d);
                Q.remove(v); // reinsert v so that its position in Q is recomputed
                Q.offer(v);
            }
        }
    }

    Token u = dest;
    Dependency e = prev.get(u);
    while (e != null) {
        shortest_path.add(0, e);
        if (u == e.getGovernor())
            u = e.getDependent();
        else
            u = e.getGovernor();
        e = prev.get(u);
    }
    return shortest_path;
}
From source file:mondrian.olap.fun.FunUtil.java
/**
 * Julian's algorithm for stable partial sort. Improves Pedro's algorithm
 * by using a heap (priority queue) for the top {@code limit} items seen.
 * The items on the priority queue have an ordinal field, so the queue
 * can be used to generate a list of stably sorted items. (Heap sort is
 * not normally stable.)
 *
 * @param list List to sort
 * @param comp Comparator
 * @param limit Maximum number of items to return
 * @param <T> Element type
 * @return Sorted list, containing at most limit items
 */
public static <T> List<T> stablePartialSortJulian(final List<T> list, final Comparator<T> comp, int limit) {
    final Comparator<ObjIntPair<T>> comp2 = new Comparator<ObjIntPair<T>>() {
        public int compare(ObjIntPair<T> o1, ObjIntPair<T> o2) {
            int c = comp.compare(o1.t, o2.t);
            if (c == 0) {
                c = Util.compare(o1.i, o2.i);
            }
            return -c;
        }
    };
    int filled = 0;
    final PriorityQueue<ObjIntPair<T>> queue = new PriorityQueue<ObjIntPair<T>>(limit, comp2);
    for (T element : list) {
        if (filled < limit) {
            queue.offer(new ObjIntPair<T>(element, filled++));
        } else {
            ObjIntPair<T> head = queue.element();
            if (comp.compare(element, head.t) <= 0) {
                ObjIntPair<T> item = new ObjIntPair<T>(element, filled++);
                if (comp2.compare(item, head) >= 0) {
                    ObjIntPair poll = queue.remove();
                    Util.discard(poll);
                    queue.offer(item);
                }
            }
        }
    }

    int n = queue.size();
    final Object[] elements = new Object[n];
    while (n > 0) {
        elements[--n] = queue.poll().t;
    }
    assert queue.isEmpty();
    //noinspection unchecked
    return Arrays.asList((T[]) elements);
}