List of usage examples for java.util.PriorityQueue.add
public boolean add(E e)
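add(E e) inserts the given element into the priority queue, ordering it either by the elements' natural ordering or by the Comparator supplied at construction; for the unbounded java.util.PriorityQueue it returns true on success, throws ClassCastException if the element cannot be compared with the elements already in the queue, and throws NullPointerException for a null element. Before the project excerpts below, here is a minimal self-contained sketch of both orderings; the class name PriorityQueueAddDemo and the sample values are illustrative only and are not taken from any of the projects listed:

import java.util.Comparator;
import java.util.PriorityQueue;

public class PriorityQueueAddDemo {
    public static void main(String[] args) {
        // Natural ordering: the smallest element sits at the head of the queue.
        PriorityQueue<Integer> numbers = new PriorityQueue<>();
        numbers.add(42);
        numbers.add(7);
        numbers.add(19);
        System.out.println(numbers.poll()); // prints 7

        // Custom ordering: a Comparator passed at construction decides priority.
        PriorityQueue<String> byLength = new PriorityQueue<>(Comparator.comparingInt(String::length));
        byLength.add("priority");
        byLength.add("queue");
        byLength.add("add");
        System.out.println(byLength.poll()); // prints "add" (shortest string)
    }
}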
From source file: org.apache.accumulo.tserver.tablet.Tablet.java
private Set<FileRef> removeSmallest(Map<FileRef, DataFileValue> filesToCompact, int maxFilesToCompact) {
    // ensure this method works properly when multiple files have the same size

    // short-circuit; also handles zero files case
    if (filesToCompact.size() <= maxFilesToCompact) {
      Set<FileRef> smallestFiles = new HashSet<FileRef>(filesToCompact.keySet());
      filesToCompact.clear();
      return smallestFiles;
    }

    PriorityQueue<Pair<FileRef, Long>> fileHeap = new PriorityQueue<Pair<FileRef, Long>>(filesToCompact.size(),
        new Comparator<Pair<FileRef, Long>>() {
          @Override
          public int compare(Pair<FileRef, Long> o1, Pair<FileRef, Long> o2) {
            if (o1.getSecond() == o2.getSecond())
              return o1.getFirst().compareTo(o2.getFirst());
            if (o1.getSecond() < o2.getSecond())
              return -1;
            return 1;
          }
        });

    for (Iterator<Entry<FileRef, DataFileValue>> iterator = filesToCompact.entrySet().iterator(); iterator.hasNext();) {
      Entry<FileRef, DataFileValue> entry = iterator.next();
      fileHeap.add(new Pair<FileRef, Long>(entry.getKey(), entry.getValue().getSize()));
    }

    Set<FileRef> smallestFiles = new HashSet<FileRef>();
    while (smallestFiles.size() < maxFilesToCompact && fileHeap.size() > 0) {
      Pair<FileRef, Long> pair = fileHeap.remove();
      filesToCompact.remove(pair.getFirst());
      smallestFiles.add(pair.getFirst());
    }

    return smallestFiles;
  }
From source file: com.joliciel.jochre.search.highlight.FixedSizeSnippetFinder.java
@Override
public List<Snippet> findSnippets(int docId, Set<String> fields, Set<HighlightTerm> highlightTerms,
        int maxSnippets, int snippetSize) {
    try {
        Document doc = indexSearcher.doc(docId);
        JochreIndexDocument jochreDoc = searchService.getJochreIndexDocument(indexSearcher, docId);

        // find best snippet for each term
        PriorityQueue<Snippet> heap = new PriorityQueue<Snippet>();

        int i = -1;
        for (HighlightTerm term : highlightTerms) {
            i++;
            String content = jochreDoc.getContents();
            CoordinateStorage coordinateStorage = jochreDoc.getCoordinateStorage();
            if (term.getStartOffset() >= content.length()) {
                String title = doc.get("title");
                String startPage = doc.get("startPage");
                String endPage = doc.get("endPage");
                LOG.debug("Content: " + content);
                throw new RuntimeException(term.toString() + " cannot fit into contents for doc " + title
                        + ", pages " + startPage + " to " + endPage + ", length: " + content.length());
            }
            List<HighlightTerm> snippetTerms = new ArrayList<HighlightTerm>();
            snippetTerms.add(term);
            int j = -1;
            boolean foundImage = false;
            for (HighlightTerm otherTerm : highlightTerms) {
                j++;
                if (j <= i)
                    continue;
                if (otherTerm.getImageIndex() != term.getImageIndex()) {
                    if (foundImage)
                        break;
                    else
                        continue;
                }
                foundImage = true;
                if (otherTerm.getStartOffset() < term.getStartOffset() + snippetSize) {
                    snippetTerms.add(otherTerm);
                } else {
                    break;
                }
            }
            HighlightTerm lastTerm = snippetTerms.get(snippetTerms.size() - 1);

            int middle = (term.getStartOffset() + lastTerm.getEndOffset()) / 2;
            int start = middle - (snippetSize / 2);
            int end = middle + (snippetSize / 2);
            if (start > term.getStartOffset())
                start = term.getStartOffset();
            if (end < lastTerm.getEndOffset())
                end = lastTerm.getEndOffset();
            if (start < 0)
                start = 0;
            if (end > content.length())
                end = content.length();

            for (int k = start; k >= 0; k--) {
                if (Character.isWhitespace(content.charAt(k))) {
                    start = k + 1;
                    break;
                }
            }
            for (int k = end; k < content.length(); k++) {
                if (Character.isWhitespace(content.charAt(k))) {
                    end = k;
                    break;
                }
            }

            int imageStartOffset = coordinateStorage.getImageStartOffset(term.getImageIndex());
            int imageEndOffset = Integer.MAX_VALUE;
            if (term.getImageIndex() + 1 < coordinateStorage.getImageCount()) {
                imageEndOffset = coordinateStorage.getImageStartOffset(term.getImageIndex() + 1);
            }

            if (start < imageStartOffset)
                start = imageStartOffset;
            if (end > imageEndOffset)
                end = imageEndOffset;

            Snippet snippet = new Snippet(docId, term.getField(), start, end);
            snippet.setHighlightTerms(snippetTerms);
            heap.add(snippet);
        }

        // if we have no snippets, add one per field type
        if (heap.isEmpty()) {
            String content = jochreDoc.getContents();
            int end = snippetSize * maxSnippets;
            if (end > content.length())
                end = content.length();
            for (int k = end; k < content.length(); k++) {
                if (Character.isWhitespace(content.charAt(k))) {
                    end = k;
                    break;
                }
            }
            Snippet snippet = new Snippet(docId, fields.iterator().next(), 0, end);
            heap.add(snippet);
        }

        List<Snippet> snippets = new ArrayList<Snippet>(maxSnippets);
        while (snippets.size() < maxSnippets && !heap.isEmpty()) {
            Snippet snippet = heap.poll();
            boolean hasOverlap = false;
            for (Snippet otherSnippet : snippets) {
                if (otherSnippet.hasOverlap(snippet))
                    hasOverlap = true;
            }
            if (!hasOverlap)
                snippets.add(snippet);
        }

        for (Snippet snippet : snippets) {
            LOG.debug("Added snippet: " + snippet.toJson());
        }
        return snippets;
    } catch (IOException e) {
        LogUtils.logError(LOG, e);
        throw new RuntimeException(e);
    }
}
From source file: org.caleydo.neo4j.plugins.kshortestpaths.KShortestPathsAlgo.java
public List<WeightedPath> run(Node sourceNode, Node targetNode, int k, IPathReadyListener onPathReady) {
    StopWatch w = new StopWatch();
    w.start();

    // Calculate shortest path first
    List<WeightedPath> paths = new ArrayList<>(k);
    profile("start", w);
    WeightedPath shortestPath = shortestPathFinder.findSinglePath(sourceNode, targetNode);
    if (shortestPath == null)
        return paths;
    profile("initial dijkstra", w);

    PriorityQueue<WeightedPath> pathCandidates = new PriorityQueue<WeightedPath>(20,
            new Comparator<WeightedPath>() {
                @Override
                public int compare(WeightedPath o1, WeightedPath o2) {
                    return Double.compare(o1.weight(), o2.weight());
                }
            });

    Set<Integer> pathCandidateHashes = new HashSet<>();

    if (onPathReady != null) {
        onPathReady.onPathReady(shortestPath);
    }
    paths.add(shortestPath);
    pathCandidateHashes.add(generatePathHash(shortestPath));

    for (int i = 1; i < k; i++) {
        WeightedPath prevPath = paths.get(i - 1);

        for (Node spurNode : prevPath.nodes()) {
            if (spurNode.getId() == prevPath.endNode().getId())
                break;

            WeightedPath rootPath = getSubPathTo(prevPath, spurNode);

            for (Path path : paths) {
                Iterator<Relationship> pathIterator = path.relationships().iterator();
                boolean containsRootPath = true;

                // Test if the existing shortest path starts with the root path
                for (Relationship relationship : rootPath.relationships()) {
                    if (!pathIterator.hasNext()) {
                        containsRootPath = false;
                        break;
                    }

                    Relationship pathRelationship = pathIterator.next();
                    if (relationship.getId() != pathRelationship.getId()) {
                        containsRootPath = false;
                        break;
                    }
                }

                // If so, set edge weight of following edge in that path to infinity
                if (containsRootPath) {
                    if (pathIterator.hasNext()) {
                        Relationship r = pathIterator.next();
                        costEvaluator.addInvalidRelationship(r);
                        // profile("invalid: " + r, w);
                    }
                }
            }

            // Simulate removal of root path nodes (except spur node) by setting all their edge weights to
            // infinity
            Set<Long> badIds = new HashSet<Long>();
            for (Node rootPathNode : rootPath.nodes()) {
                if (rootPathNode.getId() != spurNode.getId()) {
                    badIds.add(rootPathNode.getId());
                    // for (Relationship relationship : getRelationships(rootPathNode)) {
                    //     costEvaluator.addInvalidRelationship(relationship);
                    // }
                    // profile("invalids: " + rootPathNode.getRelationships(), w);
                }
            }
            expander.setExtraIgnoreNodes(badIds);

            profile("Find next path", w);
            WeightedPath spurPath = shortestPathFinder.findSinglePath(spurNode, targetNode);
            profile("Found next path", w);
            if (spurPath != null && !Double.isInfinite(spurPath.weight())) {
                WeightedPath pathCandidate = concatenate(rootPath, spurPath);
                Integer pathHash = generatePathHash(pathCandidate);
                if (!pathCandidateHashes.contains(pathHash)) {
                    pathCandidates.add(pathCandidate);
                    pathCandidateHashes.add(pathHash);
                }
            }

            // Restore edges
            costEvaluator.clearInvalidRelationships();
            expander.setExtraIgnoreNodes(null);
        }

        if (pathCandidates.isEmpty())
            break;

        WeightedPath nextBest = pathCandidates.poll();
        profile("flush path", w);
        if (onPathReady != null) {
            onPathReady.onPathReady(nextBest);
        }
        paths.add(nextBest);
    }
    profile("done", w);
    return paths;
}
From source file: com.uber.stream.kafka.mirrormaker.manager.core.ControllerHelixManager.java
public void scaleCurrentCluster() throws Exception {
    int oldTotalNumWorker = 0;
    int newTotalNumWorker = 0;
    Map<String, Integer> _routeWorkerOverrides = getRouteWorkerOverride();
    for (String pipeline : _pipelineToInstanceMap.keySet()) {
        LOGGER.info("Start rescale pipeline: {}", pipeline);
        PriorityQueue<InstanceTopicPartitionHolder> newItphQueue = new PriorityQueue<>(1,
                InstanceTopicPartitionHolder.totalWorkloadComparator(_pipelineWorkloadMap));
        // TODO: what if routeId is not continuous
        int nextRouteId = _pipelineToInstanceMap.get(pipeline).size();

        for (InstanceTopicPartitionHolder itph : _pipelineToInstanceMap.get(pipeline)) {
            if (itph.getTotalNumPartitions() > _maxNumPartitionsPerRoute) {
                LOGGER.info(
                        "Checking route {} with controller {} and topics {} since it exceeds maxNumPartitionsPerRoute {}",
                        itph.getRouteString(), itph.getInstanceName(), itph.getServingTopicPartitionSet(),
                        _maxNumPartitionsPerRoute);
                while (itph.getTotalNumPartitions() > _maxNumPartitionsPerRoute) {
                    // Only one topic left, do nothing
                    if (itph.getNumServingTopicPartitions() == 1) {
                        LOGGER.info("Only one topic {} in route {}, do nothing",
                                itph.getServingTopicPartitionSet().iterator().next(), itph.getRouteString());
                        break;
                    }

                    // Get the topic with largest number of partitions
                    TopicPartition tpToMove = new TopicPartition("tmp", -1);
                    for (TopicPartition tp : itph.getServingTopicPartitionSet()) {
                        if (tp.getPartition() > tpToMove.getPartition()) {
                            tpToMove = tp;
                        }
                    }

                    // If existing lightest route cannot fit the largest topic to move
                    if (newItphQueue.isEmpty() || newItphQueue.peek().getTotalNumPartitions()
                            + tpToMove.getPartition() > _initMaxNumPartitionsPerRoute) {
                        try {
                            InstanceTopicPartitionHolder newHolder = createNewRoute(pipeline, nextRouteId);

                            _helixAdmin.setResourceIdealState(_helixClusterName, tpToMove.getTopic(),
                                    IdealStateBuilder.resetCustomIdealStateFor(
                                            _helixAdmin.getResourceIdealState(_helixClusterName, tpToMove.getTopic()),
                                            tpToMove.getTopic(), itph.getRouteString(), newHolder.getRouteString(),
                                            newHolder.getInstanceName()));

                            itph.removeTopicPartition(tpToMove);
                            newHolder.addTopicPartition(tpToMove);
                            newItphQueue.add(newHolder);
                            nextRouteId++;
                        } catch (Exception e) {
                            LOGGER.error("Got exception when create a new route when rebalancing, abandon!", e);
                            throw new Exception("Got exception when create a new route when rebalancing, abandon!", e);
                        }
                    } else {
                        InstanceTopicPartitionHolder newHolder = newItphQueue.poll();

                        _helixAdmin.setResourceIdealState(_helixClusterName, tpToMove.getTopic(),
                                IdealStateBuilder.resetCustomIdealStateFor(
                                        _helixAdmin.getResourceIdealState(_helixClusterName, tpToMove.getTopic()),
                                        tpToMove.getTopic(), itph.getRouteString(), newHolder.getRouteString(),
                                        newHolder.getInstanceName()));

                        itph.removeTopicPartition(tpToMove);
                        newHolder.addTopicPartition(tpToMove);
                        newItphQueue.add(newHolder);
                    }
                }
            }
            newItphQueue.add(itph);
        }

        // After moving topics, scale workers based on workload
        int rescaleFailedCount = 0;
        for (InstanceTopicPartitionHolder itph : newItphQueue) {
            oldTotalNumWorker += itph.getWorkerSet().size();
            String routeString = itph.getRouteString();
            int initWorkerCount = _initMaxNumWorkersPerRoute;
            if (_routeWorkerOverrides.containsKey(routeString)
                    && _routeWorkerOverrides.get(routeString) > initWorkerCount) {
                initWorkerCount = _routeWorkerOverrides.get(routeString);
            }

            String hostname = getHostname(itph.getInstanceName());
            try {
                String result = HttpClientUtils.getData(_httpClient, _requestConfig, hostname, _controllerPort,
                        "/admin/workloadinfo");
                ControllerWorkloadInfo workloadInfo = JSONObject.parseObject(result, ControllerWorkloadInfo.class);
                TopicWorkload totalWorkload = workloadInfo.getTopicWorkload();

                if (workloadInfo != null && workloadInfo.getNumOfExpectedWorkers() != 0) {
                    _pipelineWorkloadMap.put(itph.getRouteString(), totalWorkload);
                    int expectedNumWorkers = workloadInfo.getNumOfExpectedWorkers();
                    LOGGER.info("Current {} workers in route {}, expect {} workers", itph.getWorkerSet().size(),
                            itph.getRouteString(), expectedNumWorkers);
                    int actualExpectedNumWorkers = getActualExpectedNumWorkers(expectedNumWorkers, initWorkerCount);
                    LOGGER.info("Current {} workers in route {}, actual expect {} workers",
                            itph.getWorkerSet().size(), itph.getRouteString(), actualExpectedNumWorkers);

                    if (actualExpectedNumWorkers > itph.getWorkerSet().size()) {
                        LOGGER.info("Current {} workers in route {}, actual expect {} workers, add {} workers",
                                itph.getWorkerSet().size(), itph.getRouteString(), actualExpectedNumWorkers,
                                actualExpectedNumWorkers - itph.getWorkerSet().size());
                        // TODO: handle exception
                        _workerHelixManager.addWorkersToMirrorMaker(itph, itph.getRoute().getTopic(),
                                itph.getRoute().getPartition(),
                                actualExpectedNumWorkers - itph.getWorkerSet().size());
                    }

                    if (actualExpectedNumWorkers < itph.getWorkerSet().size()) {
                        LOGGER.info("Current {} workers in route {}, actual expect {} workers, remove {} workers",
                                itph.getWorkerSet().size(), itph.getRouteString(), actualExpectedNumWorkers,
                                itph.getWorkerSet().size() - actualExpectedNumWorkers);
                        // TODO: handle exception
                        _workerHelixManager.removeWorkersToMirrorMaker(itph, itph.getRoute().getTopic(),
                                itph.getRoute().getPartition(),
                                itph.getWorkerSet().size() - actualExpectedNumWorkers);
                    }
                    newTotalNumWorker += actualExpectedNumWorkers;
                } else {
                    LOGGER.warn("Get workload on {} for route: {} returns 0. No change on number of workers",
                            hostname, itph.getRouteString());
                    newTotalNumWorker += itph.getWorkerSet().size();
                    rescaleFailedCount++;
                }
            } catch (Exception e) {
                rescaleFailedCount++;
                LOGGER.error(String.format(
                        "Get workload error when connecting to %s for route %s. No change on number of workers",
                        hostname, itph.getRouteString()), e);
                newTotalNumWorker += itph.getWorkerSet().size();
                rescaleFailedCount++;
            }
        }
        _pipelineToInstanceMap.put(pipeline, newItphQueue);
        _rescaleFailedCount.inc(rescaleFailedCount - _rescaleFailedCount.getCount());
    }
    LOGGER.info("oldTotalNumWorker: {}, newTotalNumWorker: {}", oldTotalNumWorker, newTotalNumWorker);
}
From source file: org.apache.accumulo.server.tabletserver.Tablet.java
private Set<FileRef> removeSmallest(Map<FileRef, Long> filesToCompact, int maxFilesToCompact) {
    // ensure this method works properly when multiple files have the same size

    PriorityQueue<Pair<FileRef, Long>> fileHeap = new PriorityQueue<Pair<FileRef, Long>>(filesToCompact.size(),
        new Comparator<Pair<FileRef, Long>>() {
          @Override
          public int compare(Pair<FileRef, Long> o1, Pair<FileRef, Long> o2) {
            if (o1.getSecond() == o2.getSecond())
              return o1.getFirst().compareTo(o2.getFirst());
            if (o1.getSecond() < o2.getSecond())
              return -1;
            return 1;
          }
        });

    for (Iterator<Entry<FileRef, Long>> iterator = filesToCompact.entrySet().iterator(); iterator.hasNext();) {
      Entry<FileRef, Long> entry = iterator.next();
      fileHeap.add(new Pair<FileRef, Long>(entry.getKey(), entry.getValue()));
    }

    Set<FileRef> smallestFiles = new HashSet<FileRef>();
    while (smallestFiles.size() < maxFilesToCompact && fileHeap.size() > 0) {
      Pair<FileRef, Long> pair = fileHeap.remove();
      filesToCompact.remove(pair.getFirst());
      smallestFiles.add(pair.getFirst());
    }

    return smallestFiles;
  }
From source file: org.apache.accumulo.tserver.Tablet.java
private Set<FileRef> removeSmallest(Map<FileRef, DataFileValue> filesToCompact, int maxFilesToCompact) {
    // ensure this method works properly when multiple files have the same size

    PriorityQueue<Pair<FileRef, Long>> fileHeap = new PriorityQueue<Pair<FileRef, Long>>(filesToCompact.size(),
        new Comparator<Pair<FileRef, Long>>() {
          @Override
          public int compare(Pair<FileRef, Long> o1, Pair<FileRef, Long> o2) {
            if (o1.getSecond() == o2.getSecond())
              return o1.getFirst().compareTo(o2.getFirst());
            if (o1.getSecond() < o2.getSecond())
              return -1;
            return 1;
          }
        });

    for (Iterator<Entry<FileRef, DataFileValue>> iterator = filesToCompact.entrySet().iterator(); iterator.hasNext();) {
      Entry<FileRef, DataFileValue> entry = iterator.next();
      fileHeap.add(new Pair<FileRef, Long>(entry.getKey(), entry.getValue().getSize()));
    }

    Set<FileRef> smallestFiles = new HashSet<FileRef>();
    while (smallestFiles.size() < maxFilesToCompact && fileHeap.size() > 0) {
      Pair<FileRef, Long> pair = fileHeap.remove();
      filesToCompact.remove(pair.getFirst());
      smallestFiles.add(pair.getFirst());
    }

    return smallestFiles;
  }