Example usage for java.util.PriorityQueue: PriorityQueue(int, Comparator)

List of usage examples for the PriorityQueue(int initialCapacity, Comparator<? super E> comparator) constructor of java.util.PriorityQueue

Introduction

On this page you can find example usages of the java.util.PriorityQueue constructor PriorityQueue(int initialCapacity, Comparator<? super E> comparator).

Prototype

public PriorityQueue(int initialCapacity, Comparator<? super E> comparator) 

Document

Creates a PriorityQueue with the specified initial capacity that orders its elements according to the specified comparator.
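
A minimal, self-contained sketch of this constructor (not taken from any of the projects below): the initial capacity is only a sizing hint, while the comparator decides which element sits at the head. Here strings are polled shortest first.

import java.util.Comparator;
import java.util.PriorityQueue;

public class PriorityQueueConstructorDemo {
    public static void main(String[] args) {
        // Initial capacity 16 is a sizing hint, not a bound; the Comparator defines the ordering.
        PriorityQueue<String> byLength =
                new PriorityQueue<>(16, Comparator.comparingInt(String::length));
        byLength.add("pelican");
        byLength.add("ox");
        byLength.add("heron");
        while (!byLength.isEmpty()) {
            System.out.println(byLength.poll()); // prints ox, heron, pelican
        }
    }
}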

Usage

From source file:com.navercorp.pinpoint.web.service.AgentEventServiceImpl.java

private List<AgentEvent> createAgentEvents(List<AgentEventBo> agentEventBos) {
    if (CollectionUtils.isEmpty(agentEventBos)) {
        return Collections.emptyList();
    }
    List<AgentEvent> agentEvents = new ArrayList<>(agentEventBos.size());
    PriorityQueue<DurationalAgentEvent> durationalAgentEvents = new PriorityQueue<>(agentEventBos.size(),
            AgentEvent.EVENT_TIMESTAMP_ASC_COMPARATOR);
    for (AgentEventBo agentEventBo : agentEventBos) {
        if (agentEventBo.getEventType().isCategorizedAs(AgentEventTypeCategory.DURATIONAL)) {
            durationalAgentEvents.add(createDurationalAgentEvent(agentEventBo, false));
        } else {
            boolean hasMessage = !ArrayUtils.isEmpty(agentEventBo.getEventBody());
            agentEvents.add(createAgentEvent(agentEventBo, hasMessage));
        }
    }
    long durationStartTimestamp = DurationalAgentEvent.UNKNOWN_TIMESTAMP;
    while (!durationalAgentEvents.isEmpty()) {
        DurationalAgentEvent currentEvent = durationalAgentEvents.remove();
        if (durationStartTimestamp == DurationalAgentEvent.UNKNOWN_TIMESTAMP) {
            durationStartTimestamp = currentEvent.getEventTimestamp();
        }
        currentEvent.setDurationStartTimestamp(durationStartTimestamp);
        DurationalAgentEvent nextEvent = durationalAgentEvents.peek();
        if (nextEvent != null) {
            long nextEventTimestamp = nextEvent.getEventTimestamp();
            currentEvent.setDurationEndTimestamp(nextEventTimestamp);
            durationStartTimestamp = nextEventTimestamp;
        }
        agentEvents.add(currentEvent);
    }
    return agentEvents;
}
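
The Pinpoint snippet relies on the timestamp-ascending comparator so that each durational event's end can be read from the next event's start. Below is a reduced sketch of that pairing pattern; Span and UNKNOWN are invented names, not Pinpoint types.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class TimestampPairingSketch {
    static final long UNKNOWN = -1L;

    record Span(long start, long end) {}

    // Drain timestamps in ascending order, pairing each one with the next to form a span.
    static List<Span> toSpans(List<Long> timestamps) {
        // PriorityQueue rejects an initial capacity below 1, hence the Math.max guard.
        PriorityQueue<Long> queue =
                new PriorityQueue<>(Math.max(1, timestamps.size()), Comparator.naturalOrder());
        queue.addAll(timestamps);

        List<Span> spans = new ArrayList<>();
        while (!queue.isEmpty()) {
            long current = queue.remove();
            Long next = queue.peek();                     // null once the last timestamp is reached
            spans.add(new Span(current, next != null ? next : UNKNOWN));
        }
        return spans;
    }

    public static void main(String[] args) {
        System.out.println(toSpans(List.of(30L, 10L, 20L))); // spans 10..20, 20..30, 30..UNKNOWN
    }
}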

From source file:org.apache.hadoop.hive.ql.exec.tez.tools.KeyValuesInputMerger.java

public KeyValuesInputMerger(List<? extends Input> shuffleInputs) throws Exception {
    //get KeyValuesReaders from the LogicalInput and add them to priority queue
    int initialCapacity = shuffleInputs.size();
    kvsIterable = new KeyValuesIterable(initialCapacity);
    pQueue = new PriorityQueue<KeyValuesReader>(initialCapacity, new KVReaderComparator());
    for (Input input : shuffleInputs) {
        addToQueue((KeyValuesReader) input.getReader());
    }
}
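
The Hive merger is an instance of the classic k-way merge: each sorted input contributes one cursor to a priority queue keyed by the element the cursor currently points at. A generic sketch of that shape over plain iterators follows; the names are illustrative, not Hive's KeyValuesReader API.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

class KWayMergeSketch {
    // Merge already-sorted iterators into one sorted list.
    static <T> List<T> merge(List<Iterator<T>> inputs, Comparator<T> cmp) {
        // Each queue entry is an iterator plus the element it is currently positioned on.
        class Cursor {
            T head;
            final Iterator<T> rest;
            Cursor(T head, Iterator<T> rest) { this.head = head; this.rest = rest; }
        }
        Comparator<Cursor> byHead = (a, b) -> cmp.compare(a.head, b.head);
        PriorityQueue<Cursor> queue = new PriorityQueue<>(Math.max(1, inputs.size()), byHead);
        for (Iterator<T> it : inputs) {
            if (it.hasNext()) {
                queue.add(new Cursor(it.next(), it));
            }
        }
        List<T> merged = new ArrayList<>();
        while (!queue.isEmpty()) {
            Cursor smallest = queue.poll();
            merged.add(smallest.head);
            if (smallest.rest.hasNext()) {
                smallest.head = smallest.rest.next();
                queue.add(smallest);              // re-enqueue at its new position
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        List<Iterator<Integer>> inputs = List.of(
                List.of(1, 4, 9).iterator(), List.of(2, 3, 10).iterator(), List.of(5).iterator());
        System.out.println(merge(inputs, Comparator.naturalOrder())); // [1, 2, 3, 4, 5, 9, 10]
    }
}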

From source file:classif.ahc.AHCSymbolicSequence.java

public void cluster() {

    // cache all distances
    distances = new double[data.size()][data.size()];
    for (int i = 0; i < data.size(); i++) {
        for (int j = i + 1; j < data.size(); j++) {
            distances[i][j] = data.get(i).distance(data.get(j));
            distances[j][i] = distances[i][j];
        }
    }
    System.out.println("distances cached");

    ArrayList<Integer>[] nClusterID = new ArrayList[data.size()];
    for (int i = 0; i < data.size(); i++) {
        nClusterID[i] = new ArrayList<Integer>();
        nClusterID[i].add(i);
    }
    int nClusters = data.size();

    int nInstances = data.size();
    Node[] clusterNodes = new Node[data.size()];

    PriorityQueue<Tuple> queue = new PriorityQueue<Tuple>(nClusters, new TupleComparator());
    double[][] fDistance0 = new double[nClusters][nClusters];
    for (int i = 0; i < nClusters; i++) {
        fDistance0[i][i] = 0;
        for (int j = i + 1; j < nClusters; j++) {
            fDistance0[i][j] = getDistanceClusters(nClusterID[i], nClusterID[j]);
            fDistance0[j][i] = fDistance0[i][j];
            queue.add(new Tuple(fDistance0[i][j], i, j, 1, 1));
        }
    }

    centroidsForNumberOfClusters = new ArrayList[data.size() + 1];
    centroidsForNumberOfClusters[data.size()] = new ArrayList<Sequence>();
    for (int i = 0; i < data.size(); i++) {
        centroidsForNumberOfClusters[data.size()].add(data.get(i));
    }

    while (nClusters > 1) {
        System.out.println("nClusters left = " + nClusters);
        int iMin1 = -1;
        int iMin2 = -1;
        Tuple t;
        do {
            t = queue.poll();
        } while (t != null && (nClusterID[t.m_iCluster1].size() != t.m_nClusterSize1
                || nClusterID[t.m_iCluster2].size() != t.m_nClusterSize2));
        iMin1 = t.m_iCluster1;
        iMin2 = t.m_iCluster2;

        centroidsForNumberOfClusters[nClusters
                - 1] = (ArrayList<Sequence>) centroidsForNumberOfClusters[nClusters].clone();

        merge(iMin1, iMin2, t.m_fDist, t.m_fDist, nClusterID, centroidsForNumberOfClusters[nClusters - 1],
                clusterNodes, distances);
        for (int i = 0; i < nInstances; i++) {
            if (i != iMin1 && nClusterID[i].size() != 0) {
                int i1 = Math.min(iMin1, i);
                int i2 = Math.max(iMin1, i);
                double fDistance = getDistanceClusters(nClusterID[i1], nClusterID[i2]);
                queue.add(new Tuple(fDistance, i1, i2, nClusterID[i1].size(), nClusterID[i2].size()));
            }
        }

        nClusters--;

    }
    System.out.println("Clustering done for all possible cuts");

}
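
The do/while poll in the clustering loop is the usual lazy-invalidation trick: rather than removing outdated pair distances from the queue (a linear-time operation), stale entries are skipped at poll time by checking that the recorded cluster sizes still match. Here is a stripped-down sketch of the same idea, with invented names and a version counter in place of cluster sizes.

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.PriorityQueue;

class LazyInvalidationSketch {
    // An entry remembers the version of the item it was computed for.
    record Entry(String item, double priority, int versionAtInsert) {}

    private final Map<String, Integer> currentVersion = new HashMap<>();
    private final PriorityQueue<Entry> queue =
            new PriorityQueue<>(16, Comparator.comparingDouble(Entry::priority));

    void push(String item, double priority) {
        int v = currentVersion.merge(item, 1, Integer::sum);   // bump the version; older entries go stale
        queue.add(new Entry(item, priority, v));
    }

    // Poll, skipping entries whose item has been updated since they were enqueued.
    Entry pollValid() {
        Entry e;
        do {
            e = queue.poll();
        } while (e != null && e.versionAtInsert() != currentVersion.get(e.item()));
        return e;
    }

    public static void main(String[] args) {
        LazyInvalidationSketch s = new LazyInvalidationSketch();
        s.push("a", 5.0);
        s.push("a", 1.0);            // makes the first "a" entry stale
        s.push("b", 3.0);
        System.out.println(s.pollValid()); // Entry[item=a, priority=1.0, versionAtInsert=2]
    }
}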

From source file:org.apache.hadoop.hbase.extended.loadbalance.strategies.hotspot.HotSpotLoadBalancer.java

@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    initParameters();
    /**
     * <pre>
     * We need at least two priority queues:
     * a) one containing hotspot regions, ordered by load (max priority queue)
     * b) one containing non-hotspot regions with their loads (min priority queue)
     *
     * Further, we need to iterate over these queues and decrease the load, so we
     * need a data structure to build these queues, and lastly we need to return
     * the region plan.
     * </pre>
     */

    LOG.debug("#################Came in the new Balancer Code and the cluster status is = " + this.status);
    long startTime = System.currentTimeMillis();
    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.info("numServers=0 so skipping load balancing");
        return null;

    }

    NavigableMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>> regionServerAndServerLoadMap = new TreeMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>>();
    PriorityQueue<HotSpotServerAndLoad> hotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.DESC_LOAD);
    PriorityQueue<HotSpotServerAndLoad> nonHotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.ASC_LOAD);
    HashBiMap<HRegionInfo, HotSpotRegionLoad> allRegionsLoadBiMap = HashBiMap.create();
    LOG.debug("#################clusterState=" + clusterState);
    double normalisedTotalLoadOfAllRegions = initRegionLoadMapsBasedOnInput(clusterState,
            regionServerAndServerLoadMap, allRegionsLoadBiMap);
    LOG.debug("#################normalisedTotalLoadOfAllRegions=" + normalisedTotalLoadOfAllRegions);
    // Check if we even need to do any load balancing
    double average = normalisedTotalLoadOfAllRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    LOG.debug("######################## final regionServerAndServerLoadMap == " + regionServerAndServerLoadMap);
    if (!loadBalancingNeeded(numServers, regionServerAndServerLoadMap, normalisedTotalLoadOfAllRegions,
            average)) {
        // we do not need load balancing
        return null;
    }
    double minLoad = normalisedTotalLoadOfAllRegions / numServers;
    double maxLoad = normalisedTotalLoadOfAllRegions % numServers == 0 ? minLoad : minLoad + 1;
    // as we now have to balance stuff, init PQ's
    LOG.debug(String.format("#################minLoad =%s,maxLoad= %s", minLoad, maxLoad));
    for (Map.Entry<HotSpotServerAndLoad, List<HotSpotRegionLoad>> item : regionServerAndServerLoadMap
            .entrySet()) {
        HotSpotServerAndLoad serverLoad = item.getKey();
        if (serverLoad.isHotSpot()) {

            hotspotRegionServers.add(serverLoad);
        } else {
            if (serverLoad.getLoad() < maxLoad) {
                nonHotspotRegionServers.add(serverLoad);
            }
        }
    }
    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(normalisedTotalLoadOfAllRegions)
            .append(", numServers=").append(numServers).append(", max=").append(maxLoad).append(", min=")
            .append(minLoad);
    LOG.debug(strBalanceParam.toString());
    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    while (hotspotRegionServers.size() > 0 && nonHotspotRegionServers.size() > 0) {
        HotSpotServerAndLoad serverToBalance = hotspotRegionServers.poll();
        LOG.debug(String.format("#################serverToBalance =%s",
                serverToBalance.getServerName().getServerName()));
        // get the least-loaded non-hotspot regions of this server
        List<HotSpotRegionLoad> regionList = regionServerAndServerLoadMap.get(serverToBalance);
        // assume it to be sorted asc.
        if (regionList.size() > 0) {
            HotSpotRegionLoad regionToMove = regionList.remove(0);
            HRegionInfo regionMoveInfo = allRegionsLoadBiMap.inverse().get(regionToMove);

            /*
             * regionMoveInfo can be null when the load map returns the root and
             * meta regions along with the movable regions. Because the clusterState
             * passed to us does not contain these regions, regionServerAndServerLoadMap
             * can contain regions that are not present in allRegionsLoadBiMap.
             */
            if (regionMoveInfo != null && !regionMoveInfo.isMetaRegion() && !regionMoveInfo.isRootRegion()
                    && !regionMoveInfo.isMetaTable() && regionToMove.isRegionHotspot()) {
                LOG.debug(String.format(
                        "#################Came to move the region regionMoveInfo=%s;; regionToMove=%s ",
                        regionMoveInfo, regionToMove));
                // move out.
                HotSpotServerAndLoad destinationServer = nonHotspotRegionServers.poll();

                RegionPlan rpl = new RegionPlan(allRegionsLoadBiMap.inverse().get(regionToMove),
                        serverToBalance.getServerName(), destinationServer.getServerName());
                regionsToReturn.add(rpl);
                serverToBalance.modifyLoad(regionToMove.getLoad());
                destinationServer.modifyLoad(-1 * regionToMove.getLoad());
                // reenter them to list. if they satisfy conditions
                if (serverToBalance.getLoad() > minLoad) {
                    hotspotRegionServers.offer(serverToBalance);
                }
                if (destinationServer.getLoad() < maxLoad) {
                    nonHotspotRegionServers.offer(destinationServer);
                }
            }
        }
    }
    LOG.info("Total Time taken to balance = " + (System.currentTimeMillis() - startTime));
    LOG.info(String.format("#################regionsToReturn=%s ", regionsToReturn));
    return regionsToReturn;

}
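
The balancer builds two queues over the same element type with opposite comparators, a max-heap of hotspot servers (DESC_LOAD) and a min-heap of candidates (ASC_LOAD), then repeatedly moves load from the head of one to the head of the other, re-offering a server only while it is still out of balance. A toy version of that shape follows, with an invented Server class and unit-sized moves.

import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class TwoQueueRebalanceSketch {
    static class Server {
        final String name;
        int load;
        Server(String name, int load) { this.name = name; this.load = load; }
    }

    public static void main(String[] args) {
        List<Server> servers = List.of(new Server("s1", 9), new Server("s2", 1), new Server("s3", 5));
        int target = 5;   // desired load per server

        Comparator<Server> byLoad = Comparator.comparingInt(s -> s.load);
        PriorityQueue<Server> overloaded = new PriorityQueue<>(servers.size(), byLoad.reversed()); // heaviest first
        PriorityQueue<Server> underloaded = new PriorityQueue<>(servers.size(), byLoad);           // lightest first
        for (Server s : servers) {
            if (s.load > target) overloaded.add(s);
            else if (s.load < target) underloaded.add(s);
        }

        // Shift one unit at a time from the most loaded to the least loaded server.
        while (!overloaded.isEmpty() && !underloaded.isEmpty()) {
            Server src = overloaded.poll();
            Server dst = underloaded.poll();
            src.load--;
            dst.load++;
            System.out.println("move 1 unit: " + src.name + " -> " + dst.name);
            if (src.load > target) overloaded.offer(src);     // re-enter only while still out of balance
            if (dst.load < target) underloaded.offer(dst);
        }
    }
}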

From source file:org.apache.storm.daemon.logviewer.utils.DirectoryCleaner.java

/**
 * If the total size of files exceeds either the per-worker quota or the global quota,
 * Logviewer deletes oldest inactive log files in a worker directory or in all worker dirs.
 * We use the parameter forPerDir to switch between the two deletion modes.
 *
 * @param dirs the list of directories to be scanned for deletion
 * @param quota the per-dir quota or the total quota for the all directories
 * @param forPerDir if true, deletion happens for a single dir; otherwise, for all directories globally
 * @param activeDirs only for global deletion, we want to skip the active logs in activeDirs
 * @return DeletionMeta describing the number and total size of the files deleted
 */
public DeletionMeta deleteOldestWhileTooLarge(List<Path> dirs, long quota, boolean forPerDir,
        Set<Path> activeDirs) throws IOException {
    long totalSize = 0;
    for (Path dir : dirs) {
        try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
            for (Path path : stream) {
                totalSize += Files.size(path);
            }
        }
    }
    LOG.debug("totalSize: {} quota: {}", totalSize, quota);
    long toDeleteSize = totalSize - quota;
    if (toDeleteSize <= 0) {
        return DeletionMeta.EMPTY;
    }

    int deletedFiles = 0;
    long deletedSize = 0;
    // the oldest PQ_SIZE files seen in this round are placed in the PQ, with the newest of them at the root
    PriorityQueue<Pair<Path, FileTime>> pq = new PriorityQueue<>(PQ_SIZE,
            Comparator.comparing((Pair<Path, FileTime> p) -> p.getRight()).reversed());
    int round = 0;
    final Set<Path> excluded = new HashSet<>();
    while (toDeleteSize > 0) {
        LOG.debug("To delete size is {}, start a new round of deletion, round: {}", toDeleteSize, round);
        for (Path dir : dirs) {
            try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
                for (Path path : stream) {
                    if (!excluded.contains(path)) {
                        if (isFileEligibleToSkipDelete(forPerDir, activeDirs, dir, path)) {
                            excluded.add(path);
                        } else {
                            Pair<Path, FileTime> p = Pair.of(path, Files.getLastModifiedTime(path));
                            if (pq.size() < PQ_SIZE) {
                                pq.offer(p);
                            } else if (p.getRight().toMillis() < pq.peek().getRight().toMillis()) {
                                pq.poll();
                                pq.offer(p);
                            }
                        }
                    }
                }
            }
        }
        if (!pq.isEmpty()) {
            // need to reverse the order of elements in PQ to delete files from oldest to newest
            Stack<Pair<Path, FileTime>> stack = new Stack<>();
            while (!pq.isEmpty()) {
                stack.push(pq.poll());
            }
            while (!stack.isEmpty() && toDeleteSize > 0) {
                Pair<Path, FileTime> pair = stack.pop();
                Path file = pair.getLeft();
                final String canonicalPath = file.toAbsolutePath().normalize().toString();
                final long fileSize = Files.size(file);
                final long lastModified = pair.getRight().toMillis();
                //Original implementation doesn't actually check if delete succeeded or not.
                try {
                    Utils.forceDelete(file.toString());
                    LOG.info("Delete file: {}, size: {}, lastModified: {}", canonicalPath, fileSize,
                            lastModified);
                    toDeleteSize -= fileSize;
                    deletedSize += fileSize;
                    deletedFiles++;
                } catch (IOException e) {
                    excluded.add(file);
                }
            }
            pq.clear();
            round++;
            if (round >= MAX_ROUNDS) {
                if (forPerDir) {
                    LOG.warn(
                            "Reach the MAX_ROUNDS: {} during per-dir deletion, you may have too many files in "
                                    + "a single directory : {}, will delete the rest files in next interval.",
                            MAX_ROUNDS, dirs.get(0).toAbsolutePath().normalize());
                } else {
                    LOG.warn("Reach the MAX_ROUNDS: {} during global deletion, you may have too many files, "
                            + "will delete the rest files in next interval.", MAX_ROUNDS);
                }
                break;
            }
        } else {
            LOG.warn("No more files able to delete this round, but {} is over quota by {} MB",
                    forPerDir ? "this directory" : "root directory", toDeleteSize * 1e-6);
        }
    }
    return new DeletionMeta(deletedSize, deletedFiles);
}
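
The cleaner keeps only the PQ_SIZE oldest files in memory by reversing the time comparator: the newest of the retained files sits at the head, so once the queue is full a still-older candidate evicts it via poll/offer. A standalone sketch of that bounded "k smallest" selection over plain timestamps (names invented):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class KOldestSketch {
    // Return the k smallest timestamps without sorting the whole input.
    static List<Long> kOldest(List<Long> timestamps, int k) {
        // Reversed order: the largest retained timestamp is at the head and is the first to be evicted.
        PriorityQueue<Long> retained = new PriorityQueue<>(k, Comparator.reverseOrder());
        for (long t : timestamps) {
            if (retained.size() < k) {
                retained.offer(t);
            } else if (t < retained.peek()) {
                retained.poll();        // drop the newest of the current candidates
                retained.offer(t);
            }
        }
        List<Long> result = new ArrayList<>(retained);
        result.sort(Comparator.naturalOrder());    // oldest first, like the Stack reversal above
        return result;
    }

    public static void main(String[] args) {
        System.out.println(kOldest(List.of(50L, 10L, 40L, 30L, 20L), 3)); // [10, 20, 30]
    }
}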

From source file:amfservices.actions.PGServicesAction.java

public Map<String, Object> penguinWannaEatAction(String uid, String coteID, List<String> penguinIDs, long now)
        throws PGException {
    final EntityContext context = EntityContext.getContext(uid);

    for (String pengId : penguinIDs) {
        PGException.Assert(context.getCote().penguins().contains(pengId), PGError.PENGUIN_NOT_IN_COTE,
                "Penguin isn't contained in cote");
    }

    PriorityQueue<Penguin> penguins = new PriorityQueue<>(penguinIDs.size(), new Comparator<Penguin>() {
        @Override
        public int compare(Penguin p1, Penguin p2) {
            long p1NextEatTime = PenguinServices.inst().nextEat(p1, context.getCote());
            long p2NextEatTime = PenguinServices.inst().nextEat(p2, context.getCote());

            return (p1NextEatTime > p2NextEatTime) ? 1 : ((p1NextEatTime == p2NextEatTime) ? 0 : -1);
        }
    });

    Map<String, Object> failData = new HashMap();

    int remainFish = context.getCote().getPoolFish();
    for (String pengId : penguinIDs) {
        Penguin penguin = Penguin.getPenguin(uid, coteID, pengId);

        long nextEat = PenguinServices.inst().nextEat(penguin, context.getCote());
        if (nextEat > now) {
            Map<String, Object> lastPenguinEatData = new HashMap();
            lastPenguinEatData.put(PGMacro.TIME_LAST_EAT, penguin.getLastEat());
            lastPenguinEatData.put(PGMacro.FISH_LAST_EAT, penguin.getFood());

            failData.put(penguin.getPenguinID(), lastPenguinEatData);
        } else {
            PGException.Assert(remainFish > 0, PGError.EMPTY_POOL, "Empty pool");
            PGException.Assert(PenguinServices.inst().configOf(penguin).getFeed() > 0,
                    PGError.PENGUIN_CANNOT_EAT, "Penguin cannot eat");

            penguins.add(penguin);
            remainFish -= Math.min(PenguinServices.inst().configOf(penguin).getFeed(), remainFish);
        }
    }

    List<Penguin> fedPenguins = new ArrayList(penguinIDs.size());
    while (!penguins.isEmpty()) {
        Penguin penguin = penguins.poll();
        long nextEat = PenguinServices.inst().nextEat(penguin, context.getCote());

        QuestLogger questLogger = QuestServices.inst().getQuestLogger(uid, now);
        PenguinServices.inst().eat(penguin, context, questLogger, nextEat);
        fedPenguins.add(penguin);
    }

    Map<String, Object> successData = new HashMap();
    for (Penguin penguin : fedPenguins) {
        penguin.saveToDB();
        successData.put(penguin.getPenguinID(), AMFBuilder.make(PGMacro.FISH_LAST_EAT, penguin.getFood(),
                PGMacro.TIME_LAST_EAT, penguin.getLastEat()));
    }

    context.saveToDB();

    Map<String, Object> response = new HashMap();
    response.put(PGMacro.SUCCESS, successData);
    response.put(PGMacro.FAIL, failData);

    return response;
}
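
The anonymous Comparator above hand-rolls a three-way comparison of two long values; Comparator.comparingLong (backed by Long.compare) expresses the same ordering with less room for error. A hedged equivalent follows, where Penguin and nextEatTime stand in for the game's types rather than its real API.

import java.util.Comparator;
import java.util.PriorityQueue;

class EatOrderSketch {
    record Penguin(String id, long nextEatTime) {}   // stand-in for the game's Penguin / nextEat()

    public static void main(String[] args) {
        // Equivalent to the hand-written ternary comparator, ordered by next eat time ascending.
        PriorityQueue<Penguin> penguins =
                new PriorityQueue<>(3, Comparator.comparingLong(Penguin::nextEatTime));
        penguins.add(new Penguin("p1", 200L));
        penguins.add(new Penguin("p2", 100L));
        penguins.add(new Penguin("p3", 300L));
        while (!penguins.isEmpty()) {
            System.out.println(penguins.poll().id());   // p2, p1, p3 (earliest eater first)
        }
    }
}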

From source file:org.broad.igv.track.PackedFeatures.java

/**
 * Allocates each feature to the rows such that there is no overlap.
 *
 * @param iter TabixLineReader wrapping the collection of alignments. Note that this should
 *             really be an Iterator<T>, but it can't be subclassed if that's the case.
 */
List<FeatureRow> packFeatures(Iterator iter) {

    List<FeatureRow> rows = new ArrayList(10);
    if (iter == null || !iter.hasNext()) {
        return rows;
    }

    maxFeatureLength = 0;
    int totalCount = 0;

    LinkedHashMap<Integer, PriorityQueue<T>> bucketArray = new LinkedHashMap();
    Comparator pqComparator = new Comparator<T>() {
        public int compare(Feature row1, Feature row2) {
            return (row2.getEnd() - row2.getStart()) - (row1.getEnd() - row1.getStart());
        }
    };

    // Allocate features to buckets,  1 bucket per base position
    while (iter.hasNext()) {
        T feature = (T) iter.next();
        maxFeatureLength = Math.max(maxFeatureLength,
                getFeatureEndForPacking(feature) - getFeatureStartForPacking(feature));
        features.add(feature);

        int bucketNumber = getFeatureStartForPacking(feature);

        PriorityQueue<T> bucket = bucketArray.get(bucketNumber);
        if (bucket == null) {
            bucket = new PriorityQueue<T>(5, pqComparator);
            bucketArray.put(bucketNumber, bucket);
        }
        bucket.add(feature);
        totalCount++;

    }

    // Allocate features to rows, pulling at most 1 per bucket for each row
    FeatureRow currentRow = new FeatureRow();
    int allocatedCount = 0;
    int nextStart = Integer.MIN_VALUE;

    int lastKey = 0;
    int lastAllocatedCount = -1;
    while (allocatedCount < totalCount && rows.size() < maxLevels) {

        // Check to prevent infinite loops
        if (lastAllocatedCount == allocatedCount) {

            if (IGV.hasInstance()) {
                String msg = "Infinite loop detected while packing features for track: " + getTrackName()
                        + ".<br>Not all features will be shown."
                        + "<br>Please contact igv-team@broadinstitute.org";

                log.error(msg);
                MessageUtils.showMessage(msg);
            }
            break;
        }
        lastAllocatedCount = allocatedCount;

        // Next row: loop through alignments until we reach the end of the interval

        PriorityQueue<T> bucket = null;
        // Advance to the next occupied bucket

        ArrayList<Integer> emptyBucketKeys = new ArrayList();
        for (Integer key : bucketArray.keySet()) {
            //if (key < lastKey) {
            //    String msg = "Features from track: " + trackName + " are not sorted.  Some features might not be shown.<br>" +
            //            "Please notify igv-help@broadinstitute.org";
            //    MessageUtils.showMessage(msg);
            //}
            lastKey = key;
            if (key >= nextStart) {
                bucket = bucketArray.get(key);

                T feature = bucket.poll();

                if (bucket.isEmpty()) {
                    emptyBucketKeys.add(key);
                }
                currentRow.addFeature(feature);
                nextStart = currentRow.end + FeatureTrack.MINIMUM_FEATURE_SPACING;
                allocatedCount++;
            }
        }
        for (Integer key : emptyBucketKeys) {
            bucketArray.remove(key);
        }

        // We've reached the end of the interval,  start a new row
        if (currentRow.features.size() > 0) {
            rows.add(currentRow);
            lastAllocatedCount = 0;
        }
        currentRow = new FeatureRow();
        nextStart = 0;
        lastKey = 0;

    }
    // Add the last row
    if (currentRow.features.size() > 0) {
        rows.add(currentRow);
    }

    return rows;
}
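
The packing code creates one small priority queue per start position, lazily, and pulls longer features out of a bucket first. A compact sketch of that lazily-bucketed layout using computeIfAbsent; Interval is an invented stand-in for IGV's Feature type.

import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.PriorityQueue;

class BucketedQueuesSketch {
    record Interval(int start, int end) {}

    public static void main(String[] args) {
        // One queue per start position; within a bucket, longer intervals come out first.
        Comparator<Interval> longestFirst =
                Comparator.comparingInt((Interval i) -> i.end() - i.start()).reversed();
        Map<Integer, PriorityQueue<Interval>> buckets = new LinkedHashMap<>();

        for (Interval iv : new Interval[] {
                new Interval(5, 9), new Interval(5, 20), new Interval(12, 14) }) {
            buckets.computeIfAbsent(iv.start(), k -> new PriorityQueue<>(5, longestFirst)).add(iv);
        }
        // Bucket 5 yields its longer interval first.
        System.out.println(buckets.get(5).poll()); // Interval[start=5, end=20]
    }
}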

From source file:org.apache.pig.piggybank.evaluation.util.Top.java

@Override
public DataBag exec(Tuple tuple) throws IOException {
    if (tuple == null || tuple.size() < 3) {
        return null;
    }
    try {
        int n = (Integer) tuple.get(0);
        int fieldNum = (Integer) tuple.get(1);
        DataBag inputBag = (DataBag) tuple.get(2);
        PriorityQueue<Tuple> store = new PriorityQueue<Tuple>(n + 1, new TupleComparator(fieldNum));
        updateTop(store, n, inputBag);
        DataBag outputBag = mBagFactory.newDefaultBag();
        for (Tuple t : store) {
            outputBag.add(t);
        }
        if (log.isDebugEnabled()) {
            if (randomizer.nextInt(1000) == 1) {
                log.debug("outputting a bag: ");
                for (Tuple t : outputBag)
                    log.debug("outputting " + t.toDelimitedString("\t"));
                log.debug("==================");
            }
        }
        return outputBag;
    } catch (ExecException e) {
        throw new RuntimeException("ExecException executing function: ", e);
    } catch (Exception e) {
        throw new RuntimeException("General Exception executing function: " + e);
    }
}
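
Pig's Top uses the size-bounded min-heap pattern: the queue holds at most n + 1 tuples, and whenever it grows past n the smallest (the head) is evicted, so only the n largest survive. A self-contained sketch with plain integers, since updateTop and TupleComparator are not shown above:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class TopNSketch {
    // Keep the n largest values; the smallest retained value sits at the head and is evicted first.
    static List<Integer> topN(Iterable<Integer> values, int n) {
        PriorityQueue<Integer> store = new PriorityQueue<>(n + 1, Comparator.naturalOrder());
        for (int v : values) {
            store.offer(v);
            if (store.size() > n) {
                store.poll();           // drop the current minimum
            }
        }
        List<Integer> top = new ArrayList<>(store);
        top.sort(Comparator.reverseOrder());
        return top;
    }

    public static void main(String[] args) {
        System.out.println(topN(List.of(7, 42, 3, 19, 8), 2)); // [42, 19]
    }
}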

From source file:org.chromium.chrome.browser.physicalweb.UrlManager.java

/**
 * Construct the UrlManager.
 * @param context An instance of android.content.Context
 */
@VisibleForTesting
public UrlManager(Context context) {
    mContext = context;
    mNotificationManager = new NotificationManagerProxyImpl(
            (NotificationManager) context.getSystemService(Context.NOTIFICATION_SERVICE));
    mPwsClient = new PwsClientImpl(context);
    mObservers = new ObserverList<Listener>();
    mNearbyUrls = new HashSet<>();
    mUrlInfoMap = new HashMap<>();
    mPwsResultMap = new HashMap<>();
    mUrlsSortedByTimestamp = new PriorityQueue<String>(1, new Comparator<String>() {
        @Override
        public int compare(String url1, String url2) {
            Long scanTimestamp1 = Long.valueOf(mUrlInfoMap.get(url1).getScanTimestamp());
            Long scanTimestamp2 = Long.valueOf(mUrlInfoMap.get(url2).getScanTimestamp());
            return scanTimestamp1.compareTo(scanTimestamp2);
        }
    });
    initSharedPreferences();
}
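
Chromium's queue orders plain String keys by a timestamp looked up in a side map. That is safe as long as an entry's timestamp does not change while the string sits in the queue, because a PriorityQueue only evaluates the comparator on insertion and removal; if the timestamp can change, the key must be removed and re-added. A reduced sketch of such a lookup comparator, with hypothetical names:

import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.PriorityQueue;

class LookupComparatorSketch {
    public static void main(String[] args) {
        Map<String, Long> scanTimestamp = new HashMap<>();
        scanTimestamp.put("https://a.example", 300L);
        scanTimestamp.put("https://b.example", 100L);

        // Order URL strings by the scan timestamp recorded in the side map, oldest first.
        Comparator<String> byScanTime = Comparator.comparingLong(url -> scanTimestamp.get(url));
        PriorityQueue<String> urlsByTimestamp = new PriorityQueue<>(1, byScanTime);
        urlsByTimestamp.add("https://a.example");
        urlsByTimestamp.add("https://b.example");

        System.out.println(urlsByTimestamp.poll()); // https://b.example (oldest scan)
    }
}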

From source file:org.sample.whiteboardapp.MyWhiteboard.java

static JSONObject findKNN(double[] Qpoint, Node root, int k) {
    JSONObject coordinates = new JSONObject();
    JSONArray lat_json = new JSONArray();
    JSONArray long_json = new JSONArray();
    PriorityQueue<Double> pq = new PriorityQueue<Double>(10, Collections.reverseOrder());
    HashMap<Double, Node> hm = new HashMap();
    searchKDSubtree(pq, hm, root, Qpoint, k, 0);
    System.out.println(pq.size());
    while (pq.size() != 0) {
        Node ans = hm.get(pq.poll());
        System.out.println(ans.point[0] + " " + ans.point[1]);
        System.out.println(pq.size());
        lat_json.add(ans.point[0]);
        long_json.add(ans.point[1]);

    }
    coordinates.put("latitude", lat_json);
    coordinates.put("longitude", long_json);
    return coordinates;

}
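
A max-heap over distances (a reversed comparator here, the same idea as the Collections.reverseOrder() above) is the standard bound for k-nearest-neighbour search: the farthest retained candidate is always at the head, ready to be evicted. Mapping a distance back to its node through a HashMap, as the snippet does, can silently drop nodes that happen to share a distance, so this sketch stores the pair directly; Candidate is invented, not the whiteboard app's Node.

import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class KnnHeapSketch {
    record Candidate(double distance, double[] point) {}

    // Keep at most k candidates; the farthest retained candidate sits at the head.
    static PriorityQueue<Candidate> knn(List<double[]> points, double[] query, int k) {
        PriorityQueue<Candidate> best = new PriorityQueue<>(k,
                Comparator.comparingDouble(Candidate::distance).reversed());
        for (double[] p : points) {
            double dx = p[0] - query[0], dy = p[1] - query[1];
            Candidate c = new Candidate(Math.sqrt(dx * dx + dy * dy), p);
            if (best.size() < k) {
                best.offer(c);
            } else if (c.distance() < best.peek().distance()) {
                best.poll();            // evict the current farthest neighbour
                best.offer(c);
            }
        }
        return best;
    }

    public static void main(String[] args) {
        List<double[]> pts = List.of(new double[] {0, 1}, new double[] {5, 5}, new double[] {1, 0});
        for (Candidate c : knn(pts, new double[] {0, 0}, 2)) {
            System.out.printf("%.1f -> (%.0f, %.0f)%n", c.distance(), c.point()[0], c.point()[1]);
        }
    }
}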