Example usage for java.util PriorityQueue PriorityQueue

List of usage examples for java.util PriorityQueue PriorityQueue

Introduction

On this page you can find example usages of java.util PriorityQueue PriorityQueue.

Prototype

public PriorityQueue(SortedSet<? extends E> c) 

Document

Creates a PriorityQueue containing the elements in the specified sorted set.
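
For orientation, here is a minimal, self-contained sketch of this constructor. The set contents are made up for illustration; the queue adopts the sorted set's ordering (natural ordering here, since the TreeSet has no comparator).

import java.util.Arrays;
import java.util.PriorityQueue;
import java.util.SortedSet;
import java.util.TreeSet;

public class SortedSetExample {
    public static void main(String[] args) {
        // The queue takes its ordering from the sorted set.
        SortedSet<Integer> sorted = new TreeSet<>(Arrays.asList(5, 1, 3));
        PriorityQueue<Integer> pq = new PriorityQueue<>(sorted);
        System.out.println(pq.poll()); // prints 1 -- the smallest element first
    }
}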

Usage

From source file:com.koda.integ.hbase.blockcache.OnHeapBlockCache.java

/**
 * Eviction method.
 */
void evict() {

    // Ensure only one eviction at a time
    if (!evictionLock.tryLock())
        return;

    try {
        evictionInProgress = true;
        long currentSize = this.size.get();
        long bytesToFree = currentSize - minSize();

        if (LOG.isDebugEnabled()) {
            LOG.debug("Block cache LRU eviction started; Attempting to free "
                    + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize));
        }

        if (bytesToFree <= 0)
            return;

        // Instantiate priority buckets
        BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize, singleSize());
        BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize, multiSize());
        BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize, memorySize());

        // Scan entire map putting into appropriate buckets
        for (CachedBlock cachedBlock : map.values()) {
            switch (cachedBlock.getPriority()) {
            case SINGLE: {
                bucketSingle.add(cachedBlock);
                break;
            }
            case MULTI: {
                bucketMulti.add(cachedBlock);
                break;
            }
            case MEMORY: {
                bucketMemory.add(cachedBlock);
                break;
            }
            }
        }

        PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<BlockBucket>(3);

        bucketQueue.add(bucketSingle);
        bucketQueue.add(bucketMulti);
        bucketQueue.add(bucketMemory);

        int remainingBuckets = 3;
        long bytesFreed = 0;

        BlockBucket bucket;
        while ((bucket = bucketQueue.poll()) != null) {
            long overflow = bucket.overflow();
            if (overflow > 0) {
                long bucketBytesToFree = Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
                bytesFreed += bucket.free(bucketBytesToFree);
            }
            remainingBuckets--;
        }

        if (LOG.isDebugEnabled()) {
            long single = bucketSingle.totalSize();
            long multi = bucketMulti.totalSize();
            long memory = bucketMemory.totalSize();
            LOG.debug("Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed)
                    + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single="
                    + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", "
                    + "memory=" + StringUtils.byteDesc(memory));
        }
    } finally {
        stats.evict();
        evictionInProgress = false;
        evictionLock.unlock();
    }
}
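
The example above relies on new PriorityQueue<BlockBucket>(3) using the elements' natural ordering, which requires BlockBucket to implement Comparable (in the HBase code it orders buckets by overflow; treat that as an assumption here). A reduced sketch with a hypothetical Bucket stand-in:

import java.util.PriorityQueue;

// Hypothetical stand-in for BlockBucket: ordering by overflow means the
// bucket with the least overflow is polled (and freed) first.
class Bucket implements Comparable<Bucket> {
    final String name;
    final long overflow;

    Bucket(String name, long overflow) {
        this.name = name;
        this.overflow = overflow;
    }

    @Override
    public int compareTo(Bucket other) {
        return Long.compare(this.overflow, other.overflow);
    }
}

public class BucketOrderExample {
    public static void main(String[] args) {
        PriorityQueue<Bucket> queue = new PriorityQueue<>(3); // 3 is only the initial capacity
        queue.add(new Bucket("single", 40));
        queue.add(new Bucket("multi", 10));
        queue.add(new Bucket("memory", 25));
        Bucket bucket;
        while ((bucket = queue.poll()) != null) {
            System.out.println(bucket.name); // multi, memory, single
        }
    }
}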

From source file:delfos.group.grs.consensus.ConsensusGRS.java

public File getConsensusOutputXMLwithDesiredConsensusDegree(File consensusInputXML, double consensusDegree) {
    File consensusOutputDirectory = (File) getParameterValue(CONSENSUS_OUTPUT_FILES_DIRECTORY);

    String consensusInputXMLFileNameNoExtension = consensusInputXML.getName().substring(0,
            consensusInputXML.getName().lastIndexOf("."));

    String consensusInputXMLInOutputDirectoryAbsolutePath = consensusOutputDirectory.getAbsolutePath()
            + File.separator + consensusInputXMLFileNameNoExtension;

    File consensusInputXMLInOutputDirectory = new File(consensusInputXMLInOutputDirectoryAbsolutePath);

    if (!consensusInputXML.exists()) {
        Global.showWarning("The input XML '" + consensusInputXMLInOutputDirectory
                + "' does not exists in the output directory");
        return null;
    }

    if (!consensusOutputDirectory.exists()) {
        Global.showWarning("'" + consensusOutputDirectory.getAbsolutePath() + "' not exists");
        return null;
    }

    if (!consensusOutputDirectory.isDirectory()) {
        Global.showWarning("'" + consensusOutputDirectory.getAbsolutePath() + "' is not a directory");
        return null;
    }

    List<File> childrenFiles = new ArrayList<>(Arrays.asList(consensusOutputDirectory.listFiles()));
    PriorityQueue<PriorityItem<File>> queue = new PriorityQueue<>(Collections.reverseOrder());

    for (File consensusOutputFile : childrenFiles) {
        final String outputFileNameNoExtension = consensusOutputFile.getName().substring(0,
                consensusOutputFile.getName().lastIndexOf("."));
        if (outputFileNameNoExtension.startsWith(consensusInputXMLFileNameNoExtension)
                && outputFileNameNoExtension.contains("Consenso")) {
            try {
                Global.showln(consensusOutputFile.getAbsolutePath());
                double thisFileConsensusDegree = ConsensusOfIndividualRecommendationsToXML
                        .readConsensusOutputXML(consensusOutputFile).consensusDegree;

                queue.add(new PriorityItem<>(consensusOutputFile, thisFileConsensusDegree));
            } catch (JDOMException | IOException ex) {
                Global.showWarning(ex);
            }
        }
    }

    if (queue.isEmpty()) {
        return null;
    }

    if (Global.isVerboseAnnoying()) {
        Global.showInfoMessage("Found " + queue.size() + " consensus files");
    }

    while (!queue.isEmpty()) {
        PriorityItem<File> priorityItem = queue.poll();

        double consensusDegreeThisFile = priorityItem.getPriority();

        if (consensusDegreeThisFile >= consensusDegree) {
            return priorityItem.getKey();
        }
    }

    throw new IllegalStateException(
            "Consensus degree not reached for '" + consensusInputXMLFileNameNoExtension + "'");
}
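
This example builds a max-heap via Collections.reverseOrder(), so poll() returns the entry with the highest consensus degree first; that works only because PriorityItem is presumably Comparable. A reduced sketch of the same idea with plain doubles:

import java.util.Collections;
import java.util.PriorityQueue;

public class ReverseOrderExample {
    public static void main(String[] args) {
        // PriorityQueue is a min-heap by default; Collections.reverseOrder()
        // turns it into a max-heap, so poll() returns the largest value first.
        PriorityQueue<Double> degrees = new PriorityQueue<>(Collections.reverseOrder());
        degrees.add(0.72);
        degrees.add(0.91);
        degrees.add(0.55);
        System.out.println(degrees.poll()); // 0.91 -- the highest degree first
    }
}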

From source file:blusunrize.immersiveengineering.api.energy.wires.ImmersiveNetHandler.java

public Set<AbstractConnection> getIndirectEnergyConnections(BlockPos node, World world,
        boolean ignoreIsEnergyOutput) {
    int dimension = world.provider.getDimension();
    if (!ignoreIsEnergyOutput && indirectConnections.containsKey(dimension)
            && indirectConnections.get(dimension).containsKey(node))
        return indirectConnections.get(dimension).get(node);
    else if (ignoreIsEnergyOutput && indirectConnectionsIgnoreOut.containsKey(dimension)
            && indirectConnectionsIgnoreOut.get(dimension).containsKey(node))
        return indirectConnectionsIgnoreOut.get(dimension).get(node);

    PriorityQueue<Pair<IImmersiveConnectable, Float>> queue = new PriorityQueue<>(
            Comparator.comparingDouble(Pair::getRight));
    Set<AbstractConnection> closedList = newSetFromMap(new ConcurrentHashMap<AbstractConnection, Boolean>());
    List<BlockPos> checked = new ArrayList<>();
    HashMap<BlockPos, BlockPos> backtracker = new HashMap<>();

    checked.add(node);
    Set<Connection> conL = getConnections(world, node);
    if (conL != null)
        for (Connection con : conL) {
            IImmersiveConnectable end = toIIC(con.end, world);
            if (end != null) {
                queue.add(new ImmutablePair<>(end, con.getBaseLoss()));
                backtracker.put(con.end, node);
            }
        }

    IImmersiveConnectable next;
    final int closedListMax = 1200;

    while (closedList.size() < closedListMax && !queue.isEmpty()) {
        Pair<IImmersiveConnectable, Float> pair = queue.poll();
        next = pair.getLeft();
        float loss = pair.getRight();
        BlockPos nextPos = toBlockPos(next);
        if (!checked.contains(nextPos) && queue.stream().noneMatch((p) -> p.getLeft().equals(nextPos))) {
            boolean isOutput = next.isEnergyOutput();
            if (ignoreIsEnergyOutput || isOutput) {
                BlockPos last = toBlockPos(next);
                WireType minimumType = null;
                int distance = 0;
                List<Connection> connectionParts = new ArrayList<>();
                while (last != null) {
                    BlockPos prev = last;
                    last = backtracker.get(last);
                    if (last != null) {

                        Set<Connection> conLB = getConnections(world, last);
                        if (conLB != null)
                            for (Connection conB : conLB)
                                if (conB.end.equals(prev)) {
                                    connectionParts.add(0, conB);
                                    distance += conB.length;
                                    if (minimumType == null
                                            || conB.cableType.getTransferRate() < minimumType.getTransferRate())
                                        minimumType = conB.cableType;
                                    break;
                                }
                    }
                }
                closedList.add(new AbstractConnection(toBlockPos(node), toBlockPos(next), minimumType, distance,
                        isOutput, connectionParts.toArray(new Connection[connectionParts.size()])));
            }

            Set<Connection> conLN = getConnections(world, toBlockPos(next));
            if (conLN != null)
                for (Connection con : conLN)
                    if (next.allowEnergyToPass(con)) {
                        IImmersiveConnectable end = toIIC(con.end, world);

                        Optional<Pair<IImmersiveConnectable, Float>> existing = queue.stream()
                                .filter((p) -> p.getLeft() == end).findAny();
                        float newLoss = con.getBaseLoss() + loss;
                        if (end != null && !checked.contains(con.end)
                                && existing.map(Pair::getRight).orElse(Float.MAX_VALUE) > newLoss) {
                            existing.ifPresent(p1 -> queue.removeIf((p2) -> p1.getLeft() == p2.getLeft()));
                            queue.add(new ImmutablePair<>(end, newLoss));
                            backtracker.put(con.end, toBlockPos(next));
                        }
                    }
            checked.add(toBlockPos(next));
        }
    }
    if (FMLCommonHandler.instance().getEffectiveSide() == Side.SERVER) {
        if (ignoreIsEnergyOutput) {
            if (!indirectConnectionsIgnoreOut.containsKey(dimension))
                indirectConnectionsIgnoreOut.put(dimension, new ConcurrentHashMap<>());
            Map<BlockPos, Set<AbstractConnection>> conns = indirectConnectionsIgnoreOut.get(dimension);
            if (!conns.containsKey(node))
                conns.put(node, newSetFromMap(new ConcurrentHashMap<>()));
            conns.get(node).addAll(closedList);
        } else {
            if (!indirectConnections.containsKey(dimension))
                indirectConnections.put(dimension, new ConcurrentHashMap<>());
            Map<BlockPos, Set<AbstractConnection>> conns = indirectConnections.get(dimension);
            if (!conns.containsKey(node))
                conns.put(node, newSetFromMap(new ConcurrentHashMap<>()));
            conns.get(node).addAll(closedList);
        }
    }
    return closedList;
}
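
Here the queue is a Dijkstra-style frontier ordered by accumulated loss via Comparator.comparingDouble, so poll() always yields the cheapest reachable node. A minimal sketch of the same idea, using double[] { nodeId, loss } entries as a hypothetical stand-in for the ImmutablePair entries:

import java.util.Comparator;
import java.util.PriorityQueue;

public class FrontierExample {
    public static void main(String[] args) {
        // Ordering the frontier by accumulated loss means poll() always returns
        // the cheapest node discovered so far, as in Dijkstra's algorithm.
        Comparator<double[]> byLoss = Comparator.comparingDouble(entry -> entry[1]);
        PriorityQueue<double[]> frontier = new PriorityQueue<>(byLoss);
        frontier.add(new double[] { 1, 0.50 }); // { nodeId, loss }
        frontier.add(new double[] { 2, 0.05 });
        frontier.add(new double[] { 3, 0.20 });
        System.out.println((int) frontier.poll()[0]); // 2 -- the lowest-loss node
    }
}

Because PriorityQueue has no decrease-key operation, the original code removes a node's stale entry (via removeIf) and re-adds it whenever a cheaper path is found.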

From source file:io.anserini.rerank.lib.AxiomReranker.java

/**
 * Calculate the scores (weights) of each term that occurred in the reranking pool.
 * The process:
 * 1. For each query term, calculate its score for each term in the reranking pool. The score
 * is calculated as
 * <pre>
 * P(both occurs)*log{P(both occurs)/P(t1 occurs)/P(t2 occurs)}
 * + P(both not occurs)*log{P(both not occurs)/P(t1 not occurs)/P(t2 not occurs)}
 * + P(t1 occurs t2 not occurs)*log{P(t1 occurs t2 not occurs)/P(t1 occurs)/P(t2 not occurs)}
 * + P(t1 not occurs t2 occurs)*log{P(t1 not occurs t2 occurs)/P(t1 not occurs)/P(t2 occurs)}
 * </pre>
 * 2. For each query term the scores of every other term in the reranking pool are stored in a
 * PriorityQueue, only the top {@code K} are kept.
 * 3. Add the scores of the same term together and pick the top {@code M} ones.
 *
 * @param termInvertedList a map from each term to the set of docIds in which the term occurs
 * @param context An instance of RerankerContext
 * @return Map<String, Double> Top terms and their weight scores in a HashMap
 */
private Map<String, Double> computeTermScore(Map<String, Set<Integer>> termInvertedList,
        RerankerContext<T> context) throws IOException {
    class ScoreComparator implements Comparator<Pair<String, Double>> {
        public int compare(Pair<String, Double> a, Pair<String, Double> b) {
            int cmp = Double.compare(b.getRight(), a.getRight());
            if (cmp == 0) {
                return a.getLeft().compareToIgnoreCase(b.getLeft());
            } else {
                return cmp;
            }
        }
    }

    // get collection statistics so that we can get idf later on.
    IndexReader reader;
    if (this.externalIndexPath != null) {
        Path indexPath = Paths.get(this.externalIndexPath);
        if (!Files.exists(indexPath) || !Files.isDirectory(indexPath) || !Files.isReadable(indexPath)) {
            throw new IllegalArgumentException(
                    this.externalIndexPath + " does not exist or is not a directory.");
        }
        reader = DirectoryReader.open(FSDirectory.open(indexPath));
    } else {
        IndexSearcher searcher = context.getIndexSearcher();
        reader = searcher.getIndexReader();
    }
    final long docCount = reader.numDocs() == -1 ? reader.maxDoc() : reader.numDocs();

    //calculate the Mutual Information between term with each query term
    List<String> queryTerms = context.getQueryTokens();
    Map<String, Integer> queryTermsCounts = new HashMap<>();
    for (String qt : queryTerms) {
        queryTermsCounts.put(qt, queryTermsCounts.getOrDefault(qt, 0) + 1);
    }

    Set<Integer> allDocIds = new HashSet<>();
    for (Set<Integer> s : termInvertedList.values()) {
        allDocIds.addAll(s);
    }
    int docIdsCount = allDocIds.size();

    // Each priority queue corresponds to one query term: the queue stores every term
    // in the reranking pool together with its score with respect to that query term.
    List<PriorityQueue<Pair<String, Double>>> allTermScoresPQ = new ArrayList<>();
    for (Map.Entry<String, Integer> q : queryTermsCounts.entrySet()) {
        String queryTerm = q.getKey();
        long df = reader.docFreq(new Term(LuceneDocumentGenerator.FIELD_BODY, queryTerm));
        if (df == 0L) {
            continue;
        }
        float idf = (float) Math.log((1 + docCount) / df);
        int qtf = q.getValue();
        if (termInvertedList.containsKey(queryTerm)) {
            PriorityQueue<Pair<String, Double>> termScorePQ = new PriorityQueue<>(new ScoreComparator());
            double selfMI = computeMutualInformation(termInvertedList.get(queryTerm),
                    termInvertedList.get(queryTerm), docIdsCount);
            for (Map.Entry<String, Set<Integer>> termEntry : termInvertedList.entrySet()) {
                double score;
                if (termEntry.getKey().equals(queryTerm)) { // The mutual information to itself will always be 1
                    score = idf * qtf;
                } else {
                    double crossMI = computeMutualInformation(termInvertedList.get(queryTerm),
                            termEntry.getValue(), docIdsCount);
                    score = idf * beta * qtf * crossMI / selfMI;
                }
                termScorePQ.add(Pair.of(termEntry.getKey(), score));
            }
            allTermScoresPQ.add(termScorePQ);
        }
    }

    Map<String, Double> aggTermScores = new HashMap<>();
    for (PriorityQueue<Pair<String, Double>> termScores : allTermScoresPQ) {
        for (int i = 0; i < Math.min(termScores.size(), this.K); i++) {
            Pair<String, Double> termScore = termScores.poll();
            String term = termScore.getLeft();
            Double score = termScore.getRight();
            if (score - 0.0 > 1e-8) {
                aggTermScores.put(term, aggTermScores.getOrDefault(term, 0.0) + score);
            }
        }
    }
    PriorityQueue<Pair<String, Double>> termScoresPQ = new PriorityQueue<>(new ScoreComparator());
    for (Map.Entry<String, Double> termScore : aggTermScores.entrySet()) {
        termScoresPQ.add(Pair.of(termScore.getKey(), termScore.getValue() / queryTerms.size()));
    }
    Map<String, Double> resultTermScores = new HashMap<>();
    for (int i = 0; i < Math.min(termScoresPQ.size(), this.M); i++) {
        Pair<String, Double> termScore = termScoresPQ.poll();
        String term = termScore.getKey();
        double score = termScore.getValue();
        resultTermScores.put(term, score);
    }

    return resultTermScores;
}
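
The method drains its queues with repeated poll() calls to take the top K (and later top M) entries; note that the loop bound is captured before polling, because the queue shrinks on every poll(). A reduced sketch of the pattern:

import java.util.Comparator;
import java.util.PriorityQueue;

public class TopKExample {
    public static void main(String[] args) {
        PriorityQueue<Integer> scores = new PriorityQueue<>(Comparator.reverseOrder());
        for (int s : new int[] { 4, 9, 1, 7, 3 }) {
            scores.add(s);
        }
        // Capture the bound first: the queue shrinks on every poll().
        int k = Math.min(scores.size(), 3);
        for (int i = 0; i < k; i++) {
            System.out.println(scores.poll()); // 9, 7, 4
        }
    }
}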

From source file:org.apache.hadoop.hbase.io.hfile.LruBlockCache.java

/**
 * Eviction method.
 */
void evict() {

    // Ensure only one eviction at a time
    if (!evictionLock.tryLock())
        return;

    try {
        evictionInProgress = true;
        long currentSize = this.size.get();
        long bytesToFree = currentSize - minSize();

        if (LOG.isTraceEnabled()) {
            LOG.trace("Block cache LRU eviction started; Attempting to free "
                    + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize));
        }

        if (bytesToFree <= 0)
            return;

        // Instantiate priority buckets
        BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize, singleSize());
        BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize, multiSize());
        BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize, memorySize());

        // Scan entire map putting into appropriate buckets
        for (CachedBlock cachedBlock : map.values()) {
            switch (cachedBlock.getPriority()) {
            case SINGLE: {
                bucketSingle.add(cachedBlock);
                break;
            }
            case MULTI: {
                bucketMulti.add(cachedBlock);
                break;
            }
            case MEMORY: {
                bucketMemory.add(cachedBlock);
                break;
            }
            }
        }

        long bytesFreed = 0;
        if (forceInMemory || memoryFactor > 0.999f) {
            long s = bucketSingle.totalSize();
            long m = bucketMulti.totalSize();
            if (bytesToFree > (s + m)) {
                // this means we need to evict blocks from the memory bucket to
                // make room, so the single and multi buckets will be emptied
                bytesFreed = bucketSingle.free(s);
                bytesFreed += bucketMulti.free(m);
                bytesFreed += bucketMemory.free(bytesToFree - bytesFreed);
            } else {
                // this means there is no need to evict blocks from the memory
                // bucket, and we try to keep the ratio between the single
                // bucket and the multi bucket at 1:2
                long bytesRemain = s + m - bytesToFree;
                if (3 * s <= bytesRemain) {
                    // single-bucket is small enough that no eviction happens for it
                    // hence all eviction goes from multi-bucket
                    bytesFreed = bucketMulti.free(bytesToFree);
                } else if (3 * m <= 2 * bytesRemain) {
                    // multi-bucket is small enough that no eviction happens for it
                    // hence all eviction goes from single-bucket
                    bytesFreed = bucketSingle.free(bytesToFree);
                } else {
                    // both buckets need to evict some blocks
                    bytesFreed = bucketSingle.free(s - bytesRemain / 3);
                    if (bytesFreed < bytesToFree) {
                        bytesFreed += bucketMulti.free(bytesToFree - bytesFreed);
                    }
                }
            }
        } else {
            PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<BlockBucket>(3);

            bucketQueue.add(bucketSingle);
            bucketQueue.add(bucketMulti);
            bucketQueue.add(bucketMemory);

            int remainingBuckets = 3;

            BlockBucket bucket;
            while ((bucket = bucketQueue.poll()) != null) {
                long overflow = bucket.overflow();
                if (overflow > 0) {
                    long bucketBytesToFree = Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
                    bytesFreed += bucket.free(bucketBytesToFree);
                }
                remainingBuckets--;
            }
        }

        if (LOG.isTraceEnabled()) {
            long single = bucketSingle.totalSize();
            long multi = bucketMulti.totalSize();
            long memory = bucketMemory.totalSize();
            LOG.trace("Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed)
                    + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single="
                    + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", "
                    + "memory=" + StringUtils.byteDesc(memory));
        }
    } finally {
        stats.evict();
        evictionInProgress = false;
        evictionLock.unlock();
    }
}
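
Note that the 3 in new PriorityQueue<BlockBucket>(3) is only the initial capacity of the backing array, not an upper bound; the queue grows as needed. A small sketch:

import java.util.PriorityQueue;

public class CapacityExample {
    public static void main(String[] args) {
        // The int argument sizes the backing array; the queue still grows
        // automatically and never rejects elements because of it.
        PriorityQueue<Integer> queue = new PriorityQueue<>(3);
        for (int i = 0; i < 10; i++) {
            queue.add(i);
        }
        System.out.println(queue.size()); // 10
    }
}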

From source file:org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.java

/**
 * Frees space when the used size reaches acceptableSize() or a block of some
 * size could not be allocated. Freeing uses the LRU algorithm and ensures
 * that some blocks are actually evicted.
 */
private void freeSpace() {
    // Ensure only one freeSpace operation in progress at a time
    if (!freeSpaceLock.tryLock())
        return;
    try {
        freeInProgress = true;
        long bytesToFreeWithoutExtra = 0;
        /*
         * Calculate the bytes to free for each bucketSizeInfo
         */
        StringBuffer msgBuffer = new StringBuffer();
        BucketAllocator.IndexStatistics[] stats = bucketAllocator.getIndexStatistics();
        long[] bytesToFreeForBucket = new long[stats.length];
        for (int i = 0; i < stats.length; i++) {
            bytesToFreeForBucket[i] = 0;
            long freeGoal = (long) Math.floor(stats[i].totalCount() * (1 - DEFAULT_MIN_FACTOR));
            freeGoal = Math.max(freeGoal, 1);
            if (stats[i].freeCount() < freeGoal) {
                bytesToFreeForBucket[i] = stats[i].itemSize() * (freeGoal - stats[i].freeCount());
                bytesToFreeWithoutExtra += bytesToFreeForBucket[i];
                msgBuffer.append("Free for bucketSize(" + stats[i].itemSize() + ")="
                        + StringUtils.byteDesc(bytesToFreeForBucket[i]) + ", ");
            }
        }
        msgBuffer.append("Free for total=" + StringUtils.byteDesc(bytesToFreeWithoutExtra) + ", ");

        if (bytesToFreeWithoutExtra <= 0) {
            return;
        }
        long currentSize = bucketAllocator.getUsedSize();
        long totalSize = bucketAllocator.getTotalSize();
        LOG.debug("Bucket cache free space started; Attempting to  " + msgBuffer.toString()
                + " of current used=" + StringUtils.byteDesc(currentSize) + ",actual cacheSize="
                + StringUtils.byteDesc(realCacheSize.get()) + ",total=" + StringUtils.byteDesc(totalSize));

        long bytesToFreeWithExtra = (long) Math
                .floor(bytesToFreeWithoutExtra * (1 + DEFAULT_EXTRA_FREE_FACTOR));

        // Instantiate priority buckets
        BucketEntryGroup bucketSingle = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, singleSize());
        BucketEntryGroup bucketMulti = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, multiSize());
        BucketEntryGroup bucketMemory = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, memorySize());

        // Scan entire map putting bucket entry into appropriate bucket entry
        // group
        for (Map.Entry<BlockCacheKey, BucketEntry> bucketEntryWithKey : backingMap.entrySet()) {
            switch (bucketEntryWithKey.getValue().getPriority()) {
            case SINGLE: {
                bucketSingle.add(bucketEntryWithKey);
                break;
            }
            case MULTI: {
                bucketMulti.add(bucketEntryWithKey);
                break;
            }
            case MEMORY: {
                bucketMemory.add(bucketEntryWithKey);
                break;
            }
            }
        }

        PriorityQueue<BucketEntryGroup> bucketQueue = new PriorityQueue<BucketEntryGroup>(3);

        bucketQueue.add(bucketSingle);
        bucketQueue.add(bucketMulti);
        bucketQueue.add(bucketMemory);

        int remainingBuckets = 3;
        long bytesFreed = 0;

        BucketEntryGroup bucketGroup;
        while ((bucketGroup = bucketQueue.poll()) != null) {
            long overflow = bucketGroup.overflow();
            if (overflow > 0) {
                long bucketBytesToFree = Math.min(overflow,
                        (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets);
                bytesFreed += bucketGroup.free(bucketBytesToFree);
            }
            remainingBuckets--;
        }

        /**
         * Check whether extra freeing is needed because some bucketSizeInfo
         * still needs free space
         */
        stats = bucketAllocator.getIndexStatistics();
        boolean needFreeForExtra = false;
        for (int i = 0; i < stats.length; i++) {
            long freeGoal = (long) Math.floor(stats[i].totalCount() * (1 - DEFAULT_MIN_FACTOR));
            freeGoal = Math.max(freeGoal, 1);
            if (stats[i].freeCount() < freeGoal) {
                needFreeForExtra = true;
                break;
            }
        }

        if (needFreeForExtra) {
            bucketQueue.clear();
            remainingBuckets = 2;

            bucketQueue.add(bucketSingle);
            bucketQueue.add(bucketMulti);

            while ((bucketGroup = bucketQueue.poll()) != null) {
                long bucketBytesToFree = (bytesToFreeWithExtra - bytesFreed) / remainingBuckets;
                bytesFreed += bucketGroup.free(bucketBytesToFree);
                remainingBuckets--;
            }
        }

        if (LOG.isDebugEnabled()) {
            long single = bucketSingle.totalSize();
            long multi = bucketMulti.totalSize();
            long memory = bucketMemory.totalSize();
            LOG.debug("Bucket cache free space completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + ", "
                    + "total=" + StringUtils.byteDesc(totalSize) + ", " + "single="
                    + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", "
                    + "memory=" + StringUtils.byteDesc(memory));
        }

    } finally {
        cacheStats.evict();
        freeInProgress = false;
        freeSpaceLock.unlock();
    }
}
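
The second, "extra free" pass clears the same queue with clear() and refills it with only the single and multi groups; a PriorityQueue can be safely reused this way. A tiny sketch:

import java.util.PriorityQueue;

public class ReuseExample {
    public static void main(String[] args) {
        PriorityQueue<Long> queue = new PriorityQueue<>(3);
        queue.add(30L);
        queue.add(10L);
        System.out.println(queue.poll()); // 10
        // clear() discards any remaining elements; the emptied queue can then
        // be refilled for a second pass, as freeSpace() does above.
        queue.clear();
        queue.add(20L);
        queue.add(5L);
        System.out.println(queue.poll()); // 5
    }
}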

From source file:org.rhq.core.pc.measurement.MeasurementManager.java

public Map<String, Object> getMeasurementScheduleInfoForResource(int resourceId) {
    Map<String, Object> results = null;

    for (ScheduledMeasurementInfo info : new PriorityQueue<ScheduledMeasurementInfo>(scheduledRequests)) {
        if (info.getResourceId() == resourceId) {
            if (results == null) {
                results = new HashMap<String, Object>();
            }
            String scheduleId = String.valueOf(info.getScheduleId());
            String interval = String.valueOf(info.getInterval()) + "ms";
            results.put(scheduleId, interval);
        }
    }

    return results;
}
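
The copy constructor new PriorityQueue<>(scheduledRequests) takes a snapshot, so the live schedule queue is not disturbed while scanning. Note that iterating a PriorityQueue, original or copy, traverses heap order rather than priority order; only poll() honors priority. A sketch:

import java.util.PriorityQueue;

public class IterationOrderExample {
    public static void main(String[] args) {
        PriorityQueue<Integer> live = new PriorityQueue<>();
        live.add(3);
        live.add(1);
        live.add(2);
        // The copy constructor takes a snapshot, so scanning it cannot disturb
        // the live queue -- but iteration order is unspecified for both.
        for (int value : new PriorityQueue<>(live)) {
            System.out.println(value); // heap order, not sorted order
        }
        System.out.println(live.poll()); // 1 -- only poll() honors priority
    }
}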

From source file:bwem.Graph.java

private int[] computeDistances(final ChokePoint start, final List<ChokePoint> targets) {
    final int[] distances = new int[targets.size()];

    TileImpl.getStaticMarkable().unmarkAll();

    final Queue<Pair<Integer, ChokePoint>> toVisit = new PriorityQueue<>(Comparator.comparingInt(a -> a.first));
    toVisit.offer(new Pair<>(0, start));

    int remainingTargets = targets.size();
    while (!toVisit.isEmpty()) {
        final Pair<Integer, ChokePoint> distanceAndChokePoint = toVisit.poll();
        final int currentDist = distanceAndChokePoint.first;
        final ChokePoint current = distanceAndChokePoint.second;
        final Tile currentTile = getMap().getData().getTile(current.getCenter().toTilePosition(),
                CheckMode.NO_CHECK);
        //            bwem_assert(currentTile.InternalData() == currentDist);
        if (!(((TileImpl) currentTile).getInternalData() == currentDist)) {
            throw new IllegalStateException();
        }
        ((TileImpl) currentTile).setInternalData(0); // resets Tile::m_internalData for future usage
        ((TileImpl) currentTile).getMarkable().setMarked();

        for (int i = 0; i < targets.size(); ++i) {
            if (current == targets.get(i)) {
                distances[i] = currentDist;
                --remainingTargets;
            }
        }
        if (remainingTargets == 0) {
            break;
        }

        if (current.isBlocked() && (!current.equals(start))) {
            continue;
        }

        for (final Area pArea : new Area[] { current.getAreas().getFirst(), current.getAreas().getSecond() }) {
            for (final ChokePoint next : pArea.getChokePoints()) {
                if (!next.equals(current)) {
                    final int newNextDist = currentDist + distance(current, next);
                    final Tile nextTile = getMap().getData().getTile(next.getCenter().toTilePosition(),
                            CheckMode.NO_CHECK);
                    if (!((TileImpl) nextTile).getMarkable().isMarked()) {
                        if (((TileImpl) nextTile).getInternalData() != 0) { // next is already in toVisit
                            if (newNextDist < ((TileImpl) nextTile).getInternalData()) { // newNextDist < oldNextDist
                                // To update next's distance, we need to remove and re-insert it into toVisit:
                                //     bwem_assert(iNext != range.second);
                                final boolean removed = toVisit
                                        .remove(new Pair<>(((TileImpl) nextTile).getInternalData(), next));
                                if (!removed) {
                                    throw new IllegalStateException();
                                }
                                ((TileImpl) nextTile).setInternalData(newNextDist);
                                ((ChokePointImpl) next).setPathBackTrace(current);
                                toVisit.offer(new Pair<>(newNextDist, next));
                            }
                        } else {
                            ((TileImpl) nextTile).setInternalData(newNextDist);
                            ((ChokePointImpl) next).setPathBackTrace(current);
                            toVisit.offer(new Pair<>(newNextDist, next));
                        }
                    }
                }
            }
        }
    }

    //    //   bwem_assert(!remainingTargets);
    //        if (!(remainingTargets == 0)) {
    //            throw new IllegalStateException();
    //        }

    // reset Tile::m_internalData for future usage
    for (Pair<Integer, ChokePoint> distanceToChokePoint : toVisit) {
        ((TileImpl) getMap().getData().getTile(distanceToChokePoint.second.getCenter().toTilePosition(),
                CheckMode.NO_CHECK)).setInternalData(0);
    }

    return distances;
}
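
Because PriorityQueue offers no decrease-key, this Dijkstra variant removes the stale (oldDistance, node) pair and offers a new one; remove() is a linear scan that matches via equals(), which is why the code reconstructs an equal Pair. A reduced sketch (the int[] entries are a hypothetical stand-in, removed here by reference identity since arrays don't override equals):

import java.util.Comparator;
import java.util.PriorityQueue;

public class DecreaseKeyExample {
    public static void main(String[] args) {
        // PriorityQueue has no decrease-key, so a shorter distance is applied
        // by removing the stale entry and offering a replacement. remove() is
        // a linear scan that matches via equals().
        Comparator<int[]> byDistance = Comparator.comparingInt(entry -> entry[0]);
        PriorityQueue<int[]> toVisit = new PriorityQueue<>(byDistance);
        int[] stale = { 9, 42 }; // { distance, nodeId }
        toVisit.offer(stale);
        toVisit.remove(stale); // arrays match by identity; BWEM's Pair matches by value
        toVisit.offer(new int[] { 4, 42 });
        System.out.println(toVisit.peek()[0]); // 4
    }
}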

From source file:org.jiemamy.utils.collection.CollectionsUtil.java

/**
 * Creates a new {@link PriorityQueue} containing the elements of the specified collection.
 * 
 * @param <E> the type of elements held in the {@link PriorityQueue}
 * @param c the collection whose elements are to be placed into the priority queue
 * @return a new {@link PriorityQueue} instance
 * @throws IllegalArgumentException if the argument is {@code null}
 * @see PriorityQueue#PriorityQueue(Collection)
 */
public static <E> PriorityQueue<E> newPriorityQueue(Collection<? extends E> c) {
    Validate.notNull(c);
    return new PriorityQueue<E>(c);
}

From source file:org.jiemamy.utils.collection.CollectionsUtil.java

/**
 * Creates a new {@link PriorityQueue} with the specified initial capacity.
 * 
 * @param <E> the type of elements held in the {@link PriorityQueue}
 * @param initialCapacity the initial capacity for the priority queue
 * @return a new {@link PriorityQueue} instance
 * @throws IllegalArgumentException if <tt>initialCapacity</tt> is less than 1
 * @see PriorityQueue#PriorityQueue(int)
 */
public static <E> PriorityQueue<E> newPriorityQueue(int initialCapacity) {
    return new PriorityQueue<E>(initialCapacity);
}
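
A hypothetical call site for these factories, assuming org.jiemamy.utils.collection.CollectionsUtil is on the classpath; before the Java 7 diamond operator, the factory method let the element type be inferred from the argument:

import java.util.Arrays;
import java.util.List;
import java.util.PriorityQueue;

import org.jiemamy.utils.collection.CollectionsUtil;

public class FactoryUsageExample {
    public static void main(String[] args) {
        List<String> names = Arrays.asList("carol", "alice", "bob");
        // Before the Java 7 diamond operator, these factories let <E> be
        // inferred, avoiding a repeated type parameter at the call site.
        PriorityQueue<String> byName = CollectionsUtil.newPriorityQueue(names);
        PriorityQueue<String> sized = CollectionsUtil.newPriorityQueue(16);
        sized.add(byName.poll());
        System.out.println(sized.peek()); // alice
    }
}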