Example usage for java.util PriorityQueue clear

Introduction

On this page you can find example usage of java.util.PriorityQueue.clear().

Prototype

public void clear() 

Document

Removes all of the elements from this priority queue.
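
The queue will be empty after this call returns. Note that clear() discards the elements without returning them; if you still need the elements, drain the queue with poll() instead. A minimal, illustrative sketch of the difference (class and variable names are not taken from the examples below):

    import java.util.Arrays;
    import java.util.PriorityQueue;

    public class ClearVsPoll {
        public static void main(String[] args) {
            PriorityQueue<String> queue = new PriorityQueue<>(Arrays.asList("b", "a", "c"));

            // Draining with poll() visits the elements in priority order and empties the queue.
            while (!queue.isEmpty()) {
                System.out.print(queue.poll() + " "); // prints: a b c
            }
            System.out.println();

            // clear() removes everything in one call and returns nothing.
            queue.addAll(Arrays.asList("b", "a", "c"));
            queue.clear();
            System.out.println(queue.size()); // prints: 0
        }
    }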

Usage

From source file:Main.java

public static void main(String args[]) {

    PriorityQueue<Integer> prq = new PriorityQueue<Integer>();

    for (int i = 0; i < 10; i++) {
        prq.add(i);
    }

    System.out.println(prq);

    prq.clear();

    System.out.println(prq);
}
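
Running this example prints the ten queued integers first (in the queue's internal heap order, where only the head element is guaranteed to be the smallest) and then an empty queue, [], because clear() has removed everything.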

From source file:main.java.RMDupper.java

public static void checkForDuplication(DupStats dupStats, OccurenceCounterMerged occurenceCounterMerged,
        SAMFileWriter outputSam, Boolean allReadsAsMerged,
        PriorityQueue<ImmutableTriple<Integer, Integer, SAMRecord>> recordBuffer,
        PriorityQueue<ImmutableTriple<Integer, Integer, SAMRecord>> duplicateBuffer, Set<String> discardSet) {
    // At this point recordBuffer contains all alignments that overlap with its first entry
    // Therefore the task here is to de-duplicate for the first entry in recordBuffer
    duplicateBuffer.clear();

    Iterator<ImmutableTriple<Integer, Integer, SAMRecord>> it = recordBuffer.iterator();
    while (it.hasNext()) {
        ImmutableTriple<Integer, Integer, SAMRecord> maybeDuplicate = it.next();

        if (allReadsAsMerged) {
            if (recordBuffer.peek().left.equals(maybeDuplicate.left)
                    && recordBuffer.peek().middle.equals(maybeDuplicate.middle)) {
                duplicateBuffer.add(maybeDuplicate);
            }
        } else {
            // We build a logic table
            EnumSet<DL> testConditon = EnumSet.noneOf(DL.class);
            if (recordBuffer.peek().right.getReadName().startsWith("M_")) {
                testConditon.add(DL.buffer_read_merged);
            } else if (recordBuffer.peek().right.getReadName().startsWith("F_")) {
                testConditon.add(DL.buffer_read_one);
            } else if (recordBuffer.peek().right.getReadName().startsWith("R_")) {
                testConditon.add(DL.buffer_read_two);
            } else {
                throw new RuntimeException("Unlabelled read '" + recordBuffer.peek().right.getReadName()
                        + "' read name must start with one of M_,F_,R when not treating all reads as merged");
            }

            if (maybeDuplicate.right.getReadName().startsWith("M_")) {
                testConditon.add(DL.maybed_read_merged);
            } else if (maybeDuplicate.right.getReadName().startsWith("F_")) {
                testConditon.add(DL.maybed_read_one);
            } else if (maybeDuplicate.right.getReadName().startsWith("R_")) {
                testConditon.add(DL.maybed_read_two);
            } else {
                System.err.println("Unlabelled read '" + maybeDuplicate.right.getReadName()
                        + "' read name must start with one of M_,F_,R when not treating all reads as merged");
            }

            if (recordBuffer.peek().left.equals(maybeDuplicate.left)) {
                testConditon.add(DL.equal_alignment_start);
            }
            if (recordBuffer.peek().middle.equals(maybeDuplicate.middle)) {
                testConditon.add(DL.equal_alignment_end);
            }

            boolean duplicateIsShorterOrEqual = maybeDuplicate.middle
                    - maybeDuplicate.left <= recordBuffer.peek().middle - recordBuffer.peek().left;
            boolean duplicateIsLongerOrEqual = recordBuffer.peek().middle
                    - recordBuffer.peek().left <= maybeDuplicate.middle - maybeDuplicate.left;

            if (duplicateIsShorterOrEqual) {
                testConditon.add(DL.maybed_shorter_or_equal);
            }
            if (duplicateIsLongerOrEqual) {
                testConditon.add(DL.maybed_longer_or_equal);
            }

            if (recordBuffer.peek().right.getReadNegativeStrandFlag()) {
                testConditon.add(DL.buffer_reverse_strand);
            } else {
                testConditon.add(DL.buffer_forward_strand);
            }
            if (maybeDuplicate.right.getReadNegativeStrandFlag()) {
                testConditon.add(DL.maybed_reverse_strand);
            } else {
                testConditon.add(DL.maybed_forward_strand);
            }

            //System.out.println("Testing for duplication: "+testConditon);
            //System.out.println(recordBuffer.peek().right.getReadName()+"\t"+recordBuffer.peek().right.getAlignmentStart()+"\t"+recordBuffer.peek().right.getAlignmentEnd());
            //System.out.println(maybeDuplicate.right.getReadName()+"\t"+maybeDuplicate.right.getAlignmentStart()+"\t"+maybeDuplicate.right.getAlignmentEnd());

            //for ( EnumSet<DL> match : duplicateConditionSet.stream().filter(dc -> testConditon.containsAll(dc) ).collect(Collectors.toList()) ) {
            //  System.out.println("Match to: "+match);
            //}
            //for ( EnumSet<DL> match : duplicateConditionSet.stream().collect(Collectors.toList()) ) {
            //  System.out.println("Try to match: "+match);
            //  if ( match.containsAll(testConditon) )
            //  {
            //    System.out.println("success");
            //  }
            //}

            // Test for Duplication
            if (duplicateConditionSet.stream().anyMatch(dc -> testConditon.containsAll(dc))) {
                duplicateBuffer.add(maybeDuplicate);
            }
        }
    }
    //START DEBUG
    /*
    System.out.println ("recordBuffer");
            
    Comparator<SAMRecord> samRecordComparatorForRecordBuffer = new SAMRecordPositionAndQualityComparator();
    ArrayList<ImmutableTriple<Integer, Integer, SAMRecord>> sortedRecordBuffer = new ArrayList<ImmutableTriple<Integer, Integer, SAMRecord>>(recordBuffer.size());
    Iterator<ImmutableTriple<Integer, Integer, SAMRecord>> rit = recordBuffer.iterator();
            
    while (rit.hasNext()) {
    sortedRecordBuffer.add(rit.next());
    }
    sortedRecordBuffer.sort(Comparator.comparing(ImmutableTriple<Integer, Integer, SAMRecord>::getRight, samRecordComparatorForRecordBuffer));
            
    for ( ImmutableTriple<Integer, Integer, SAMRecord> currTriple : sortedRecordBuffer ) {
    System.out.println(" srb: "+(currTriple.right.getReadNegativeStrandFlag()?"-":"+")+" "+currTriple+" "+SAMRecordQualityComparator.getQualityScore(currTriple.right.getBaseQualityString()));
    }
            
    System.out.println ("duplicateBuffer");
    ArrayList<ImmutableTriple<Integer, Integer, SAMRecord>> sortedDuplicateBuffer = new ArrayList<ImmutableTriple<Integer, Integer, SAMRecord>>(duplicateBuffer.size());
    Iterator<ImmutableTriple<Integer, Integer, SAMRecord>> dit = duplicateBuffer.iterator();
    while (dit.hasNext()) {
    sortedDuplicateBuffer.add(dit.next());
    }
    sortedDuplicateBuffer.sort(Comparator.comparing(ImmutableTriple<Integer, Integer, SAMRecord>::getMiddle));
            
    for ( ImmutableTriple<Integer, Integer, SAMRecord> currTriple : sortedDuplicateBuffer ) {
    System.out.println(" dbe: "+(currTriple.right.getReadNegativeStrandFlag()?"-":"+")+" "+currTriple+" "+SAMRecordQualityComparator.getQualityScore(currTriple.right.getBaseQualityString()));
    }
            
    // Sort again with priority queue order
    sortedDuplicateBuffer.sort(Comparator.comparing(ImmutableTriple<Integer, Integer, SAMRecord>::getRight, samRecordComparator.reversed()));
    for ( ImmutableTriple<Integer, Integer, SAMRecord> currTriple : sortedDuplicateBuffer ) {
    System.out.println("sdbe: "+(currTriple.right.getReadNegativeStrandFlag()?"-":"+")+" "+currTriple+" "+SAMRecordQualityComparator.getQualityScore(currTriple.right.getBaseQualityString()));
    }
    */
    //END DEBUG
    if (!duplicateBuffer.isEmpty() && !discardSet.contains(duplicateBuffer.peek().right.getReadName())) {
        //System.out.println("WRITE "+duplicateBuffer.peek());
        decrementDuplicateStats(dupStats, allReadsAsMerged, duplicateBuffer.peek().right.getReadName());
        occurenceCounterMerged.putValue(Long
                .valueOf(duplicateBuffer.stream()
                        .filter(d -> allReadsAsMerged || d.right.getReadName().startsWith("M_")).count())
                .intValue() - 1);
        outputSam.addAlignment(duplicateBuffer.peek().right);
    }
    while (!duplicateBuffer.isEmpty()) {
        discardSet.add(duplicateBuffer.poll().right.getReadName());
    }
    // Maintain the invariant that the first item in recordBuffer may have duplicates
    while (!recordBuffer.isEmpty() && discardSet.contains(recordBuffer.peek().right.getReadName())) {
        String duplicateReadName = recordBuffer.poll().right.getReadName();
        incrementDuplicateStats(dupStats, allReadsAsMerged, duplicateReadName);
        discardSet.remove(duplicateReadName);
    }
}
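
Here duplicateBuffer is a long-lived PriorityQueue that is emptied with clear() at the top of every call and then refilled, rather than being reallocated for each window of overlapping reads. A minimal sketch of that reuse pattern, with a plain Integer queue standing in for the ImmutableTriple records (class and method names are illustrative):

    import java.util.List;
    import java.util.PriorityQueue;

    class WindowProcessor {
        // One long-lived queue, reused across calls instead of allocating a new one each time.
        private final PriorityQueue<Integer> buffer = new PriorityQueue<>();

        void processWindow(List<Integer> window) {
            buffer.clear();          // drop anything left over from the previous window
            buffer.addAll(window);   // refill for the current window
            while (!buffer.isEmpty()) {
                System.out.println(buffer.poll()); // consume in priority order
            }
        }
    }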

From source file:hivemall.knn.lsh.MinHashUDTF.java

private void computeAndForwardSignatures(List<FeatureValue> features, Object[] forwardObjs)
        throws HiveException {
    final PriorityQueue<Integer> minhashes = new PriorityQueue<Integer>();
    // Compute N sets of K minhash values
    for (int i = 0; i < num_hashes; i++) {
        float weightedMinHashValues = Float.MAX_VALUE;

        for (FeatureValue fv : features) {
            Object f = fv.getFeature();
            int hashIndex = Math.abs(hashFuncs[i].hash(f));
            float w = fv.getValueAsFloat();
            float hashValue = calcWeightedHashValue(hashIndex, w);
            if (hashValue < weightedMinHashValues) {
                weightedMinHashValues = hashValue;
                minhashes.offer(hashIndex);
            }
        }

        forwardObjs[0] = getSignature(minhashes, num_keygroups);
        forward(forwardObjs);

        minhashes.clear();
    }
}
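
The minhashes queue is shared across all num_hashes rounds and reset with clear() at the end of each round, so hash indices gathered for one signature do not leak into the next. clear() removes only the elements: the queue keeps its ordering (and, at least in the reference implementation, does not shrink its backing array), which is what makes this reuse cheap. A small illustrative snippet, assuming the usual java.util imports (Arrays, Comparator, PriorityQueue):

    PriorityQueue<Integer> pq = new PriorityQueue<>(Comparator.<Integer>reverseOrder());
    pq.addAll(Arrays.asList(1, 2, 3));
    pq.clear();                    // removes the elements ...
    pq.offer(5);
    pq.offer(9);
    System.out.println(pq.peek()); // ... but keeps the comparator: prints 9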

From source file:org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.java

/**
 * Free the space if the used size reaches acceptableSize() or a block of one size
 * couldn't be allocated. When freeing the space, we use the LRU algorithm and
 * ensure that some blocks are evicted.
 */
private void freeSpace() {
    // Ensure only one freeSpace progress at a time
    if (!freeSpaceLock.tryLock())
        return;
    try {
        freeInProgress = true;
        long bytesToFreeWithoutExtra = 0;
        /*
         * Calculate free byte for each bucketSizeinfo
         */
        StringBuffer msgBuffer = new StringBuffer();
        BucketAllocator.IndexStatistics[] stats = bucketAllocator.getIndexStatistics();
        long[] bytesToFreeForBucket = new long[stats.length];
        for (int i = 0; i < stats.length; i++) {
            bytesToFreeForBucket[i] = 0;
            long freeGoal = (long) Math.floor(stats[i].totalCount() * (1 - DEFAULT_MIN_FACTOR));
            freeGoal = Math.max(freeGoal, 1);
            if (stats[i].freeCount() < freeGoal) {
                bytesToFreeForBucket[i] = stats[i].itemSize() * (freeGoal - stats[i].freeCount());
                bytesToFreeWithoutExtra += bytesToFreeForBucket[i];
                msgBuffer.append("Free for bucketSize(" + stats[i].itemSize() + ")="
                        + StringUtils.byteDesc(bytesToFreeForBucket[i]) + ", ");
            }
        }
        msgBuffer.append("Free for total=" + StringUtils.byteDesc(bytesToFreeWithoutExtra) + ", ");

        if (bytesToFreeWithoutExtra <= 0) {
            return;
        }
        long currentSize = bucketAllocator.getUsedSize();
        long totalSize = bucketAllocator.getTotalSize();
        LOG.debug("Bucket cache free space started; Attempting to  " + msgBuffer.toString()
                + " of current used=" + StringUtils.byteDesc(currentSize) + ",actual cacheSize="
                + StringUtils.byteDesc(realCacheSize.get()) + ",total=" + StringUtils.byteDesc(totalSize));

        long bytesToFreeWithExtra = (long) Math
                .floor(bytesToFreeWithoutExtra * (1 + DEFAULT_EXTRA_FREE_FACTOR));

        // Instantiate priority buckets
        BucketEntryGroup bucketSingle = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, singleSize());
        BucketEntryGroup bucketMulti = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, multiSize());
        BucketEntryGroup bucketMemory = new BucketEntryGroup(bytesToFreeWithExtra, blockSize, memorySize());

        // Scan entire map putting bucket entry into appropriate bucket entry
        // group
        for (Map.Entry<BlockCacheKey, BucketEntry> bucketEntryWithKey : backingMap.entrySet()) {
            switch (bucketEntryWithKey.getValue().getPriority()) {
            case SINGLE: {
                bucketSingle.add(bucketEntryWithKey);
                break;
            }
            case MULTI: {
                bucketMulti.add(bucketEntryWithKey);
                break;
            }
            case MEMORY: {
                bucketMemory.add(bucketEntryWithKey);
                break;
            }
            }
        }

        PriorityQueue<BucketEntryGroup> bucketQueue = new PriorityQueue<BucketEntryGroup>(3);

        bucketQueue.add(bucketSingle);
        bucketQueue.add(bucketMulti);
        bucketQueue.add(bucketMemory);

        int remainingBuckets = 3;
        long bytesFreed = 0;

        BucketEntryGroup bucketGroup;
        while ((bucketGroup = bucketQueue.poll()) != null) {
            long overflow = bucketGroup.overflow();
            if (overflow > 0) {
                long bucketBytesToFree = Math.min(overflow,
                        (bytesToFreeWithoutExtra - bytesFreed) / remainingBuckets);
                bytesFreed += bucketGroup.free(bucketBytesToFree);
            }
            remainingBuckets--;
        }

        /**
         * Check whether need extra free because some bucketSizeinfo still needs
         * free space
         */
        stats = bucketAllocator.getIndexStatistics();
        boolean needFreeForExtra = false;
        for (int i = 0; i < stats.length; i++) {
            long freeGoal = (long) Math.floor(stats[i].totalCount() * (1 - DEFAULT_MIN_FACTOR));
            freeGoal = Math.max(freeGoal, 1);
            if (stats[i].freeCount() < freeGoal) {
                needFreeForExtra = true;
                break;
            }
        }

        if (needFreeForExtra) {
            bucketQueue.clear();
            remainingBuckets = 2;

            bucketQueue.add(bucketSingle);
            bucketQueue.add(bucketMulti);

            while ((bucketGroup = bucketQueue.poll()) != null) {
                long bucketBytesToFree = (bytesToFreeWithExtra - bytesFreed) / remainingBuckets;
                bytesFreed += bucketGroup.free(bucketBytesToFree);
                remainingBuckets--;
            }
        }

        if (LOG.isDebugEnabled()) {
            long single = bucketSingle.totalSize();
            long multi = bucketMulti.totalSize();
            long memory = bucketMemory.totalSize();
            LOG.debug("Bucket cache free space completed; " + "freed=" + StringUtils.byteDesc(bytesFreed) + ", "
                    + "total=" + StringUtils.byteDesc(totalSize) + ", " + "single="
                    + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", "
                    + "memory=" + StringUtils.byteDesc(memory));
        }

    } finally {
        cacheStats.evict();
        freeInProgress = false;
        freeSpaceLock.unlock();
    }
}
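
The freeing logic makes two passes over the same PriorityQueue: the first pass polls all three bucket groups, and if some bucket sizes still need space, bucketQueue.clear() resets the (already drained) queue before only the single and multi groups are re-added for the extra pass. A minimal sketch of that clear-and-reseed pattern, assuming the usual java.util imports (group names are illustrative):

    PriorityQueue<String> groups = new PriorityQueue<>(Arrays.asList("single", "multi", "memory"));
    while (!groups.isEmpty()) {
        System.out.println("pass 1: " + groups.poll());
    }

    // Reseed for a second pass over a subset; clear() guarantees no stale entries remain.
    groups.clear();
    groups.addAll(Arrays.asList("single", "multi"));
    while (!groups.isEmpty()) {
        System.out.println("pass 2: " + groups.poll());
    }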

From source file:org.apache.hadoop.mapred.PoolFairnessCalculator.java

/**
 * This method takes a list of {@link PoolMetadata} objects and calculates
 * fairness metrics of how well scheduling is doing.
 *
 * The goals of fair scheduling are to ensure that every pool is getting
 * an equal share.  The expected share of resources for each pool is
 * complicated by the pools not requiring an equal share
 * or pools that have a minimum or maximum allocation of resources.
 *
 * @param poolMetadataList List of all pool metadata
 * @param metricsRecord Where to write the metrics
 */
public static void calculateFairness(final List<PoolMetadata> poolMetadataList,
        final MetricsRecord metricsRecord) {
    if (poolMetadataList == null || poolMetadataList.isEmpty()) {
        return;
    }

    // Find the total available usage and guaranteed resources by resource
    // type.  Add the resource metadata to the sorted set to schedule if
    // there is something to schedule (desiredAfterConstraints > 0)
    long startTime = System.currentTimeMillis();
    Map<String, TotalResourceMetadata> resourceTotalMap = new HashMap<String, TotalResourceMetadata>();
    Map<String, Set<ResourceMetadata>> resourceSchedulablePoolMap = new HashMap<String, Set<ResourceMetadata>>();
    for (PoolMetadata poolMetadata : poolMetadataList) {
        for (String resourceName : poolMetadata.getResourceMetadataKeys()) {
            ResourceMetadata resourceMetadata = poolMetadata.getResourceMetadata(resourceName);
            TotalResourceMetadata totalResourceMetadata = resourceTotalMap.get(resourceName);
            if (totalResourceMetadata == null) {
                totalResourceMetadata = new TotalResourceMetadata();
                resourceTotalMap.put(resourceName, totalResourceMetadata);
            }
            totalResourceMetadata.totalAvailable += resourceMetadata.getCurrentlyUsed();

            Set<ResourceMetadata> schedulablePoolSet = resourceSchedulablePoolMap.get(resourceName);
            if (schedulablePoolSet == null) {
                schedulablePoolSet = new HashSet<ResourceMetadata>();
                resourceSchedulablePoolMap.put(resourceName, schedulablePoolSet);
            }
            if (resourceMetadata.getDesiredAfterConstraints() > 0) {
                if (!schedulablePoolSet.add(resourceMetadata)) {
                    throw new RuntimeException(
                            "Duplicate resource metadata " + resourceMetadata + " in " + schedulablePoolSet);
                }
            }
        }
    }

    // First, allocate resources for all the min guaranteed resources
    // for the pools.  Ordering is done by the largest
    // min(min guaranteed, desired).
    GuaranteedDesiredComparator guarantedDesiredComparator = new GuaranteedDesiredComparator();
    List<ResourceMetadata> removePoolList = new ArrayList<ResourceMetadata>();
    for (Map.Entry<String, TotalResourceMetadata> entry : resourceTotalMap.entrySet()) {
        List<ResourceMetadata> resourceMetadataList = new ArrayList<ResourceMetadata>(
                resourceSchedulablePoolMap.get(entry.getKey()));
        TotalResourceMetadata totalResourceMetadata = entry.getValue();
        Collections.sort(resourceMetadataList, guarantedDesiredComparator);
        while ((totalResourceMetadata.totalAllocated < totalResourceMetadata.totalAvailable)
                && !resourceMetadataList.isEmpty()) {
            removePoolList.clear();
            for (ResourceMetadata resourceMetadata : resourceMetadataList) {
                if (resourceMetadata.getExpectedUsed() == resourceMetadata.getGuaranteedUsedAndDesired()) {
                    removePoolList.add(resourceMetadata);
                    continue;
                }
                resourceMetadata.incrExpectedUsed();
                ++totalResourceMetadata.totalAllocated;
            }
            resourceMetadataList.removeAll(removePoolList);
        }
        LOG.info("After allocating min guaranteed and desired - " + "Resource type " + entry.getKey()
                + " totalAvailable=" + totalResourceMetadata.totalAvailable + ", totalAllocated="
                + totalResourceMetadata.totalAllocated);
    }

    // At this point, all pools have been allocated their guaranteed used and
    // desired resources.  If there are any more resources to allocate, give
    // resources to lowest allocated pool that hasn't reached desired
    // until all the resources are gone
    ExpectedUsedComparator expectedUsedComparator = new ExpectedUsedComparator();
    PriorityQueue<ResourceMetadata> minHeap = new PriorityQueue<ResourceMetadata>(100, expectedUsedComparator);
    for (Map.Entry<String, TotalResourceMetadata> entry : resourceTotalMap.entrySet()) {
        minHeap.addAll(resourceSchedulablePoolMap.get(entry.getKey()));
        TotalResourceMetadata totalResourceMetadata = entry.getValue();
        while ((totalResourceMetadata.totalAllocated < totalResourceMetadata.totalAvailable)
                && !minHeap.isEmpty()) {
            ResourceMetadata resourceMetadata = minHeap.remove();
            if (resourceMetadata.getExpectedUsed() == resourceMetadata.getDesiredAfterConstraints()) {
                continue;
            }
            resourceMetadata.incrExpectedUsed();
            ++totalResourceMetadata.totalAllocated;
            minHeap.add(resourceMetadata);
        }
        minHeap.clear();
    }

    // Now calculate the difference of the expected allocation and the
    // actual allocation to get the following metrics.  When calculating
    // the percent bad allocated divide by 2 because the difference double
    // counts a bad allocation
    // 1) total tasks difference between expected and actual allocation
    //    0 is totally fair, higher is less fair
    // 2) % of tasks incorrectly allocated
    //    0 is totally fair, higher is less fair
    // 3) average difference per pool
    //    0 is totally fair, higher is less fair
    // 4) standard deviation per pool
    //    0 is totally fair, higher is less fair
    for (PoolMetadata poolMetadata : poolMetadataList) {
        for (String resourceName : poolMetadata.getResourceMetadataKeys()) {
            ResourceMetadata resourceMetadata = poolMetadata.getResourceMetadata(resourceName);
            int diff = Math.abs(resourceMetadata.getExpectedUsed() - resourceMetadata.getCurrentlyUsed());
            LOG.info("Pool " + poolMetadata.getPoolName() + ", resourceName=" + resourceName + ", expectedUsed="
                    + resourceMetadata.getExpectedUsed() + ", currentUsed="
                    + resourceMetadata.getCurrentlyUsed() + ", maxAllowed=" + resourceMetadata.getMaxAllowed()
                    + ", desiredAfterConstraints=" + resourceMetadata.getDesiredAfterConstraints()
                    + ", guaranteedUsedAndDesired=" + resourceMetadata.getGuaranteedUsedAndDesired() + ", diff="
                    + diff);
            resourceTotalMap.get(resourceName).totalFairnessDifference += diff;
            resourceTotalMap.get(resourceName).totalFairnessDifferenceSquared += diff * diff;
        }
    }
    TotalResourceMetadata allResourceMetadata = new TotalResourceMetadata();
    allResourceMetadata.resourceTypeCount = resourceTotalMap.size();
    for (TotalResourceMetadata totalResourceMetadata : resourceTotalMap.values()) {
        allResourceMetadata.totalAvailable += totalResourceMetadata.totalAvailable;
        allResourceMetadata.totalFairnessDifference += totalResourceMetadata.totalFairnessDifference;
        allResourceMetadata.totalFairnessDifferenceSquared += totalResourceMetadata.totalFairnessDifferenceSquared;
    }
    resourceTotalMap.put("all", allResourceMetadata);
    StringBuilder metricsBuilder = new StringBuilder();
    for (Map.Entry<String, TotalResourceMetadata> entry : resourceTotalMap.entrySet()) {
        TotalResourceMetadata totalResourceMetadata = entry.getValue();
        totalResourceMetadata.percentUnfair = (totalResourceMetadata.totalAvailable == 0) ? 0
                : totalResourceMetadata.totalFairnessDifference * 100f / 2
                        / totalResourceMetadata.totalAvailable;
        totalResourceMetadata.stdDevUnfair = (float) Math
                .sqrt((double) totalResourceMetadata.totalFairnessDifferenceSquared / poolMetadataList.size()
                        / totalResourceMetadata.resourceTypeCount);
        totalResourceMetadata.averageUnfairPerPool = (float) totalResourceMetadata.totalFairnessDifference
                / poolMetadataList.size() / totalResourceMetadata.resourceTypeCount;

        metricsRecord.setMetric(FAIRNESS_DIFFERENCE_COUNT_PREFIX + entry.getKey(),
                totalResourceMetadata.totalFairnessDifference);
        metricsBuilder.append(FAIRNESS_DIFFERENCE_COUNT_PREFIX + entry.getKey() + "="
                + totalResourceMetadata.totalFairnessDifference + "\n");
        metricsRecord.setMetric(FAIRNESS_PERCENT_UNFAIR_PREFIX + entry.getKey(),
                totalResourceMetadata.percentUnfair);
        metricsBuilder.append(FAIRNESS_PERCENT_UNFAIR_PREFIX + entry.getKey() + "="
                + totalResourceMetadata.percentUnfair + "\n");
        metricsRecord.setMetric(FAIRNESS_DIFFERENCE_PER_POOL_PREFIX + entry.getKey(),
                totalResourceMetadata.averageUnfairPerPool);
        metricsBuilder.append(FAIRNESS_DIFFERENCE_PER_POOL_PREFIX + entry.getKey() + "="
                + totalResourceMetadata.averageUnfairPerPool + "\n");
        metricsRecord.setMetric(FAIRNESS_UNFAIR_STD_DEV_PERFIX + entry.getKey(),
                totalResourceMetadata.stdDevUnfair);
        metricsBuilder.append(FAIRNESS_UNFAIR_STD_DEV_PERFIX + entry.getKey() + "="
                + totalResourceMetadata.stdDevUnfair + "\n");
        metricsBuilder.append(
                TOTAL_RESOURCES_PREFIX + entry.getKey() + "=" + totalResourceMetadata.totalAvailable + "\n");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("calculateFairness took " + (System.currentTimeMillis() - startTime) + " millisecond(s).");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("\n" + metricsBuilder.toString());
    }
}
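
As a concrete illustration of the metrics above (assuming a single resource type): with two pools whose expected allocations are 6 and 4 tasks but whose actual allocations are 4 and 6, the total fairness difference is |6 - 4| + |4 - 6| = 4. Dividing by 2 avoids double counting, so 2 of the 10 available tasks are misallocated and the percent unfair is 4 * 100 / 2 / 10 = 20%. The average difference per pool is 4 / 2 = 2, and the standard deviation term is sqrt((2^2 + 2^2) / 2) = 2.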

From source file:org.apache.storm.daemon.logviewer.utils.DirectoryCleaner.java

/**
 * If the total size of files exceeds either the per-worker quota or the global quota,
 * Logviewer deletes the oldest inactive log files in a worker directory or in all worker dirs.
 * We use the parameter forPerDir to switch between the two deletion modes.
 *
 * @param dirs the list of directories to be scanned for deletion
 * @param quota the per-dir quota or the total quota for all directories
 * @param forPerDir if true, deletion happens for a single dir; otherwise, for all directories globally
 * @param activeDirs only for global deletion, we want to skip the active logs in activeDirs
 * @return number of files deleted
 */
public DeletionMeta deleteOldestWhileTooLarge(List<Path> dirs, long quota, boolean forPerDir,
        Set<Path> activeDirs) throws IOException {
    long totalSize = 0;
    for (Path dir : dirs) {
        try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
            for (Path path : stream) {
                totalSize += Files.size(path);
            }
        }
    }
    LOG.debug("totalSize: {} quota: {}", totalSize, quota);
    long toDeleteSize = totalSize - quota;
    if (toDeleteSize <= 0) {
        return DeletionMeta.EMPTY;
    }

    int deletedFiles = 0;
    long deletedSize = 0;
    // the oldest pq_size files in this directory will be placed in PQ, with the newest at the root
    PriorityQueue<Pair<Path, FileTime>> pq = new PriorityQueue<>(PQ_SIZE,
            Comparator.comparing((Pair<Path, FileTime> p) -> p.getRight()).reversed());
    int round = 0;
    final Set<Path> excluded = new HashSet<>();
    while (toDeleteSize > 0) {
        LOG.debug("To delete size is {}, start a new round of deletion, round: {}", toDeleteSize, round);
        for (Path dir : dirs) {
            try (DirectoryStream<Path> stream = getStreamForDirectory(dir)) {
                for (Path path : stream) {
                    if (!excluded.contains(path)) {
                        if (isFileEligibleToSkipDelete(forPerDir, activeDirs, dir, path)) {
                            excluded.add(path);
                        } else {
                            Pair<Path, FileTime> p = Pair.of(path, Files.getLastModifiedTime(path));
                            if (pq.size() < PQ_SIZE) {
                                pq.offer(p);
                            } else if (p.getRight().toMillis() < pq.peek().getRight().toMillis()) {
                                pq.poll();
                                pq.offer(p);
                            }
                        }
                    }
                }
            }
        }
        if (!pq.isEmpty()) {
            // need to reverse the order of elements in PQ to delete files from oldest to newest
            Stack<Pair<Path, FileTime>> stack = new Stack<>();
            while (!pq.isEmpty()) {
                stack.push(pq.poll());
            }
            while (!stack.isEmpty() && toDeleteSize > 0) {
                Pair<Path, FileTime> pair = stack.pop();
                Path file = pair.getLeft();
                final String canonicalPath = file.toAbsolutePath().normalize().toString();
                final long fileSize = Files.size(file);
                final long lastModified = pair.getRight().toMillis();
                //Original implementation doesn't actually check if delete succeeded or not.
                try {
                    Utils.forceDelete(file.toString());
                    LOG.info("Delete file: {}, size: {}, lastModified: {}", canonicalPath, fileSize,
                            lastModified);
                    toDeleteSize -= fileSize;
                    deletedSize += fileSize;
                    deletedFiles++;
                } catch (IOException e) {
                    excluded.add(file);
                }
            }
            pq.clear();
            round++;
            if (round >= MAX_ROUNDS) {
                if (forPerDir) {
                    LOG.warn(
                            "Reach the MAX_ROUNDS: {} during per-dir deletion, you may have too many files in "
                                    + "a single directory : {}, will delete the rest files in next interval.",
                            MAX_ROUNDS, dirs.get(0).toAbsolutePath().normalize());
                } else {
                    LOG.warn("Reach the MAX_ROUNDS: {} during global deletion, you may have too many files, "
                            + "will delete the rest files in next interval.", MAX_ROUNDS);
                }
                break;
            }
        } else {
            LOG.warn("No more files able to delete this round, but {} is over quota by {} MB",
                    forPerDir ? "this directory" : "root directory", toDeleteSize * 1e-6);
        }
    }
    return new DeletionMeta(deletedSize, deletedFiles);
}
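
The priority queue here keeps only the PQ_SIZE oldest files seen in a round: the comparator is reversed so the newest of the kept entries sits at the root and gets evicted whenever an older candidate shows up, and pq.clear() resets the queue between rounds. A minimal, self-contained sketch of that bounded keep-the-K-oldest pattern, with plain long timestamps standing in for the (Path, FileTime) pairs (k and the sample data are illustrative):

    import java.util.Comparator;
    import java.util.PriorityQueue;

    class KeepOldest {
        public static void main(String[] args) {
            final int k = 3;                       // stand-in for PQ_SIZE
            long[] timestamps = { 50, 10, 40, 20, 60, 30 };

            // Reversed order: the head is the NEWEST of the kept entries, i.e. the first to evict.
            PriorityQueue<Long> oldest = new PriorityQueue<>(k, Comparator.reverseOrder());
            for (long t : timestamps) {
                if (oldest.size() < k) {
                    oldest.offer(t);
                } else if (t < oldest.peek()) {    // candidate is older than the newest kept entry
                    oldest.poll();                 // drop the newest kept entry
                    oldest.offer(t);
                }
            }
            System.out.println(oldest);            // the 3 oldest timestamps (10, 20, 30), in heap order
            oldest.clear();                        // ready for the next round, as in the method above
        }
    }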