Example usage for java.util PriorityQueue poll

List of usage examples for java.util PriorityQueue poll

Introduction

On this page you can find example usages of java.util.PriorityQueue.poll().

Prototype

public E poll() 
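
Quick example

Below is a minimal sketch (not taken from the sources that follow) showing the core poll() contract: it removes and returns the head of the queue, which is the least element according to natural ordering or to the comparator supplied at construction, and it returns null instead of throwing when the queue is empty.

import java.util.PriorityQueue;

public class PollDemo {
    public static void main(String[] args) {
        PriorityQueue<Integer> pq = new PriorityQueue<>();
        pq.add(3);
        pq.add(1);
        pq.add(2);
        // Draining via poll() yields the elements in ascending order: 1, 2, 3.
        Integer head;
        while ((head = pq.poll()) != null) {
            System.out.println(head);
        }
        // On an empty queue, poll() returns null rather than throwing.
        System.out.println(pq.poll()); // prints "null"
    }
}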

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.TestFileJournalManager.java

/**
 * Find out how many transactions we can read from a
 * FileJournalManager, starting at a given transaction ID.
 *
 * @param jm              The journal manager
 * @param fromTxId        Transaction ID to start at
 * @param inProgressOk    Should we consider edit logs that are not finalized?
 * @param abortOnGap      Should we stop counting at the first gap in transaction IDs?
 * @return                The number of transactions
 * @throws IOException
 */
static long getNumberOfTransactions(FileJournalManager jm, long fromTxId, boolean inProgressOk,
        boolean abortOnGap) throws IOException {
    long numTransactions = 0, txId = fromTxId;
    final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64,
            JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
    jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
    EditLogInputStream elis = null;
    try {
        while ((elis = allStreams.poll()) != null) {
            try {
                elis.skipUntil(txId);
                while (true) {
                    FSEditLogOp op = elis.readOp();
                    if (op == null) {
                        break;
                    }
                    if (abortOnGap && (op.getTransactionId() != txId)) {
                        LOG.info("getNumberOfTransactions: detected gap at txId " + fromTxId);
                        return numTransactions;
                    }
                    txId = op.getTransactionId() + 1;
                    numTransactions++;
                }
            } finally {
                IOUtils.cleanup(LOG, elis);
            }
        }
    } finally {
        IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
    }
    return numTransactions;
}

From source file:org.pepstock.jem.ant.tasks.utilities.SortTask.java

/**
 * This merges a bunch of temporary flat files.
 *
 * @param files      the temporary flat files to merge
 * @param fileOutput the output stream for the merged, sorted file
 * @param cmp        the comparator used to order the strings
 * @param cs         the character set to use to load the strings
 * @return The number of lines sorted.
 * @throws IOException
 */
public static int mergeSortedFiles(List<File> files, FileOutputStream fileOutput, final Comparator<String> cmp,
        Charset cs) throws IOException {
    PriorityQueue<BinaryFileBuffer> pq = new PriorityQueue<BinaryFileBuffer>(11,
            new Comparator<BinaryFileBuffer>() {
                public int compare(BinaryFileBuffer i, BinaryFileBuffer j) {
                    return cmp.compare(i.peek(), j.peek());
                }
            });
    for (File f : files) {
        BinaryFileBuffer bfb = new BinaryFileBuffer(f, cs);
        pq.add(bfb);
    }
    BufferedWriter fbw = new BufferedWriter(new OutputStreamWriter(fileOutput, cs));
    int rowcounter = 0;
    try {
        while (!pq.isEmpty()) {
            BinaryFileBuffer bfb = pq.poll();
            String r = bfb.pop();
            fbw.write(r);
            fbw.newLine();
            ++rowcounter;
            if (bfb.empty()) {
                bfb.getBufferReader().close();
                // the temporary file is exhausted and no longer needed
                boolean isDeleted = bfb.getOriginalfile().delete();
                if (!isDeleted) {
                    // deletion failure is non-fatal; the temporary file is simply left behind
                }
            } else {
                // add it back
                pq.add(bfb);
            }
        }
    } finally {
        fbw.flush();
        fbw.close();
        for (BinaryFileBuffer bfb : pq) {
            bfb.close();
        }
    }
    return rowcounter;
}
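
The poll-then-re-add loop above is the classic k-way merge: poll the buffer whose head line sorts first, emit one line, then re-add the buffer if it still has data. A minimal in-memory sketch of the same pattern (using iterators over sorted lists rather than the BinaryFileBuffer API):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMerge {
    // One entry per input: the current head value plus the rest of the input.
    static class Head {
        String value;
        final Iterator<String> rest;
        Head(Iterator<String> it) { this.rest = it; this.value = it.next(); }
    }

    public static List<String> merge(List<List<String>> sortedLists) {
        PriorityQueue<Head> pq = new PriorityQueue<>(Comparator.comparing((Head h) -> h.value));
        for (List<String> list : sortedLists) {
            if (!list.isEmpty()) {
                pq.add(new Head(list.iterator()));
            }
        }
        List<String> out = new ArrayList<>();
        while (!pq.isEmpty()) {
            Head smallest = pq.poll();     // input with the smallest head line
            out.add(smallest.value);
            if (smallest.rest.hasNext()) { // re-add it with its next line
                smallest.value = smallest.rest.next();
                pq.add(smallest);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        System.out.println(merge(Arrays.asList(
                Arrays.asList("a", "d"), Arrays.asList("b", "c"), Arrays.asList("e"))));
        // prints [a, b, c, d, e]
    }
}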

From source file:com.linkedin.pinot.query.selection.SelectionQueriesSVTest.java

@Override
void verifySelectionOrderByResult(Collection<Serializable[]> selectionOrderByResult) {
    Assert.assertEquals(selectionOrderByResult.size(), 10);

    // Polling is destructive, so cache the rows polled from the priority queue and add them back at the end.
    List<Serializable[]> list = new ArrayList<>();
    PriorityQueue<Serializable[]> priorityQueue = (PriorityQueue<Serializable[]>) selectionOrderByResult;
    for (int i = 0; i < 10; i++) {
        Serializable[] row = priorityQueue.poll();
        for (int j = 0; j < 4; j++) {
            Assert.assertEquals(row[j], EXPECTED_SELECTION_ORDER_BY_RESULT[i][j]);
        }
        list.add(row);
    }

    // Add the rows back into the priority queue.
    priorityQueue.addAll(list);
}
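
Both Pinot tests rely on the fact that poll() is destructive: it is the only way to read a PriorityQueue in priority order (iterating one directly visits elements in heap order, not sorted order), but every polled row must be buffered and added back if the queue is to survive the check. A minimal sketch of that inspect-and-restore pattern:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.PriorityQueue;

public class InspectWithoutLosing {
    public static void main(String[] args) {
        PriorityQueue<Integer> pq = new PriorityQueue<>(Arrays.asList(5, 1, 3));
        List<Integer> drained = new ArrayList<>();
        Integer head;
        while ((head = pq.poll()) != null) { // consume in priority order...
            drained.add(head);               // ...but remember every element
        }
        System.out.println(drained);         // [1, 3, 5]
        pq.addAll(drained);                  // restore the queue afterwards
        System.out.println(pq.size());       // 3
    }
}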

From source file:org.apache.hadoop.hdfs.server.namenode.JournalSet.java

public static void chainAndMakeRedundantStreams(Collection<EditLogInputStream> outStreams,
        PriorityQueue<EditLogInputStream> allStreams, long fromTxId) {
    // We want to group together all the streams that start on the same start
    // transaction ID.  To do this, we maintain an accumulator (acc) of all
    // the streams we've seen at a given start transaction ID.  When we see a
    // higher start transaction ID, we select a stream from the accumulator and
    // clear it.  Then we begin accumulating streams with the new, higher start
    // transaction ID.
    LinkedList<EditLogInputStream> acc = new LinkedList<EditLogInputStream>();
    EditLogInputStream elis;
    while ((elis = allStreams.poll()) != null) {
        if (acc.isEmpty()) {
            acc.add(elis);
        } else {
            EditLogInputStream accFirst = acc.get(0);
            long accFirstTxId = accFirst.getFirstTxId();
            if (accFirstTxId == elis.getFirstTxId()) {
                // if we have a finalized log segment available at this txid,
                // we should throw out all in-progress segments at this txid
                if (elis.isInProgress()) {
                    if (accFirst.isInProgress()) {
                        acc.add(elis);
                    }
                } else {
                    if (accFirst.isInProgress()) {
                        acc.clear();
                    }
                    acc.add(elis);
                }
            } else if (accFirstTxId < elis.getFirstTxId()) {
                // try to read from the local logs first since the throughput should
                // be higher
                Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR);
                outStreams.add(new RedundantEditLogInputStream(acc, fromTxId));
                acc.clear();
                acc.add(elis);
            } else if (accFirstTxId > elis.getFirstTxId()) {
                throw new RuntimeException("sorted set invariants violated!  " + "Got stream with first txid "
                        + elis.getFirstTxId() + ", but the last firstTxId was " + accFirstTxId);
            }
        }
    }
    if (!acc.isEmpty()) {
        Collections.sort(acc, LOCAL_LOG_PREFERENCE_COMPARATOR);
        outStreams.add(new RedundantEditLogInputStream(acc, fromTxId));
        acc.clear();
    }
}
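
Because poll() returns streams in ascending firstTxId order, streams that share a start transaction ID come off the queue as one contiguous run, which is what lets the accumulator collect a whole group before flushing it. A minimal sketch of grouping equal keys while draining:

import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

public class GroupWhileDraining {
    public static void main(String[] args) {
        PriorityQueue<Long> queue = new PriorityQueue<>();
        for (long txId : new long[] { 10, 1, 10, 5, 1 }) {
            queue.add(txId);
        }
        List<Long> acc = new ArrayList<>();
        Long next;
        while ((next = queue.poll()) != null) {
            if (!acc.isEmpty() && !acc.get(0).equals(next)) {
                System.out.println("group " + acc); // a run of equal keys ended
                acc.clear();
            }
            acc.add(next);
        }
        if (!acc.isEmpty()) {
            System.out.println("group " + acc); // flush the last run
        }
        // prints: group [1, 1], then group [5], then group [10, 10]
    }
}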

From source file:com.linkedin.pinot.query.selection.SelectionQueriesMVTest.java

@Override
void verifySelectionOrderByResult(Collection<Serializable[]> selectionOrderByResult) {
    Assert.assertEquals(selectionOrderByResult.size(), 10);

    // Polling is destructive, so cache the rows polled from the priority queue and add them back at the end.
    PriorityQueue<Serializable[]> priorityQueue = (PriorityQueue<Serializable[]>) selectionOrderByResult;
    List<Serializable[]> list = new ArrayList<>();
    for (int i = 0; i < 10; i++) {
        Serializable[] row = priorityQueue.poll();
        Assert.assertEquals(row[0], EXPECTED_SELECTION_ORDER_BY_RESULT[i][0]);
        Assert.assertEquals(row[1], EXPECTED_SELECTION_ORDER_BY_RESULT[i][1]);
        // The third element is a multi-value column with one value inside.
        Assert.assertEquals(((int[]) row[2])[0], EXPECTED_SELECTION_ORDER_BY_RESULT[i][2]);
        Assert.assertEquals(row[3], EXPECTED_SELECTION_ORDER_BY_RESULT[i][3]);
        list.add(row);
    }

    // Add the rows back into the priority queue.
    priorityQueue.addAll(list);
}

From source file:org.apache.hama.util.Files.java

/**
 * Merges k sorted sequence files, each of size n, using a k·n·log(k) merge algorithm.
 *
 * @param fs         the filesystem
 * @param inputPath  the input directory containing the sorted sequence files to be merged
 * @param outputPath the path to the merged, sorted sequence file
 * @param keyClazz   the class of the keys
 * @param valClazz   the class of the values
 */
public static <KEY extends WritableComparable<? super KEY>, VALUE extends Writable> void merge(FileSystem fs,
        Path inputPath, Path outputPath, Class<KEY> keyClazz, Class<VALUE> valClazz) {

    Configuration conf = fs.getConf();

    PriorityQueue<KVPair<KEY, VALUE>> pq = new PriorityQueue<KVPair<KEY, VALUE>>();

    //Map from KeyValuePair to the split number to which it belongs.
    HashMap<KVPair<KEY, VALUE>, Integer> keySplitMap = new HashMap<KVPair<KEY, VALUE>, Integer>();

    FileStatus[] files;
    SequenceFile.Writer writer = null;
    SequenceFile.Reader[] reader = null;
    try {
        files = fs.listStatus(inputPath);
        reader = new SequenceFile.Reader[files.length];

        for (int i = 0; i < files.length; i++) {
            if (files[i].getLen() > 0) {
                reader[i] = new SequenceFile.Reader(fs, files[i].getPath(), conf);
                KEY key = ReflectionUtils.newInstance(keyClazz, new Object[0]);
                VALUE val = ReflectionUtils.newInstance(valClazz, new Object[0]);

                reader[i].next(key, val);
                KVPair<KEY, VALUE> kv = new KVPair<KEY, VALUE>(key, val);
                pq.add(kv);
                keySplitMap.put(kv, i);
            }
        }

        writer = SequenceFile.createWriter(fs, conf, outputPath, keyClazz, valClazz);

        while (!pq.isEmpty()) {
            KVPair<KEY, VALUE> smallestKey = pq.poll();
            writer.append(smallestKey.getKey(), smallestKey.getValue());
            Integer index = keySplitMap.get(smallestKey);
            keySplitMap.remove(smallestKey);

            KEY key = ReflectionUtils.newInstance(keyClazz, new Object[0]);
            VALUE val = ReflectionUtils.newInstance(valClazz, new Object[0]);

            if (reader[index].next(key, val)) {
                KVPair<KEY, VALUE> kv = new KVPair<KEY, VALUE>(key, val);
                pq.add(kv);
                keySplitMap.put(kv, index);
            }
        }

    } catch (IOException e) {
        LOG.error("Couldn't get status, exiting ...", e);
        System.exit(-1);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) {
                LOG.error("Cannot close writer to sorted seq. file. Exiting ...", e);
                System.exit(-1);
            }
        }
    }
}
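
Unlike the earlier examples, this queue is built with the no-argument constructor, so ordering comes from KVPair's Comparable implementation, and keySplitMap records which reader each polled entry came from so that reader can be refilled. A minimal sketch of the same refill-after-poll idea, using a hypothetical Entry class that carries its source index instead of a lookup map:

import java.util.PriorityQueue;

public class RefillAfterPoll {
    // Comparable element type, so the no-argument constructor works.
    static class Entry implements Comparable<Entry> {
        final int key;
        final int source; // which input this entry came from
        Entry(int key, int source) { this.key = key; this.source = source; }
        @Override
        public int compareTo(Entry o) { return Integer.compare(key, o.key); }
    }

    public static void main(String[] args) {
        int[][] inputs = { { 1, 4, 7 }, { 2, 5 }, { 3, 6 } }; // sorted "files"
        int[] cursor = new int[inputs.length];
        PriorityQueue<Entry> pq = new PriorityQueue<>();
        for (int i = 0; i < inputs.length; i++) {
            pq.add(new Entry(inputs[i][cursor[i]++], i)); // first record of each input
        }
        while (!pq.isEmpty()) {
            Entry smallest = pq.poll();
            System.out.print(smallest.key + " ");
            int src = smallest.source; // refill from the input just consumed
            if (cursor[src] < inputs[src].length) {
                pq.add(new Entry(inputs[src][cursor[src]++], src));
            }
        }
        // prints: 1 2 3 4 5 6 7
    }
}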

From source file:org.petalslink.dsb.federation.core.server.ServiceManagerImpl.java

/**
 * {@inheritDoc}
 */
public void start() {

    // first start the internal services...
    if (logger.isDebugEnabled()) {
        logger.debug("Starting internal services");
    }
    PriorityQueue<Service> queue = new PriorityQueue<Service>(this.services.size(), new Comparator());
    for (String name : this.services.keySet()) {
        if ((this.services.get(name) != null) && (this.services.get(name).getType() == Service.TYPE.INTERNAL)) {
            queue.add(this.services.get(name));
        }
    }
    Service s = null;
    while ((s = queue.poll()) != null) {
        this.start(s.getName());
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Starting *bound services");
    }
    queue = new PriorityQueue<Service>(this.services.size(), new Comparator());
    for (String name : this.services.keySet()) {
        if ((this.services.get(name) != null) && (this.services.get(name).getType() != Service.TYPE.INTERNAL)) {
            queue.add(this.services.get(name));
        }
    }
    while ((s = queue.poll()) != null) {
        this.start(s.getName());
    }
}
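
The drain idiom here, while ((s = queue.poll()) != null), processes services strictly in the order defined by the comparator, no matter how they were inserted. A minimal sketch with a hypothetical priority-carrying service type (not the petalslink Service interface):

import java.util.Comparator;
import java.util.PriorityQueue;

public class StartByPriority {
    // Hypothetical service with an explicit startup priority; lower starts earlier.
    static class Svc {
        final String name;
        final int priority;
        Svc(String name, int priority) { this.name = name; this.priority = priority; }
    }

    public static void main(String[] args) {
        PriorityQueue<Svc> queue =
                new PriorityQueue<>(3, Comparator.comparingInt((Svc s) -> s.priority));
        queue.add(new Svc("monitoring", 2)); // insertion order is irrelevant
        queue.add(new Svc("registry", 0));
        queue.add(new Svc("transport", 1));
        Svc s;
        while ((s = queue.poll()) != null) {
            System.out.println("starting " + s.name); // registry, transport, monitoring
        }
    }
}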

From source file:android.support.v7.graphics.ColorCutQuantizer.java

/**
 * Iterate through the {@link java.util.Queue}, popping
 * {@link ColorCutQuantizer.Vbox} objects from the queue
 * and splitting them. Once split, the new box and the remaining box are offered back to the
 * queue.
 *
 * @param queue {@link java.util.PriorityQueue} to poll for boxes
 * @param maxSize Maximum number of boxes to split
 */
private void splitBoxes(final PriorityQueue<Vbox> queue, final int maxSize) {
    while (queue.size() < maxSize) {
        final Vbox vbox = queue.poll();

        if (vbox != null && vbox.canSplit()) {
            // First split the box, and offer the result
            queue.offer(vbox.splitBox());

            if (LOG_TIMINGS) {
                mTimingLogger.addSplit("Box split");
            }
            // Then offer the box back
            queue.offer(vbox);
        } else {
            if (LOG_TIMINGS) {
                mTimingLogger.addSplit("All boxes split");
            }
            // If we get here then there are no more boxes to split, so return
            return;
        }
    }
}
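
The distinctive move here is poll-split-offer: take the highest-priority box out, split it, and offer both pieces back so they compete in the next round. A minimal sketch with integer intervals standing in for Vbox (poll the widest interval, split it in half, repeat until the target count is reached):

import java.util.Comparator;
import java.util.PriorityQueue;

public class SplitLargestFirst {
    public static void main(String[] args) {
        final int maxSize = 4;
        // Max-queue on interval width, mirroring the "largest box first" order.
        PriorityQueue<int[]> queue =
                new PriorityQueue<>(Comparator.comparingInt((int[] r) -> r[1] - r[0]).reversed());
        queue.offer(new int[] { 0, 16 });
        while (queue.size() < maxSize) {
            int[] widest = queue.poll();
            if (widest == null || widest[1] - widest[0] < 2) {
                break; // nothing left that can be split
            }
            int mid = (widest[0] + widest[1]) / 2;
            queue.offer(new int[] { widest[0], mid }); // first half
            queue.offer(new int[] { mid, widest[1] }); // remaining half, offered back
        }
        for (int[] r : queue) { // note: iteration order is unspecified
            System.out.println("[" + r[0] + ", " + r[1] + ")");
        }
    }
}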

From source file:mulavito.algorithms.shortestpath.ksp.Eppstein.java

@Override
protected List<List<E>> getShortestPathsIntern(V source, V target, int k) {
    PriorityQueue<WeightedPath> prioQ = new PriorityQueue<WeightedPath>();
    List<List<E>> found_paths = new LinkedList<List<E>>();

    Transformer<E, Double> delta = prepareTransformations(target);

    // Initialize with start vertex.
    prioQ.add(new WeightedPath(source));

    while (!prioQ.isEmpty() && found_paths.size() < k) {
        WeightedPath curPath = prioQ.poll(); // get & remove next shortest
        V curV = curPath.getLast();

        if (curV.equals(target)) {
            found_paths.add(curPath.getPath());
            continue;
        }

        // Create new paths for every expanded vertex ...
        for (V nextV : graph.getSuccessors(curV)) {
            if (curPath.contains(nextV))
                continue; // Prevent looping!

            // ... and every possible edge.
            for (E e : graph.findEdgeSet(curV, nextV)) {
                if (Double.isInfinite(delta.transform(e)))
                    continue; // Skip unreachable vertices.

                WeightedPath tmpPath = new WeightedPath(curPath); // clone
                tmpPath.addHop(e, delta.transform(e), nextV);

                prioQ.add(tmpPath);
            }
        }
    }

    return found_paths;
}
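
This is best-first search: the queue always surfaces the cheapest partial path, so the first k paths that reach the target are the k shortest. A stripped-down sketch of the same idea for a single shortest path on a tiny hard-coded graph (hypothetical Frontier class, not the WeightedPath type above):

import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;

public class BestFirstSearch {
    // A frontier entry: the node reached and the accumulated path cost.
    static class Frontier {
        final String node;
        final double cost;
        Frontier(String node, double cost) { this.node = node; this.cost = cost; }
    }

    public static void main(String[] args) {
        // Tiny weighted digraph as adjacency maps: node -> (neighbor -> weight).
        Map<String, Map<String, Double>> graph = new HashMap<>();
        graph.put("s", Map.of("a", 4.0, "b", 1.0));
        graph.put("b", Map.of("a", 2.0));
        graph.put("a", Map.of("t", 1.0));
        graph.put("t", Map.of());

        PriorityQueue<Frontier> prioQ =
                new PriorityQueue<>(Comparator.comparingDouble((Frontier f) -> f.cost));
        prioQ.add(new Frontier("s", 0.0));
        Set<String> expanded = new HashSet<>();
        Frontier cur;
        // poll() hands back the cheapest frontier entry first, so the first
        // time the target comes off the queue, its cost is optimal.
        while ((cur = prioQ.poll()) != null) {
            if (!expanded.add(cur.node)) {
                continue; // already reached via a cheaper path
            }
            if (cur.node.equals("t")) {
                System.out.println("shortest cost to t = " + cur.cost); // 4.0
                return;
            }
            for (Map.Entry<String, Double> e : graph.get(cur.node).entrySet()) {
                prioQ.add(new Frontier(e.getKey(), cur.cost + e.getValue()));
            }
        }
    }
}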

From source file:org.springframework.cloud.stream.app.pose.estimation.processor.PoseEstimationTensorflowOutputConverter.java

/**
 * From all possible limb candidates for a given limb type, select those that maximize the total PAF score.
 * The algorithm starts from the limb candidates with the highest PAF score. It also tracks the parts
 * already assigned to final limbs and rejects limb candidates whose parts are already assigned.
 *
 * @param limbType Limb Type for which the final limbs are selected.
 * @param limbCandidatesQueue possible Limb candidates, sorted by total PAF score in descending order.
 * @return Returns the final list of Limbs for a given {@link org.springframework.cloud.stream.app.pose.estimation.model.Model.LimbType}
 */
private List<Limb> selectFinalLimbs(Model.LimbType limbType, PriorityQueue<Limb> limbCandidatesQueue) {

    List<Limb> finalLimbs = new ArrayList<>();

    // Parts assigned to final limbs.
    Set<Part> assignedParts = new HashSet<>();

    // Start from the candidates with higher PAF score and progress in descending order
    while (!limbCandidatesQueue.isEmpty()) {

        Limb limbCandidate = limbCandidatesQueue.poll();

        Assert.isTrue(limbType == limbCandidate.getLimbType(), "Incorrect Limb Type!");

        // Ignore candidate limbs whose parts were already assigned to a final Limb in an earlier iteration.
        if (!assignedParts.contains(limbCandidate.getFromPart())
                && !assignedParts.contains(limbCandidate.getToPart())) {

            // Make the candidate final.
            finalLimbs.add(limbCandidate);

            // Mark limb's parts as assigned.
            assignedParts.add(limbCandidate.getFromPart());
            assignedParts.add(limbCandidate.getToPart());
        }
    }

    return finalLimbs;
}
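
The queue here is ordered descending by PAF score, so poll() always yields the current best remaining candidate and the greedy accept/reject decision can be made in one pass. A minimal sketch of greedy selection from a max-first queue (hypothetical string "parts", not the Model types):

import java.util.Comparator;
import java.util.HashSet;
import java.util.PriorityQueue;
import java.util.Set;

public class GreedyByScore {
    public static void main(String[] args) {
        // Candidates are {fromPart, toPart, score}; reversing the comparator
        // makes poll() return the highest-scored candidate first.
        PriorityQueue<Object[]> candidates = new PriorityQueue<>(
                Comparator.comparingDouble((Object[] c) -> (Double) c[2]).reversed());
        candidates.add(new Object[] { "A", "B", 0.9 });
        candidates.add(new Object[] { "B", "C", 0.8 });
        candidates.add(new Object[] { "C", "D", 0.7 });

        Set<String> assigned = new HashSet<>();
        Object[] best;
        while ((best = candidates.poll()) != null) {
            // Accept a candidate only if neither of its parts is taken yet.
            if (!assigned.contains((String) best[0]) && !assigned.contains((String) best[1])) {
                assigned.add((String) best[0]);
                assigned.add((String) best[1]);
                System.out.println(best[0] + "-" + best[1] + " (" + best[2] + ")");
            }
        }
        // prints: A-B (0.9) then C-D (0.7); B-C is rejected because B is taken.
    }
}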