Example usage for java.util PriorityQueue poll

List of usage examples for java.util PriorityQueue poll

Introduction

On this page you can find example usages of java.util.PriorityQueue.poll().

Prototype

public E poll() 
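
According to the JDK documentation, poll() retrieves and removes the head of the queue, or returns null if the queue is empty. A minimal standalone sketch of that behavior (not taken from any of the examples below):

import java.util.PriorityQueue;

public class PollDemo {
    public static void main(String[] args) {
        PriorityQueue<Integer> queue = new PriorityQueue<>();
        queue.add(3);
        queue.add(1);
        queue.add(2);
        // Under natural ordering, poll() always removes the smallest remaining element.
        System.out.println(queue.poll()); // 1
        System.out.println(queue.poll()); // 2
        System.out.println(queue.poll()); // 3
        System.out.println(queue.poll()); // null, the queue is now empty
    }
}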

Usage

From source file: com.android.switchaccess.HuffmanTreeBuilder.java

/**
 *  Given a priority queue of HuffmanNodes and the number of nodes per parent, a new
 *  parent HuffmanNode is constructed. The probability of the parent HuffmanNode is the sum of
 *  the probabilities of all its children. If none of the children branches have a
 *  {@code ClearFocusNode}, one is included when the parent node is created.
 *
 *  @param nodes The total nodes that will be included in the Huffman tree.
 *  @param nodesPerParent The number of children the parent node will have.
 *  @param clearFocusNode The clear focus node to be included if none of the children branches
 *         have a {@code ClearFocusNode} included.
 *  @return The parent HuffmanNode created.
 */
private HuffmanNode createParentNode(PriorityQueue<HuffmanNode> nodes, int nodesPerParent,
        ClearFocusNode clearFocusNode) throws IllegalArgumentException {
    if (nodesPerParent < 2 || nodes.size() < nodesPerParent) {
        throw new IllegalArgumentException();
    }
    Double childrenProbability = 0.0;
    List<OptionScanNode> children = new ArrayList<>(nodesPerParent);
    Boolean clearFocusNodePresence = false;
    for (int i = 0; i < nodesPerParent; i++) {
        HuffmanNode huffmanNode = nodes.poll();
        childrenProbability += huffmanNode.getProbability();
        children.add(huffmanNode.getOptionScanNode());
        if ((i == nodesPerParent - 1) && huffmanNode.hasClearFocusNode()) {
            clearFocusNodePresence = true;
        }
    }
    if (!clearFocusNodePresence) {
        addClearFocusNodeToBranch(children, clearFocusNode);
    }
    List<OptionScanNode> otherChildren = children.subList(2, children.size());
    OptionScanNode parent = new OptionScanSelectionNode(children.get(0), children.get(1),
            otherChildren.toArray(new OptionScanNode[otherChildren.size()]));
    HuffmanNode parentHuffmanNode = new HuffmanNode(parent, childrenProbability);
    parentHuffmanNode.setClearFocusNodePresence();
    return parentHuffmanNode;
}
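
The method above consumes the nodesPerParent lowest-probability nodes from the queue to build one parent. For context, a generic Huffman-style loop repeatedly polls the cheapest nodes and re-offers the combined parent until a single root remains. The sketch below illustrates that pattern with a hypothetical Node class; it is not part of the SwitchAccess code:

import java.util.Comparator;
import java.util.PriorityQueue;

public class HuffmanSketch {
    static class Node {
        final double probability;
        final Node left, right;
        Node(double probability, Node left, Node right) {
            this.probability = probability;
            this.left = left;
            this.right = right;
        }
    }

    // Repeatedly poll the two least-probable nodes and re-offer their combined parent.
    static Node buildTree(PriorityQueue<Node> queue) {
        while (queue.size() > 1) {
            Node first = queue.poll();  // lowest probability
            Node second = queue.poll(); // next lowest
            queue.offer(new Node(first.probability + second.probability, first, second));
        }
        return queue.poll(); // the root, or null if the queue was empty
    }

    public static void main(String[] args) {
        PriorityQueue<Node> queue =
                new PriorityQueue<>(Comparator.comparingDouble((Node n) -> n.probability));
        queue.add(new Node(0.5, null, null));
        queue.add(new Node(0.3, null, null));
        queue.add(new Node(0.2, null, null));
        System.out.println(buildTree(queue).probability); // 1.0
    }
}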

From source file: MSUmpire.PSMDataStructure.ProtID.java

public float GetAbundanceByTopPepFrag(int toppep, int topfrag, float pepweight) {
    if (PeptideID.isEmpty()) {
        return 0;
    }
    PriorityQueue<Float> TopQueue = new PriorityQueue<>(PeptideID.size(), Collections.reverseOrder());
    for (PepIonID peptide : PeptideID.values()) {
        if (peptide.FilteringWeight > pepweight) {
            TopQueue.add(peptide.GetPepAbundanceByTopFragments(topfrag));
        }
    }
    float totalabundance = 0f;
    int num = Math.min(toppep, TopQueue.size());

    for (int i = 0; i < num; i++) {
        totalabundance += TopQueue.poll();
    }
    return totalabundance / num;
}

From source file: MSUmpire.PSMDataStructure.ProtID.java

public float GetAbundanceByMS1_TopN(int topN, float pepweight) {
    if (PeptideID.isEmpty()) {
        return 0;
    }
    PriorityQueue<Float> TopQueue = new PriorityQueue<>(PeptideID.size(), Collections.reverseOrder());
    for (PepIonID peptide : PeptideID.values()) {
        if (peptide.PeakHeight != null && peptide.FilteringWeight > pepweight) {
            TopQueue.add(peptide.PeakHeight[0]);
        }
    }

    float totalabundance = 0f;
    int num = Math.min(topN, TopQueue.size());

    for (int i = 0; i < num; i++) {
        totalabundance += TopQueue.poll();
    }
    return totalabundance / num;
}
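
Both ProtID methods above follow the same pattern: a PriorityQueue built with Collections.reverseOrder() behaves as a max-heap, so polling it N times yields the N largest values. A generic sketch of that top-N average, independent of the MSUmpire classes:

import java.util.Collections;
import java.util.List;
import java.util.PriorityQueue;

static float topNAverage(List<Float> values, int topN) {
    if (values.isEmpty()) {
        return 0;
    }
    // Collections.reverseOrder() flips the natural ordering, so poll() returns the largest value first.
    PriorityQueue<Float> queue = new PriorityQueue<>(values.size(), Collections.reverseOrder());
    queue.addAll(values);

    float total = 0f;
    int num = Math.min(topN, queue.size());
    for (int i = 0; i < num; i++) {
        total += queue.poll();
    }
    return total / num;
}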

From source file: com.linkedin.pinot.query.selection.SelectionQueriesTest.java

@Test
public void testSelectionIteration() {
    Operator filterOperator = new MatchEntireSegmentOperator(_indexSegment.getSegmentMetadata().getTotalDocs());
    final BReusableFilteredDocIdSetOperator docIdSetOperator = new BReusableFilteredDocIdSetOperator(
            filterOperator, _indexSegment.getSegmentMetadata().getTotalDocs(), 5000);
    final Map<String, DataSource> dataSourceMap = getDataSourceMap();

    final MProjectionOperator projectionOperator = new MProjectionOperator(dataSourceMap, docIdSetOperator);

    final Selection selection = getSelectionQuery();

    final MSelectionOrderByOperator selectionOperator = new MSelectionOrderByOperator(_indexSegment, selection,
            projectionOperator);

    final IntermediateResultsBlock block = (IntermediateResultsBlock) selectionOperator.nextBlock();
    final PriorityQueue<Serializable[]> pq = (PriorityQueue<Serializable[]>) block.getSelectionResult();
    final DataSchema dataSchema = block.getSelectionDataSchema();
    System.out.println(dataSchema);
    while (!pq.isEmpty()) {
        final Serializable[] row = pq.poll();
        System.out.println(SelectionOperatorUtils.getRowStringFromSerializable(row, dataSchema));
        Assert.assertEquals(row[0], "i");
    }
}
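
The test drains the result queue with poll() until it is empty; rows come out in the order defined by the queue's comparator (presumably the comparator Pinot builds for the selection's ORDER BY clause), not in insertion order. A small generic illustration of comparator-driven polling:

import java.util.Comparator;
import java.util.PriorityQueue;

public class DrainDemo {
    public static void main(String[] args) {
        PriorityQueue<String> rows = new PriorityQueue<>(Comparator.comparingInt(String::length));
        rows.add("ccc");
        rows.add("a");
        rows.add("bb");
        while (!rows.isEmpty()) {
            // poll() removes the current head according to the comparator: shortest string first here.
            System.out.println(rows.poll()); // prints a, then bb, then ccc
        }
    }
}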

From source file: de.tudarmstadt.lt.n2n.annotators.RelationAnnotator.java

protected List<Dependency> find_path_dijkstra(Token start, Token dest, Collection<Token> nodes,
        Map<Token, List<Dependency>> edges) throws IllegalStateException {
    List<Dependency> shortest_path = new ArrayList<Dependency>();

    final Map<Token, Integer> dist = new HashMap<Token, Integer>();
    final Map<Token, Dependency> prev = new HashMap<Token, Dependency>();
    for (Token t : nodes)
        dist.put(t, Integer.MAX_VALUE);
    dist.put(start, 0);

    PriorityQueue<Token> Q = new PriorityQueue<Token>(edges.size(), new Comparator<Token>() {
        @Override
        public int compare(Token o1, Token o2) {
            return dist.get(o1).compareTo(dist.get(o2));
        }
    });
    Q.addAll(nodes);

    while (!Q.isEmpty()) {
        Token u = Q.poll(); // initially source node
        if (u.equals(dest)) // stop if dest
            break;
        if (dist.get(u) == Integer.MAX_VALUE)
            throw new IllegalStateException(String.format(
                    "Could not find path from token '%s' to token '%s'. Perhaps start or dest is part of a preposition? (%s)",
                    start.getCoveredText(), dest.getCoveredText(),
                    DocumentMetaData.get(u.getCAS()).getDocumentId()));

        List<Dependency> connected_edges = edges.get(u);
        if (connected_edges == null)
            continue;

        for (Dependency d : connected_edges) {
            Token v = null;
            if (u.equals(d.getGovernor()))
                v = d.getDependent();
            else
                v = d.getGovernor();
            if (!Q.contains(v))
                continue;
            int alt = dist.get(u) + 1; // dist(u,v) = 1
            if (alt < dist.get(v)) {
                dist.put(v, alt);
                prev.put(v, d);
                Q.remove(v); // remove and re-offer v so the queue reorders it with the new distance
                Q.offer(v);
            }
        }
    }

    Token u = dest;
    Dependency e = prev.get(u);
    while (e != null) {
        shortest_path.add(0, e);
        if (u == e.getGovernor())
            u = e.getDependent();
        else
            u = e.getGovernor();
        e = prev.get(u);
    }

    return shortest_path;
}
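
java.util.PriorityQueue has no decrease-key operation, so the code above removes and re-offers a token whenever its tentative distance improves; remove(Object) scans the heap in O(n). A common alternative is lazy deletion: offer a fresh entry for every improvement and skip stale entries when they are polled. A generic sketch of that variant for an unweighted graph, using plain strings rather than the DKPro types above:

import java.util.AbstractMap;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;

// Unweighted shortest distances with lazy deletion instead of remove()/offer().
static Map<String, Integer> shortestDistances(Map<String, List<String>> adjacency, String source) {
    Map<String, Integer> dist = new HashMap<>();
    PriorityQueue<Map.Entry<String, Integer>> queue =
            new PriorityQueue<>(Map.Entry.<String, Integer>comparingByValue());
    dist.put(source, 0);
    queue.offer(new AbstractMap.SimpleEntry<>(source, 0));

    while (!queue.isEmpty()) {
        Map.Entry<String, Integer> head = queue.poll();
        String u = head.getKey();
        if (head.getValue() > dist.get(u)) {
            continue; // stale entry: a shorter distance was offered after this one
        }
        for (String v : adjacency.getOrDefault(u, Collections.emptyList())) {
            int alt = dist.get(u) + 1; // every edge has weight 1, as in the code above
            if (alt < dist.getOrDefault(v, Integer.MAX_VALUE)) {
                dist.put(v, alt);
                queue.offer(new AbstractMap.SimpleEntry<>(v, alt)); // any older entry for v becomes stale
            }
        }
    }
    return dist;
}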

From source file: io.anserini.rerank.lib.AxiomReranker.java

/**
 * Calculate the scores (weights) of each term that occurred in the reranking pool.
 * The process:
 * 1. For each query term, calculate its score for each term in the reranking pool. The score
 * is calculated as
 * <pre>
 * P(both occurs)*log{P(both occurs)/P(t1 occurs)/P(t2 occurs)}
 * + P(both not occurs)*log{P(both not occurs)/P(t1 not occurs)/P(t2 not occurs)}
 * + P(t1 occurs t2 not occurs)*log{P(t1 occurs t2 not occurs)/P(t1 occurs)/P(t2 not occurs)}
 * + P(t1 not occurs t2 occurs)*log{P(t1 not occurs t2 occurs)/P(t1 not occurs)/P(t2 occurs)}
 * </pre>
 * 2. For each query term, the scores of every other term in the reranking pool are stored in a
 * PriorityQueue; only the top {@code K} are kept.
 * 3. Add the scores of the same term together and pick the top {@code M} ones.
 *
 * @param termInvertedList A Map of <term -> Set<docId>> where the Set of docIds is where the term occurs
 * @param context An instance of RerankerContext
 * @return Map<String, Double> Top terms and their weight scores in a HashMap
 */
private Map<String, Double> computeTermScore(Map<String, Set<Integer>> termInvertedList,
        RerankerContext<T> context) throws IOException {
    class ScoreComparator implements Comparator<Pair<String, Double>> {
        public int compare(Pair<String, Double> a, Pair<String, Double> b) {
            int cmp = Double.compare(b.getRight(), a.getRight());
            if (cmp == 0) {
                return a.getLeft().compareToIgnoreCase(b.getLeft());
            } else {
                return cmp;
            }
        }
    }

    // get collection statistics so that we can get idf later on.
    IndexReader reader;
    if (this.externalIndexPath != null) {
        Path indexPath = Paths.get(this.externalIndexPath);
        if (!Files.exists(indexPath) || !Files.isDirectory(indexPath) || !Files.isReadable(indexPath)) {
            throw new IllegalArgumentException(
                    this.externalIndexPath + " does not exist or is not a directory.");
        }
        reader = DirectoryReader.open(FSDirectory.open(indexPath));
    } else {
        IndexSearcher searcher = context.getIndexSearcher();
        reader = searcher.getIndexReader();
    }
    final long docCount = reader.numDocs() == -1 ? reader.maxDoc() : reader.numDocs();

    //calculate the Mutual Information between term with each query term
    List<String> queryTerms = context.getQueryTokens();
    Map<String, Integer> queryTermsCounts = new HashMap<>();
    for (String qt : queryTerms) {
        queryTermsCounts.put(qt, queryTermsCounts.getOrDefault(qt, 0) + 1);
    }

    Set<Integer> allDocIds = new HashSet<>();
    for (Set<Integer> s : termInvertedList.values()) {
        allDocIds.addAll(s);
    }
    int docIdsCount = allDocIds.size();

    // Each priority queue corresponds to a query term: the queue stores every term in the
    // reranking pool together with its score with respect to that query term.
    List<PriorityQueue<Pair<String, Double>>> allTermScoresPQ = new ArrayList<>();
    for (Map.Entry<String, Integer> q : queryTermsCounts.entrySet()) {
        String queryTerm = q.getKey();
        long df = reader.docFreq(new Term(LuceneDocumentGenerator.FIELD_BODY, queryTerm));
        if (df == 0L) {
            continue;
        }
        float idf = (float) Math.log((1 + docCount) / df);
        int qtf = q.getValue();
        if (termInvertedList.containsKey(queryTerm)) {
            PriorityQueue<Pair<String, Double>> termScorePQ = new PriorityQueue<>(new ScoreComparator());
            double selfMI = computeMutualInformation(termInvertedList.get(queryTerm),
                    termInvertedList.get(queryTerm), docIdsCount);
            for (Map.Entry<String, Set<Integer>> termEntry : termInvertedList.entrySet()) {
                double score;
                if (termEntry.getKey().equals(queryTerm)) { // The mutual information to itself will always be 1
                    score = idf * qtf;
                } else {
                    double crossMI = computeMutualInformation(termInvertedList.get(queryTerm),
                            termEntry.getValue(), docIdsCount);
                    score = idf * beta * qtf * crossMI / selfMI;
                }
                termScorePQ.add(Pair.of(termEntry.getKey(), score));
            }
            allTermScoresPQ.add(termScorePQ);
        }
    }

    Map<String, Double> aggTermScores = new HashMap<>();
    for (PriorityQueue<Pair<String, Double>> termScores : allTermScoresPQ) {
        for (int i = 0; i < Math.min(termScores.size(), this.K); i++) {
            Pair<String, Double> termScore = termScores.poll();
            String term = termScore.getLeft();
            Double score = termScore.getRight();
            if (score - 0.0 > 1e-8) {
                aggTermScores.put(term, aggTermScores.getOrDefault(term, 0.0) + score);
            }
        }
    }
    PriorityQueue<Pair<String, Double>> termScoresPQ = new PriorityQueue<>(new ScoreComparator());
    for (Map.Entry<String, Double> termScore : aggTermScores.entrySet()) {
        termScoresPQ.add(Pair.of(termScore.getKey(), termScore.getValue() / queryTerms.size()));
    }
    Map<String, Double> resultTermScores = new HashMap<>();
    for (int i = 0; i < Math.min(termScoresPQ.size(), this.M); i++) {
        Pair<String, Double> termScore = termScoresPQ.poll();
        String term = termScore.getKey();
        double score = termScore.getValue();
        resultTermScores.put(term, score);
    }

    return resultTermScores;
}
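
computeMutualInformation is called above but not shown. Based only on the formula quoted in the Javadoc, a hedged sketch of how such a computation over two docId sets could look; the method name and signature here are assumptions, not Anserini's actual implementation:

import java.util.HashSet;
import java.util.Set;

// Mutual information of two terms, given the set of documents each occurs in and the size of the
// reranking pool. Zero-probability cases contribute nothing and are skipped.
static double mutualInformation(Set<Integer> docsWithT1, Set<Integer> docsWithT2, int docIdsCount) {
    Set<Integer> both = new HashSet<>(docsWithT1);
    both.retainAll(docsWithT2);

    double pT1 = (double) docsWithT1.size() / docIdsCount;
    double pT2 = (double) docsWithT2.size() / docIdsCount;
    double pBoth = (double) both.size() / docIdsCount;
    double pT1Only = pT1 - pBoth;   // t1 occurs, t2 does not
    double pT2Only = pT2 - pBoth;   // t2 occurs, t1 does not
    double pNeither = 1.0 - pT1 - pT2 + pBoth;

    double mi = 0.0;
    if (pBoth > 0) mi += pBoth * Math.log(pBoth / (pT1 * pT2));
    if (pNeither > 0) mi += pNeither * Math.log(pNeither / ((1 - pT1) * (1 - pT2)));
    if (pT1Only > 0) mi += pT1Only * Math.log(pT1Only / (pT1 * (1 - pT2)));
    if (pT2Only > 0) mi += pT2Only * Math.log(pT2Only / ((1 - pT1) * pT2));
    return mi;
}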

From source file: com.linkedin.pinot.routing.builder.KafkaLowLevelConsumerRoutingTableBuilder.java

@Override
public List<ServerToSegmentSetMap> computeRoutingTableFromExternalView(String tableName,
        ExternalView externalView, List<InstanceConfig> instanceConfigList) {
    // We build the routing table based on the external view here. What we want to do is to make sure that we uphold
    // the guarantees clients expect (no duplicate records, eventual consistency) while spreading the load as equally as
    // possible between the servers.
    //
    // Each Kafka partition contains a fraction of the data, so we need to make sure that we query all partitions.
    // Because in certain unlikely degenerate scenarios, we can consume overlapping data until segments are flushed (at
    // which point the overlapping data is discarded during the reconciliation process with the controller), we need to
    // ensure that the query that is sent has only one partition in CONSUMING state in order to avoid duplicate records.
    //
    // Because we also want to spread the load as equally as possible between servers, we use a weighted random
    // replica selection that favors picking replicas with fewer segments assigned to them, thus having an approximately
    // equal distribution of load between servers.
    //
    // For example, given three replicas with 1, 2 and 3 segments assigned to each, the replica with one segment should
    // have a weight of 2, which is the maximum segment count minus the segment count for that replica. Thus, each
    // replica other than the replica(s) with the maximum segment count should have a chance of getting a segment
    // assigned to it. This corresponds to alternative three below:
    //
    // Alternative 1 (weight is sum of segment counts - segment count in that replica):
    // (6 - 1) = 5 -> P(0.4166)
    // (6 - 2) = 4 -> P(0.3333)
    // (6 - 3) = 3 -> P(0.2500)
    //
    // Alternative 2 (weight is max of segment counts - segment count in that replica + 1):
    // (3 - 1) + 1 = 3 -> P(0.5000)
    // (3 - 2) + 1 = 2 -> P(0.3333)
    // (3 - 3) + 1 = 1 -> P(0.1666)
    //
    // Alternative 3 (weight is max of segment counts - segment count in that replica):
    // (3 - 1) = 2 -> P(0.6666)
    // (3 - 2) = 1 -> P(0.3333)
    // (3 - 3) = 0 -> P(0.0000)
    //
    // Of those three weighting alternatives, the third one has the smallest standard deviation of the number of
    // segments assigned per replica, so it corresponds to the weighting strategy used for segment assignment. Empirical
    // testing shows that for 20 segments and three replicas, the standard deviation of each alternative is respectively
    // 2.112, 1.496 and 0.853.
    //
    // This algorithm works as follows:
    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    // 2. Ensure that for each partition, we have at most one segment in CONSUMING state
    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.

    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    Map<String, SortedSet<SegmentName>> sortedSegmentsByKafkaPartition = new HashMap<String, SortedSet<SegmentName>>();
    for (String helixPartitionName : externalView.getPartitionSet()) {
        // Ignore segments that are not low level consumer segments
        if (!SegmentNameBuilder.Realtime.isRealtimeV2Name(helixPartitionName)) {
            continue;
        }

        final LLCSegmentName segmentName = new LLCSegmentName(helixPartitionName);
        String kafkaPartitionName = segmentName.getPartitionRange();
        SortedSet<SegmentName> segmentsForPartition = sortedSegmentsByKafkaPartition.get(kafkaPartitionName);

        // Create sorted set if necessary
        if (segmentsForPartition == null) {
            segmentsForPartition = new TreeSet<SegmentName>();

            sortedSegmentsByKafkaPartition.put(kafkaPartitionName, segmentsForPartition);
        }

        segmentsForPartition.add(segmentName);
    }

    // 2. Ensure that for each Kafka partition, we have at most one Helix partition (Pinot segment) in consuming state
    Map<String, SegmentName> allowedSegmentInConsumingStateByKafkaPartition = new HashMap<String, SegmentName>();
    for (String kafkaPartition : sortedSegmentsByKafkaPartition.keySet()) {
        SortedSet<SegmentName> sortedSegmentsForKafkaPartition = sortedSegmentsByKafkaPartition
                .get(kafkaPartition);
        SegmentName lastAllowedSegmentInConsumingState = null;

        for (SegmentName segmentName : sortedSegmentsForKafkaPartition) {
            Map<String, String> helixPartitionState = externalView.getStateMap(segmentName.getSegmentName());
            boolean allInConsumingState = true;
            int replicasInConsumingState = 0;

            // Only keep the segment if all replicas have it in CONSUMING state
            for (String externalViewState : helixPartitionState.values()) {
                // Ignore ERROR state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ERROR)) {
                    continue;
                }

                // Not all replicas have this segment in CONSUMING state, therefore don't consider this segment
                // assignable to CONSUMING replicas
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    allInConsumingState = false;
                    break;
                }

                // Otherwise count the replica as being in CONSUMING state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)) {
                    replicasInConsumingState++;
                }
            }

            // If all replicas have this segment in consuming state (and not all of them are in ERROR state), then pick this
            // segment to be the last allowed segment to be in CONSUMING state
            if (allInConsumingState && 0 < replicasInConsumingState) {
                lastAllowedSegmentInConsumingState = segmentName;
                break;
            }
        }

        if (lastAllowedSegmentInConsumingState != null) {
            allowedSegmentInConsumingStateByKafkaPartition.put(kafkaPartition,
                    lastAllowedSegmentInConsumingState);
        }
    }

    // 3. Sort all the segments to be used during assignment in ascending order of replicas

    // PriorityQueue throws IllegalArgumentException when given a size of zero
    int segmentCount = Math.max(externalView.getPartitionSet().size(), 1);
    PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueue = new PriorityQueue<Pair<String, Set<String>>>(
            segmentCount, new Comparator<Pair<String, Set<String>>>() {
                @Override
                public int compare(Pair<String, Set<String>> firstPair, Pair<String, Set<String>> secondPair) {
                    return Integer.compare(firstPair.getRight().size(), secondPair.getRight().size());
                }
            });
    RoutingTableInstancePruner instancePruner = new RoutingTableInstancePruner(instanceConfigList);

    for (Map.Entry<String, SortedSet<SegmentName>> entry : sortedSegmentsByKafkaPartition.entrySet()) {
        String kafkaPartition = entry.getKey();
        SortedSet<SegmentName> segmentNames = entry.getValue();

        // The segment allowed to be in CONSUMING state for this Kafka partition, or null if there is none
        SegmentName validConsumingSegment = allowedSegmentInConsumingStateByKafkaPartition.get(kafkaPartition);

        for (SegmentName segmentName : segmentNames) {
            Set<String> validReplicas = new HashSet<String>();
            Map<String, String> externalViewState = externalView.getStateMap(segmentName.getSegmentName());

            for (Map.Entry<String, String> instanceAndStateEntry : externalViewState.entrySet()) {
                String instance = instanceAndStateEntry.getKey();
                String state = instanceAndStateEntry.getValue();

                // Skip pruned replicas (shutting down or otherwise disabled)
                if (instancePruner.isInactive(instance)) {
                    continue;
                }

                // Replicas in ONLINE state are always allowed
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    validReplicas.add(instance);
                    continue;
                }

                // Replicas in CONSUMING state are only allowed on the last segment
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)
                        && segmentName.equals(validConsumingSegment)) {
                    validReplicas.add(instance);
                }
            }

            segmentToReplicaSetQueue
                    .add(new ImmutablePair<String, Set<String>>(segmentName.getSegmentName(), validReplicas));

            // If this segment is the segment allowed in CONSUMING state, don't process segments after it in that Kafka
            // partition
            if (segmentName.equals(validConsumingSegment)) {
                break;
            }
        }
    }

    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.
    List<ServerToSegmentSetMap> routingTables = new ArrayList<ServerToSegmentSetMap>(routingTableCount);
    for (int i = 0; i < routingTableCount; ++i) {
        Map<String, Set<String>> instanceToSegmentSetMap = new HashMap<String, Set<String>>();

        PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueueCopy = new PriorityQueue<Pair<String, Set<String>>>(
                segmentToReplicaSetQueue);

        while (!segmentToReplicaSetQueueCopy.isEmpty()) {
            Pair<String, Set<String>> segmentAndValidReplicaSet = segmentToReplicaSetQueueCopy.poll();
            String segment = segmentAndValidReplicaSet.getKey();
            Set<String> validReplicaSet = segmentAndValidReplicaSet.getValue();

            String replica = pickWeightedRandomReplica(validReplicaSet, instanceToSegmentSetMap);
            if (replica != null) {
                Set<String> segmentsForInstance = instanceToSegmentSetMap.get(replica);

                if (segmentsForInstance == null) {
                    segmentsForInstance = new HashSet<String>();
                    instanceToSegmentSetMap.put(replica, segmentsForInstance);
                }

                segmentsForInstance.add(segment);
            }
        }

        routingTables.add(new ServerToSegmentSetMap(instanceToSegmentSetMap));
    }

    return routingTables;
}
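
pickWeightedRandomReplica is likewise not shown here. Following the "alternative 3" weighting described in the comments above (weight = maximum segment count minus the segment count already assigned to a replica), a hypothetical sketch of that selection step; the name, signature and fallback behavior are assumptions, not Pinot's actual code:

import java.util.Map;
import java.util.Random;
import java.util.Set;

// Pick a replica with probability proportional to (maxSegmentCount - segmentCountForReplica).
static String pickWeightedRandomReplicaSketch(Set<String> validReplicas,
        Map<String, Set<String>> instanceToSegmentSetMap, Random random) {
    if (validReplicas.isEmpty()) {
        return null;
    }

    // Find the highest segment count currently assigned to any of the valid replicas.
    int maxCount = 0;
    for (String replica : validReplicas) {
        Set<String> segments = instanceToSegmentSetMap.get(replica);
        maxCount = Math.max(maxCount, segments == null ? 0 : segments.size());
    }

    // Total weight across replicas; replicas already at maxCount get weight 0.
    int totalWeight = 0;
    for (String replica : validReplicas) {
        Set<String> segments = instanceToSegmentSetMap.get(replica);
        totalWeight += maxCount - (segments == null ? 0 : segments.size());
    }
    if (totalWeight == 0) {
        // All replicas carry the same load; fall back to a uniform pick.
        return validReplicas.toArray(new String[0])[random.nextInt(validReplicas.size())];
    }

    // Walk the replicas, subtracting each weight until the random draw is exhausted.
    int pick = random.nextInt(totalWeight);
    for (String replica : validReplicas) {
        Set<String> segments = instanceToSegmentSetMap.get(replica);
        pick -= maxCount - (segments == null ? 0 : segments.size());
        if (pick < 0) {
            return replica;
        }
    }
    return null; // unreachable: the weights sum to totalWeight
}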

From source file: com.linkedin.pinot.query.selection.SelectionQueriesForMultiValueColumnTest.java

@Test
public void testSelectionIteration() throws Exception {
    setupSegment();
    Operator filterOperator = new MatchEntireSegmentOperator(_indexSegment.getSegmentMetadata().getTotalDocs());
    final BReusableFilteredDocIdSetOperator docIdSetOperator = new BReusableFilteredDocIdSetOperator(
            filterOperator, _indexSegment.getSegmentMetadata().getTotalDocs(), 5000);
    final Map<String, DataSource> dataSourceMap = getDataSourceMap();

    final MProjectionOperator projectionOperator = new MProjectionOperator(dataSourceMap, docIdSetOperator);

    final Selection selection = getSelectionQuery();
    final MSelectionOrderByOperator selectionOperator = new MSelectionOrderByOperator(_indexSegment, selection,
            projectionOperator);

    final IntermediateResultsBlock block = (IntermediateResultsBlock) selectionOperator.nextBlock();
    final PriorityQueue<Serializable[]> pq = (PriorityQueue<Serializable[]>) block.getSelectionResult();
    final DataSchema dataSchema = block.getSelectionDataSchema();
    System.out.println(dataSchema);
    Serializable[] selectionArray = new Serializable[] { 2147279568, 2147339302, 2147344388, 2147344388,
            2147393520, 2147393520, 2147434110, 2147434110, 2147434110, 2147434110 };
    int i = 0;
    while (!pq.isEmpty()) {
        final Serializable[] row = pq.poll();
        System.out.println(SelectionOperatorUtils.getRowStringFromSerializable(row, dataSchema));
        Assert.assertEquals(row[0], selectionArray[i++]);
    }
}

From source file: exploration.rendezvous.MultiPointRendezvousStrategy.java

/**
 * This method finds a point among connectionsToBase (that is in comm range of Base Station)
 * that is closest to origPoint. That is, it's an estimate of the shortest distance we need to
 * travel from origPoint to get into comm range of the Base station
 *
 * @param origPoint
 * @param connectionsToBase
 * @param ag
 * @return
 */
public static int findNearestPointInBaseCommRange(NearRVPoint origPoint, List<CommLink> connectionsToBase,
        RealAgent ag) {
    int pathsCalculated = 0;
    // only calculate the nearest base point for origPoint if we haven't already.
    if (origPoint.distanceToParent == Double.MAX_VALUE) {
        PriorityQueue<NearRVPoint> lineOfSightBasePoints = new PriorityQueue<NearRVPoint>();
        PriorityQueue<NearRVPoint> nonLOSBasePoints = new PriorityQueue<NearRVPoint>();
        for (CommLink baseLink : connectionsToBase) {
            NearRVPoint basePoint = new NearRVPoint(baseLink.getRemotePoint().x, baseLink.getRemotePoint().y);
            double approxPathLen = basePoint.distance(origPoint);
            basePoint.setDistanceToFrontier(approxPathLen);
            if (baseLink.numObstacles == 0) {
                lineOfSightBasePoints.add(basePoint);
            } else {
                nonLOSBasePoints.add(basePoint);
            }
        }

        LinkedList<NearRVPoint> pointsConnectedToBase = new LinkedList<NearRVPoint>();

        for (int j = 0; (j < 5) && !lineOfSightBasePoints.isEmpty(); j++) {
            pointsConnectedToBase.add(lineOfSightBasePoints.poll());
        }

        for (int j = 0; (j < 20) && !nonLOSBasePoints.isEmpty(); j++) {
            pointsConnectedToBase.add(nonLOSBasePoints.poll());
        }

        for (NearRVPoint basePoint : pointsConnectedToBase) {
            pathsCalculated++;
            Path pathToBase = ag.calculatePath(origPoint, basePoint, false, false);
            double pathLen = Double.MAX_VALUE;
            if (pathToBase.found) {
                pathLen = pathToBase.getLength();
            }
            if (pathLen < origPoint.distanceToParent) {
                origPoint.distanceToParent = pathLen;
                origPoint.parentPoint = basePoint;
            }
        }
    }
    return pathsCalculated;
}
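
Both queues here rely on NearRVPoint's natural ordering, which presumably compares points by the frontier distance set just before insertion, so poll() returns the closest candidate first; the loops then take at most 5 line-of-sight and 20 non-line-of-sight candidates before computing exact paths.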

From source file: org.onebusaway.uk.network_rail.gtfs_realtime.graph.PositionBerthToStanoxGraphMain.java

private Map<Location, Integer> getNearbyNodesWithLocation(Map<RawBerthNode, Location> nodesToLocations,
        RawBerthNode source, int minCount) {

    Map<Location, Integer> locationsAndTime = new HashMap<Location, Integer>();

    PriorityQueue<OrderedNode> queue = new PriorityQueue<OrderedNode>();
    queue.add(new OrderedNode(source, 0));

    Set<RawBerthNode> visited = new HashSet<RawBerthNode>();
    visited.add(source);

    Map<RawBerthNode, Integer> minTimeToSource = new HashMap<RawBerthNode, Integer>();

    while (!queue.isEmpty()) {
        OrderedNode orderedNode = queue.poll();
        RawBerthNode node = orderedNode.node;
        if (minTimeToSource.containsKey(node)) {
            continue;
        }
        int time = orderedNode.value;
        minTimeToSource.put(node, time);
        if (nodesToLocations.containsKey(node)) {
            locationsAndTime.put(nodesToLocations.get(node), time);
            if (locationsAndTime.size() >= minCount) {
                return locationsAndTime;
            }
        }

        for (Edge edge : node.getEdges()) {
            RawBerthNode to = edge.getTo();
            int proposedTime = edge.getAverageDuration() + time;
            if (!minTimeToSource.containsKey(to)) {
                queue.add(new OrderedNode(to, proposedTime));
            }
        }
    }

    return locationsAndTime;
}
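
Unlike the remove()/offer() approach in the RelationAnnotator example above, this traversal never updates entries already in the queue: the same node may be offered several times with different values, and any node whose minimum time is already recorded in minTimeToSource is skipped when it is polled. This is the lazy-deletion variant of Dijkstra's algorithm.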