Example usage for the java.util.PriorityQueue PriorityQueue() constructor

Introduction

On this page you can find example usage for the java.util.PriorityQueue PriorityQueue() constructor.

Prototype

public PriorityQueue() 

Document

Creates a PriorityQueue with the default initial capacity (11) that orders its elements according to their natural ordering (see Comparable).
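
A minimal sketch of this constructor in action: with no Comparator supplied, the elements must be mutually Comparable, and poll() drains the queue in ascending natural order (the iterator, by contrast, guarantees no particular order).

import java.util.PriorityQueue;

public class NaturalOrderingDemo {
    public static void main(String[] args) {
        PriorityQueue<Integer> pq = new PriorityQueue<>(); // default initial capacity of 11
        pq.add(42);
        pq.add(7);
        pq.add(19);
        // poll() always removes the smallest remaining element
        while (!pq.isEmpty()) {
            System.out.print(pq.poll() + " "); // prints: 7 19 42
        }
    }
}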

Usage

From source file:com.facebook.internal.FileLruCache.java

private void trim() {
    synchronized (lock) {
        isTrimPending = false;
        isTrimInProgress = true;
    }
    try {
        Logger.log(LoggingBehavior.CACHE, TAG, "trim started");
        PriorityQueue<ModifiedFile> heap = new PriorityQueue<ModifiedFile>();
        long size = 0;
        long count = 0;
        File[] filesToTrim = this.directory.listFiles(BufferFile.excludeBufferFiles());
        if (filesToTrim != null) {
            for (File file : filesToTrim) {
                ModifiedFile modified = new ModifiedFile(file);
                heap.add(modified);
                Logger.log(LoggingBehavior.CACHE, TAG, "  trim considering time="
                        + Long.valueOf(modified.getModified()) + " name=" + modified.getFile().getName());

                size += file.length();
                count++;
            }
        }

        while ((size > limits.getByteCount()) || (count > limits.getFileCount())) {
            File file = heap.remove().getFile();
            Logger.log(LoggingBehavior.CACHE, TAG, "  trim removing " + file.getName());
            size -= file.length();
            count--;
            file.delete();
        }
    } finally {
        synchronized (lock) {
            isTrimInProgress = false;
            lock.notifyAll();
        }
    }
}
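
The eviction loop works because ModifiedFile sorts oldest-first, so heap.remove() always yields the least recently modified file. The class itself is not shown above; a plausible sketch, assuming it simply wraps a File and compares by modification time, is:

import java.io.File;

// Hypothetical reconstruction of ModifiedFile: natural ordering by modification
// time keeps the least recently modified file at the head of the queue.
final class ModifiedFile implements Comparable<ModifiedFile> {
    private final File file;
    private final long modified;

    ModifiedFile(File file) {
        this.file = file;
        this.modified = file.lastModified();
    }

    File getFile() { return file; }
    long getModified() { return modified; }

    @Override
    public int compareTo(ModifiedFile other) {
        return Long.compare(this.modified, other.modified); // oldest first
    }
}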

From source file:edu.snu.leader.hierarchy.simple.Individual.java

/**
 * Finds the nearest neighbors for this individual
 *
 * @param simState
 */
private void findNearestNeighbors(SimulationState simState) {
    _LOG.trace("Entering findNearestNeighbors( simState )");

    // Get the number of nearest neighbors
    _nearestNeighborCount = simState.getNearestNeighborCount();

    // Build a priority queue to sort things for us
    PriorityQueue<Neighbor> sortedNeighbors = new PriorityQueue<Neighbor>();

    // Iterate through all the individuals
    Iterator<Individual> indIter = simState.getAllIndividuals().iterator();
    while (indIter.hasNext()) {
        // Get the individual
        Individual ind = indIter.next();

        // If it is us, continue on
        if (_id.equals(ind._id)) {
            continue;
        }

        // Build a neighbor out of it and put it in the queue
        Neighbor neighbor = new Neighbor((float) _location.distance(ind._location), ind);
        sortedNeighbors.add(neighbor);
    }

    // Get the "nearest" neighbors
    int count = Math.min(sortedNeighbors.size(), _nearestNeighborCount);
    for (int i = 0; i < count; i++) {
        _nearestNeighbors.add(sortedNeighbors.poll());
    }

    _LOG.trace("Leaving findNearestNeighbors( simState )");
}
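
Adding all n individuals and polling k of them is simple and correct, but it keeps the whole population in the heap. When n is large, a bounded max-heap of size k is a common alternative; a sketch with plain distances (note this reverses the natural ordering via a Comparator, so it uses a different constructor than the one documented on this page):

import java.util.Comparator;
import java.util.PriorityQueue;

private static PriorityQueue<Double> kSmallest(double[] distances, int k) {
    // Keep only the k smallest distances seen so far: a max-heap (reversed
    // natural ordering) lets us evict the largest whenever the bound is exceeded.
    PriorityQueue<Double> kNearest = new PriorityQueue<>(Comparator.reverseOrder());
    for (double distance : distances) {
        kNearest.add(distance);
        if (kNearest.size() > k) {
            kNearest.poll(); // evict the farthest candidate
        }
    }
    return kNearest;
}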

From source file:edu.oregonstate.eecs.mcplan.domains.firegirl.FireGirlState.java

private FireResult doFire(final RandomGenerator rng, final boolean suppress) {
    final int reach = params.fire_param_reach;

    final double end_time = drawEndOfFire(rng);
    double current_time = 0;

    // Construct the priority queue and add the first cell to it with time = 0
    final PriorityQueue<PrioritizedLocation> pqueue = new PriorityQueue<PrioritizedLocation>();
    pqueue.add(new PrioritizedLocation(0, ignite_loc));

    // setting a variable that will hold the lowest of all the ignition times in the queue.
    //        final int next_ign = 1000;

    final boolean[][] burned = new boolean[params.width][params.height];
    final boolean[][] crown_burned = new boolean[params.width][params.height];

    // start the queue loop
    int iter_count = 0;
    while (true) {

        //check to make sure that there is at least one queued arrival
        if (pqueue.isEmpty()) {
            //no queued arrivals, so there's no fire, so we're done
            //print("Priority Queue Exiting: No more queued ignitions")
            break;
        }

        //look through all the queued ignitions and find the earliest ignition
        //  time.
        final PrioritizedLocation next_ign = pqueue.poll();
        //now check to see if the soonest arrival happens before the time is up.
        if (next_ign.priority >= end_time) {
            //no fire arrivals (ignitions) are in queue within the allotted time
            //   so the firespread has stopped.
            //print("Priority Queue Exiting: Remaining queued ignitions are past the time limit")
            break;
        }

        //moving current time up to this ignition
        current_time = next_ign.priority;
        final int xloc = next_ign.location[0];
        final int yloc = next_ign.location[1];

        if (burned[xloc][yloc]) {
            continue;
        }

        //we haven't left the loop, so the next arrival is valid, so look at
        //  it and add its neighbors to the queue

        //failsafe exit
        iter_count += 1;
        if (iter_count > params.fire_iter_cap) {
            Log.warn("! Stopping fire early. time: {}", current_time);
            break;
        }

        //setting this cell to burned
        burned[xloc][yloc] = true;

        //Calculating this cell's fire spreadrate, which needs its fuel load, too
        final int fuel_ld = fuel_load[xloc][yloc];
        double spreadrate = calcFireSpreadRate(ignite_wind, ignite_temp, fuel_ld);

        //add the effects of suppression
        if (suppress) {
            spreadrate *= params.fire_suppression_rate;
        }

        // Check if the crown will burn (if the spreadrate is > 0)
        // Timber loss is a probabilistic function based on the
        //   calcCrownFireRisk() function.  This function will return
        //   a probability of crownfire, and we'll roll a uniform
        //   number against it.

        // NOTE: Deviation from Python
        if (rng.nextDouble() < calcCrownFireRisk(fuel_ld)) {
            crown_burned[xloc][yloc] = true;
        }

        //if the fire spreadrate of this fire is 0, then don't bother checking
        //   for neighbors and calculating arrival times... there won't be any
        //   spread, and for that matter, we'll get a divide-by-zero error.
        if (spreadrate == 0) {
            //no spreadrate, so we can't calculate arrival times, etc...
            //pqueue.remove([current_time,[xloc,yloc]])

            // Note: Already removed the element
            continue;
        }

        //recording information in the Logbook item
        //function signature is:  FireGirlfireLog.addIgnitionEvent(time, location, spread_rate, crown_burned):
        //            fire_log_item.addIgnitionEvent(current_time, [xloc,yloc], spreadrate, crown_burned[xloc][yloc])

        //setting iteration final ranges
        final int x_low = Math.max(xloc - reach, 0);
        final int x_high = Math.min(xloc + reach + 1, params.width - 1);
        final int y_low = Math.max(yloc - reach, 0);
        final int y_high = Math.min(yloc + reach + 1, params.height - 1);

        //            #checking bounds
        //            if (x_low < 0): x_low = 0
        //            if (y_low < 0): y_low = 0
        //            if (x_high >= self.width): x_high = self.width - 1
        //            if (y_high >= self.height): y_high = self.height - 1

        // FIXME: I think this indexing is incorrect (one short) due to
        // how x/y_high are capped above
        // Resolved: Changed '<' to '<='
        for (int i = x_low; i <= x_high; ++i) { //i in range(x_low, x_high):
            for (int j = y_low; j <= y_high; ++j) { //for j in range(y_low, y_high):

                //                    #don't calculate time to the current cell
                if (!((xloc == i) && (yloc == j))) {

                    //                        #we're checking each neighbor within the reach range, so
                    //                        #  first, we need to check whether it's already been
                    //                        #  burned over

                    if (!burned[i][j]) {

                        //                            #this neighbor hasn't burned over yet, so:
                        //                            # 1) calculate a new time-till arrival
                        //                            # 2) check to see if this neighbor is already in the queue
                        //                            # 2a) if it is, then check to see if this arrival time is sooner
                        //                            #       and if so, update it. Otherwise, just move on.
                        //                            # 2b) if it isn't in the queue, then add it as a new queue item

                        //                            # 1) final arrival time for this neighbor
                        final double dist = Math.sqrt((xloc - i) * (xloc - i) + (yloc - j) * (yloc - j));
                        final double arrival_time = (dist / spreadrate) + current_time;

                        // Just add it again; we filter duplicates by checking if they're already burned.
                        pqueue.add(new PrioritizedLocation(arrival_time, new int[] { i, j }));

                        ////                            # 2) checking to see if this neighbor is already queued
                        //                            boolean found_in_q = false;
                        //                            final Iterator<PrioritizedLocation> itr = pqueue.iterator();
                        //                            while( itr.hasNext() ) {
                        //                               final PrioritizedLocation ign = itr.next();
                        //                                if( ign.location[0] == i && ign.location[1] == j ) {
                        ////                                    #this neighbor IS in the queue already, so check its arrival time
                        ////                                    #print("   neighbor found in queue... updating...")
                        //                                    found_in_q = true;
                        //
                        ////                                    #updating it's arrival time if need be
                        //                                    if( arrival_time < ign.priority ) {
                        //                                       itr.remove();
                        ////                                        #the new arrival time is sooner, so update this queue item
                        //                                       pqueue.add( new PrioritizedLocation( arrival_time, ign.location ) );
                        //                                    }
                        //                                    break;
                        //                                }
                        //                            }
                        //
                        //
                        ////                            #check to see if we ever found this neighbor
                        //                            if( !found_in_q ) {
                        ////                                #we never found it, so it wasn't in the queue, and it's not burned, so add it
                        //                                pqueue.add( new PrioritizedLocation( arrival_time, new int[] { i, j } ) );
                        //                            }
                    }
                }
            }
        }

        //            # we've now finished checking the neighbors, so it's time to remove this item from the queue
        //            pqueue.remove([current_time,[xloc,yloc]])
    } // priority queue empty

    //        # and now we've exited the priority queue as well

    //        #look through the burned cells and update the actual grid values
    //        #Also, record losses
    double timber_loss = 0;
    int cells_burned = 0;
    int cells_crowned = 0;

    for (int i = 0; i < params.width; ++i) {
        for (int j = 0; j < params.height; ++j) {
            if (burned[i][j]) {
                cells_burned += 1;

                //                    #this cell was burned, so set the fuel_load to zero, and apply
                //                    #  the crown-burning model to the timber_value
                fuel_load[i][j] = 0;

                //                    #adding up timber loss
                if (crown_burned[i][j]) { //this was set when spreadrate was calculated earlier
                    //                        #the crown burned, so record the loss and set it to zero

                    //                        #SPEED
                    //                        ####BOTH Lines are modified for self.getPresentTimberValue(i,j)###########
                    timber_loss += getPresentTimberValue(i, j); //self.timber_value[i][j]
                    //                        #self.timber_value[i][j] = 0
                    //                        ####################

                    cells_crowned += 1;

                    //                        #and reset the age so that self.year + self.stand_age = 0
                    //                        stand_age[i][j] = -1 * year;
                    // NOTE: Deviation from Python code
                    stand_age[i][j] = 0;
                }
            }
        }
    }

    //        #Adding the final results to the fire_log_item
    //        fire_log_item.updateResults(timber_loss, cells_burned, cells_crowned)

    //        #Adding the lists (final maps) as well
    //        fire_log_item.map_burned = burned
    //        fire_log_item.map_crowned = crown_burned

    //        #add the FireLog item to the pathway's list (it's just an ordinary list)
    //        self.FireLog.append(fire_log_item)

    //        #add up suppression cost and record it
    int sup_cost = 0;
    if (suppress) {
        sup_cost += cells_burned * params.fire_suppression_cost_per_cell;
        sup_cost += end_time * params.fire_suppression_cost_per_day;
    }

    //        self.yearly_suppression_costs.append(sup_cost)

    //        #and finally, return the loss data
    return new FireResult(timber_loss, cells_burned, cells_crowned, sup_cost, end_time);
    //        return [timber_loss, cells_burned, sup_cost, end_time, cells_crowned]
}
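
This method is a discrete-event simulation: the queue is keyed by fire arrival time, so pqueue.poll() always advances to the earliest pending ignition, and stale arrivals at already-burned cells are skipped. PrioritizedLocation is not shown above; a minimal sketch consistent with its use would be:

// Hypothetical reconstruction of PrioritizedLocation: natural ordering by
// arrival time makes the head of the queue the next event to process.
final class PrioritizedLocation implements Comparable<PrioritizedLocation> {
    final double priority;   // fire arrival time
    final int[] location;    // {x, y} grid cell

    PrioritizedLocation(double priority, int[] location) {
        this.priority = priority;
        this.location = location;
    }

    @Override
    public int compareTo(PrioritizedLocation other) {
        return Double.compare(this.priority, other.priority); // earliest first
    }
}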

From source file:com.joliciel.talismane.tokeniser.patterns.CompoundPatternTokeniser.java

@Override
public List<TokenisedAtomicTokenSequence> tokeniseWithDecisions(Sentence sentence) {
    MONITOR.startTask("tokeniseWithDecisions");
    try {
        // apply any pre-tokenisation decisions via filters
        // we only want one placeholder per start index - the first one that gets added
        Map<Integer, TokenPlaceholder> placeholderMap = new HashMap<Integer, TokenPlaceholder>();
        for (TokenFilter tokenFilter : this.tokenFilters) {
            Set<TokenPlaceholder> myPlaceholders = tokenFilter.apply(sentence.getText());
            for (TokenPlaceholder placeholder : myPlaceholders) {
                if (!placeholderMap.containsKey(placeholder.getStartIndex())) {
                    placeholderMap.put(placeholder.getStartIndex(), placeholder);
                }
            }
            if (LOG.isTraceEnabled()) {
                if (myPlaceholders.size() > 0) {
                    LOG.trace("TokenFilter: " + tokenFilter);
                    LOG.trace("placeholders: " + myPlaceholders);
                }
            }
        }

        Set<TokenPlaceholder> placeholders = new HashSet<TokenPlaceholder>(placeholderMap.values());

        // Initially, separate the sentence into tokens using the separators provided
        TokenSequence tokenSequence = this.tokeniserService.getTokenSequence(sentence, Tokeniser.SEPARATORS,
                placeholders);

        // apply any pre-processing filters that have been added
        for (TokenSequenceFilter tokenSequenceFilter : this.tokenSequenceFilters) {
            tokenSequenceFilter.apply(tokenSequence);
        }

        // Assign each separator its default value
        List<TokeniserOutcome> defaultOutcomes = this.tokeniserPatternManager.getDefaultOutcomes(tokenSequence);
        List<Decision<TokeniserOutcome>> defaultDecisions = new ArrayList<Decision<TokeniserOutcome>>(
                defaultOutcomes.size());
        for (TokeniserOutcome outcome : defaultOutcomes) {
            Decision<TokeniserOutcome> tokeniserDecision = this.tokeniserDecisionFactory
                    .createDefaultDecision(outcome);
            tokeniserDecision.addAuthority("_" + this.getClass().getSimpleName());
            tokeniserDecision.addAuthority("_" + "DefaultDecision");
            defaultDecisions.add(tokeniserDecision);
        }

        List<TokenisedAtomicTokenSequence> sequences = null;

        // For each test pattern, see if anything in the sentence matches it
        if (this.decisionMaker != null) {
            List<TokenPatternMatchSequence> matchingSequences = new ArrayList<TokenPatternMatchSequence>();
            Map<Token, Set<TokenPatternMatchSequence>> tokenMatchSequenceMap = new HashMap<Token, Set<TokenPatternMatchSequence>>();
            Map<TokenPatternMatchSequence, TokenPatternMatch> primaryMatchMap = new HashMap<TokenPatternMatchSequence, TokenPatternMatch>();
            Set<Token> matchedTokens = new HashSet<Token>();

            MONITOR.startTask("pattern matching");
            try {
                for (TokenPattern parsedPattern : this.getTokeniserPatternManager().getParsedTestPatterns()) {
                    List<TokenPatternMatchSequence> matchesForThisPattern = parsedPattern.match(tokenSequence);
                    for (TokenPatternMatchSequence matchSequence : matchesForThisPattern) {
                        matchingSequences.add(matchSequence);
                        matchedTokens.addAll(matchSequence.getTokensToCheck());

                        TokenPatternMatch primaryMatch = null;
                        Token token = matchSequence.getTokensToCheck().get(0);

                        Set<TokenPatternMatchSequence> matchSequences = tokenMatchSequenceMap.get(token);
                        if (matchSequences == null) {
                            matchSequences = new TreeSet<TokenPatternMatchSequence>();
                            tokenMatchSequenceMap.put(token, matchSequences);
                        }
                        matchSequences.add(matchSequence);

                        for (TokenPatternMatch patternMatch : matchSequence.getTokenPatternMatches()) {
                            if (patternMatch.getToken().equals(token)) {
                                primaryMatch = patternMatch;
                                break;
                            }
                        }

                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Found match: " + primaryMatch);
                        }
                        primaryMatchMap.put(matchSequence, primaryMatch);
                    }
                }
            } finally {
                MONITOR.endTask("pattern matching");
            }

            // we want to create the n most likely token sequences
            // the sequence has to correspond to a token pattern
            Map<TokenPatternMatchSequence, List<Decision<TokeniserOutcome>>> matchSequenceDecisionMap = new HashMap<TokenPatternMatchSequence, List<Decision<TokeniserOutcome>>>();

            for (TokenPatternMatchSequence matchSequence : matchingSequences) {
                TokenPatternMatch match = primaryMatchMap.get(matchSequence);
                LOG.debug("next pattern match: " + match.toString());
                List<FeatureResult<?>> tokenFeatureResults = new ArrayList<FeatureResult<?>>();
                MONITOR.startTask("analyse features");
                try {
                    for (TokenPatternMatchFeature<?> feature : features) {
                        RuntimeEnvironment env = this.featureService.getRuntimeEnvironment();
                        FeatureResult<?> featureResult = feature.check(match, env);
                        if (featureResult != null) {
                            tokenFeatureResults.add(featureResult);
                        }
                    }

                    if (LOG.isTraceEnabled()) {
                        for (FeatureResult<?> featureResult : tokenFeatureResults) {
                            LOG.trace(featureResult.toString());
                        }
                    }
                } finally {
                    MONITOR.endTask("analyse features");
                }

                List<Decision<TokeniserOutcome>> decisions = null;
                MONITOR.startTask("make decision");
                try {
                    decisions = this.decisionMaker.decide(tokenFeatureResults);

                    for (ClassificationObserver<TokeniserOutcome> observer : this.observers)
                        observer.onAnalyse(match.getToken(), tokenFeatureResults, decisions);

                    for (Decision<TokeniserOutcome> decision : decisions) {
                        decision.addAuthority("_" + this.getClass().getSimpleName());
                        decision.addAuthority("_" + "Patterns");
                        decision.addAuthority(match.getPattern().getName());
                    }
                } finally {
                    MONITOR.endTask("make decision");
                }

                matchSequenceDecisionMap.put(matchSequence, decisions);
            }

            // initially create a heap with a single, empty sequence
            PriorityQueue<TokenisedAtomicTokenSequence> heap = new PriorityQueue<TokenisedAtomicTokenSequence>();
            TokenisedAtomicTokenSequence emptySequence = this.getTokeniserService()
                    .getTokenisedAtomicTokenSequence(sentence, 0);
            heap.add(emptySequence);

            for (int i = 0; i < tokenSequence.listWithWhiteSpace().size(); i++) {
                Token token = tokenSequence.listWithWhiteSpace().get(i);
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Token : \"" + token.getText() + "\"");
                }

                // build a new heap for this iteration
                PriorityQueue<TokenisedAtomicTokenSequence> previousHeap = heap;
                heap = new PriorityQueue<TokenisedAtomicTokenSequence>();

                if (i == 0) {
                    // first token is always "separate" from the outside world
                    Decision<TokeniserOutcome> decision = this.tokeniserDecisionFactory
                            .createDefaultDecision(TokeniserOutcome.SEPARATE);
                    decision.addAuthority("_" + this.getClass().getSimpleName());
                    decision.addAuthority("_" + "DefaultDecision");

                    TaggedToken<TokeniserOutcome> taggedToken = this.tokeniserService.getTaggedToken(token,
                            decision);

                    TokenisedAtomicTokenSequence newSequence = this.getTokeniserService()
                            .getTokenisedAtomicTokenSequence(emptySequence);
                    newSequence.add(taggedToken);
                    heap.add(newSequence);
                    continue;
                }

                // limit the heap breadth to K
                int maxSequences = previousHeap.size() > this.getBeamWidth() ? this.getBeamWidth()
                        : previousHeap.size();
                MONITOR.startTask("heap sort");
                try {
                    for (int j = 0; j < maxSequences; j++) {
                        TokenisedAtomicTokenSequence history = previousHeap.poll();

                        // Find the separating & non-separating decisions
                        if (history.size() > i) {
                            // token already added as part of a sequence introduced by another token
                            heap.add(history);
                        } else if (tokenMatchSequenceMap.containsKey(token)) {
                            // token begins one or more match sequences
                            // these are ordered from shortest to longest (via TreeSet)
                            List<TokenPatternMatchSequence> matchSequences = new ArrayList<TokenPatternMatchSequence>(
                                    tokenMatchSequenceMap.get(token));

                            // Since sequences P1..Pn contain each other,
                            // there are exactly matchSequences.size() + 1 consistent solutions
                            // Assume the default is separate
                            // 0: all separate
                            // 1: join P1, separate rest
                            // 2: join P2, separate rest
                            // ...
                            // n: join Pn
                            // We need to add each of these to the heap
                            // by taking the product of all probabilities consistent with each solution
                            // The probabilities for each solution are (j=join, s=separate)
                            // All separate: s1 x s2 x ... x sn
                            // P1: j1 x s2 x ... x sn
                            // P2: j1 x j2 x ... x sn
                            // ...
                            // Pn: j1 x j2 x ... x jn
                            // Any solution of the form s1 x j2 would be inconsistent, and is not considered
                            // If Pi and Pj start and end on the exact same token, then the solution for both is
                            // Pi: j1 x ... x ji x jj x sj+1 ... x sn
                            // Pj: j1 x ... x ji x jj x sj+1 ... x sn
                            // Note of course that we're never likely to have more than two Ps here,
                            // but we need a solution for more just to be sure to be sure
                            TokeniserOutcome defaultOutcome = defaultDecisions
                                    .get(token.getIndexWithWhiteSpace()).getOutcome();
                            TokeniserOutcome otherOutcome = null;
                            if (defaultOutcome == TokeniserOutcome.SEPARATE)
                                otherOutcome = TokeniserOutcome.JOIN;
                            else
                                otherOutcome = TokeniserOutcome.SEPARATE;

                            double[] decisionProbs = new double[matchSequences.size() + 1];
                            for (int k = 0; k < decisionProbs.length; k++)
                                decisionProbs[k] = 1;

                            // Note: k0 = default decision (e.g. separate all), k1=first pattern
                            // p1 = first pattern
                            int p = 1;
                            int prevEndIndex = -1;
                            for (TokenPatternMatchSequence matchSequence : matchSequences) {
                                int endIndex = matchSequence.getTokensToCheck()
                                        .get(matchSequence.getTokensToCheck().size() - 1).getEndIndex();
                                List<Decision<TokeniserOutcome>> decisions = matchSequenceDecisionMap
                                        .get(matchSequence);
                                for (Decision<TokeniserOutcome> decision : decisions) {
                                    for (int k = 0; k < decisionProbs.length; k++) {
                                        if (decision.getOutcome() == defaultOutcome) {
                                            // e.g. separate in most cases
                                            if (k < p && endIndex > prevEndIndex)
                                                decisionProbs[k] *= decision.getProbability();
                                            else if (k + 1 < p && endIndex <= prevEndIndex)
                                                decisionProbs[k] *= decision.getProbability();
                                        } else {
                                            // e.g. join in most cases
                                            if (k >= p && endIndex > prevEndIndex)
                                                decisionProbs[k] *= decision.getProbability();
                                            else if (k + 1 >= p && endIndex <= prevEndIndex)
                                                decisionProbs[k] *= decision.getProbability();
                                        }
                                    } // next k
                                } // next decision (only 2 of these)
                                prevEndIndex = endIndex;
                                p++;
                            }

                            // transform to probability distribution
                            double sumProbs = 0;
                            for (int k = 0; k < decisionProbs.length; k++)
                                sumProbs += decisionProbs[k];

                            if (sumProbs > 0)
                                for (int k = 0; k < decisionProbs.length; k++)
                                    decisionProbs[k] /= sumProbs;

                            // Apply default decision
                            // Since this is the default decision for all tokens in the sequence, we don't add the other tokens for now,
                            // so as to allow them
                            // to get examined one at a time, just in case one of them starts its own separate sequence
                            Decision<TokeniserOutcome> defaultDecision = this.tokeniserDecisionFactory
                                    .createDecision(defaultOutcome.getCode(), decisionProbs[0]);
                            defaultDecision.addAuthority("_" + this.getClass().getSimpleName());
                            defaultDecision.addAuthority("_" + "Patterns");
                            for (TokenPatternMatchSequence matchSequence : matchSequences) {
                                defaultDecision.addAuthority(matchSequence.getTokenPattern().getName());
                            }

                            TaggedToken<TokeniserOutcome> defaultTaggedToken = this.tokeniserService
                                    .getTaggedToken(token, defaultDecision);
                            TokenisedAtomicTokenSequence defaultSequence = this.getTokeniserService()
                                    .getTokenisedAtomicTokenSequence(history);
                            defaultSequence.add(defaultTaggedToken);
                            defaultSequence.addDecision(defaultDecision);
                            heap.add(defaultSequence);

                            // Apply one non-default decision per match sequence
                            for (int k = 0; k < matchSequences.size(); k++) {
                                TokenPatternMatchSequence matchSequence = matchSequences.get(k);
                                double prob = decisionProbs[k + 1];
                                Decision<TokeniserOutcome> decision = this.tokeniserDecisionFactory
                                        .createDecision(otherOutcome.getCode(), prob);
                                decision.addAuthority("_" + this.getClass().getSimpleName());
                                decision.addAuthority("_" + "Patterns");
                                decision.addAuthority(matchSequence.getTokenPattern().getName());

                                TaggedToken<TokeniserOutcome> taggedToken = this.tokeniserService
                                        .getTaggedToken(token, decision);

                                TokenisedAtomicTokenSequence newSequence = this.getTokeniserService()
                                        .getTokenisedAtomicTokenSequence(history);
                                newSequence.add(taggedToken);
                                newSequence.addDecision(decision);

                                // The decision is NOT the default decision for all tokens in the sequence, add all other tokens
                                // in this sequence to the solution
                                for (Token tokenInSequence : matchSequence.getTokensToCheck()) {
                                    if (tokenInSequence.equals(token)) {
                                        continue;
                                    }
                                    Decision<TokeniserOutcome> decisionInSequence = this.tokeniserDecisionFactory
                                            .createDefaultDecision(decision.getOutcome());
                                    decisionInSequence.addAuthority("_" + this.getClass().getSimpleName());
                                    decisionInSequence.addAuthority("_" + "DecisionInSequence");
                                    decisionInSequence.addAuthority("_" + "DecisionInSequence_non_default");
                                    decisionInSequence.addAuthority("_" + "Patterns");
                                    TaggedToken<TokeniserOutcome> taggedTokenInSequence = this.tokeniserService
                                            .getTaggedToken(tokenInSequence, decisionInSequence);
                                    newSequence.add(taggedTokenInSequence);
                                }

                                heap.add(newSequence);

                            } // next sequence
                        } else {
                            // token doesn't start match sequence, and hasn't already been added to the current sequence
                            Decision<TokeniserOutcome> decision = defaultDecisions.get(i);
                            if (matchedTokens.contains(token)) {
                                decision = this.tokeniserDecisionFactory
                                        .createDefaultDecision(decision.getOutcome());
                                decision.addAuthority("_" + this.getClass().getSimpleName());
                                decision.addAuthority("_" + "DecisionInSequence");
                                decision.addAuthority("_" + "DecisionInSequence_default");
                                decision.addAuthority("_" + "Patterns");
                            }
                            TaggedToken<TokeniserOutcome> taggedToken = this.tokeniserService
                                    .getTaggedToken(token, decision);

                            TokenisedAtomicTokenSequence newSequence = this.getTokeniserService()
                                    .getTokenisedAtomicTokenSequence(history);
                            newSequence.add(taggedToken);
                            heap.add(newSequence);
                        }

                    } // next sequence in the old heap
                } finally {
                    MONITOR.endTask("heap sort");
                }
            } // next token

            sequences = new ArrayList<TokenisedAtomicTokenSequence>();
            int k = 0;
            while (!heap.isEmpty()) {
                sequences.add(heap.poll());
                k++;
                if (k >= this.getBeamWidth())
                    break;
            }
        } else {
            sequences = new ArrayList<TokenisedAtomicTokenSequence>();
            TokenisedAtomicTokenSequence defaultSequence = this.getTokeniserService()
                    .getTokenisedAtomicTokenSequence(sentence, 0);
            int i = 0;
            for (Token token : tokenSequence.listWithWhiteSpace()) {
                TaggedToken<TokeniserOutcome> taggedToken = this.tokeniserService.getTaggedToken(token,
                        defaultDecisions.get(i++));
                defaultSequence.add(taggedToken);
            }
            sequences.add(defaultSequence);
        } // have decision maker?

        LOG.debug("####Final token sequences:");
        int j = 1;
        for (TokenisedAtomicTokenSequence sequence : sequences) {
            TokenSequence newTokenSequence = sequence.inferTokenSequence();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Token sequence " + (j++) + ", score=" + df.format(sequence.getScore()));
                LOG.debug("Atomic sequence: " + sequence);
                LOG.debug("Resulting sequence: " + newTokenSequence);
            }
            // need to re-apply the pre-processing filters, because the tokens are all new
            // Question: why can't we conserve the initial tokens when they haven't changed at all?
            // Answer: because the tokenSequence and index in the sequence is referenced by the token.
            // Question: should we create a separate class, Token and TokenInSequence,
            // one with index & sequence access & one without?
            for (TokenSequenceFilter tokenSequenceFilter : this.tokenSequenceFilters) {
                tokenSequenceFilter.apply(newTokenSequence);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("After filters:      " + newTokenSequence);
            }
        }

        return sequences;
    } finally {
        MONITOR.endTask("tokeniseWithDecisions");
    }
}
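
Worth noting in this example is the beam-pruning idiom: PriorityQueue has no bounded-size or truncate operation, so each token iteration builds a fresh heap and seeds it by polling at most beamWidth of the best sequences from the previous heap, implicitly discarding the rest.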

From source file:ma.glasnost.orika.metadata.ScoringClassMapBuilder.java

public ClassMapBuilder<A, B> byDefault(DefaultFieldMapper... withDefaults) {

    DefaultFieldMapper[] defaults;
    if (withDefaults.length == 0) {
        defaults = getDefaultFieldMappers();
    } else {
        defaults = withDefaults;
    }
    /*
     * For our custom 'byDefault' method, we're going to try and match
     * fields by their Levenshtein distance
     */
    PriorityQueue<FieldMatchScore> matchScores = new PriorityQueue<FieldMatchScore>();

    Map<String, Property> propertiesForA = getPropertyExpressions(getAType());
    Map<String, Property> propertiesForB = getPropertyExpressions(getBType());

    for (final Entry<String, Property> propertyA : propertiesForA.entrySet()) {
        if (!propertyA.getValue().getName().equals("class")) {
            for (final Entry<String, Property> propertyB : propertiesForB.entrySet()) {
                if (!propertyB.getValue().getName().equals("class")) {
                    FieldMatchScore matchScore = new FieldMatchScore(propertyA.getValue(), propertyB.getValue(),
                            matchingWeights);
                    matchScores.add(matchScore);
                }
            }
        }
    }

    Set<String> unmatchedFields = new LinkedHashSet<String>(this.getPropertiesForTypeA());
    unmatchedFields.remove("class");

    for (FieldMatchScore score : matchScores) {

        if (!this.getMappedPropertiesForTypeA().contains(score.propertyA.getExpression())
                && !this.getMappedPropertiesForTypeB().contains(score.propertyB.getExpression())) {
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("\n" + score.toString());
            }
            if (score.meetsMinimumScore()) {
                fieldMap(score.propertyA.getExpression(), score.propertyB.getExpression()).add();
                unmatchedFields.remove(score.propertyA.getExpression());
            }
        }
    }

    /*
     * Apply any default field mappers to the unmapped fields
     */
    for (String propertyNameA : unmatchedFields) {
        Property prop = resolvePropertyForA(propertyNameA);
        for (DefaultFieldMapper defaulter : defaults) {
            String suggestion = defaulter.suggestMappedField(propertyNameA, prop.getType());
            if (suggestion != null && getPropertiesForTypeB().contains(suggestion)) {
                if (!getMappedPropertiesForTypeB().contains(suggestion)) {
                    fieldMap(propertyNameA, suggestion).add();
                }
            }
        }
    }

    return this;
}
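
One caveat worth flagging: the for-each loop over matchScores uses PriorityQueue's iterator, which is documented not to traverse the elements in any particular order; only poll() and peek() honor the priority. If best-score-first traversal is the intent here, draining the queue is the reliable pattern:

// Sketch: the iterator returns heap order, not priority order; poll() is ordered.
while (!matchScores.isEmpty()) {
    FieldMatchScore score = matchScores.poll();
    // ... process matches starting from the head of the queue ...
}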

From source file:org.onebusaway.uk.network_rail.gtfs_realtime.graph.PositionBerthToStanoxGraphMain.java

private void explore(Set<RawBerthNode> connections, RawStanoxNode stanoxNode) {
    Queue<OrderedRawBerthNode> queue = new PriorityQueue<OrderedRawBerthNode>();
    Set<RawBerthNode> visited = new HashSet<RawBerthNode>();
    int openCount = 0;
    for (RawBerthNode connection : connections) {
        queue.add(new OrderedRawBerthNode(connection, null, 0.0));
        openCount++;
    }

    Map<RawBerthNode, RawBerthNode> parents = new HashMap<RawBerthNode, RawBerthNode>();

    while (!queue.isEmpty()) {
        OrderedRawBerthNode currentNode = queue.poll();
        RawBerthNode node = currentNode.getNode();
        boolean isOpen = currentNode.isOpen();
        if (isOpen) {
            openCount--;
        } else if (openCount == 0) {
            return;
        }
        if (visited.contains(node)) {
            continue;
        }
        visited.add(node);
        parents.put(node, currentNode.getParent());
        Set<RawStanoxNode> stanoxes = node.getStanox();
        if (stanoxes.size() > 0 && !stanoxes.contains(stanoxNode)) {
            _log.info(node + " stanoxes=" + stanoxes + " " + currentNode.getDistance() + " open=" + openCount);
            RawBerthNode c = node;
            while (c != null) {
                _log.info("  " + c);
                c = parents.get(c);
            }
            isOpen = false;
        }
        for (Map.Entry<RawBerthNode, List<Integer>> entry : node.getOutgoing().entrySet()) {
            RawBerthNode outgoing = entry.getKey();
            int avgDuration = RawNode.average(entry.getValue());
            queue.add(new OrderedRawBerthNode(outgoing, node, currentNode.getDistance() + avgDuration, isOpen));
            if (isOpen) {
                openCount++;
            }
        }
    }
}
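
This explore method follows the standard lazy-deletion pattern for Dijkstra-style searches with java.util.PriorityQueue: the queue has no decrease-key operation, so a node may be enqueued several times with different distances, and every extraction after the first (cheapest) one is discarded via the visited set.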

From source file:com.gargoylesoftware.htmlunit.javascript.background.JavaScriptJobManagerImpl.java

/**
 * Our own serialization (to handle the weak reference)
 * @param in the stream to read from
 * @throws IOException in case of error
 * @throws ClassNotFoundException in case of error
 */
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();

    // we do not store the jobs (at the moment)
    scheduledJobsQ_ = new PriorityQueue<>();
    cancelledJobs_ = new ArrayList<>();
    currentlyRunningJob_ = null;
}
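
Since the pending jobs are deliberately not serialized, deserialization simply re-creates an empty queue. The no-argument constructor is a natural fit here: the default capacity of 11 grows on demand as jobs are re-scheduled.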

From source file:org.apache.sysml.runtime.compress.CompressedMatrixBlock.java

private static ColGroup compressColGroup(MatrixBlock in, CompressedSizeEstimator estim,
        HashMap<Integer, Double> compRatios, int rlen, double sp, int[] colIndexes) {
    int[] allGroupIndices = null;
    int allColsCount = colIndexes.length;
    CompressedSizeInfo sizeInfo;
    // The compression type is decided based on a full bitmap since it
    // will be reused for the actual compression step.
    UncompressedBitmap ubm = null;
    PriorityQueue<CompressedColumn> compRatioPQ = null;
    boolean skipGroup = false;
    while (true) {
        //extract the exact bitmap and observe the compression ratio
        ubm = BitmapEncoder.extractBitmap(colIndexes, in);
        sizeInfo = estim.estimateCompressedColGroupSize(ubm);
        double compRatio = getUncompressedSize(rlen, colIndexes.length, sp) / sizeInfo.getMinSize();

        if (compRatio > 1) {
            break; // we have a good group
        }

        // modify the group
        if (compRatioPQ == null) {
            // first modification
            allGroupIndices = colIndexes.clone();
            compRatioPQ = new PriorityQueue<CompressedMatrixBlock.CompressedColumn>();
            for (int i = 0; i < colIndexes.length; i++)
                compRatioPQ.add(new CompressedColumn(i, compRatios.get(colIndexes[i])));
        }

        // index in allGroupIndices
        int removeIx = compRatioPQ.poll().colIx;
        allGroupIndices[removeIx] = -1;
        allColsCount--;
        if (allColsCount == 0) {
            skipGroup = true;
            break;
        }
        colIndexes = new int[allColsCount];
        // copying the values that do not equal -1
        int ix = 0;
        for (int col : allGroupIndices)
            if (col != -1)
                colIndexes[ix++] = col;
    }

    //add group to uncompressed fallback
    if (skipGroup)
        return null;

    //create compressed column group
    long rleSize = sizeInfo.getRLESize();
    long oleSize = sizeInfo.getOLESize();
    if (rleSize < oleSize)
        return new ColGroupRLE(colIndexes, rlen, ubm);
    else
        return new ColGroupOLE(colIndexes, rlen, ubm);
}
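
In the greedy loop above, the queue orders candidate columns by their individual compression ratios (presumably ascending, given how CompressedColumn is used here), so compRatioPQ.poll() drops the least compressible column first, shrinking the group until its combined ratio exceeds 1 or no columns remain.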

From source file:org.onebusaway.uk.network_rail.gtfs_realtime.graph.PositionBerthToStanoxGraphMain.java

private Map<Location, Integer> getNearbyNodesWithLocation(Map<RawBerthNode, Location> nodesToLocations,
        RawBerthNode source, int minCount) {

    Map<Location, Integer> locationsAndTime = new HashMap<Location, Integer>();

    PriorityQueue<OrderedNode> queue = new PriorityQueue<OrderedNode>();
    queue.add(new OrderedNode(source, 0));

    Set<RawBerthNode> visited = new HashSet<RawBerthNode>();
    visited.add(source);

    Map<RawBerthNode, Integer> minTimeToSource = new HashMap<RawBerthNode, Integer>();

    while (!queue.isEmpty()) {
        OrderedNode orderedNode = queue.poll();
        RawBerthNode node = orderedNode.node;
        if (minTimeToSource.containsKey(node)) {
            continue;
        }
        int time = orderedNode.value;
        minTimeToSource.put(node, time);
        if (nodesToLocations.containsKey(node)) {
            locationsAndTime.put(nodesToLocations.get(node), time);
            if (locationsAndTime.size() >= minCount) {
                return locationsAndTime;
            }
        }

        for (Edge edge : node.getEdges()) {
            RawBerthNode to = edge.getTo();
            int proposedTime = edge.getAverageDuration() + time;
            if (!minTimeToSource.containsKey(to)) {
                queue.add(new OrderedNode(to, proposedTime));
            }
        }
    }

    return locationsAndTime;
}

From source file:exploration.rendezvous.MultiPointRendezvousStrategy.java

private PriorityQueue<NearRVPoint> GetPointsWithinDistOfFrontier(List<NearRVPoint> generatedPoints,
        double maxDist) {
    PriorityQueue<NearRVPoint> pointsNearFrontier = new PriorityQueue<NearRVPoint>();

    Point frontierCentre = getExplorerFrontier();

    if (SimConstants.DEBUG_OUTPUT) {
        System.out.println(agent + " frontierCentre is " + frontierCentre);
    }
    // create priority queue of all potential rvpoints within given straight line distance
    for (NearRVPoint p : generatedPoints) {
        double dist = p.distance(frontierCentre);
        if (dist > maxDist) {
            continue;
        }
        p.setDistanceToFrontier(dist);
        pointsNearFrontier.add(p);
    }

    return pointsNearFrontier;
}
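
Assuming NearRVPoint's natural ordering is by the distance stored via setDistanceToFrontier(), callers can poll() the returned queue to visit candidate rendezvous points nearest the frontier first.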