Example usage for java.util PriorityQueue add

Introduction

On this page you can find example usages of java.util.PriorityQueue.add, collected from open-source projects.

Prototype

public boolean add(E e) 

Document

Inserts the specified element into this priority queue.
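
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of what add does: the queue orders its elements by natural ordering or by a supplied Comparator, poll always returns the least element, and add grows the queue as needed.

import java.util.PriorityQueue;

public class PriorityQueueAddDemo {
    public static void main(String[] args) {
        PriorityQueue<Integer> queue = new PriorityQueue<Integer>();

        // add inserts the element and restores the heap invariant.
        // For the unbounded PriorityQueue it always returns true;
        // it throws NullPointerException for null elements.
        queue.add(42);
        queue.add(7);
        queue.add(19);

        // poll drains the queue in priority order: prints 7, 19, 42.
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }
    }
}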

Usage

From source file:com.joliciel.talismane.tokeniser.patterns.IntervalPatternTokeniser.java

@Override
public List<TokenisedAtomicTokenSequence> tokeniseWithDecisions(Sentence sentence) {
    MONITOR.startTask("tokeniseWithDecisions");
    try {
        // apply any pre-tokenisation decisions via filters
        // we only want one placeholder per start index - the first one that gets added
        Map<Integer, TokenPlaceholder> placeholderMap = new HashMap<Integer, TokenPlaceholder>();
        for (TokenFilter tokenFilter : this.tokenFilters) {
            Set<TokenPlaceholder> myPlaceholders = tokenFilter.apply(sentence.getText());
            for (TokenPlaceholder placeholder : myPlaceholders) {
                if (!placeholderMap.containsKey(placeholder.getStartIndex())) {
                    placeholderMap.put(placeholder.getStartIndex(), placeholder);
                }
            }
            if (LOG.isTraceEnabled()) {
                if (myPlaceholders.size() > 0) {
                    LOG.trace("TokenFilter: " + tokenFilter);
                    LOG.trace("placeholders: " + myPlaceholders);
                }
            }
        }

        Set<TokenPlaceholder> placeholders = new HashSet<TokenPlaceholder>(placeholderMap.values());

        // Initially, separate the sentence into tokens using the separators provided
        TokenSequence tokenSequence = this.tokeniserService.getTokenSequence(sentence, Tokeniser.SEPARATORS,
                placeholders);

        // apply any pre-processing filters that have been added
        for (TokenSequenceFilter tokenSequenceFilter : this.tokenSequenceFilters) {
            tokenSequenceFilter.apply(tokenSequence);
        }

        // Assign each separator its default value
        List<TokeniserOutcome> defaultOutcomes = this.tokeniserPatternManager.getDefaultOutcomes(tokenSequence);
        List<Decision<TokeniserOutcome>> defaultDecisions = new ArrayList<Decision<TokeniserOutcome>>(
                defaultOutcomes.size());
        for (TokeniserOutcome outcome : defaultOutcomes) {
            Decision<TokeniserOutcome> tokeniserDecision = this.tokeniserDecisionFactory
                    .createDefaultDecision(outcome);
            tokeniserDecision.addAuthority("_" + this.getClass().getSimpleName());
            tokeniserDecision.addAuthority("_" + "DefaultDecision");
            defaultDecisions.add(tokeniserDecision);
        }
        List<TokenisedAtomicTokenSequence> sequences = null;

        // For each test pattern, see if anything in the sentence matches it
        if (this.decisionMaker != null) {
            Set<Token> tokensToCheck = new HashSet<Token>();
            MONITOR.startTask("pattern matching");
            try {
                for (TokenPattern parsedPattern : this.getTokeniserPatternManager().getParsedTestPatterns()) {
                    Set<Token> tokensToCheckForThisPattern = new HashSet<Token>();
                    List<TokenPatternMatchSequence> matchesForThisPattern = parsedPattern.match(tokenSequence);
                    for (TokenPatternMatchSequence tokenPatternMatch : matchesForThisPattern) {
                        if (LOG.isTraceEnabled())
                            tokensToCheckForThisPattern.addAll(tokenPatternMatch.getTokensToCheck());
                        tokensToCheck.addAll(tokenPatternMatch.getTokensToCheck());
                    }
                    if (LOG.isTraceEnabled()) {
                        if (tokensToCheckForThisPattern.size() > 0) {
                            LOG.trace("Parsed pattern: " + parsedPattern);
                            LOG.trace("tokensToCheck: " + tokensToCheckForThisPattern);
                        }
                    }
                }
            } finally {
                MONITOR.endTask("pattern matching");
            }

            // we want to create the n most likely token sequences
            // the sequence has to correspond to a token pattern

            // initially create a heap with a single, empty sequence
            PriorityQueue<TokenisedAtomicTokenSequence> heap = new PriorityQueue<TokenisedAtomicTokenSequence>();
            TokenisedAtomicTokenSequence emptySequence = this.getTokeniserService()
                    .getTokenisedAtomicTokenSequence(sentence, 0);
            heap.add(emptySequence);
            int i = 0;
            for (Token token : tokenSequence.listWithWhiteSpace()) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Token : \"" + token.getText() + "\"");
                }
                // build a new heap for this iteration
                PriorityQueue<TokenisedAtomicTokenSequence> previousHeap = heap;
                heap = new PriorityQueue<TokenisedAtomicTokenSequence>();

                // limit the heap breadth to K
                int maxSequences = previousHeap.size() > this.getBeamWidth() ? this.getBeamWidth()
                        : previousHeap.size();
                for (int j = 0; j < maxSequences; j++) {
                    TokenisedAtomicTokenSequence history = previousHeap.poll();

                    // Find the separating & non-separating decisions
                    List<Decision<TokeniserOutcome>> decisions = null;
                    if (tokensToCheck.contains(token)) {
                        // test the features on the current token
                        TokeniserContext context = new TokeniserContext(token, history);
                        List<FeatureResult<?>> tokenFeatureResults = new ArrayList<FeatureResult<?>>();
                        MONITOR.startTask("analyse features");
                        try {
                            for (TokeniserContextFeature<?> feature : tokeniserContextFeatures) {
                                RuntimeEnvironment env = this.featureService.getRuntimeEnvironment();
                                FeatureResult<?> featureResult = feature.check(context, env);
                                if (featureResult != null) {
                                    tokenFeatureResults.add(featureResult);
                                }
                            }

                            if (LOG.isTraceEnabled()) {
                                for (FeatureResult<?> featureResult : tokenFeatureResults) {
                                    LOG.trace(featureResult.toString());
                                }
                            }
                        } finally {
                            MONITOR.endTask("analyse features");
                        }

                        MONITOR.startTask("make decision");
                        try {
                            decisions = this.decisionMaker.decide(tokenFeatureResults);

                            for (ClassificationObserver<TokeniserOutcome> observer : this.observers)
                                observer.onAnalyse(token, tokenFeatureResults, decisions);

                            for (Decision<TokeniserOutcome> decision : decisions) {
                                decision.addAuthority(this.getClass().getSimpleName());
                                for (TokenPatternMatch tokenMatch : token.getMatches()) {
                                    decision.addAuthority(tokenMatch.getPattern().toString());
                                }
                            }
                        } finally {
                            MONITOR.endTask("make decision");
                        }
                    } else {
                        decisions = new ArrayList<Decision<TokeniserOutcome>>();
                        decisions.add(defaultDecisions.get(i));
                    }

                    MONITOR.startTask("heap sort");
                    try {
                        for (Decision<TokeniserOutcome> decision : decisions) {
                            TaggedToken<TokeniserOutcome> taggedToken = this.tokeniserService
                                    .getTaggedToken(token, decision);

                            TokenisedAtomicTokenSequence tokenisedSequence = this.getTokeniserService()
                                    .getTokenisedAtomicTokenSequence(history);
                            tokenisedSequence.add(taggedToken);
                            if (decision.isStatistical())
                                tokenisedSequence.addDecision(decision);
                            heap.add(tokenisedSequence);
                        }
                    } finally {
                        MONITOR.endTask("heap sort");
                    }

                } // next sequence in the old heap
                i++;
            } // next token

            sequences = new ArrayList<TokenisedAtomicTokenSequence>();
            i = 0;
            while (!heap.isEmpty()) {
                sequences.add(heap.poll());
                i++;
                if (i >= this.getBeamWidth())
                    break;
            }
        } else {
            sequences = new ArrayList<TokenisedAtomicTokenSequence>();
            TokenisedAtomicTokenSequence defaultSequence = this.getTokeniserService()
                    .getTokenisedAtomicTokenSequence(sentence, 0);
            int i = 0;
            for (Token token : tokenSequence.listWithWhiteSpace()) {
                TaggedToken<TokeniserOutcome> taggedToken = this.tokeniserService.getTaggedToken(token,
                        defaultDecisions.get(i++));
                defaultSequence.add(taggedToken);
            }
            sequences.add(defaultSequence);
        } // have decision maker?

        LOG.debug("####Final token sequences:");
        int j = 1;
        for (TokenisedAtomicTokenSequence sequence : sequences) {
            TokenSequence newTokenSequence = sequence.inferTokenSequence();
            if (LOG.isDebugEnabled()) {
                LOG.debug("Token sequence " + (j++) + ", score=" + df.format(sequence.getScore()));
                LOG.debug("Atomic sequence: " + sequence);
                LOG.debug("Resulting sequence: " + newTokenSequence);
            }
            // need to re-apply the pre-processing filters, because the tokens are all new
            // Question: why can't we conserve the initial tokens when they haven't changed at all?
            // Answer: because the tokenSequence and index in the sequence is referenced by the token.
            // Question: should we create a separate class, Token and TokenInSequence,
            // one with index & sequence access & one without?
            for (TokenSequenceFilter tokenSequenceFilter : this.tokenSequenceFilters) {
                tokenSequenceFilter.apply(newTokenSequence);
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("After filters: " + newTokenSequence);
            }
        }

        return sequences;
    } finally {
        MONITOR.endTask("tokeniseWithDecisions");
    }
}
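
The method above is a beam search over tokenisation decisions: at each token it polls at most getBeamWidth() sequences from the previous heap (so only the best K survive), extends each one with every candidate decision, and pushes the results onto a fresh heap. A stripped-down sketch of that heap-swapping pattern, using a hypothetical Candidate class in place of Talismane's TokenisedAtomicTokenSequence:

import java.util.PriorityQueue;

public class BeamSearchSketch {

    static class Candidate implements Comparable<Candidate> {
        final String text;
        final double score;

        Candidate(String text, double score) {
            this.text = text;
            this.score = score;
        }

        // Highest score first, so poll() returns the best candidate.
        public int compareTo(Candidate other) {
            return Double.compare(other.score, this.score);
        }
    }

    public static void main(String[] args) {
        int beamWidth = 2;
        PriorityQueue<Candidate> heap = new PriorityQueue<Candidate>();
        heap.add(new Candidate("", 1.0)); // single empty starter sequence

        for (int step = 0; step < 3; step++) {
            PriorityQueue<Candidate> previousHeap = heap;
            heap = new PriorityQueue<Candidate>();
            // limit the beam breadth to K
            int maxSequences = Math.min(previousHeap.size(), beamWidth);
            for (int j = 0; j < maxSequences; j++) {
                Candidate history = previousHeap.poll();
                // extend each surviving candidate with every possible decision
                heap.add(new Candidate(history.text + "a", history.score * 0.9));
                heap.add(new Candidate(history.text + "b", history.score * 0.1));
            }
        }
        System.out.println(heap.poll().text); // best sequence: "aaa"
    }
}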

From source file:com.trk.aboutme.facebook.internal.FileLruCache.java

private void trim() {
    try {
        Logger.log(LoggingBehavior.CACHE, TAG, "trim started");
        PriorityQueue<ModifiedFile> heap = new PriorityQueue<ModifiedFile>();
        long size = 0;
        long count = 0;
        for (File file : this.directory.listFiles(BufferFile.excludeBufferFiles())) {
            ModifiedFile modified = new ModifiedFile(file);
            heap.add(modified);
            Logger.log(LoggingBehavior.CACHE, TAG, "  trim considering time="
                    + Long.valueOf(modified.getModified()) + " name=" + modified.getFile().getName());

            size += file.length();
            count++;
        }

        while ((size > limits.getByteCount()) || (count > limits.getFileCount())) {
            File file = heap.remove().getFile();
            Logger.log(LoggingBehavior.CACHE, TAG, "  trim removing " + file.getName());
            size -= file.length();
            count--;
            file.delete();
        }
    } finally {
        synchronized (lock) {
            isTrimPending = false;
            lock.notifyAll();
        }
    }
}
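
The heap here orders cache files by modification time (via the ModifiedFile wrapper), so remove() always evicts the oldest entry first while size and count track the budget. The same idea reduced to a sketch, comparing File.lastModified() directly and using an assumed byte budget:

import java.io.File;
import java.util.Comparator;
import java.util.PriorityQueue;

public class OldestFirstTrimSketch {
    static void trim(File directory, long byteBudget) {
        // Oldest modification time at the head of the heap.
        PriorityQueue<File> heap = new PriorityQueue<File>(11, new Comparator<File>() {
            public int compare(File a, File b) {
                return Long.compare(a.lastModified(), b.lastModified());
            }
        });
        File[] files = directory.listFiles();
        if (files == null) {
            return; // not a directory, or an I/O error
        }
        long size = 0;
        for (File file : files) {
            heap.add(file);
            size += file.length();
        }
        while (size > byteBudget && !heap.isEmpty()) {
            File oldest = heap.remove();
            size -= oldest.length();
            oldest.delete();
        }
    }
}

Note the null check on listFiles(): the com.facebook.internal.FileLruCache version of this same method later on this page adds it, whereas the version above would throw a NullPointerException if the directory were missing.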

From source file:net.sourceforge.jasa.market.FourHeapOrderBook.java

/**
 * Unify the shout at the top of the heap with the supplied shout, so that
 * quantity(shout) = quantity(top(heap)). This is achieved by splitting the
 * supplied shout or the shout at the top of the heap.
 *
 * @param shout
 *          The shout.
 * @param to
 *          The heap being moved to.
 * 
 * @return A reference to the, possibly modified, shout.
 * 
 */
protected Order unifyShout(Order shout, PriorityQueue<Order> from, PriorityQueue<Order> to) {

    Order top = (Order) from.peek();

    if (shout.getQuantity() > top.getQuantity()) {
        shout = shout.splat(shout.getQuantity() - top.getQuantity());
    } else {
        if (top.getQuantity() > shout.getQuantity()) {
            Order remainder = top.split(top.getQuantity() - shout.getQuantity());
            from.add(remainder);
            assert ((remainder.isBid && from == bOut) || ((!remainder.isBid) && from == sOut));
            to.add(from.remove());
            //            to.add(remainder);
            return shout;
        }
    }

    to.add(from.remove());
    return shout;
}

From source file:com.joliciel.jochre.analyser.BeamSearchImageAnalyser.java

public void analyseInternal(JochreImage image) {
    LOG.debug("Analysing image " + image.getId());
    if (currentMonitor != null) {
        currentMonitor.setCurrentAction("imageMonitor.analysingImage",
                new Object[] { image.getPage().getIndex() });
    }
    for (LetterGuessObserver observer : observers) {
        observer.onImageStart(image);
    }
    if (totalShapeCount < 0)
        totalShapeCount = image.getShapeCount();

    for (Paragraph paragraph : image.getParagraphs()) {
        LOG.debug("Analysing paragraph " + paragraph.getIndex() + " (id=" + paragraph.getId() + ")");
        List<LetterSequence> holdoverSequences = null;
        for (RowOfShapes row : paragraph.getRows()) {
            LOG.debug("Analysing row " + row.getIndex() + " (id=" + row.getId() + ")");
            for (GroupOfShapes group : row.getGroups()) {
                if (group.isSkip()) {
                    LOG.debug("Skipping group " + group.getIndex() + " (id=" + group.getId() + ")");
                    continue;
                }
                LOG.debug("Analysing group " + group.getIndex() + " (id=" + group.getId() + ")");

                int width = group.getRight() - group.getLeft() + 1;

                List<ShapeSequence> shapeSequences = null;
                if (boundaryDetector != null) {
                    shapeSequences = boundaryDetector.findBoundaries(group);
                } else {
                    // simply add this group's shapes
                    shapeSequences = new ArrayList<ShapeSequence>();
                    ShapeSequence shapeSequence = boundaryService.getEmptyShapeSequence();
                    for (Shape shape : group.getShapes())
                        shapeSequence.addShape(shape);
                    shapeSequences.add(shapeSequence);
                }

                // Perform a beam search to guess the most likely sequence for this word
                TreeMap<Integer, PriorityQueue<LetterSequence>> heaps = new TreeMap<Integer, PriorityQueue<LetterSequence>>();

                // prime a starter heap with the n best shape boundary analyses for this group
                PriorityQueue<LetterSequence> starterHeap = new PriorityQueue<LetterSequence>(1);
                for (ShapeSequence shapeSequence : shapeSequences) {
                    LetterSequence emptySequence = this.getLetterGuesserService()
                            .getEmptyLetterSequence(shapeSequence);
                    starterHeap.add(emptySequence);
                }
                heaps.put(0, starterHeap);

                PriorityQueue<LetterSequence> finalHeap = null;
                while (heaps.size() > 0) {
                    Entry<Integer, PriorityQueue<LetterSequence>> heapEntry = heaps.pollFirstEntry();
                    if (LOG.isTraceEnabled())
                        LOG.trace("heap for index: " + heapEntry.getKey().intValue() + ", width: " + width);
                    if (heapEntry.getKey().intValue() == width) {
                        finalHeap = heapEntry.getValue();
                        break;
                    }

                    PriorityQueue<LetterSequence> previousHeap = heapEntry.getValue();

                    // limit the breadth to K
                    int maxSequences = previousHeap.size() > this.beamWidth ? this.beamWidth
                            : previousHeap.size();

                    for (int j = 0; j < maxSequences; j++) {
                        LetterSequence history = previousHeap.poll();
                        ShapeInSequence shapeInSequence = history.getNextShape();
                        Shape shape = shapeInSequence.getShape();
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Sequence " + history + ", shape: " + shape);
                        }
                        LogUtils.logMemory(LOG);
                        int position = 0;
                        if (Linguistics.getInstance(image.getPage().getDocument().getLocale())
                                .isLeftToRight()) {
                            position = shape.getRight() - group.getLeft() + 1;
                        } else {
                            position = group.getRight() - shape.getLeft() + 1;
                        }
                        PriorityQueue<LetterSequence> heap = heaps.get(position);
                        if (heap == null) {
                            heap = new PriorityQueue<LetterSequence>();
                            heaps.put(position, heap);
                        }

                        MONITOR.startTask("guess letter");
                        try {
                            letterGuesser.guessLetter(shapeInSequence, history);
                        } finally {
                            MONITOR.endTask();
                        }

                        MONITOR.startTask("heap sort");
                        try {
                            for (Decision<Letter> letterGuess : shape.getLetterGuesses()) {
                                // leave out very low probability outcomes
                                if (letterGuess.getProbability() > this.minOutcomeWeight) {
                                    LetterSequence sequence = this.getLetterGuesserService()
                                            .getLetterSequencePlusOne(history);
                                    sequence.add(letterGuess.getOutcome());
                                    sequence.addDecision(letterGuess);
                                    heap.add(sequence);
                                } // weight big enough to include
                            } // next letter guess for this shape
                        } finally {
                            MONITOR.endTask();
                        }
                    } // next history in heap
                } // any more heaps?

                LetterSequence bestSequence = null;
                boolean shouldCombineWithHoldover = false;
                boolean isHoldover = false;
                MONITOR.startTask("best sequence");
                try {
                    List<LetterSequence> finalSequences = new ArrayList<LetterSequence>();
                    for (int i = 0; i < this.beamWidth; i++) {
                        if (finalHeap.isEmpty())
                            break;
                        finalSequences.add(finalHeap.poll());
                    }

                    if (this.getMostLikelyWordChooser() == null) {
                        // most likely sequence is on top of the last heap
                        bestSequence = finalSequences.get(0);
                    } else {
                        // get most likely sequence using lexicon
                        if (holdoverSequences != null) {
                            // we have a holdover from the previous row ending with a dash
                            bestSequence = this.getMostLikelyWordChooser().chooseMostLikelyWord(finalSequences,
                                    holdoverSequences, this.beamWidth);
                            shouldCombineWithHoldover = true;
                        } else {
                            // check if this is the last group on the row and could end with a dash
                            boolean shouldBeHeldOver = false;
                            if (group.getIndex() == row.getGroups().size() - 1
                                    && row.getIndex() < paragraph.getRows().size() - 1) {
                                for (LetterSequence letterSequence : finalSequences) {
                                    if (letterSequence.toString().endsWith("-")) {
                                        shouldBeHeldOver = true;
                                        break;
                                    }
                                }
                            }
                            if (shouldBeHeldOver) {
                                holdoverSequences = finalSequences;
                                isHoldover = true;
                            } else {
                                // simplest case: no holdover
                                bestSequence = this.getMostLikelyWordChooser()
                                        .chooseMostLikelyWord(finalSequences, this.beamWidth);
                            }
                        } // have we holdover sequences?
                    } // have we a most likely word chooser?

                    if (!isHoldover) {
                        for (LetterGuessObserver observer : observers) {
                            observer.onBeamSearchEnd(bestSequence, finalSequences, holdoverSequences);
                        }
                    }
                } finally {
                    MONITOR.endTask();
                }

                MONITOR.startTask("assign letter");
                try {
                    if (shouldCombineWithHoldover) {
                        holdoverSequences = null;
                    }
                    if (!isHoldover) {
                        for (LetterGuessObserver observer : observers) {
                            observer.onStartSequence(bestSequence);
                        }

                        group.setBestLetterSequence(bestSequence);

                        int i = 0;
                        for (ShapeInSequence shapeInSequence : bestSequence.getUnderlyingShapeSequence()) {
                            String bestOutcome = bestSequence.get(i).getString();
                            this.assignLetter(shapeInSequence, bestOutcome);
                            i++;
                        } // next shape

                        for (LetterGuessObserver observer : observers) {
                            observer.onGuessSequence(bestSequence);
                        }
                    }

                    this.shapeCount += group.getShapes().size();
                    if (this.currentMonitor != null) {
                        double progress = (double) shapeCount / (double) totalShapeCount;
                        LOG.debug("progress: " + progress);
                        currentMonitor.setPercentComplete(progress);
                    }
                } finally {
                    MONITOR.endTask();
                }
            } // next group
        } // next row
    } // next paragraph

    for (LetterGuessObserver observer : observers) {
        observer.onImageEnd();
    }
}
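
Unlike the token-by-token beam search earlier on this page, this analyser cannot advance in fixed steps, because candidate letters span shapes of different widths. It therefore keeps one heap per horizontal position in a TreeMap and always expands the leftmost unfinished heap via pollFirstEntry, stopping when a heap's key reaches the group's width. A sketch of that position-indexed pattern, with a hypothetical Hypothesis class and made-up step sizes:

import java.util.Map.Entry;
import java.util.PriorityQueue;
import java.util.TreeMap;

public class PositionIndexedBeamSketch {

    static class Hypothesis implements Comparable<Hypothesis> {
        final double score;

        Hypothesis(double score) {
            this.score = score;
        }

        public int compareTo(Hypothesis other) {
            return Double.compare(other.score, this.score); // best first
        }
    }

    static Hypothesis search(int width, int beamWidth) {
        TreeMap<Integer, PriorityQueue<Hypothesis>> heaps = new TreeMap<Integer, PriorityQueue<Hypothesis>>();
        PriorityQueue<Hypothesis> starterHeap = new PriorityQueue<Hypothesis>();
        starterHeap.add(new Hypothesis(1.0));
        heaps.put(0, starterHeap);

        while (!heaps.isEmpty()) {
            Entry<Integer, PriorityQueue<Hypothesis>> entry = heaps.pollFirstEntry();
            int position = entry.getKey();
            if (position == width) {
                return entry.getValue().poll(); // best complete hypothesis
            }
            PriorityQueue<Hypothesis> previousHeap = entry.getValue();
            int maxSequences = Math.min(previousHeap.size(), beamWidth);
            for (int j = 0; j < maxSequences; j++) {
                Hypothesis history = previousHeap.poll();
                // each candidate advances by a variable amount (1 or 2 here)
                for (int step = 1; step <= 2; step++) {
                    int next = Math.min(position + step, width);
                    PriorityQueue<Hypothesis> heap = heaps.get(next);
                    if (heap == null) {
                        heap = new PriorityQueue<Hypothesis>();
                        heaps.put(next, heap);
                    }
                    heap.add(new Hypothesis(history.score * 0.5));
                }
            }
        }
        return null; // no hypothesis reached the target width
    }

    public static void main(String[] args) {
        System.out.println(search(5, 3).score);
    }
}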

From source file:edu.oregonstate.eecs.mcplan.domains.firegirl.FireGirlState.java

private FireResult doFire(final RandomGenerator rng, final boolean suppress) {
    final int reach = params.fire_param_reach;

    final double end_time = drawEndOfFire(rng);
    double current_time = 0;

    // Construct the priority queue and add the first cell to it with time = 0
    final PriorityQueue<PrioritizedLocation> pqueue = new PriorityQueue<PrioritizedLocation>();
    pqueue.add(new PrioritizedLocation(0, ignite_loc));

    // setting a variable that will hold the lowest of all the ignition times in the queue.
    //        final int next_ign = 1000;

    final boolean[][] burned = new boolean[params.width][params.height];
    final boolean[][] crown_burned = new boolean[params.width][params.height];

    // start the queue loop
    int iter_count = 0;
    while (true) {

        //check to make sure that there is at least one queued arrival
        if (pqueue.isEmpty()) {
            //no queued arrivals, so there's no fire, so we're done
            //print("Priority Queue Exiting: No more queued ignitions")
            break;
        }

        //look through all the queued ignitions and find the earliest ignition
        //  time.
        final PrioritizedLocation next_ign = pqueue.poll();
        //now check to see if the soonest arrival happens before the time is up.
        if (next_ign.priority >= end_time) {
            //no fire arrivals (ignitions) are in queue within the allotted time
            //   so the firespread has stopped.
            //print("Priority Queue Exiting: Remaining queued ignitions are past the time limit")
            break;
        }

        //moving current time up to this ignition
        current_time = next_ign.priority;
        final int xloc = next_ign.location[0];
        final int yloc = next_ign.location[1];

        if (burned[xloc][yloc]) {
            continue;
        }

        //we haven't left the loop, so the next arrival is valid, so look at
        //  it and add its neighbors to the queue

        //failsafe exit
        iter_count += 1;
        if (iter_count > params.fire_iter_cap) {
            Log.warn("! Stopping fire early. time: {}", current_time);
            break;
        }

        //setting this cell to burned
        burned[xloc][yloc] = true;

        //Calculating this cell's fire spreadrate, which needs its fuel load, too
        final int fuel_ld = fuel_load[xloc][yloc];
        double spreadrate = calcFireSpreadRate(ignite_wind, ignite_temp, fuel_ld);

        //add the effects of suppression
        if (suppress) {
            spreadrate *= params.fire_suppression_rate;
        }

        // Check if the crown will burn (if the spreadrate is > 0)
        // Timber loss is a probabilistic function based on the
        //   calcCrownFireRisk() function.  This function will return
        //   a probability of crownfire, and we'll roll a uniform
        //   number against it.

        // NOTE: Deviation from Python
        if (rng.nextDouble() < calcCrownFireRisk(fuel_ld)) {
            crown_burned[xloc][yloc] = true;
        }

        //if the fire spreadrate of this fire is 0, then don't bother checking
        //   for neighbors and calculating arrival times... there won't be any
        //   spread, and for that matter, we'll get a divide-by-zero error.
        if (spreadrate == 0) {
            //no spreadrate, so we can't calculate arrival times, etc...
            //pqueue.remove([current_time,[xloc,yloc]])

            // Note: Already removed the element
            continue;
        }

        //recording information in the Logbook item
        //function signature is:  FireGirlfireLog.addIgnitionEvent(time, location, spread_rate, crown_burned):
        //            fire_log_item.addIgnitionEvent(current_time, [xloc,yloc], spreadrate, crown_burned[xloc][yloc])

        //setting iteration final ranges
        final int x_low = Math.max(xloc - reach, 0);
        final int x_high = Math.min(xloc + reach + 1, params.width - 1);
        final int y_low = Math.max(yloc - reach, 0);
        final int y_high = Math.min(yloc + reach + 1, params.height - 1);

        //            #checking bounds
        //            if (x_low < 0): x_low = 0
        //            if (y_low < 0): y_low = 0
        //            if (x_high >= self.width): x_high = self.width - 1
        //            if (y_high >= self.height): y_high = self.height - 1

        // FIXME: I think this indexing is incorrect (one short) due to
        // how x/y_high are capped above
        // Resolved: Changed '<' to '<='
        for (int i = x_low; i <= x_high; ++i) { //i in range(x_low, x_high):
            for (int j = y_low; j <= y_high; ++j) { //for j in range(y_low, y_high):

                //                    #don't calculate time to the current cell
                if (!((xloc == i) && (yloc == j))) {

                    //                        #we're checking each neighbor within the reach range, so
                    //                        #  first, we need to check whether it's already been
                    //                        #  burned over

                    if (!burned[i][j]) {

                        //                            #this neighbor hasn't burned over yet, so:
                        //                            # 1) calculate a new time-till arrival
                        //                            # 2) check to see if this neighbor is already in the queue
                        //                            # 2a) if it is, then check to see if this arrival time is sooner
                        //                            #       and if so, update it. Otherwise, just move on.
                        //                            # 2b) if it isn't in the queue, then add it as a new queue item

                        //                            # 1) final arrival time for this neighbor
                        final double dist = Math.sqrt((xloc - i) * (xloc - i) + (yloc - j) * (yloc - j));
                        final double arrival_time = (dist / spreadrate) + current_time;

                        // Just add it again; we filter duplicates by checking if they're already burned.
                        pqueue.add(new PrioritizedLocation(arrival_time, new int[] { i, j }));

                        ////                            # 2) checking to see if this neighbor is already queued
                        //                            boolean found_in_q = false;
                        //                            final Iterator<PrioritizedLocation> itr = pqueue.iterator();
                        //                            while( itr.hasNext() ) {
                        //                               final PrioritizedLocation ign = itr.next();
                        //                                if( ign.location[0] == i && ign.location[1] == j ) {
                        ////                                    #this neighbor IS in the queue already, so check its arrival time
                        ////                                    #print("   neighbor found in queue... updating...")
                        //                                    found_in_q = true;
                        //
                        ////                                    #updating it's arrival time if need be
                        //                                    if( arrival_time < ign.priority ) {
                        //                                       itr.remove();
                        ////                                        #the new arrival time is sooner, so update this queue item
                        //                                       pqueue.add( new PrioritizedLocation( arrival_time, ign.location ) );
                        //                                    }
                        //                                    break;
                        //                                }
                        //                            }
                        //
                        //
                        ////                            #check to see if we ever found this neighbor
                        //                            if( !found_in_q ) {
                        ////                                #we never found it, so it wasn't in the queue, and it's not burned, so add it
                        //                                pqueue.add( new PrioritizedLocation( arrival_time, new int[] { i, j } ) );
                        //                            }
                    }
                }
            }
        }

        //            # we've now finished checking the neighbors, so it's time to remove this item from the queue
        //            pqueue.remove([current_time,[xloc,yloc]])
    } // priority queue empty

    //        # and now we've exited the priority queue as well

    //        #look through the burned cells and update the actual grid values
    //        #Also, record losses
    double timber_loss = 0;
    int cells_burned = 0;
    int cells_crowned = 0;

    for (int i = 0; i < params.width; ++i) {
        for (int j = 0; j < params.height; ++j) {
            if (burned[i][j]) {
                cells_burned += 1;

                //                    #this cell was burned, so set the fuel_load to zero, and apply
                //                    #  the crown-burning model to the timber_value
                fuel_load[i][j] = 0;

                //                    #adding up timber loss
                if (crown_burned[i][j]) { //this was set when spreadrate was calculated earlier
                    //                        #the crown burned, so record the loss and set it to zero

                    //                        #SPEED
                    //                        ####BOTH Lines are modified for self.getPresentTimberValue(i,j)###########
                    timber_loss += getPresentTimberValue(i, j); //self.timber_value[i][j]
                    //                        #self.timber_value[i][j] = 0
                    //                        ####################

                    cells_crowned += 1;

                    //                        #and reset the age so that self.year + self.stand_age = 0
                    //                        stand_age[i][j] = -1 * year;
                    // NOTE: Deviation from Python code
                    stand_age[i][j] = 0;
                }
            }
        }
    }

    //        #Adding the final results to the fire_log_item
    //        fire_log_item.updateResults(timber_loss, cells_burned, cells_crowned)

    //        #Adding the lists (final maps) as well
    //        fire_log_item.map_burned = burned
    //        fire_log_item.map_crowned = crown_burned

    //        #add the FireLog item to the pathway's list (it's just an ordinary list)
    //        self.FireLog.append(fire_log_item)

    //        #add up suppression cost and record it
    int sup_cost = 0;
    if (suppress) {
        sup_cost += cells_burned * params.fire_suppression_cost_per_cell;
        sup_cost += end_time * params.fire_suppression_cost_per_day;
    }

    //        self.yearly_suppression_costs.append(sup_cost)

    //        #and finally, return the loss data
    return new FireResult(timber_loss, cells_burned, cells_crowned, sup_cost, end_time);
    //        return [timber_loss, cells_burned, sup_cost, end_time, cells_crowned]
}
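
The queue loop above is a discrete-event simulation: PrioritizedLocation orders arrivals by ignition time, so poll always yields the earliest one. Because java.util.PriorityQueue has no decrease-key operation and remove(Object) is linear, the code simply re-adds a cell whenever a new arrival time is computed and discards stale entries on poll if the cell has already burned (the commented-out block shows the older update-in-place approach this replaced). The lazy-deletion core, reduced to a one-dimensional sketch with a hypothetical Event class:

import java.util.PriorityQueue;

public class LazyDeletionSketch {

    static class Event implements Comparable<Event> {
        final double time;
        final int cell;

        Event(double time, int cell) {
            this.time = time;
            this.cell = cell;
        }

        public int compareTo(Event other) {
            return Double.compare(this.time, other.time); // earliest first
        }
    }

    static int simulate(double endTime, int cellCount) {
        PriorityQueue<Event> queue = new PriorityQueue<Event>();
        boolean[] burned = new boolean[cellCount];
        int burnedCount = 0;
        queue.add(new Event(0.0, 0));

        while (!queue.isEmpty()) {
            Event next = queue.poll();
            if (next.time >= endTime) {
                break; // every remaining arrival is past the time limit
            }
            if (burned[next.cell]) {
                continue; // stale duplicate entry: skip instead of decrease-key
            }
            burned[next.cell] = true;
            burnedCount++;
            // enqueue the neighbour; re-adding an already-queued cell is fine
            if (next.cell + 1 < cellCount) {
                queue.add(new Event(next.time + 1.5, next.cell + 1));
            }
        }
        return burnedCount;
    }

    public static void main(String[] args) {
        System.out.println(simulate(10.0, 100)); // 7 cells burn before t = 10
    }
}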

From source file:com.koda.integ.hbase.blockcache.OnHeapBlockCache.java

/**
 * Eviction method.
 */
void evict() {

    // Ensure only one eviction at a time
    if (!evictionLock.tryLock())
        return;

    try {
        evictionInProgress = true;
        long currentSize = this.size.get();
        long bytesToFree = currentSize - minSize();

        if (LOG.isDebugEnabled()) {
            LOG.debug("Block cache LRU eviction started; Attempting to free "
                    + StringUtils.byteDesc(bytesToFree) + " of total=" + StringUtils.byteDesc(currentSize));
        }

        if (bytesToFree <= 0)
            return;

        // Instantiate priority buckets
        BlockBucket bucketSingle = new BlockBucket(bytesToFree, blockSize, singleSize());
        BlockBucket bucketMulti = new BlockBucket(bytesToFree, blockSize, multiSize());
        BlockBucket bucketMemory = new BlockBucket(bytesToFree, blockSize, memorySize());

        // Scan entire map putting into appropriate buckets
        for (CachedBlock cachedBlock : map.values()) {
            switch (cachedBlock.getPriority()) {
            case SINGLE: {
                bucketSingle.add(cachedBlock);
                break;
            }
            case MULTI: {
                bucketMulti.add(cachedBlock);
                break;
            }
            case MEMORY: {
                bucketMemory.add(cachedBlock);
                break;
            }
            }
        }

        PriorityQueue<BlockBucket> bucketQueue = new PriorityQueue<BlockBucket>(3);

        bucketQueue.add(bucketSingle);
        bucketQueue.add(bucketMulti);
        bucketQueue.add(bucketMemory);

        int remainingBuckets = 3;
        long bytesFreed = 0;

        BlockBucket bucket;
        while ((bucket = bucketQueue.poll()) != null) {
            long overflow = bucket.overflow();
            if (overflow > 0) {
                long bucketBytesToFree = Math.min(overflow, (bytesToFree - bytesFreed) / remainingBuckets);
                bytesFreed += bucket.free(bucketBytesToFree);
            }
            remainingBuckets--;
        }

        if (LOG.isDebugEnabled()) {
            long single = bucketSingle.totalSize();
            long multi = bucketMulti.totalSize();
            long memory = bucketMemory.totalSize();
            LOG.debug("Block cache LRU eviction completed; " + "freed=" + StringUtils.byteDesc(bytesFreed)
                    + ", " + "total=" + StringUtils.byteDesc(this.size.get()) + ", " + "single="
                    + StringUtils.byteDesc(single) + ", " + "multi=" + StringUtils.byteDesc(multi) + ", "
                    + "memory=" + StringUtils.byteDesc(memory));
        }
    } finally {
        stats.evict();
        evictionInProgress = false;
        evictionLock.unlock();
    }
}

From source file:org.apache.hama.ml.recommendation.cf.OnlineCF.java

@Override
public List<Pair<Long, Double>> getMostSimilarUsers(long user, int count) {

    Comparator<Pair<Long, Double>> similarityComparator = new Comparator<Pair<Long, Double>>() {

        @Override
        public int compare(Pair<Long, Double> arg0, Pair<Long, Double> arg1) {
            double difference = arg0.getValue().doubleValue() - arg1.getValue().doubleValue();
            return (int) (100000 * difference);
        }
    };
    PriorityQueue<Pair<Long, Double>> queue = new PriorityQueue<Pair<Long, Double>>(count,
            similarityComparator);
    LinkedList<Pair<Long, Double>> results = new LinkedList<Pair<Long, Double>>();
    for (Long candidateUser : modelUserFactorizedValues.keySet()) {
        double similarity = calculateUserSimilarity(user, candidateUser);
        Pair<Long, Double> targetUser = new Pair<Long, Double>(candidateUser, similarity);
        queue.add(targetUser);
    }
    results.addAll(queue);
    return results;
}

From source file:org.apache.hama.ml.recommendation.cf.OnlineCF.java

@Override
public List<Pair<Long, Double>> getMostSimilarItems(long item, int count) {

    Comparator<Pair<Long, Double>> similarityComparator = new Comparator<Pair<Long, Double>>() {

        @Override
        public int compare(Pair<Long, Double> arg0, Pair<Long, Double> arg1) {
            double difference = arg0.getValue().doubleValue() - arg1.getValue().doubleValue();
            return (int) (100000 * difference);
        }
    };
    PriorityQueue<Pair<Long, Double>> queue = new PriorityQueue<Pair<Long, Double>>(count,
            similarityComparator);
    LinkedList<Pair<Long, Double>> results = new LinkedList<Pair<Long, Double>>();
    for (Long candidateItem : modelItemFactorizedValues.keySet()) {
        double similarity = calculateItemSimilarity(item, candidateItem);
        Pair<Long, Double> targetItem = new Pair<Long, Double>(candidateItem, similarity);
        queue.add(targetItem);
    }
    results.addAll(queue);
    return results;
}
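
A caveat worth noting in both getMostSimilarUsers and getMostSimilarItems: results.addAll(queue) copies the queue through its iterator, and PriorityQueue's iterator makes no guarantee about visiting elements in priority order, so the returned list is not actually sorted by similarity; also, count here is only the queue's initial capacity, not a size limit. Draining with poll is what yields comparator order. A hedged sketch of a bounded top-k collector that does both:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.PriorityQueue;

public class TopKSketch {
    // Returns the k largest values, best first (assumes k >= 1). The
    // min-heap keeps the k largest seen so far; its head is the weakest
    // current survivor.
    static List<Double> topK(Iterable<Double> values, int k) {
        PriorityQueue<Double> queue = new PriorityQueue<Double>(k);
        for (Double value : values) {
            queue.add(value);
            if (queue.size() > k) {
                queue.poll(); // evict the smallest, keeping the k largest
            }
        }
        // poll drains in ascending order; reverse to get best-first.
        List<Double> results = new ArrayList<Double>();
        while (!queue.isEmpty()) {
            results.add(queue.poll());
        }
        Collections.reverse(results);
        return results;
    }
}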

From source file:edu.stanford.cfuller.colocalization3d.correction.PositionCorrector.java

/**
* Creates a correction from a set of objects whose positions should be the same in each channel.
* 
 * @param imageObjects                  A List containing all the ImageObjects to be used for the correction,
 *                                      in the order in which they appear in a multiwavelength image file.
* @return                              A Correction object that can be used to correct the positions of other objects based upon the standards provided.
 */
public Correction getCorrection(java.util.List<ImageObject> imageObjects) {

    int referenceChannel = this.parameters.getIntValueForKey(REF_CH_PARAM);

    int channelToCorrect = this.parameters.getIntValueForKey(CORR_CH_PARAM);

    if (!this.parameters.hasKeyAndTrue(DET_CORR_PARAM)) {
        try {
            return Correction.readFromDisk(FileUtils.getCorrectionFilename(this.parameters));
        } catch (java.io.IOException e) {

            java.util.logging.Logger
                    .getLogger(edu.stanford.cfuller.colocalization3d.Colocalization3DMain.LOGGER_NAME)
                    .severe("Exception encountered while reading correction from disk: ");
            e.printStackTrace();

        } catch (ClassNotFoundException e) {

            java.util.logging.Logger
                    .getLogger(edu.stanford.cfuller.colocalization3d.Colocalization3DMain.LOGGER_NAME)
                    .severe("Exception encountered while reading correction from disk: ");
            e.printStackTrace();

        }

        return null;
    }

    int numberOfPointsToFit = this.parameters.getIntValueForKey(NUM_POINT_PARAM);

    RealMatrix correctionX = new Array2DRowRealMatrix(imageObjects.size(), numberOfCorrectionParameters);
    RealMatrix correctionY = new Array2DRowRealMatrix(imageObjects.size(), numberOfCorrectionParameters);
    RealMatrix correctionZ = new Array2DRowRealMatrix(imageObjects.size(), numberOfCorrectionParameters);

    RealVector distanceCutoffs = new ArrayRealVector(imageObjects.size(), 0.0);

    RealVector ones = new ArrayRealVector(numberOfPointsToFit, 1.0);

    RealVector distancesToObjects = new ArrayRealVector(imageObjects.size(), 0.0);

    RealMatrix allCorrectionParametersMatrix = new Array2DRowRealMatrix(numberOfPointsToFit,
            numberOfCorrectionParameters);

    for (int i = 0; i < imageObjects.size(); i++) {

        RealVector ithPos = imageObjects.get(i).getPositionForChannel(referenceChannel);

        for (int j = 0; j < imageObjects.size(); j++) {

            double d = imageObjects.get(j).getPositionForChannel(referenceChannel).subtract(ithPos).getNorm();

            distancesToObjects.setEntry(j, d);

        }

        //the sorting becomes a bottleneck once the number of points gets large

        //reverse comparator so we can use the priority queue and get the max element at the head

        Comparator<Double> cdReverse = new Comparator<Double>() {

            public int compare(Double o1, Double o2) {

                if (o1.equals(o2))
                    return 0;
                if (o1 > o2)
                    return -1;
                return 1;
            }

        };

        PriorityQueue<Double> pq = new PriorityQueue<Double>(numberOfPointsToFit + 2, cdReverse);

        double maxElement = Double.MAX_VALUE;

        for (int p = 0; p < numberOfPointsToFit + 1; p++) {

            pq.add(distancesToObjects.getEntry(p));

        }

        maxElement = pq.peek();

        for (int p = numberOfPointsToFit + 1; p < distancesToObjects.getDimension(); p++) {

            double value = distancesToObjects.getEntry(p);

            if (value < maxElement) {

                pq.poll();

                pq.add(value);

                maxElement = pq.peek();

            }

        }

        double firstExclude = pq.poll();
        double lastDist = pq.poll();

        double distanceCutoff = (lastDist + firstExclude) / 2.0;

        distanceCutoffs.setEntry(i, distanceCutoff);

        RealVector xPositionsToFit = new ArrayRealVector(numberOfPointsToFit, 0.0);
        RealVector yPositionsToFit = new ArrayRealVector(numberOfPointsToFit, 0.0);
        RealVector zPositionsToFit = new ArrayRealVector(numberOfPointsToFit, 0.0);

        RealMatrix differencesToFit = new Array2DRowRealMatrix(numberOfPointsToFit,
                imageObjects.get(0).getPositionForChannel(referenceChannel).getDimension());

        int toFitCounter = 0;

        for (int j = 0; j < imageObjects.size(); j++) {
            if (distancesToObjects.getEntry(j) < distanceCutoff) {
                xPositionsToFit.setEntry(toFitCounter,
                        imageObjects.get(j).getPositionForChannel(referenceChannel).getEntry(0));
                yPositionsToFit.setEntry(toFitCounter,
                        imageObjects.get(j).getPositionForChannel(referenceChannel).getEntry(1));
                zPositionsToFit.setEntry(toFitCounter,
                        imageObjects.get(j).getPositionForChannel(referenceChannel).getEntry(2));

                differencesToFit.setRowVector(toFitCounter, imageObjects.get(j)
                        .getVectorDifferenceBetweenChannels(referenceChannel, channelToCorrect));

                toFitCounter++;
            }
        }

        RealVector x = xPositionsToFit.mapSubtractToSelf(ithPos.getEntry(0));
        RealVector y = yPositionsToFit.mapSubtractToSelf(ithPos.getEntry(1));

        allCorrectionParametersMatrix.setColumnVector(0, ones);
        allCorrectionParametersMatrix.setColumnVector(1, x);
        allCorrectionParametersMatrix.setColumnVector(2, y);
        allCorrectionParametersMatrix.setColumnVector(3, x.map(new Power(2)));
        allCorrectionParametersMatrix.setColumnVector(4, y.map(new Power(2)));
        allCorrectionParametersMatrix.setColumnVector(5, x.ebeMultiply(y));

        DecompositionSolver solver = (new QRDecomposition(allCorrectionParametersMatrix)).getSolver();

        RealVector cX = solver.solve(differencesToFit.getColumnVector(0));
        RealVector cY = solver.solve(differencesToFit.getColumnVector(1));
        RealVector cZ = solver.solve(differencesToFit.getColumnVector(2));

        correctionX.setRowVector(i, cX);
        correctionY.setRowVector(i, cY);
        correctionZ.setRowVector(i, cZ);

    }

    Correction c = new Correction(correctionX, correctionY, correctionZ, distanceCutoffs, imageObjects,
            referenceChannel, channelToCorrect);

    return c;

}
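
The reversed comparator turns the heap into a max-heap of the numberOfPointsToFit + 1 smallest distances: the head is always the largest kept value, so a new distance is admitted only when it beats the head (poll, then add). The two final polls pull out the largest and second-largest survivors, i.e. the first excluded and last included distances, whose average gives the cutoff. That bounded selection pattern on its own, as a hedged sketch:

import java.util.Collections;
import java.util.PriorityQueue;

public class KSmallestSketch {
    // Returns the k-th smallest entry of data (assumes data.length >= k >= 1).
    // The reversed comparator makes the head the LARGEST kept value,
    // which is exactly the candidate for eviction.
    static double kthSmallest(double[] data, int k) {
        PriorityQueue<Double> pq = new PriorityQueue<Double>(k, Collections.<Double>reverseOrder());
        for (int p = 0; p < k; p++) {
            pq.add(data[p]);
        }
        for (int p = k; p < data.length; p++) {
            if (data[p] < pq.peek()) {
                pq.poll();       // drop the largest of the kept values
                pq.add(data[p]); // admit the smaller one
            }
        }
        return pq.peek(); // the k-th smallest overall
    }

    public static void main(String[] args) {
        double[] data = { 5.0, 1.0, 4.0, 2.0, 3.0 };
        System.out.println(kthSmallest(data, 2)); // prints 2.0
    }
}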

From source file:com.facebook.internal.FileLruCache.java

private void trim() {
    synchronized (lock) {
        isTrimPending = false;
        isTrimInProgress = true;
    }
    try {
        Logger.log(LoggingBehavior.CACHE, TAG, "trim started");
        PriorityQueue<ModifiedFile> heap = new PriorityQueue<ModifiedFile>();
        long size = 0;
        long count = 0;
        File[] filesToTrim = this.directory.listFiles(BufferFile.excludeBufferFiles());
        if (filesToTrim != null) {
            for (File file : filesToTrim) {
                ModifiedFile modified = new ModifiedFile(file);
                heap.add(modified);
                Logger.log(LoggingBehavior.CACHE, TAG, "  trim considering time="
                        + Long.valueOf(modified.getModified()) + " name=" + modified.getFile().getName());

                size += file.length();
                count++;
            }
        }

        while ((size > limits.getByteCount()) || (count > limits.getFileCount())) {
            File file = heap.remove().getFile();
            Logger.log(LoggingBehavior.CACHE, TAG, "  trim removing " + file.getName());
            size -= file.length();
            count--;
            file.delete();
        }
    } finally {
        synchronized (lock) {
            isTrimInProgress = false;
            lock.notifyAll();
        }
    }
}