Example usage for java.util PriorityQueue add

List of usage examples for java.util PriorityQueue add

Introduction

This page lists example usages of java.util.PriorityQueue.add.

Prototype

public boolean add(E e) 

Document

Inserts the specified element into this priority queue.
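
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) showing add building a heap and poll removing elements in priority order:

import java.util.PriorityQueue;

public class PriorityQueueAddExample {
    public static void main(String[] args) {
        // With no Comparator, the queue orders elements by natural ordering (smallest first)
        PriorityQueue<Integer> queue = new PriorityQueue<>();
        queue.add(42);
        queue.add(7);
        queue.add(19);

        // poll() removes the head each time: prints 7, 19, 42
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }
    }
}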

Usage

From source file:com.joliciel.csvLearner.features.RealValueFeatureEvaluator.java

/**
 * For a given feature, calculate the entropy after each level of splitting.
 * Level 0: the entropy taking into account only those events which have a value, as opposed to those which don't.
 * Level 1: entropy for events without a value (where value=0) + entropy of other events after the first split.
 * Level 2: entropy for events without a value (where value=0) + entropy of other events after the second split.
 * etc.
 * @param events the list of events
 * @param feature the feature to consider for splitting
 * @param testOutcome the outcome to test against, or null to consider all outcomes
 * @return the entropy at each level of splitting
 */
public List<Double> evaluateFeature(GenericEvents events, String feature, String testOutcome) {
    long startTime = (new Date()).getTime();

    if (LOG.isTraceEnabled()) {
        LOG.trace("Evaluating feature: " + feature);
        LOG.trace("Test outcome: " + testOutcome);
    }
    long startTimeInitialise = (new Date()).getTime();

    PriorityQueue<NameValuePair> heap = new PriorityQueue<NameValuePair>(events.size());
    Collection<NameValuePair> featureValues = new ArrayList<NameValuePair>();
    Map<String, Integer> eventOutcomeMap = new TreeMap<String, Integer>();
    Map<String, Integer> featureOutcomeMap = new TreeMap<String, Integer>();
    Map<String, Integer> nonFeatureOutcomeMap = new TreeMap<String, Integer>();

    List<String> outcomes = null;
    if (testOutcome == null) {
        Set<String> outcomeSet = events.getOutcomes();
        outcomes = new ArrayList<String>(outcomeSet);
    } else {
        outcomes = new ArrayList<String>();
        outcomes.add(testOutcome);
        outcomes.add("");
    }
    int[] eventOutcomeCounts = new int[outcomes.size()];
    int[] featureOutcomeCounts = new int[outcomes.size()];
    int[] nonFeatureOutcomeCounts = new int[outcomes.size()];

    int eventCount = events.size();
    int featureCount = 0;
    for (GenericEvent event : events) {
        if (!event.isTest()) {
            String outcome = event.getOutcome();
            int outcomeIndex = 0;
            if (testOutcome == null) {
                outcomeIndex = outcomes.indexOf(outcome);
            } else {
                if (!outcome.equals(testOutcome)) {
                    outcome = "";
                    outcomeIndex = 1;
                } else {
                    outcomeIndex = 0;
                }
            }

            long startTimeFindFeature = (new Date()).getTime();
            int featureIndex = event.getFeatureIndex(feature);
            long endTimeFindFeature = (new Date()).getTime();
            totalTimeFindFeature += (endTimeFindFeature - startTimeFindFeature);
            if (featureIndex >= 0) {
                long startTimeOrdering = (new Date()).getTime();
                heap.add(new NameValuePair(outcome, event.getWeights().get(featureIndex)));
                long endTimeOrdering = (new Date()).getTime();
                totalTimeOrdering += (endTimeOrdering - startTimeOrdering);
                featureOutcomeCounts[outcomeIndex]++;
                featureCount++;
            } else {
                nonFeatureOutcomeCounts[outcomeIndex]++;
            }
            eventOutcomeCounts[outcomeIndex]++;
        }
    }
    int nonFeatureCount = eventCount - featureCount;

    long startTimeOrdering = (new Date()).getTime();
    while (!heap.isEmpty())
        featureValues.add(heap.poll());
    long endTimeOrdering = (new Date()).getTime();
    totalTimeOrdering += (endTimeOrdering - startTimeOrdering);

    int i = 0;
    for (String outcome : outcomes) {
        eventOutcomeMap.put(outcome, eventOutcomeCounts[i]);
        featureOutcomeMap.put(outcome, featureOutcomeCounts[i]);
        nonFeatureOutcomeMap.put(outcome, nonFeatureOutcomeCounts[i]);
        i++;
    }

    long endTimeInitialise = (new Date()).getTime();
    totalTimeInitialise += (endTimeInitialise - startTimeInitialise);

    long startTimeInitialEntropy = (new Date()).getTime();
    double eventSpaceEntropy = EntropyCalculator.getEntropy(eventOutcomeMap.values(), eventCount);
    double featureEntropy = EntropyCalculator.getEntropy(featureOutcomeMap.values(), featureCount);
    double nonFeatureEntropy = EntropyCalculator.getEntropy(nonFeatureOutcomeMap.values(), nonFeatureCount);
    long endTimeInitialEntropy = (new Date()).getTime();
    totalTimeInitialEntropy += (endTimeInitialEntropy - startTimeInitialEntropy);

    List<Double> entropyByLevel = new ArrayList<Double>();
    entropyByLevel.add(eventSpaceEntropy);

    double proportionalFeatureEntropy = ((double) featureCount / (double) eventCount) * featureEntropy;
    double proportionalNonFeatureEntropy = ((double) nonFeatureCount / (double) eventCount) * nonFeatureEntropy;
    double level0Entropy = proportionalFeatureEntropy + proportionalNonFeatureEntropy;
    entropyByLevel.add(level0Entropy);

    if (LOG.isTraceEnabled()) {
        LOG.trace("eventSpaceEntropy: " + eventSpaceEntropy);
        LOG.trace("proportionalFeatureEntropy: " + proportionalFeatureEntropy);
        LOG.trace("proportionalNonFeatureEntropy: " + proportionalNonFeatureEntropy);
        LOG.trace("level 0 Entropy: " + level0Entropy);
    }

    List<NameValuePair> featureValueList = new ArrayList<NameValuePair>(featureValues);
    long startTimeSplit = (new Date()).getTime();
    featureSplitter.split(featureValueList);
    long endTimeSplit = (new Date()).getTime();
    totalTimeSplit += (endTimeSplit - startTimeSplit);

    Map<Integer, Set<Split>> splitsByDepth = featureSplitter.getSplitsByDepth();

    for (int level : splitsByDepth.keySet()) {
        double levelEntropy = proportionalNonFeatureEntropy;
        if (splitsByDepth.get(level).size() == 0)
            levelEntropy += proportionalFeatureEntropy;
        else {
            for (Split split : splitsByDepth.get(level)) {
                long startTimeSplitEntropy = (new Date()).getTime();
                double proportionalEntropy = ((double) split.getSize() / (double) eventCount)
                        * split.getEntropy();
                long endTimeSplitEntropy = (new Date()).getTime();
                totalTimeSplitEntropy += (endTimeSplitEntropy - startTimeSplitEntropy);
                levelEntropy += proportionalEntropy;
            }
        }
        entropyByLevel.add(levelEntropy);
        if (LOG.isTraceEnabled())
            LOG.trace("level " + level + " Entropy: " + levelEntropy);
    }
    long endTime = (new Date()).getTime();
    totalTime += (endTime - startTime);

    return entropyByLevel;
}
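
The method above uses the queue as a heap sort: every value is added, then the queue is drained with poll() so that featureValues ends up ordered by the NameValuePair comparator. A standalone sketch of that drain-to-sorted-order idiom, with an illustrative element type (note the Math.max guard, since PriorityQueue rejects an initial capacity of zero):

import java.util.ArrayList;
import java.util.List;
import java.util.PriorityQueue;

class HeapSortSketch {
    static List<Double> sortAscending(List<Double> values) {
        // add() builds the heap; repeated poll() empties it in ascending order
        PriorityQueue<Double> heap = new PriorityQueue<>(Math.max(values.size(), 1));
        for (Double v : values) {
            heap.add(v);
        }
        List<Double> sorted = new ArrayList<>(values.size());
        while (!heap.isEmpty()) {
            sorted.add(heap.poll());
        }
        return sorted;
    }
}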

From source file:org.apache.hadoop.hbase.extended.loadbalance.strategies.hotspot.HotSpotLoadBalancer.java

@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    initParameters();
    /**
     * <pre>
     * We need at least two priority queues:
     * a) one containing hotspot servers, with their load as the criterion for moving (max priority queue)
     * b) one containing non-hotspot servers with their loads (min priority queue)
     *
     * Further, we need to iterate over these queues and decrease the load, so we
     * need a data structure to build these queues,
     * and lastly we need to return the region plan.
     * </pre>
     */

    LOG.debug("#################Came in the new Balancer Code and the cluster status is = " + this.status);
    long startTime = System.currentTimeMillis();
    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.info("numServers=0 so skipping load balancing");
        return null;

    }

    NavigableMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>> regionServerAndServerLoadMap = new TreeMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>>();
    PriorityQueue<HotSpotServerAndLoad> hotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.DESC_LOAD);
    PriorityQueue<HotSpotServerAndLoad> nonHotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.ASC_LOAD);
    HashBiMap<HRegionInfo, HotSpotRegionLoad> allRegionsLoadBiMap = HashBiMap.create();
    LOG.debug("#################clusterState=" + clusterState);
    double normalisedTotalLoadOfAllRegions = initRegionLoadMapsBasedOnInput(clusterState,
            regionServerAndServerLoadMap, allRegionsLoadBiMap);
    LOG.debug("#################normalisedTotalLoadOfAllRegions=" + normalisedTotalLoadOfAllRegions);
    // Check if we even need to do any load balancing
    double average = normalisedTotalLoadOfAllRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    LOG.debug("######################## final regionServerAndServerLoadMap == " + regionServerAndServerLoadMap);
    if (!loadBalancingNeeded(numServers, regionServerAndServerLoadMap, normalisedTotalLoadOfAllRegions,
            average)) {
        // we do not need load balancing
        return null;
    }
    double minLoad = normalisedTotalLoadOfAllRegions / numServers;
    double maxLoad = normalisedTotalLoadOfAllRegions % numServers == 0 ? minLoad : minLoad + 1;
    // as we now have to balance stuff, init PQ's
    LOG.debug(String.format("#################minLoad =%s,maxLoad= %s", minLoad, maxLoad));
    for (Map.Entry<HotSpotServerAndLoad, List<HotSpotRegionLoad>> item : regionServerAndServerLoadMap
            .entrySet()) {
        HotSpotServerAndLoad serverLoad = item.getKey();
        if (serverLoad.isHotSpot()) {

            hotspotRegionServers.add(serverLoad);
        } else {
            if (serverLoad.getLoad() < maxLoad) {
                nonHotspotRegionServers.add(serverLoad);
            }
        }
    }
    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(normalisedTotalLoadOfAllRegions)
            .append(", numServers=").append(numServers).append(", max=").append(maxLoad).append(", min=")
            .append(minLoad);
    LOG.debug(strBalanceParam.toString());
    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    while (hotspotRegionServers.size() > 0 && nonHotspotRegionServers.size() > 0) {
        HotSpotServerAndLoad serverToBalance = hotspotRegionServers.poll();
        LOG.debug(String.format("#################serverToBalance =%s",
                serverToBalance.getServerName().getServerName()));
        // get the least loaded non-hotspot regions of this server
        List<HotSpotRegionLoad> regionList = regionServerAndServerLoadMap.get(serverToBalance);
        // assumed to be sorted in ascending order
        if (regionList.size() > 0) {
            HotSpotRegionLoad regionToMove = regionList.remove(0);
            HRegionInfo regionMoveInfo = allRegionsLoadBiMap.inverse().get(regionToMove);

            /*
             * regionMoveInfo can be null when the load map returns the root
             * and meta regions along with the movable regions. Because the
             * clusterState passed to us does not contain these regions, we can
             * have a situation where regionServerAndServerLoadMap contains
             * some regions that are not present in allRegionsLoadBiMap.
             */
            if (regionMoveInfo != null && !regionMoveInfo.isMetaRegion() && !regionMoveInfo.isRootRegion()
                    && !regionMoveInfo.isMetaTable() && regionToMove.isRegionHotspot()) {
                LOG.debug(String.format(
                        "#################Came to move the region regionMoveInfo=%s;; regionToMove=%s ",
                        regionMoveInfo, regionToMove));
                // move out.
                HotSpotServerAndLoad destinationServer = nonHotspotRegionServers.poll();

                RegionPlan rpl = new RegionPlan(allRegionsLoadBiMap.inverse().get(regionToMove),
                        serverToBalance.getServerName(), destinationServer.getServerName());
                regionsToReturn.add(rpl);
                serverToBalance.modifyLoad(regionToMove.getLoad());
                destinationServer.modifyLoad(-1 * regionToMove.getLoad());
                // re-enter them into the queues if they satisfy the conditions
                if (serverToBalance.getLoad() > minLoad) {
                    hotspotRegionServers.offer(serverToBalance);
                }
                if (destinationServer.getLoad() < maxLoad) {
                    nonHotspotRegionServers.offer(destinationServer);
                }
            }
        }
    }
    LOG.info("Total Time taken to balance = " + (System.currentTimeMillis() - startTime));
    LOG.info(String.format("#################regionsToReturn=%s ", regionsToReturn));
    return regionsToReturn;

}
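
The balancer keeps two queues over the same element type, one ordered by descending load (HotSpotServerAndLoad.DESC_LOAD) and one by ascending load (ASC_LOAD), simply by passing different Comparators to the constructor. A minimal sketch of that max-heap/min-heap pairing, using plain Double loads:

import java.util.Comparator;
import java.util.PriorityQueue;

class DualQueueSketch {
    public static void main(String[] args) {
        // Most loaded first (max-heap) versus least loaded first (min-heap),
        // controlled purely by the Comparator passed to the constructor.
        PriorityQueue<Double> heaviest = new PriorityQueue<>(Comparator.reverseOrder());
        PriorityQueue<Double> lightest = new PriorityQueue<>();

        double[] loads = { 0.9, 0.1, 0.5 };
        for (double load : loads) {
            heaviest.add(load);
            lightest.add(load);
        }

        System.out.println(heaviest.poll()); // 0.9
        System.out.println(lightest.poll()); // 0.1
    }
}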

From source file:amfservices.actions.PGServicesAction.java

public Map<String, Object> spawnEggAction(String uid, String coteID, List<String> penguinIDs, long now)
        throws PGException {
    User user = User.getUser(uid);
    PGException.Assert(user.cotes().contains(coteID), PGError.INVALID_COTE, "Invalid cote");

    final Cote cote = Cote.getCote(uid, coteID);
    for (String pengId : penguinIDs) {
        PGException.Assert(cote.penguins().contains(pengId), PGError.PENGUIN_NOT_IN_COTE,
                "Penguin isn't contained in cote");
    }

    PriorityQueue<Penguin> penguins = new PriorityQueue(Math.max(penguinIDs.size(), 1),
            new Comparator<Penguin>() {
                @Override
                public int compare(Penguin p1, Penguin p2) {
                    long p1NextSpawnTime = PenguinServices.inst().nextSpawn(p1, cote);
                    long p2NextSpawnTime = PenguinServices.inst().nextSpawn(p2, cote);

                    return (p1NextSpawnTime > p2NextSpawnTime) ? 1
                            : ((p1NextSpawnTime == p2NextSpawnTime) ? 0 : -1);
                }
            });

    Map<String, Object> failData = new HashMap();

    // init penguin entities
    for (String pengId : penguinIDs) {
        Penguin penguin = Penguin.getPenguin(uid, coteID, pengId);

        long nextSpawn = PenguinServices.inst().nextSpawn(penguin, cote);
        if (nextSpawn > now) {
            Map<String, Object> failPenguinData = new HashMap(2);
            failPenguinData.put(PGMacro.TIME_LAST_SPAWN, penguin.getLastSpawn());
            failPenguinData.put(PGMacro.EGG_STORE, penguin.getLastEggStorage().getValue());

            failData.put(pengId, failPenguinData);
        } else {
            penguins.add(penguin);
        }
    }

    Map<String, Object> successData = new HashMap();
    List<String> limitedEggPenguins = new LinkedList();

    // need for add egg
    BoxEgg boxEgg = BoxEgg.getBoxEgg(uid, coteID);
    Dog dog = Dog.getDog(uid, coteID);

    while (!penguins.isEmpty()) {
        Penguin penguin = penguins.poll();
        long nextSpawn = PenguinServices.inst().nextSpawn(penguin, cote);
        String spawnedEggKind = PenguinServices.inst().spawnEgg(penguin, nextSpawn);

        EggStoreServices.EggStorage eggStorage = EggStoreServices.inst().addEgg(cote, boxEgg, dog,
                spawnedEggKind, now);
        if (eggStorage == EggStoreServices.EggStorage.LIMITED) {
            limitedEggPenguins.add(penguin.getPenguinID());
        }

        penguin.setLastEggStorage(eggStorage);
        penguin.saveToDB();

        Map<String, Object> penguinResp = new HashMap();
        penguinResp.put(PGMacro.KIND, spawnedEggKind);
        penguinResp.put(PGMacro.EGG_STORE, eggStorage.getValue());
        successData.put(penguin.getPenguinID(), penguinResp);
    }

    Map<String, Object> response = new HashMap();
    response.put(PGMacro.SUCCESS, successData);
    response.put(PGMacro.FAIL, failData);
    response.put(PGMacro.SPAWN_LIMITED_PENGUINS, AMFBuilder.toAMF(limitedEggPenguins));
    return response;
}
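
Both this method and penguinWannaEatAction further down order penguins by a computed timestamp via an anonymous Comparator. On Java 8+ the same ordering can be expressed more compactly with Comparator.comparingLong; a sketch with an illustrative Penguin stand-in (not the project's class):

import java.util.Comparator;
import java.util.PriorityQueue;

class SpawnOrderSketch {
    static class Penguin {
        final String id;
        final long nextSpawnTime;

        Penguin(String id, long nextSpawnTime) {
            this.id = id;
            this.nextSpawnTime = nextSpawnTime;
        }
    }

    public static void main(String[] args) {
        // The penguin with the earliest next spawn time is polled first
        PriorityQueue<Penguin> queue =
                new PriorityQueue<>(Comparator.comparingLong((Penguin p) -> p.nextSpawnTime));
        queue.add(new Penguin("a", 200L));
        queue.add(new Penguin("b", 100L));
        System.out.println(queue.poll().id); // b
    }
}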

From source file:edu.usc.ir.geo.gazetteer.GeoNameResolver.java

/**
 * Select the best match for each location name extracted from a document,
 * choosing from among a list of lists of candidate matches. The filter uses the
 * following features: 1) edit distance between the name and the resolved name,
 * choosing the smallest one; 2) content (not yet implemented).
 *
 * @param resolvedEntities
 *            final result for the input stream
 * @param allCandidates
 *            each location name may hit several documents; this is the
 *            collection of all hit documents
 * @param count
 *            number of results for one location
 * @throws IOException
 * @throws RuntimeException
 */

private void pickBestCandidates(HashMap<String, List<Location>> resolvedEntities,
        HashMap<String, List<Location>> allCandidates, int count) {

    for (String extractedName : allCandidates.keySet()) {

        List<Location> cur = allCandidates.get(extractedName);
        if (cur.isEmpty())
            continue;//continue if no results found

        int maxWeight = Integer.MIN_VALUE;
        // In case the weight is equal for all, return the top element
        int bestIndex = 0;
        //Priority queue to return top elements
        PriorityQueue<Location> pq = new PriorityQueue<>(cur.size(), new Comparator<Location>() {
            @Override
            public int compare(Location o1, Location o2) {
                return Integer.compare(o2.getWeight(), o1.getWeight());
            }
        });

        for (int i = 0; i < cur.size(); ++i) {
            int weight = 0;
            // get cur's ith resolved entry's name
            String resolvedName = String.format(" %s ", cur.get(i).getName());
            if (resolvedName.contains(String.format(" %s ", extractedName))) {
                // Assign a weight as per configuration if the extracted name is found as an exact word in the name
                weight = WEIGHT_NAME_MATCH;
            } else if (resolvedName.contains(extractedName)) {
                // Assign a weight as per configuration if extracted name is found partly in name
                weight = WEIGHT_NAME_PART_MATCH;
            }
            // get all alternate names of cur's ith resolved entry's
            String[] altNames = cur.get(i).getAlternateNames().split(",");
            float altEditDist = 0;
            for (String altName : altNames) {
                if (altName.contains(extractedName)) {
                    altEditDist += StringUtils.getLevenshteinDistance(extractedName, altName);
                }
            }
            // the lower the edit distance, the higher the weight
            weight += getCalibratedWeight(altNames.length, altEditDist);

            //Give preference to sorted results. 0th result should have more priority
            weight += (cur.size() - i) * WEIGHT_SORT_ORDER;

            cur.get(i).setWeight(weight);

            if (weight > maxWeight) {
                maxWeight = weight;
                bestIndex = i;
            }

            pq.add(cur.get(i));
        }
        if (bestIndex == -1)
            continue;

        List<Location> resultList = new ArrayList<>();

        for (int i = 0; i < count && !pq.isEmpty(); i++) {
            resultList.add(pq.poll());
        }

        resolvedEntities.put(extractedName, resultList);
    }
}
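
The final loop polls at most count elements, which is the usual way to take the top-K results out of a PriorityQueue. A standalone sketch of that idiom:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class TopKSketch {
    // Return the k largest values in descending order
    static List<Integer> topK(List<Integer> values, int k) {
        PriorityQueue<Integer> pq =
                new PriorityQueue<>(Math.max(values.size(), 1), Comparator.reverseOrder());
        for (Integer v : values) {
            pq.add(v);
        }
        List<Integer> result = new ArrayList<>(k);
        for (int i = 0; i < k && !pq.isEmpty(); i++) {
            result.add(pq.poll());
        }
        return result;
    }
}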

From source file:amfservices.actions.PGServicesAction.java

public Map<String, Object> penguinWannaEatAction(String uid, String coteID, List<String> penguinIDs, long now)
        throws PGException {
    final EntityContext context = EntityContext.getContext(uid);

    for (String pengId : penguinIDs) {
        PGException.Assert(context.getCote().penguins().contains(pengId), PGError.PENGUIN_NOT_IN_COTE,
                "Penguin isn't contained in cote");
    }

    PriorityQueue<Penguin> penguins = new PriorityQueue(penguinIDs.size(), new Comparator<Penguin>() {
        @Override
        public int compare(Penguin p1, Penguin p2) {
            long p1NextEatTime = PenguinServices.inst().nextEat(p1, context.getCote());
            long p2NextEatTime = PenguinServices.inst().nextEat(p2, context.getCote());

            return (p1NextEatTime > p2NextEatTime) ? 1 : ((p1NextEatTime == p2NextEatTime) ? 0 : -1);
        }
    });

    Map<String, Object> failData = new HashMap();

    int remainFish = context.getCote().getPoolFish();
    for (String pengId : penguinIDs) {
        Penguin penguin = Penguin.getPenguin(uid, coteID, pengId);

        long nextEat = PenguinServices.inst().nextEat(penguin, context.getCote());
        if (nextEat > now) {
            Map<String, Object> lastPenguinEatData = new HashMap();
            lastPenguinEatData.put(PGMacro.TIME_LAST_EAT, penguin.getLastEat());
            lastPenguinEatData.put(PGMacro.FISH_LAST_EAT, penguin.getFood());

            failData.put(penguin.getPenguinID(), lastPenguinEatData);
        } else {
            PGException.Assert(remainFish > 0, PGError.EMPTY_POOL, "Empty pool");
            PGException.Assert(PenguinServices.inst().configOf(penguin).getFeed() > 0,
                    PGError.PENGUIN_CANNOT_EAT, "Penguin cannot eat");

            penguins.add(penguin);
            remainFish -= Math.min(PenguinServices.inst().configOf(penguin).getFeed(), remainFish);
        }
    }

    List<Penguin> fedPenguins = new ArrayList(penguinIDs.size());
    while (!penguins.isEmpty()) {
        Penguin penguin = penguins.poll();
        long nextEat = PenguinServices.inst().nextEat(penguin, context.getCote());

        QuestLogger questLogger = QuestServices.inst().getQuestLogger(uid, now);
        PenguinServices.inst().eat(penguin, context, questLogger, nextEat);
        fedPenguins.add(penguin);
    }

    Map<String, Object> successData = new HashMap();
    for (Penguin penguin : fedPenguins) {
        penguin.saveToDB();
        successData.put(penguin.getPenguinID(), AMFBuilder.make(PGMacro.FISH_LAST_EAT, penguin.getFood(),
                PGMacro.TIME_LAST_EAT, penguin.getLastEat()));
    }

    context.saveToDB();

    Map<String, Object> response = new HashMap();
    response.put(PGMacro.SUCCESS, successData);
    response.put(PGMacro.FAIL, failData);

    return response;
}

From source file:org.springframework.cloud.stream.app.pose.estimation.processor.PoseEstimationTensorflowOutputConverter.java

/**
 *
 * The Part Affinity Field (PAF) is a 2D vector field for each limb. For each pixel in the area belonging to a
 * particular limb, a 2D vector encodes the direction that points from one part of the limb to the other.
 * Each type of limb has a corresponding affinity field joining its two associated body parts.
 *
 * @param limbType Limb type to find limb candidates from.
 * @param fromParts
 * @param toParts
 * @param outputTensor
 * @return Returns a list of Limb candidates sorted by their total PAF score in a descending order.
 */
private PriorityQueue<Limb> findLimbCandidates(Model.LimbType limbType, List<Part> fromParts,
        List<Part> toParts, float[][][] outputTensor) {

    // Use a priority queue to keep the limb instance candidates in descending order.
    int initialSize = (fromParts.size() * toParts.size()) / 2 + 1;
    PriorityQueue<Limb> limbCandidatesQueue = new PriorityQueue<>(initialSize, (limb1, limb2) -> {
        if (limb1.getPafScore() == limb2.getPafScore())
            return 0;
        return (limb1.getPafScore() > limb2.getPafScore()) ? -1 : 1;
    });

    // For every {from -> to} pair compute a line integral over the Limb-PAF vector field toward the line
    // connecting both Parts. Computed value is used as a Limb candidate score. The higher the value the
    // higher the chance for connection between those Parts.
    for (Part fromPart : fromParts) {
        for (Part toPart : toParts) {

            float deltaX = toPart.getY() - fromPart.getY();
            float deltaY = toPart.getX() - fromPart.getX();
            float norm = (float) Math.sqrt(deltaX * deltaX + deltaY * deltaY);

            // Skip self-pointing edges (e.g. fromPartInstance == toPartInstance)
            if (norm > 1e-12) {

                float dx = deltaX / norm;
                float dy = deltaY / norm;

                int STEP_PAF = 10;
                float pafScores[] = new float[STEP_PAF];
                int stepPafScoreCount = 0;
                float totalPafScore = 0.0f;
                for (int t = 0; t < STEP_PAF; t++) {
                    int tx = (int) ((float) fromPart.getY() + (t * deltaX / STEP_PAF) + 0.5);
                    int ty = (int) ((float) fromPart.getX() + (t * deltaY / STEP_PAF) + 0.5);

                    float pafScoreX = outputTensor[tx][ty][limbType.getPafIndexX()];
                    float pafScoreY = outputTensor[tx][ty][limbType.getPafIndexY()];

                    pafScores[t] = (dy * pafScoreX) + (dx * pafScoreY);

                    totalPafScore += pafScores[t];

                    // Filter out the step PAF scores below a given, pre-defined stepPafScoreThreshold
                    if (pafScores[t] > poseProperties.getStepPafScoreThreshold()) {
                        stepPafScoreCount++;
                    }
                }

                if (totalPafScore > poseProperties.getTotalPafScoreThreshold()
                        && stepPafScoreCount >= poseProperties.getPafCountThreshold()) {
                    limbCandidatesQueue.add(new Limb(limbType, totalPafScore, fromPart, toPart));
                }
            }
        }
    }

    return limbCandidatesQueue;
}
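
One caveat worth keeping in mind when returning a PriorityQueue like this: only peek() and poll() honor the ordering, while the queue's iterator traverses the underlying heap in no particular order, so callers should drain the result rather than iterate it. A small sketch:

import java.util.Comparator;
import java.util.PriorityQueue;

class DrainDescendingSketch {
    public static void main(String[] args) {
        PriorityQueue<Float> scores = new PriorityQueue<>(Comparator.reverseOrder());
        scores.add(0.3f);
        scores.add(0.9f);
        scores.add(0.6f);

        // poll() yields 0.9, 0.6, 0.3; iterating the queue directly would not
        while (!scores.isEmpty()) {
            System.out.println(scores.poll());
        }
    }
}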

From source file:mulavito.algorithms.shortestpath.ksp.Yen.java

@Override
protected List<List<E>> getShortestPathsIntern(final V source, final V target, int k) {
    LinkedList<List<E>> found_paths = new LinkedList<List<E>>();
    PriorityQueue<WeightedPath> prioQ = new PriorityQueue<WeightedPath>();
    DijkstraShortestPath<V, E> blockedDijkstra;

    // Check if target is reachable from source.
    if (dijkstra.getDistance(source, target) == null)
        return found_paths;

    // Add Dijkstra solution, the first shortest path.
    found_paths.add(dijkstra.getPath(source, target));

    while (found_paths.size() < k) {
        List<E> curShortestPath = found_paths.getLast();

        int maxIndex = curShortestPath.size();

        List<V> curShortestPathNodes = new LinkedList<V>();
        curShortestPathNodes.add(source);
        for (E e : found_paths.getLast()) {
            V v = graph.getEndpoints(e).getFirst();
            if (!curShortestPathNodes.contains(v))
                curShortestPathNodes.add(v);
            v = graph.getEndpoints(e).getSecond();
            if (!curShortestPathNodes.contains(v))
                curShortestPathNodes.add(v);
        }
        curShortestPathNodes.remove(target);

        // Split path into Head and NextEdge
        for (int i = 0; i < maxIndex; i++) {
            List<E> head = curShortestPath.subList(0, i);
            //   V deviation = head.isEmpty() ? source : graph.getEndpoints(head.get(i - 1)).getSecond();
            V deviation = curShortestPathNodes.get(i);

            // 1. Block edges.
            Graph<V, E> blocked = blockFilter(head, deviation, curShortestPathNodes, found_paths);

            // 2. Get shortest path in graph with blocked edges.
            blockedDijkstra = new DijkstraShortestPath<V, E>(blocked, nev);

            Number dist = blockedDijkstra.getDistance(deviation, target);
            if (dist == null)
                continue;

            List<E> tail = blockedDijkstra.getPath(deviation, target);

            // 3. Combine head and tail into new path.
            List<E> candidate = new ArrayList<E>();
            candidate.addAll(head);
            candidate.addAll(tail);

            // Check if we already found this solution
            boolean duplicate = false;
            for (WeightedPath path : prioQ)
                if (ListUtils.isEqualList(path.getPath(), candidate)) {
                    duplicate = true;
                    break;
                }

            if (!duplicate)
                prioQ.add(new WeightedPath(candidate));
        }

        if (prioQ.isEmpty())
            break; // We have not found any new candidate!
        else
            found_paths.add(prioQ.poll().getPath());
    }

    return found_paths;
}
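
Here the queue is created without a Comparator, so WeightedPath must implement Comparable and the cheapest candidate path is polled first. A sketch of that natural-ordering pattern with an illustrative element class (not the library's WeightedPath):

import java.util.PriorityQueue;

class ComparableElementSketch {
    static class WeightedItem implements Comparable<WeightedItem> {
        final String name;
        final double weight;

        WeightedItem(String name, double weight) {
            this.name = name;
            this.weight = weight;
        }

        @Override
        public int compareTo(WeightedItem other) {
            return Double.compare(this.weight, other.weight); // lowest weight first
        }
    }

    public static void main(String[] args) {
        PriorityQueue<WeightedItem> queue = new PriorityQueue<>();
        queue.add(new WeightedItem("long path", 5.0));
        queue.add(new WeightedItem("short path", 2.0));
        System.out.println(queue.poll().name); // short path
    }
}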

From source file:com.microsoft.azure.vmagent.AzureVMAgentCleanUpTask.java

public void cleanLeakedResources(final String resourceGroup, final ServicePrincipal servicePrincipal,
        final String cloudName, final DeploymentRegistrar deploymentRegistrar) {
    try {
        final List<String> validVMs = getValidVMs(cloudName);
        final Azure azureClient = TokenCache.getInstance(servicePrincipal).getAzureClient();
        //can't use listByTag because for some reason that method strips all the tags from the outputted resources (https://github.com/Azure/azure-sdk-for-java/issues/1436)
        final PagedList<GenericResource> resources = azureClient.genericResources().listByGroup(resourceGroup);

        if (resources == null || resources.isEmpty()) {
            return;
        }

        final PriorityQueue<GenericResource> resourcesMarkedForDeletion = new PriorityQueue<>(resources.size(),
                new Comparator<GenericResource>() {
                    @Override
                    public int compare(GenericResource o1, GenericResource o2) {
                        int o1Priority = getPriority(o1);
                        int o2Priority = getPriority(o2);
                        if (o1Priority == o2Priority) {
                            return 0;
                        }
                        return (o1Priority < o2Priority) ? -1 : 1;
                    }

                    private int getPriority(final GenericResource resource) {
                        final String type = resource.type();
                        if (StringUtils.containsIgnoreCase(type, "virtualMachine")) {
                            return 1;
                        }
                        if (StringUtils.containsIgnoreCase(type, "networkInterface")) {
                            return 2;
                        }
                        if (StringUtils.containsIgnoreCase(type, "IPAddress")) {
                            return 3;
                        }
                        return 4;
                    }
                });

        for (GenericResource resource : resources) {
            final Map<String, String> tags = resource.tags();
            if (!tags.containsKey(Constants.AZURE_RESOURCES_TAG_NAME) || !deploymentRegistrar.getDeploymentTag()
                    .matches(new AzureUtil.DeploymentTag(tags.get(Constants.AZURE_RESOURCES_TAG_NAME)))) {
                continue;
            }
            boolean shouldSkipDeletion = false;
            for (String validVM : validVMs) {
                if (resource.name().contains(validVM)) {
                    shouldSkipDeletion = true;
                    break;
                }
            }
            // we're not removing storage accounts or networks - someone else might be using them
            if (shouldSkipDeletion || StringUtils.containsIgnoreCase(resource.type(), "StorageAccounts")
                    || StringUtils.containsIgnoreCase(resource.type(), "virtualNetworks")) {
                continue;
            }
            resourcesMarkedForDeletion.add(resource);
        }

        while (!resourcesMarkedForDeletion.isEmpty()) {
            try {
                final GenericResource resource = resourcesMarkedForDeletion.poll();
                if (resource == null)
                    continue;

                URI osDiskURI = null;
                if (StringUtils.containsIgnoreCase(resource.type(), "virtualMachine")) {
                    osDiskURI = new URI(
                            azureClient.virtualMachines().getById(resource.id()).osUnmanagedDiskVhdUri());
                }

                LOGGER.log(Level.INFO, "cleanLeakedResources: deleting {0} from resource group {1}",
                        new Object[] { resource.name(), resourceGroup });
                azureClient.genericResources().deleteById(resource.id());
                if (osDiskURI != null) {
                    AzureVMManagementServiceDelegate.removeStorageBlob(azureClient, osDiskURI, resourceGroup);
                }
            } catch (Exception e) {
                LOGGER.log(Level.INFO,
                        "AzureVMAgentCleanUpTask: cleanLeakedResources: failed to clean resource ", e);
            }
        }
    } catch (Exception e) {
        // No need to throw exception back, just log and move on. 
        LOGGER.log(Level.INFO,
                "AzureVMAgentCleanUpTask: cleanLeakedResources: failed to clean leaked resources ", e);
    }
}
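
The cleanup task orders resources by a hand-written rank (virtual machines before network interfaces before IP addresses) inside an anonymous Comparator. The same ordering can be sketched with Comparator.comparingInt over a rank function; the resource type strings below are only illustrative:

import java.util.Comparator;
import java.util.PriorityQueue;

class DeletionOrderSketch {
    static int rank(String resourceType) {
        if (resourceType.contains("virtualMachine")) return 1;
        if (resourceType.contains("networkInterface")) return 2;
        if (resourceType.contains("IPAddress")) return 3;
        return 4;
    }

    public static void main(String[] args) {
        // Lowest rank is polled (and therefore deleted) first
        PriorityQueue<String> deletionQueue =
                new PriorityQueue<>(Comparator.comparingInt(DeletionOrderSketch::rank));
        deletionQueue.add("Microsoft.Network/networkInterfaces");
        deletionQueue.add("Microsoft.Compute/virtualMachines");
        deletionQueue.add("Microsoft.Network/publicIPAddresses");

        while (!deletionQueue.isEmpty()) {
            System.out.println(deletionQueue.poll()); // VMs, then NICs, then IP addresses
        }
    }
}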

From source file:com.linkedin.pinot.routing.builder.KafkaLowLevelConsumerRoutingTableBuilder.java

@Override
public List<ServerToSegmentSetMap> computeRoutingTableFromExternalView(String tableName,
        ExternalView externalView, List<InstanceConfig> instanceConfigList) {
    // We build the routing table based off the external view here. What we want to do is to make sure that we uphold
    // the guarantees clients expect (no duplicate records, eventual consistency) and spreading the load as equally as
    // possible between the servers.
    //
    // Each Kafka partition contains a fraction of the data, so we need to make sure that we query all partitions.
    // Because in certain unlikely degenerate scenarios, we can consume overlapping data until segments are flushed (at
    // which point the overlapping data is discarded during the reconciliation process with the controller), we need to
    // ensure that the query that is sent has only one partition in CONSUMING state in order to avoid duplicate records.
    //
    // Because we also want to spread the load as equally as possible between servers, we use a weighted random
    // replica selection that favors picking replicas with fewer segments assigned to them, thus having an approximately
    // equal distribution of load between servers.
    //
    // For example, given three replicas with 1, 2 and 3 segments assigned to each, the replica with one segment should
    // have a weight of 2, which is the maximum segment count minus the segment count for that replica. Thus, each
    // replica other than the replica(s) with the maximum segment count should have a chance of getting a segment
    // assigned to it. This corresponds to alternative three below:
    //
    // Alternative 1 (weight is sum of segment counts - segment count in that replica):
    // (6 - 1) = 5 -> P(0.4166)
    // (6 - 2) = 4 -> P(0.3333)
    // (6 - 3) = 3 -> P(0.2500)
    //
    // Alternative 2 (weight is max of segment counts - segment count in that replica + 1):
    // (3 - 1) + 1 = 3 -> P(0.5000)
    // (3 - 2) + 1 = 2 -> P(0.3333)
    // (3 - 3) + 1 = 1 -> P(0.1666)
    //
    // Alternative 3 (weight is max of segment counts - segment count in that replica):
    // (3 - 1) = 2 -> P(0.6666)
    // (3 - 2) = 1 -> P(0.3333)
    // (3 - 3) = 0 -> P(0.0000)
    //
    // Of those three weighting alternatives, the third one has the smallest standard deviation of the number of
    // segments assigned per replica, so it corresponds to the weighting strategy used for segment assignment. Empirical
    // testing shows that for 20 segments and three replicas, the standard deviation of each alternative is respectively
    // 2.112, 1.496 and 0.853.
    //
    // This algorithm works as follows:
    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    // 2. Ensure that for each Kafka partition, we have at most one segment in consuming state
    // 3. Sort all the segments to be used during assignment in ascending order of replicas
    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.

    // 1. Gather all segments and group them by Kafka partition, sorted by sequence number
    Map<String, SortedSet<SegmentName>> sortedSegmentsByKafkaPartition = new HashMap<String, SortedSet<SegmentName>>();
    for (String helixPartitionName : externalView.getPartitionSet()) {
        // Ignore segments that are not low level consumer segments
        if (!SegmentNameBuilder.Realtime.isRealtimeV2Name(helixPartitionName)) {
            continue;
        }

        final LLCSegmentName segmentName = new LLCSegmentName(helixPartitionName);
        String kafkaPartitionName = segmentName.getPartitionRange();
        SortedSet<SegmentName> segmentsForPartition = sortedSegmentsByKafkaPartition.get(kafkaPartitionName);

        // Create sorted set if necessary
        if (segmentsForPartition == null) {
            segmentsForPartition = new TreeSet<SegmentName>();

            sortedSegmentsByKafkaPartition.put(kafkaPartitionName, segmentsForPartition);
        }

        segmentsForPartition.add(segmentName);
    }

    // 2. Ensure that for each Kafka partition, we have at most one Helix partition (Pinot segment) in consuming state
    Map<String, SegmentName> allowedSegmentInConsumingStateByKafkaPartition = new HashMap<String, SegmentName>();
    for (String kafkaPartition : sortedSegmentsByKafkaPartition.keySet()) {
        SortedSet<SegmentName> sortedSegmentsForKafkaPartition = sortedSegmentsByKafkaPartition
                .get(kafkaPartition);
        SegmentName lastAllowedSegmentInConsumingState = null;

        for (SegmentName segmentName : sortedSegmentsForKafkaPartition) {
            Map<String, String> helixPartitionState = externalView.getStateMap(segmentName.getSegmentName());
            boolean allInConsumingState = true;
            int replicasInConsumingState = 0;

            // Only keep the segment if all replicas have it in CONSUMING state
            for (String externalViewState : helixPartitionState.values()) {
                // Ignore ERROR state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ERROR)) {
                    continue;
                }

                // Not all segments are in CONSUMING state, therefore don't consider the last segment assignable to CONSUMING
                // replicas
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    allInConsumingState = false;
                    break;
                }

                // Otherwise count the replica as being in CONSUMING state
                if (externalViewState.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)) {
                    replicasInConsumingState++;
                }
            }

            // If all replicas have this segment in consuming state (and not all of them are in ERROR state), then pick this
            // segment to be the last allowed segment to be in CONSUMING state
            if (allInConsumingState && 0 < replicasInConsumingState) {
                lastAllowedSegmentInConsumingState = segmentName;
                break;
            }
        }

        if (lastAllowedSegmentInConsumingState != null) {
            allowedSegmentInConsumingStateByKafkaPartition.put(kafkaPartition,
                    lastAllowedSegmentInConsumingState);
        }
    }

    // 3. Sort all the segments to be used during assignment in ascending order of replicas

    // PriorityQueue throws IllegalArgumentException when given a size of zero
    int segmentCount = Math.max(externalView.getPartitionSet().size(), 1);
    PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueue = new PriorityQueue<Pair<String, Set<String>>>(
            segmentCount, new Comparator<Pair<String, Set<String>>>() {
                @Override
                public int compare(Pair<String, Set<String>> firstPair, Pair<String, Set<String>> secondPair) {
                    return Integer.compare(firstPair.getRight().size(), secondPair.getRight().size());
                }
            });
    RoutingTableInstancePruner instancePruner = new RoutingTableInstancePruner(instanceConfigList);

    for (Map.Entry<String, SortedSet<SegmentName>> entry : sortedSegmentsByKafkaPartition.entrySet()) {
        String kafkaPartition = entry.getKey();
        SortedSet<SegmentName> segmentNames = entry.getValue();

        // The only segment name which is allowed to be in CONSUMING state, or null if there is none
        SegmentName validConsumingSegment = allowedSegmentInConsumingStateByKafkaPartition.get(kafkaPartition);

        for (SegmentName segmentName : segmentNames) {
            Set<String> validReplicas = new HashSet<String>();
            Map<String, String> externalViewState = externalView.getStateMap(segmentName.getSegmentName());

            for (Map.Entry<String, String> instanceAndStateEntry : externalViewState.entrySet()) {
                String instance = instanceAndStateEntry.getKey();
                String state = instanceAndStateEntry.getValue();

                // Skip pruned replicas (shutting down or otherwise disabled)
                if (instancePruner.isInactive(instance)) {
                    continue;
                }

                // Replicas in ONLINE state are always allowed
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.ONLINE)) {
                    validReplicas.add(instance);
                    continue;
                }

                // Replicas in CONSUMING state are only allowed on the last segment
                if (state.equalsIgnoreCase(
                        CommonConstants.Helix.StateModel.RealtimeSegmentOnlineOfflineStateModel.CONSUMING)
                        && segmentName.equals(validConsumingSegment)) {
                    validReplicas.add(instance);
                }
            }

            segmentToReplicaSetQueue
                    .add(new ImmutablePair<String, Set<String>>(segmentName.getSegmentName(), validReplicas));

            // If this segment is the segment allowed in CONSUMING state, don't process segments after it in that Kafka
            // partition
            if (segmentName.equals(validConsumingSegment)) {
                break;
            }
        }
    }

    // 4. For each segment to be used during assignment, pick a random replica, weighted by the number of segments
    //    assigned to each replica.
    List<ServerToSegmentSetMap> routingTables = new ArrayList<ServerToSegmentSetMap>(routingTableCount);
    for (int i = 0; i < routingTableCount; ++i) {
        Map<String, Set<String>> instanceToSegmentSetMap = new HashMap<String, Set<String>>();

        PriorityQueue<Pair<String, Set<String>>> segmentToReplicaSetQueueCopy = new PriorityQueue<Pair<String, Set<String>>>(
                segmentToReplicaSetQueue);

        while (!segmentToReplicaSetQueueCopy.isEmpty()) {
            Pair<String, Set<String>> segmentAndValidReplicaSet = segmentToReplicaSetQueueCopy.poll();
            String segment = segmentAndValidReplicaSet.getKey();
            Set<String> validReplicaSet = segmentAndValidReplicaSet.getValue();

            String replica = pickWeightedRandomReplica(validReplicaSet, instanceToSegmentSetMap);
            if (replica != null) {
                Set<String> segmentsForInstance = instanceToSegmentSetMap.get(replica);

                if (segmentsForInstance == null) {
                    segmentsForInstance = new HashSet<String>();
                    instanceToSegmentSetMap.put(replica, segmentsForInstance);
                }

                segmentsForInstance.add(segment);
            }
        }

        routingTables.add(new ServerToSegmentSetMap(instanceToSegmentSetMap));
    }

    return routingTables;
}
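
Step 4 builds a fresh copy of segmentToReplicaSetQueue for every routing table by passing the existing queue to the PriorityQueue constructor, which reuses its ordering; draining the copy leaves the original untouched. A tiny sketch of that copy-then-drain pattern:

import java.util.PriorityQueue;

class CopyAndDrainSketch {
    public static void main(String[] args) {
        PriorityQueue<Integer> original = new PriorityQueue<>();
        original.add(3);
        original.add(1);
        original.add(2);

        // The copy reuses the original's ordering; polling it does not modify the original
        PriorityQueue<Integer> copy = new PriorityQueue<>(original);
        while (!copy.isEmpty()) {
            System.out.print(copy.poll() + " "); // 1 2 3
        }
        System.out.println();
        System.out.println(original.size()); // still 3
    }
}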

From source file:exploration.rendezvous.MultiPointRendezvousStrategy.java

private void calculateRendezvousRandomSampling(int timeElapsed) {
    RendezvousAgentData rvd = agent.getRendezvousAgentData();
    // Only calculate rv every several time steps at most
    if (rvd.getTimeSinceLastRVCalc() < SimConstants.RV_REPLAN_INTERVAL) {
        return;
    } else {
        rvd.setTimeSinceLastRVCalc(0);
    }

    TeammateAgent relay = agent.getParentTeammate();
    generatedPoints = SampleEnvironmentPoints(agent, settings.SamplePointDensity);
    connectionsToBase = FindCommLinks(generatedPoints, agent);
    PriorityQueue<NearRVPoint> pointsNearFrontier = GetPointsWithinDistOfFrontier(generatedPoints, 100);

    int pathsCalculated = 0;

    //Now for top K points, let's calculate p' distances to base, and find the nearest point connected to base
    PriorityQueue<NearRVPoint> pointsNearFrontierReal = new PriorityQueue<NearRVPoint>();
    for (int k = 0; (k < 50) && !pointsNearFrontier.isEmpty(); k++) {
        NearRVPoint p = pointsNearFrontier.poll();
        double minDistToBase = Double.MAX_VALUE;

        for (CommLink link : p.commLinks) {
            NearRVPoint connectedPoint = link.getRemotePoint();

            pathsCalculated = findNearestPointInBaseCommRange(connectedPoint, connectionsToBase, agent);

            if (connectedPoint.distanceToParent < minDistToBase) {
                minDistToBase = connectedPoint.distanceToParent;
                p.commLinkClosestToBase = link;
            }
        }
        //At this point, for p, we know:
        //  1. Connected point p' that is nearest to comm range of Base
        //  2. Distance from p' to comm range of Base
        //  3. Nearest point from p' that is within comm range of Base
        //So we know how long each point p will have to wait for relay, and so can estimate
        //where explorer will be at the time, to calculate regret accurately.

        //For now, just calculate accurate distance to next frontier:
        Path pathToFrontier = agent.calculatePath(p, getExplorerFrontier(), false, false);
        double distToFrontier = Double.MAX_VALUE;
        if (pathToFrontier.found) {
            distToFrontier = pathToFrontier.getLength();
        }
        pathsCalculated++;
        p.setDistanceToFrontier(distToFrontier);

        if (p.commLinkClosestToBase == null || p.commLinkClosestToBase.getRemotePoint() == null) {
            //something went wrong, set RV to our current location and return
            Rendezvous meetingLocation = new Rendezvous(agent.getLocation());
            Point baseLocation = agent.getTeammate(agent.getParentTeammate().getParent()).getLocation();
            meetingLocation.parentsRVLocation = new Rendezvous(baseLocation);
            rvd.setParentRendezvous(meetingLocation);
            calculateRVTimings(timeElapsed);
            return;
        }

        p.utility = NearRVPoint.getFullRVUtility(p.distanceToFrontier,
                p.commLinkClosestToBase.getRemotePoint().distanceToParent,
                p.commLinkClosestToBase.numObstacles);

        pointsNearFrontierReal.add(p);
    }

    //Now just need to retrieve the best point
    NearRVPoint childPoint = pointsNearFrontierReal.peek();
    NearRVPoint parentPoint = childPoint.commLinkClosestToBase.getRemotePoint();

    Rendezvous meetingLocation = new Rendezvous(childPoint);
    meetingLocation.setParentLocation(parentPoint);

    Rendezvous parentsMeetingLocation = new Rendezvous(parentPoint.parentPoint);
    Point baseLocation = agent.getTeammate(agent.getParentTeammate().getParent()).getLocation();
    parentsMeetingLocation
            .setParentLocation(agent.getTeammate(agent.getParentTeammate().getParent()).getLocation());

    meetingLocation.parentsRVLocation = parentsMeetingLocation;
    rvd.setParentRendezvous(meetingLocation);

    Rendezvous backupRV = new Rendezvous(childPoint);
    rvd.setParentBackupRendezvous(backupRV);

    calculateRVTimings(timeElapsed);

    displayData.setGeneratedPoints(generatedPoints);
    displayData.setPointsNearFrontier(pointsNearFrontier);
}
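
The best point is read here with peek(), which inspects the head without removing it (and returns null on an empty queue), in contrast to poll() used in the earlier examples. A short sketch of the difference:

import java.util.PriorityQueue;

class PeekVersusPollSketch {
    public static void main(String[] args) {
        PriorityQueue<Integer> queue = new PriorityQueue<>();
        queue.add(10);
        queue.add(5);

        System.out.println(queue.peek()); // 5, queue still holds both elements
        System.out.println(queue.poll()); // 5, queue now holds one element
        queue.clear();
        System.out.println(queue.peek()); // null on an empty queue
    }
}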