Example usage for java.util PriorityQueue poll

List of usage examples for java.util PriorityQueue poll

Introduction

On this page you can find example usages of java.util.PriorityQueue.poll().

Prototype

public E poll() 
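
The method retrieves and removes the head of the queue, or returns null if the queue is empty. As a quick, self-contained illustration (not taken from any of the projects listed below), the following sketch drains a queue with poll():

import java.util.PriorityQueue;

public class PollExample {
    public static void main(String[] args) {
        PriorityQueue<Integer> queue = new PriorityQueue<>();
        queue.add(5);
        queue.add(1);
        queue.add(3);

        // poll() removes and returns the smallest element first (natural ordering)
        Integer head;
        while ((head = queue.poll()) != null) {
            System.out.println(head); // prints 1, then 3, then 5
        }

        // On an empty queue, poll() returns null instead of throwing an exception
        System.out.println(queue.poll()); // prints null
    }
}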

Usage

From source file:org.apache.tez.dag.app.rm.DagAwareYarnTaskScheduler.java

@GuardedBy("this")
@Nullable
private Collection<ContainerId> maybePreempt(Resource freeResources) {
    if (preemptionPercentage == 0
            || numHeartbeats - lastPreemptionHeartbeat < numHeartbeatsBetweenPreemptions) {
        return null;
    }
    if (!requestTracker.isPreemptionDeadlineExpired()
            && requestTracker.fitsHighestPriorityRequest(freeResources)) {
        if (numHeartbeats % 50 == 1) {
            LOG.info("Highest priority request fits in free resources {}", freeResources);
        }
        return null;
    }

    int numIdleContainers = idleTracker.getNumContainers();
    if (numIdleContainers > 0) {
        if (numHeartbeats % 50 == 1) {
            LOG.info("Avoiding preemption since there are {} idle containers", numIdleContainers);
        }
        return null;
    }

    BitSet blocked = requestTracker.createVertexBlockedSet();
    if (!blocked.intersects(assignedVertices)) {
        if (numHeartbeats % 50 == 1) {
            LOG.info(
                    "Avoiding preemption since there are no descendants of the highest priority requests running");
        }
        return null;
    }

    Resource preemptLeft = requestTracker.getAmountToPreempt(preemptionPercentage);
    if (!resourceCalculator.anyAvailable(preemptLeft)) {
        if (numHeartbeats % 50 == 1) {
            LOG.info("Avoiding preemption since amount to preempt is {}", preemptLeft);
        }
        return null;
    }

    PriorityQueue<HeldContainer> candidates = new PriorityQueue<>(11, PREEMPT_ORDER_COMPARATOR);
    blocked.and(assignedVertices);
    for (int i = blocked.nextSetBit(0); i >= 0; i = blocked.nextSetBit(i + 1)) {
        Collection<HeldContainer> containers = vertexAssignments.get(i);
        if (containers != null) {
            candidates.addAll(containers);
        } else {
            LOG.error("Vertex {} in assignedVertices but no assignments?", i);
        }
    }

    ArrayList<ContainerId> preemptedContainers = new ArrayList<>();
    HeldContainer hc;
    while ((hc = candidates.poll()) != null) {
        LOG.info("Preempting container {} currently allocated to task {}", hc.getId(), hc.getAssignedTask());
        preemptedContainers.add(hc.getId());
        resourceCalculator.deductFrom(preemptLeft, hc.getCapability());
        if (!resourceCalculator.anyAvailable(preemptLeft)) {
            break;
        }
    }

    return preemptedContainers;
}

From source file:edu.usc.ir.geo.gazetteer.GeoNameResolver.java

/**
 * Select the best match for each location name extracted from a document,
 * choosing from among a list of lists of candidate matches. Filter uses the
 * following features: 1) edit distance between name and the resolved name,
 * choose smallest one 2) content (haven't implemented)
 *
 * @param resolvedEntities
 *            final result for the input stream
 * @param allCandidates
 *            each location name may hit several documents; this is the
 *            collection of all hit documents
 * @param count
 *            Number of results for one location
 * @throws IOException
 * @throws RuntimeException
 */

private void pickBestCandidates(HashMap<String, List<Location>> resolvedEntities,
        HashMap<String, List<Location>> allCandidates, int count) {

    for (String extractedName : allCandidates.keySet()) {

        List<Location> cur = allCandidates.get(extractedName);
        if (cur.isEmpty())
            continue;//continue if no results found

        int maxWeight = Integer.MIN_VALUE;
        //In case the weight is equal for all, return the top element
        int bestIndex = 0;
        //Priority queue to return top elements
        PriorityQueue<Location> pq = new PriorityQueue<>(cur.size(), new Comparator<Location>() {
            @Override
            public int compare(Location o1, Location o2) {
                return Integer.compare(o2.getWeight(), o1.getWeight());
            }
        });

        for (int i = 0; i < cur.size(); ++i) {
            int weight = 0;
            // get cur's ith resolved entry's name
            String resolvedName = String.format(" %s ", cur.get(i).getName());
            if (resolvedName.contains(String.format(" %s ", extractedName))) {
                // Assign a weight as per configuration if the extracted name is found as an exact word in the name
                weight = WEIGHT_NAME_MATCH;
            } else if (resolvedName.contains(extractedName)) {
                // Assign a weight as per configuration if the extracted name is found partly in the name
                weight = WEIGHT_NAME_PART_MATCH;
            }
            // get all alternate names of cur's ith resolved entry's
            String[] altNames = cur.get(i).getAlternateNames().split(",");
            float altEditDist = 0;
            for (String altName : altNames) {
                if (altName.contains(extractedName)) {
                    altEditDist += StringUtils.getLevenshteinDistance(extractedName, altName);
                }
            }
            //the smaller the edit distance, the higher the weight
            weight += getCalibratedWeight(altNames.length, altEditDist);

            //Give preference to the original sort order: the 0th result should have the highest priority
            weight += (cur.size() - i) * WEIGHT_SORT_ORDER;

            cur.get(i).setWeight(weight);

            if (weight > maxWeight) {
                maxWeight = weight;
                bestIndex = i;
            }

            pq.add(cur.get(i));
        }
        if (bestIndex == -1)
            continue;

        List<Location> resultList = new ArrayList<>();

        for (int i = 0; i < count && !pq.isEmpty(); i++) {
            resultList.add(pq.poll());
        }

        resolvedEntities.put(extractedName, resultList);
    }
}

From source file:org.apache.hadoop.hbase.master.balancer.LocalityAwareLoadBalancer.java

/**
 * This implements the Locality Aware Load Balancer.
 * Information for the algorithm can be found here: https://issues.apache.org/jira/browse/HBASE-10075
 *
 * @param clusterMap Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    long startTime = System.currentTimeMillis();

    ClusterLoadState cs = new ClusterLoadState(clusterMap);

    float average = cs.getLoadAverage(); // for logging
    int ceiling = (int) Math.ceil(average * (1 + slop));
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();

    if (!this.needsBalance(cs)) {
        /*LOG.info("Skipping load balancing because balanced cluster; " +
                "servers=" + cs.getNumServers() + " " +
                "regions=" + cs.getNumRegions() + " average=" + average + " " +
                "mostloaded=" + serversByLoad.lastKey().getLoad() +
                " leastloaded=" + serversByLoad.firstKey().getLoad());*/
        return null;
    }

    // Additional check for the locality-aware load balancer, since it only
    // considers the most loaded servers
    if (!(cs.getMaxLoad() > ceiling)) {
        return null;
    }

    Cluster cluster = new Cluster(clusterMap, new HashMap<String, Deque<RegionLoad>>(), regionLocationFinder);
    int numRegions = cs.getNumRegions();

    LOG.info(" ####################################################################################");
    LOG.info(" Before Locality-aware Balancing");
    LOG.info(" Average=" + average + " Ceiling=" + ceiling + " slop=" + slop);
    /* for (ServerAndLoad server : serversByLoad.keySet()) {
      LOG.info("---------------" + "Server Name: " + server.getServerName() + "---------------");
      List<HRegionInfo> hRegionInfos = serversByLoad.get(server);
      LOG.info("Number of Regions:" + hRegionInfos.size());
      for (HRegionInfo hRegionInfo : hRegionInfos){
        LOG.info(String.format("Name of Region: %s ", hRegionInfo.getRegionNameAsString()));
        //LOG.info(String.format("Size of Region in number of rows"+(Bytes.toInt(hRegionInfo.getStartKey())-Bytes.toInt(hRegionInfo.getEndKey()))));
        LOG.info("Start Key: " + Bytes.toString(hRegionInfo.getStartKey()));
        LOG.info("End Key: " + Bytes.toString(hRegionInfo.getEndKey()));
      }
      LOG.info("------------------------------------------------------------------------------");
    } */

    // calculate allTableRegionNumber = total number of regions per table.
    Map<Integer, Integer> allTableRegionNumberMap = new HashMap<Integer, Integer>();
    for (int i = 0; i < cluster.numServers; ++i) {
        for (int j = 0; j < cluster.numTables; ++j) {
            if (allTableRegionNumberMap.containsKey(j)) {
                Integer integer = allTableRegionNumberMap.get(j);
                integer = integer + cluster.numRegionsPerServerPerTable[i][j];
                allTableRegionNumberMap.put(j, integer);
            } else {
                allTableRegionNumberMap.put(j, cluster.numRegionsPerServerPerTable[i][j]);
            }
        }
    }

    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    for (ServerAndLoad server : serversByLoad.keySet()) {
        List<HRegionInfo> hRegionInfos = serversByLoad.get(server);
        // Proceed only if the number of regions on the current server is greater
        // than the ceiling; otherwise skip this server.
        if (hRegionInfos.size() <= ceiling) {
            LOG.debug("Number of HRegions <= ceiling (" + hRegionInfos.size() + " <= " + ceiling + ")");
            continue;
        }
        PriorityQueue<RegionServerRegionAffinity> queue = new PriorityQueue<RegionServerRegionAffinity>();
        int numberOfRegionsToMove = hRegionInfos.size() - ceiling;
        double regionAffinityNumber = (1 - hRegionInfos.size() / numRegions) * SERVER_BALANCER_WEIGHT;
        double tableRegionAffinityNumber = 0;
        // Calculate the affinity of each region on this server and add it to the queue
        for (HRegionInfo hRegionInfo : hRegionInfos) {
            // Do not move metaregion.
            if (hRegionInfo.isMetaRegion()) {
                continue;
            }
            TableName table = hRegionInfo.getTable();
            String tableName = table.getNameAsString();
            int tableIndex = cluster.tablesToIndex.get(tableName);
            int serverIndex = cluster.serversToIndex.get(server.getServerName().getHostAndPort());
            tableRegionAffinityNumber = (1 - cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                    / allTableRegionNumberMap.get(tableIndex)) * TABLE_BALANCER_WEIGHT;
            float localityIndex = getLocalityIndex(hRegionInfo, server) * LOCALITY_WEIGHT;
            LOG.info("tableRegionaffinity: " + tableRegionAffinityNumber);
            LOG.info("regionAffinityNUmber: " + regionAffinityNumber);
            LOG.info("localityIndex: " + localityIndex);
            double finalAffinity = regionAffinityNumber + tableRegionAffinityNumber + localityIndex
                    + getStickinessWeight(hRegionInfo);
            queue.add(new RegionServerRegionAffinity(server, hRegionInfo, finalAffinity));
            LOG.info("Affinity between server=" + server.getServerName() + " and region="
                    + hRegionInfo.getRegionNameAsString() + " is " + finalAffinity);
        }

        LOG.info("Number of regions to move=" + numberOfRegionsToMove + " All server and region affinities: "
                + queue);

        // Get top numberOfRegionsToMove
        List<RegionServerRegionAffinity> listOfRegionsToMove = new ArrayList<RegionServerRegionAffinity>();
        for (int i = 0; i < numberOfRegionsToMove; ++i) {
            if (queue.isEmpty()) {
                continue;
            }
            listOfRegionsToMove.add(queue.poll());
        }

        // Search for the most affine servers to these listOfRegionsToMove
        for (RegionServerRegionAffinity regionServerRegionAffinity : listOfRegionsToMove) {
            HRegionInfo hRegionInfoToMove = regionServerRegionAffinity.getHRegionInfo();
            ServerAndLoad serverToMove = null;
            double maxAffinity = Double.MIN_VALUE;
            // Get the most affine server to hRegionInfoToMove
            for (ServerAndLoad activeServer : serversByLoad.keySet()) {
                hRegionInfos = serversByLoad.get(activeServer);
                if (activeServer.equals(regionServerRegionAffinity.getServer())) {
                    continue;
                }
                if (hRegionInfos.size() >= ceiling) {
                    LOG.debug("Number of HRegions >= ceiling (" + hRegionInfos.size() + " >= " + ceiling + ")");
                    continue;
                }
                regionAffinityNumber = (1 - hRegionInfos.size() / numRegions) * SERVER_BALANCER_WEIGHT;
                TableName table = hRegionInfoToMove.getTable();
                String tableNameAsString = table.getNameAsString();
                int serverIndex = cluster.serversToIndex.get(activeServer.getServerName().getHostAndPort());
                tableRegionAffinityNumber = 0;
                if (cluster.tablesToIndex.containsKey(tableNameAsString)) {
                    Integer tableIndex = cluster.tablesToIndex.get(tableNameAsString);
                    tableRegionAffinityNumber = (1
                            - cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                                    / allTableRegionNumberMap.get(tableIndex))
                            * TABLE_BALANCER_WEIGHT;
                } else {
                    LOG.error("Table " + tableNameAsString + "not present in cluster.tablesToIndex");
                }
                double finalAffinity = regionAffinityNumber + tableRegionAffinityNumber
                        + getLocalityIndex(hRegionInfoToMove, activeServer) * LOCALITY_WEIGHT
                        + getStickinessWeight(hRegionInfoToMove);
                if (finalAffinity > maxAffinity) {
                    maxAffinity = finalAffinity;
                    serverToMove = activeServer;
                }
            }
            regionsToReturn.add(new RegionPlan(hRegionInfoToMove,
                    regionServerRegionAffinity.getServer().getServerName(), serverToMove.getServerName()));
        }
    }

    LOG.info("Returning plan: " + regionsToReturn);

    // Reset previously moved regions and add new regions
    previouslyMovedRegions.clear();
    for (RegionPlan regionPlan : regionsToReturn) {
        previouslyMovedRegions.add(regionPlan.getRegionInfo());
    }

    long endTime = System.currentTimeMillis();
    LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
            + regionsToReturn.size() + " regions");
    return regionsToReturn;
}

From source file:amfservices.actions.PGServicesAction.java

public Map<String, Object> spawnEggAction(String uid, String coteID, List<String> penguinIDs, long now)
        throws PGException {
    User user = User.getUser(uid);
    PGException.Assert(user.cotes().contains(coteID), PGError.INVALID_COTE, "Invalid cote");

    final Cote cote = Cote.getCote(uid, coteID);
    for (String pengId : penguinIDs) {
        PGException.Assert(cote.penguins().contains(pengId), PGError.PENGUIN_NOT_IN_COTE,
                "Penguin isn't contained in cote");
    }

    PriorityQueue<Penguin> penguins = new PriorityQueue(Math.max(penguinIDs.size(), 1),
            new Comparator<Penguin>() {
                @Override
                public int compare(Penguin p1, Penguin p2) {
                    long p1NextSpawnTime = PenguinServices.inst().nextSpawn(p1, cote);
                    long p2NextSpawnTime = PenguinServices.inst().nextSpawn(p2, cote);

                    return (p1NextSpawnTime > p2NextSpawnTime) ? 1
                            : ((p1NextSpawnTime == p2NextSpawnTime) ? 0 : -1);
                }
            });

    Map<String, Object> failData = new HashMap();

    // init penguin entities
    for (String pengId : penguinIDs) {
        Penguin penguin = Penguin.getPenguin(uid, coteID, pengId);

        long nextSpawn = PenguinServices.inst().nextSpawn(penguin, cote);
        if (nextSpawn > now) {
            Map<String, Object> failPenguinData = new HashMap(2);
            failPenguinData.put(PGMacro.TIME_LAST_SPAWN, penguin.getLastSpawn());
            failPenguinData.put(PGMacro.EGG_STORE, penguin.getLastEggStorage().getValue());

            failData.put(pengId, failPenguinData);
        } else {
            penguins.add(penguin);
        }
    }

    Map<String, Object> successData = new HashMap();
    List<String> limitedEggPenguins = new LinkedList();

    // needed for adding eggs
    BoxEgg boxEgg = BoxEgg.getBoxEgg(uid, coteID);
    Dog dog = Dog.getDog(uid, coteID);

    while (!penguins.isEmpty()) {
        Penguin penguin = penguins.poll();
        long nextSpawn = PenguinServices.inst().nextSpawn(penguin, cote);
        String spawnedEggKind = PenguinServices.inst().spawnEgg(penguin, nextSpawn);

        EggStoreServices.EggStorage eggStorage = EggStoreServices.inst().addEgg(cote, boxEgg, dog,
                spawnedEggKind, now);
        if (eggStorage == EggStoreServices.EggStorage.LIMITED) {
            limitedEggPenguins.add(penguin.getPenguinID());
        }

        penguin.setLastEggStorage(eggStorage);
        penguin.saveToDB();

        Map<String, Object> penguinResp = new HashMap();
        penguinResp.put(PGMacro.KIND, spawnedEggKind);
        penguinResp.put(PGMacro.EGG_STORE, eggStorage.getValue());
        successData.put(penguin.getPenguinID(), penguinResp);
    }

    Map<String, Object> response = new HashMap();
    response.put(PGMacro.SUCCESS, successData);
    response.put(PGMacro.FAIL, failData);
    response.put(PGMacro.SPAWN_LIMITED_PENGUINS, AMFBuilder.toAMF(limitedEggPenguins));
    return response;
}

From source file:amfservices.actions.PGServicesAction.java

public Map<String, Object> penguinWannaEatAction(String uid, String coteID, List<String> penguinIDs, long now)
        throws PGException {
    final EntityContext context = EntityContext.getContext(uid);

    for (String pengId : penguinIDs) {
        PGException.Assert(context.getCote().penguins().contains(pengId), PGError.PENGUIN_NOT_IN_COTE,
                "Penguin isn't contained in cote");
    }

    PriorityQueue<Penguin> penguins = new PriorityQueue(penguinIDs.size(), new Comparator<Penguin>() {
        @Override
        public int compare(Penguin p1, Penguin p2) {
            long p1NextEatTime = PenguinServices.inst().nextEat(p1, context.getCote());
            long p2NextEatTime = PenguinServices.inst().nextEat(p2, context.getCote());

            return (p1NextEatTime > p2NextEatTime) ? 1 : ((p1NextEatTime == p2NextEatTime) ? 0 : -1);
        }
    });

    Map<String, Object> failData = new HashMap();

    int remainFish = context.getCote().getPoolFish();
    for (String pengId : penguinIDs) {
        Penguin penguin = Penguin.getPenguin(uid, coteID, pengId);

        long nextEat = PenguinServices.inst().nextEat(penguin, context.getCote());
        if (nextEat > now) {
            Map<String, Object> lastPenguinEatData = new HashMap();
            lastPenguinEatData.put(PGMacro.TIME_LAST_EAT, penguin.getLastEat());
            lastPenguinEatData.put(PGMacro.FISH_LAST_EAT, penguin.getFood());

            failData.put(penguin.getPenguinID(), lastPenguinEatData);
        } else {
            PGException.Assert(remainFish > 0, PGError.EMPTY_POOL, "Empty pool");
            PGException.Assert(PenguinServices.inst().configOf(penguin).getFeed() > 0,
                    PGError.PENGUIN_CANNOT_EAT, "Penguin cannot eat");

            penguins.add(penguin);
            remainFish -= Math.min(PenguinServices.inst().configOf(penguin).getFeed(), remainFish);
        }
    }

    List<Penguin> fedPenguins = new ArrayList(penguinIDs.size());
    while (!penguins.isEmpty()) {
        Penguin penguin = penguins.poll();
        long nextEat = PenguinServices.inst().nextEat(penguin, context.getCote());

        QuestLogger questLogger = QuestServices.inst().getQuestLogger(uid, now);
        PenguinServices.inst().eat(penguin, context, questLogger, nextEat);
        fedPenguins.add(penguin);
    }

    Map<String, Object> successData = new HashMap();
    for (Penguin penguin : fedPenguins) {
        penguin.saveToDB();
        successData.put(penguin.getPenguinID(), AMFBuilder.make(PGMacro.FISH_LAST_EAT, penguin.getFood(),
                PGMacro.TIME_LAST_EAT, penguin.getLastEat()));
    }

    context.saveToDB();

    Map<String, Object> response = new HashMap();
    response.put(PGMacro.SUCCESS, successData);
    response.put(PGMacro.FAIL, failData);

    return response;
}

From source file:com.joliciel.csvLearner.features.RealValueFeatureEvaluator.java

/**
 * For a given feature, calculate the entropy after each level of splitting.
 * Level 0: the entropy taking into account only those events which have a value as opposed to those which don't
 * Level 1: entropy for events without a value (where value=0) + entropy of other events after first split
 * Level 2: entropy for events without a value (where value=0) + entropy of other events after second split
 * etc.
 * @param events the list of events
 * @param feature the feature to consider for splitting
 * @return the entropy at each level of splitting, starting with the overall event-space entropy
 */
public List<Double> evaluateFeature(GenericEvents events, String feature, String testOutcome) {
    long startTime = (new Date()).getTime();

    if (LOG.isTraceEnabled()) {
        LOG.trace("Evaluating feature: " + feature);
        LOG.trace("Test outcome: " + testOutcome);
    }
    long startTimeInitialise = (new Date()).getTime();

    PriorityQueue<NameValuePair> heap = new PriorityQueue<NameValuePair>(events.size());
    Collection<NameValuePair> featureValues = new ArrayList<NameValuePair>();
    Map<String, Integer> eventOutcomeMap = new TreeMap<String, Integer>();
    Map<String, Integer> featureOutcomeMap = new TreeMap<String, Integer>();
    Map<String, Integer> nonFeatureOutcomeMap = new TreeMap<String, Integer>();

    List<String> outcomes = null;
    if (testOutcome == null) {
        Set<String> outcomeSet = events.getOutcomes();
        outcomes = new ArrayList<String>(outcomeSet);
    } else {
        outcomes = new ArrayList<String>();
        outcomes.add(testOutcome);
        outcomes.add("");
    }
    int[] eventOutcomeCounts = new int[outcomes.size()];
    int[] featureOutcomeCounts = new int[outcomes.size()];
    int[] nonFeatureOutcomeCounts = new int[outcomes.size()];

    int eventCount = events.size();
    int featureCount = 0;
    for (GenericEvent event : events) {
        if (!event.isTest()) {
            String outcome = event.getOutcome();
            int outcomeIndex = 0;
            if (testOutcome == null) {
                outcomeIndex = outcomes.indexOf(outcome);
            } else {
                if (!outcome.equals(testOutcome)) {
                    outcome = "";
                    outcomeIndex = 1;
                } else {
                    outcomeIndex = 0;
                }
            }

            long startTimeFindFeature = (new Date()).getTime();
            int featureIndex = event.getFeatureIndex(feature);
            long endTimeFindFeature = (new Date()).getTime();
            totalTimeFindFeature += (endTimeFindFeature - startTimeFindFeature);
            if (featureIndex >= 0) {
                long startTimeOrdering = (new Date()).getTime();
                heap.add(new NameValuePair(outcome, event.getWeights().get(featureIndex)));
                long endTimeOrdering = (new Date()).getTime();
                totalTimeOrdering += (endTimeOrdering - startTimeOrdering);
                featureOutcomeCounts[outcomeIndex]++;
                featureCount++;
            } else {
                nonFeatureOutcomeCounts[outcomeIndex]++;
            }
            eventOutcomeCounts[outcomeIndex]++;
        }
    }
    int nonFeatureCount = eventCount - featureCount;

    long startTimeOrdering = (new Date()).getTime();
    while (!heap.isEmpty())
        featureValues.add(heap.poll());
    long endTimeOrdering = (new Date()).getTime();
    totalTimeOrdering += (endTimeOrdering - startTimeOrdering);

    int i = 0;
    for (String outcome : outcomes) {
        eventOutcomeMap.put(outcome, eventOutcomeCounts[i]);
        featureOutcomeMap.put(outcome, featureOutcomeCounts[i]);
        nonFeatureOutcomeMap.put(outcome, nonFeatureOutcomeCounts[i]);
        i++;
    }

    long endTimeInitialise = (new Date()).getTime();
    totalTimeInitialise += (endTimeInitialise - startTimeInitialise);

    long startTimeInitialEntropy = (new Date()).getTime();
    double eventSpaceEntropy = EntropyCalculator.getEntropy(eventOutcomeMap.values(), eventCount);
    double featureEntropy = EntropyCalculator.getEntropy(featureOutcomeMap.values(), featureCount);
    double nonFeatureEntropy = EntropyCalculator.getEntropy(nonFeatureOutcomeMap.values(), nonFeatureCount);
    long endTimeInitialEntropy = (new Date()).getTime();
    totalTimeInitialEntropy += (endTimeInitialEntropy - startTimeInitialEntropy);

    List<Double> entropyByLevel = new ArrayList<Double>();
    entropyByLevel.add(eventSpaceEntropy);

    double proportionalFeatureEntropy = ((double) featureCount / (double) eventCount) * featureEntropy;
    double proportionalNonFeatureEntropy = ((double) nonFeatureCount / (double) eventCount) * nonFeatureEntropy;
    double level0Entropy = proportionalFeatureEntropy + proportionalNonFeatureEntropy;
    entropyByLevel.add(level0Entropy);

    if (LOG.isTraceEnabled()) {
        LOG.trace("eventSpaceEntropy: " + eventSpaceEntropy);
        LOG.trace("proportionalFeatureEntropy: " + proportionalFeatureEntropy);
        LOG.trace("proportionalNonFeatureEntropy: " + proportionalNonFeatureEntropy);
        LOG.trace("level 0 Entropy: " + level0Entropy);
    }

    List<NameValuePair> featureValueList = new ArrayList<NameValuePair>(featureValues);
    long startTimeSplit = (new Date()).getTime();
    featureSplitter.split(featureValueList);
    long endTimeSplit = (new Date()).getTime();
    totalTimeSplit += (endTimeSplit - startTimeSplit);

    Map<Integer, Set<Split>> splitsByDepth = featureSplitter.getSplitsByDepth();

    for (int level : splitsByDepth.keySet()) {
        double levelEntropy = proportionalNonFeatureEntropy;
        if (splitsByDepth.get(level).size() == 0)
            levelEntropy += proportionalFeatureEntropy;
        else {
            for (Split split : splitsByDepth.get(level)) {
                long startTimeSplitEntropy = (new Date()).getTime();
                double proportionalEntropy = ((double) split.getSize() / (double) eventCount)
                        * split.getEntropy();
                long endTimeSplitEntropy = (new Date()).getTime();
                totalTimeSplitEntropy += (endTimeSplitEntropy - startTimeSplitEntropy);
                levelEntropy += proportionalEntropy;
            }
        }
        entropyByLevel.add(levelEntropy);
        if (LOG.isTraceEnabled())
            LOG.trace("level " + level + " Entropy: " + levelEntropy);
    }
    long endTime = (new Date()).getTime();
    totalTime += (endTime - startTime);

    return entropyByLevel;
}

From source file:mulavito.algorithms.shortestpath.ksp.Yen.java

@Override
protected List<List<E>> getShortestPathsIntern(final V source, final V target, int k) {
    LinkedList<List<E>> found_paths = new LinkedList<List<E>>();
    PriorityQueue<WeightedPath> prioQ = new PriorityQueue<WeightedPath>();
    DijkstraShortestPath<V, E> blockedDijkstra;

    // Check if target is reachable from source.
    if (dijkstra.getDistance(source, target) == null)
        return found_paths;

    // Add Dijkstra solution, the first shortest path.
    found_paths.add(dijkstra.getPath(source, target));

    while (found_paths.size() < k) {
        List<E> curShortestPath = found_paths.getLast();

        int maxIndex = curShortestPath.size();

        List<V> curShortestPathNodes = new LinkedList<V>();
        curShortestPathNodes.add(source);
        for (E e : found_paths.getLast()) {
            V v = graph.getEndpoints(e).getFirst();
            if (!curShortestPathNodes.contains(v))
                curShortestPathNodes.add(v);
            v = graph.getEndpoints(e).getSecond();
            if (!curShortestPathNodes.contains(v))
                curShortestPathNodes.add(v);
        }
        curShortestPathNodes.remove(target);

        // Split path into Head and NextEdge
        for (int i = 0; i < maxIndex; i++) {
            List<E> head = curShortestPath.subList(0, i);
            //   V deviation = head.isEmpty() ? source : graph.getEndpoints(head.get(i - 1)).getSecond();
            V deviation = curShortestPathNodes.get(i);

            // 1. Block edges.
            Graph<V, E> blocked = blockFilter(head, deviation, curShortestPathNodes, found_paths);

            // 2. Get shortest path in graph with blocked edges.
            blockedDijkstra = new DijkstraShortestPath<V, E>(blocked, nev);

            Number dist = blockedDijkstra.getDistance(deviation, target);
            if (dist == null)
                continue;

            List<E> tail = blockedDijkstra.getPath(deviation, target);

            // 3. Combine head and tail into new path.
            List<E> candidate = new ArrayList<E>();
            candidate.addAll(head);
            candidate.addAll(tail);

            // Check if we already found this solution
            boolean duplicate = false;
            for (WeightedPath path : prioQ)
                if (ListUtils.isEqualList(path.getPath(), candidate)) {
                    duplicate = true;
                    break;
                }

            if (!duplicate)
                prioQ.add(new WeightedPath(candidate));
        }

        if (prioQ.isEmpty())
            break; // We have not found any new candidate!
        else
            found_paths.add(prioQ.poll().getPath());
    }

    return found_paths;
}

From source file:mondrian.olap.fun.FunUtil.java

/**
 * Julian's algorithm for stable partial sort. Improves Pedro's algorithm
 * by using a heap (priority queue) for the top {@code limit} items seen.
 * The items on the priority queue have an ordinal field, so the queue
 * can be used to generate a list of stably sorted items. (Heap sort is
 * not normally stable.)
 *
 * @param list List to sort
 * @param comp Comparator
 * @param limit Maximum number of items to return
 * @param <T> Element type
 * @return Sorted list, containing at most limit items
 */
public static <T> List<T> stablePartialSortJulian(final List<T> list, final Comparator<T> comp, int limit) {
    final Comparator<ObjIntPair<T>> comp2 = new Comparator<ObjIntPair<T>>() {
        public int compare(ObjIntPair<T> o1, ObjIntPair<T> o2) {
            int c = comp.compare(o1.t, o2.t);
            if (c == 0) {
                c = Util.compare(o1.i, o2.i);
            }
            return -c;
        }
    };
    int filled = 0;
    final PriorityQueue<ObjIntPair<T>> queue = new PriorityQueue<ObjIntPair<T>>(limit, comp2);
    for (T element : list) {
        if (filled < limit) {
            queue.offer(new ObjIntPair<T>(element, filled++));
        } else {
            ObjIntPair<T> head = queue.element();
            if (comp.compare(element, head.t) <= 0) {
                ObjIntPair<T> item = new ObjIntPair<T>(element, filled++);
                if (comp2.compare(item, head) >= 0) {
                    ObjIntPair poll = queue.remove();
                    Util.discard(poll);
                    queue.offer(item);
                }
            }
        }
    }

    int n = queue.size();
    final Object[] elements = new Object[n];
    while (n > 0) {
        elements[--n] = queue.poll().t;
    }
    assert queue.isEmpty();
    //noinspection unchecked
    return Arrays.asList((T[]) elements);
}

From source file:com.microsoft.azure.vmagent.AzureVMAgentCleanUpTask.java

public void cleanLeakedResources(final String resourceGroup, final ServicePrincipal servicePrincipal,
        final String cloudName, final DeploymentRegistrar deploymentRegistrar) {
    try {
        final List<String> validVMs = getValidVMs(cloudName);
        final Azure azureClient = TokenCache.getInstance(servicePrincipal).getAzureClient();
        //can't use listByTag because for some reason that method strips all the tags from the outputted resources (https://github.com/Azure/azure-sdk-for-java/issues/1436)
        final PagedList<GenericResource> resources = azureClient.genericResources().listByGroup(resourceGroup);

        if (resources == null || resources.isEmpty()) {
            return;
        }

        final PriorityQueue<GenericResource> resourcesMarkedForDeletion = new PriorityQueue<>(resources.size(),
                new Comparator<GenericResource>() {
                    @Override
                    public int compare(GenericResource o1, GenericResource o2) {
                        int o1Priority = getPriority(o1);
                        int o2Priority = getPriority(o2);
                        if (o1Priority == o2Priority) {
                            return 0;
                        }
                        return (o1Priority < o2Priority) ? -1 : 1;
                    }

                    private int getPriority(final GenericResource resource) {
                        final String type = resource.type();
                        if (StringUtils.containsIgnoreCase(type, "virtualMachine")) {
                            return 1;
                        }
                        if (StringUtils.containsIgnoreCase(type, "networkInterface")) {
                            return 2;
                        }
                        if (StringUtils.containsIgnoreCase(type, "IPAddress")) {
                            return 3;
                        }
                        return 4;
                    }
                });

        for (GenericResource resource : resources) {
            final Map<String, String> tags = resource.tags();
            if (!tags.containsKey(Constants.AZURE_RESOURCES_TAG_NAME) || !deploymentRegistrar.getDeploymentTag()
                    .matches(new AzureUtil.DeploymentTag(tags.get(Constants.AZURE_RESOURCES_TAG_NAME)))) {
                continue;
            }
            boolean shouldSkipDeletion = false;
            for (String validVM : validVMs) {
                if (resource.name().contains(validVM)) {
                    shouldSkipDeletion = true;
                    break;
                }
            }
            // we're not removing storage accounts or virtual networks - someone else might be using them
            if (shouldSkipDeletion || StringUtils.containsIgnoreCase(resource.type(), "StorageAccounts")
                    || StringUtils.containsIgnoreCase(resource.type(), "virtualNetworks")) {
                continue;
            }
            resourcesMarkedForDeletion.add(resource);
        }

        while (!resourcesMarkedForDeletion.isEmpty()) {
            try {
                final GenericResource resource = resourcesMarkedForDeletion.poll();
                if (resource == null)
                    continue;

                URI osDiskURI = null;
                if (StringUtils.containsIgnoreCase(resource.type(), "virtualMachine")) {
                    osDiskURI = new URI(
                            azureClient.virtualMachines().getById(resource.id()).osUnmanagedDiskVhdUri());
                }

                LOGGER.log(Level.INFO, "cleanLeakedResources: deleting {0} from resource group {1}",
                        new Object[] { resource.name(), resourceGroup });
                azureClient.genericResources().deleteById(resource.id());
                if (osDiskURI != null) {
                    AzureVMManagementServiceDelegate.removeStorageBlob(azureClient, osDiskURI, resourceGroup);
                }
            } catch (Exception e) {
                LOGGER.log(Level.INFO,
                        "AzureVMAgentCleanUpTask: cleanLeakedResources: failed to clean resource ", e);
            }
        }
    } catch (Exception e) {
        // No need to throw exception back, just log and move on. 
        LOGGER.log(Level.INFO,
                "AzureVMAgentCleanUpTask: cleanLeakedResources: failed to clean leaked resources ", e);
    }
}

From source file:org.apache.hadoop.hbase.extended.loadbalance.strategies.hotspot.HotSpotLoadBalancer.java

@Override
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    initParameters();
    /**
     * <pre>
     * We need at least two priority queues:
     * a) one containing hotspot regions, with their load as the ordering criterion (max priority queue)
     * b) one containing non-hotspot regions, with their loads (min priority queue)
     * 
     * Further, we need to iterate over these queues and decrease the load, so we
     * need a data structure from which to build these queues,
     * and lastly we need to return the region plan.
     * </pre>
     */

    LOG.debug("#################Came in the new Balancer Code and the cluster status is = " + this.status);
    long startTime = System.currentTimeMillis();
    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.info("numServers=0 so skipping load balancing");
        return null;

    }

    NavigableMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>> regionServerAndServerLoadMap = new TreeMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>>();
    PriorityQueue<HotSpotServerAndLoad> hotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.DESC_LOAD);
    PriorityQueue<HotSpotServerAndLoad> nonHotspotRegionServers = new PriorityQueue<HotSpotServerAndLoad>(
            numServers, HotSpotServerAndLoad.ASC_LOAD);
    HashBiMap<HRegionInfo, HotSpotRegionLoad> allRegionsLoadBiMap = HashBiMap.create();
    LOG.debug("#################clusterState=" + clusterState);
    double normalisedTotalLoadOfAllRegions = initRegionLoadMapsBasedOnInput(clusterState,
            regionServerAndServerLoadMap, allRegionsLoadBiMap);
    LOG.debug("#################normalisedTotalLoadOfAllRegions=" + normalisedTotalLoadOfAllRegions);
    // Check if we even need to do any load balancing
    double average = normalisedTotalLoadOfAllRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    LOG.debug("######################## final regionServerAndServerLoadMap == " + regionServerAndServerLoadMap);
    if (!loadBalancingNeeded(numServers, regionServerAndServerLoadMap, normalisedTotalLoadOfAllRegions,
            average)) {
        // we do not need load balancing
        return null;
    }
    double minLoad = normalisedTotalLoadOfAllRegions / numServers;
    double maxLoad = normalisedTotalLoadOfAllRegions % numServers == 0 ? minLoad : minLoad + 1;
    // as we now have to balance stuff, init PQ's
    LOG.debug(String.format("#################minLoad =%s,maxLoad= %s", minLoad, maxLoad));
    for (Map.Entry<HotSpotServerAndLoad, List<HotSpotRegionLoad>> item : regionServerAndServerLoadMap
            .entrySet()) {
        HotSpotServerAndLoad serverLoad = item.getKey();
        if (serverLoad.isHotSpot()) {

            hotspotRegionServers.add(serverLoad);
        } else {
            if (serverLoad.getLoad() < maxLoad) {
                nonHotspotRegionServers.add(serverLoad);
            }
        }
    }
    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(normalisedTotalLoadOfAllRegions)
            .append(", numServers=").append(numServers).append(", max=").append(maxLoad).append(", min=")
            .append(minLoad);
    LOG.debug(strBalanceParam.toString());
    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    while (hotspotRegionServers.size() > 0 && nonHotspotRegionServers.size() > 0) {
        HotSpotServerAndLoad serverToBalance = hotspotRegionServers.poll();
        LOG.debug(String.format("#################serverToBalance =%s",
                serverToBalance.getServerName().getServerName()));
        // get the least loaded non-hotspot regions of this server
        List<HotSpotRegionLoad> regionList = regionServerAndServerLoadMap.get(serverToBalance);
        // assume it to be sorted asc.
        if (regionList.size() > 0) {
            HotSpotRegionLoad regionToMove = regionList.remove(0);
            HRegionInfo regionMoveInfo = allRegionsLoadBiMap.inverse().get(regionToMove);

            /*
             * regionMoveInfo can be null in case the load map returns us
             * the root and meta regions along with the movable regions But
             * as the clusterState which is passed to us does not contain
             * these regions we can have a situation where
             * regionServerAndServerLoadMap contains some regions which are
             * not present in the allRegionsLoadBiMap
             */
            if (regionMoveInfo != null && !regionMoveInfo.isMetaRegion() && !regionMoveInfo.isRootRegion()
                    && !regionMoveInfo.isMetaTable() && regionToMove.isRegionHotspot()) {
                LOG.debug(String.format(
                        "#################Came to move the region regionMoveInfo=%s;; regionToMove=%s ",
                        regionMoveInfo, regionToMove));
                // move out.
                HotSpotServerAndLoad destinationServer = nonHotspotRegionServers.poll();

                RegionPlan rpl = new RegionPlan(allRegionsLoadBiMap.inverse().get(regionToMove),
                        serverToBalance.getServerName(), destinationServer.getServerName());
                regionsToReturn.add(rpl);
                serverToBalance.modifyLoad(regionToMove.getLoad());
                destinationServer.modifyLoad(-1 * regionToMove.getLoad());
                // re-add them to the queues if they satisfy the conditions
                if (serverToBalance.getLoad() > minLoad) {
                    hotspotRegionServers.offer(serverToBalance);
                }
                if (destinationServer.getLoad() < maxLoad) {
                    nonHotspotRegionServers.offer(destinationServer);
                }
            }
        }
    }
    LOG.info("Total Time taken to balance = " + (System.currentTimeMillis() - startTime));
    LOG.info(String.format("#################regionsToReturn=%s ", regionsToReturn));
    return regionsToReturn;

}