Example usage for java.util PriorityQueue add

Introduction

On this page you can find example usage of java.util.PriorityQueue.add.

Prototype

public boolean add(E e) 

Document

Inserts the specified element into this priority queue.
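
A minimal standalone sketch of the call (the class name and element values are illustrative, not taken from the projects below):

import java.util.PriorityQueue;

public class PriorityQueueAddDemo {
    public static void main(String[] args) {
        // With no Comparator supplied, the queue uses the elements'
        // natural ordering, so the head is always the smallest element.
        PriorityQueue<Integer> queue = new PriorityQueue<>();
        queue.add(42); // returns true on success
        queue.add(7);
        queue.add(19);
        System.out.println(queue.poll()); // prints 7
    }
}

Note that add throws NullPointerException for null elements, and ClassCastException if elements cannot be ordered (no natural ordering and no Comparator supplied at construction). Several of the examples below supply custom comparators at construction for that reason.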

Usage

From source file:exploration.rendezvous.MultiPointRendezvousStrategy.java

private PriorityQueue<NearRVPoint> GetPointsWithinDistOfFrontier(List<NearRVPoint> generatedPoints,
        double maxDist) {
    PriorityQueue<NearRVPoint> pointsNearFrontier = new PriorityQueue<NearRVPoint>();

    Point frontierCentre = getExplorerFrontier();

    if (SimConstants.DEBUG_OUTPUT) {
        System.out.println(agent + " frontierCentre is " + frontierCentre);
    }
    // create priority queue of all potential rvpoints within given straight line distance
    for (NearRVPoint p : generatedPoints) {
        double dist = p.distance(frontierCentre);
        if (dist > maxDist) {
            continue;
        }
        p.setDistanceToFrontier(dist);
        pointsNearFrontier.add(p);
    }

    return pointsNearFrontier;
}

From source file:blusunrize.immersiveengineering.api.energy.wires.ImmersiveNetHandler.java

public Set<AbstractConnection> getIndirectEnergyConnections(BlockPos node, World world,
        boolean ignoreIsEnergyOutput) {
    int dimension = world.provider.getDimension();
    if (!ignoreIsEnergyOutput && indirectConnections.containsKey(dimension)
            && indirectConnections.get(dimension).containsKey(node))
        return indirectConnections.get(dimension).get(node);
    else if (ignoreIsEnergyOutput && indirectConnectionsIgnoreOut.containsKey(dimension)
            && indirectConnectionsIgnoreOut.get(dimension).containsKey(node))
        return indirectConnectionsIgnoreOut.get(dimension).get(node);

    PriorityQueue<Pair<IImmersiveConnectable, Float>> queue = new PriorityQueue<>(
            Comparator.comparingDouble(Pair::getRight));
    Set<AbstractConnection> closedList = newSetFromMap(new ConcurrentHashMap<AbstractConnection, Boolean>());
    List<BlockPos> checked = new ArrayList<>();
    HashMap<BlockPos, BlockPos> backtracker = new HashMap<>();

    checked.add(node);
    Set<Connection> conL = getConnections(world, node);
    if (conL != null)
        for (Connection con : conL) {
            IImmersiveConnectable end = toIIC(con.end, world);
            if (end != null) {
                queue.add(new ImmutablePair<>(end, con.getBaseLoss()));
                backtracker.put(con.end, node);
            }
        }

    IImmersiveConnectable next;
    final int closedListMax = 1200;

    while (closedList.size() < closedListMax && !queue.isEmpty()) {
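        // the queue is ordered by accumulated loss, so poll() yields the cheapest known path first (Dijkstra-style)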
        Pair<IImmersiveConnectable, Float> pair = queue.poll();
        next = pair.getLeft();
        float loss = pair.getRight();
        BlockPos nextPos = toBlockPos(next);
        if (!checked.contains(nextPos) && queue.stream().noneMatch((p) -> p.getLeft().equals(nextPos))) {
            boolean isOutput = next.isEnergyOutput();
            if (ignoreIsEnergyOutput || isOutput) {
                BlockPos last = toBlockPos(next);
                WireType minimumType = null;
                int distance = 0;
                List<Connection> connectionParts = new ArrayList<>();
                while (last != null) {
                    BlockPos prev = last;
                    last = backtracker.get(last);
                    if (last != null) {

                        Set<Connection> conLB = getConnections(world, last);
                        if (conLB != null)
                            for (Connection conB : conLB)
                                if (conB.end.equals(prev)) {
                                    connectionParts.add(0, conB);
                                    distance += conB.length;
                                    if (minimumType == null
                                            || conB.cableType.getTransferRate() < minimumType.getTransferRate())
                                        minimumType = conB.cableType;
                                    break;
                                }
                    }
                }
                closedList.add(new AbstractConnection(toBlockPos(node), toBlockPos(next), minimumType, distance,
                        isOutput, connectionParts.toArray(new Connection[connectionParts.size()])));
            }

            Set<Connection> conLN = getConnections(world, toBlockPos(next));
            if (conLN != null)
                for (Connection con : conLN)
                    if (next.allowEnergyToPass(con)) {
                        IImmersiveConnectable end = toIIC(con.end, world);

                        Optional<Pair<IImmersiveConnectable, Float>> existing = queue.stream()
                                .filter((p) -> p.getLeft() == end).findAny();
                        float newLoss = con.getBaseLoss() + loss;
                        if (end != null && !checked.contains(con.end)
                                && existing.map(Pair::getRight).orElse(Float.MAX_VALUE) > newLoss) {
                            existing.ifPresent(p1 -> queue.removeIf((p2) -> p1.getLeft() == p2.getLeft()));
                            queue.add(new ImmutablePair<>(end, newLoss));
                            backtracker.put(con.end, toBlockPos(next));
                        }
                    }
            checked.add(toBlockPos(next));
        }
    }
    if (FMLCommonHandler.instance().getEffectiveSide() == Side.SERVER) {
        if (ignoreIsEnergyOutput) {
            if (!indirectConnectionsIgnoreOut.containsKey(dimension))
                indirectConnectionsIgnoreOut.put(dimension, new ConcurrentHashMap<>());
            Map<BlockPos, Set<AbstractConnection>> conns = indirectConnectionsIgnoreOut.get(dimension);
            if (!conns.containsKey(node))
                conns.put(node, newSetFromMap(new ConcurrentHashMap<>()));
            conns.get(node).addAll(closedList);
        } else {
            if (!indirectConnections.containsKey(dimension))
                indirectConnections.put(dimension, new ConcurrentHashMap<>());
            Map<BlockPos, Set<AbstractConnection>> conns = indirectConnections.get(dimension);
            if (!conns.containsKey(node))
                conns.put(node, newSetFromMap(new ConcurrentHashMap<>()));
            conns.get(node).addAll(closedList);
        }
    }
    return closedList;
}

From source file:ma.glasnost.orika.metadata.ScoringClassMapBuilder.java

public ClassMapBuilder<A, B> byDefault(DefaultFieldMapper... withDefaults) {

    DefaultFieldMapper[] defaults;
    if (withDefaults.length == 0) {
        defaults = getDefaultFieldMappers();
    } else {
        defaults = withDefaults;
    }
    /*
     * For our custom 'byDefault' method, we're going to try and match
     * fields by their Levenshtein distance
     */
    PriorityQueue<FieldMatchScore> matchScores = new PriorityQueue<FieldMatchScore>();

    Map<String, Property> propertiesForA = getPropertyExpressions(getAType());
    Map<String, Property> propertiesForB = getPropertyExpressions(getBType());

    for (final Entry<String, Property> propertyA : propertiesForA.entrySet()) {
        if (!propertyA.getValue().getName().equals("class")) {
            for (final Entry<String, Property> propertyB : propertiesForB.entrySet()) {
                if (!propertyB.getValue().getName().equals("class")) {
                    FieldMatchScore matchScore = new FieldMatchScore(propertyA.getValue(), propertyB.getValue(),
                            matchingWeights);
                    matchScores.add(matchScore);
                }
            }
        }
    }

    Set<String> unmatchedFields = new LinkedHashSet<String>(this.getPropertiesForTypeA());
    unmatchedFields.remove("class");

    for (FieldMatchScore score : matchScores) {
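        // note: this loop uses the PriorityQueue's iterator, which does not traverse
        // in priority order; only poll()/peek() honour the comparator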

        if (!this.getMappedPropertiesForTypeA().contains(score.propertyA.getExpression())
                && !this.getMappedPropertiesForTypeB().contains(score.propertyB.getExpression())) {
            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("\n" + score.toString());
            }
            if (score.meetsMinimumScore()) {
                fieldMap(score.propertyA.getExpression(), score.propertyB.getExpression()).add();
                unmatchedFields.remove(score.propertyA.getExpression());
            }
        }
    }

    /*
     * Apply any default field mappers to the unmapped fields
     */
    for (String propertyNameA : unmatchedFields) {
        Property prop = resolvePropertyForA(propertyNameA);
        for (DefaultFieldMapper defaulter : defaults) {
            String suggestion = defaulter.suggestMappedField(propertyNameA, prop.getType());
            if (suggestion != null && getPropertiesForTypeB().contains(suggestion)) {
                if (!getMappedPropertiesForTypeB().contains(suggestion)) {
                    fieldMap(propertyNameA, suggestion).add();
                }
            }
        }
    }

    return this;
}

From source file:org.hbasene.index.search.HBaseTopFieldCollector.java

private void doAppendToPQ(final Map<byte[], SortFieldDoc> docMap, final PriorityQueue<SortFieldDoc> outputPq,
        final String sortField, final int sortIndex) throws IOException {
    HTableInterface table = this.tablePool.getTable(this.indexName);
    final String sortFieldPrefix = sortField + "/"; // separator
    try {
        byte[] row = Bytes.toBytes(sortFieldPrefix);
        Result priorToFirstTerm = table.getRowOrBefore(row, FAMILY_TERMVECTOR);
        ResultScanner scanner = table
                .getScanner(this.createScan((priorToFirstTerm != null) ? priorToFirstTerm.getRow() : null));
        try {
            int index = 0;
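            // each row under sortFieldPrefix is one term of the sort field, visited in sorted order;
            // 'index' is the rank assigned to documents containing that term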
            Result result = scanner.next();
            while (result != null) {
                String currentRow = Bytes.toString(result.getRow());
                if (currentRow.startsWith(sortFieldPrefix)) {
                    ++index;
                    NavigableMap<byte[], byte[]> columnQualifiers = result.getFamilyMap(FAMILY_TERMVECTOR);
                    SetView<byte[]> intersectionSet = Sets.intersection(columnQualifiers.keySet(),
                            docMap.keySet());
                    for (final byte[] commonDocId : intersectionSet) {
                        SortFieldDoc next = docMap.get(commonDocId);
                        next.indices[sortIndex] = index;
                        outputPq.add(next);
                    }
                    // This works best when the number of unique values in the
                    // sort field is small relative to the total number of documents.
                    docMap.keySet().removeAll(intersectionSet);
                    LOG.info("Docs Size after  " + currentRow + " is " + docMap.size());
                    if (docMap.isEmpty()) {
                        break;
                    }
                }
                result = scanner.next();
            }
        } finally {
            scanner.close();
        }
    } finally {
        this.tablePool.putTable(table);
    }
}

From source file:beast.evolution.tree.ConstrainedClusterTree.java

/**
 * Perform clustering using a link method.
 * This implementation uses a priority queue, resulting in an O(n^2 log(n)) algorithm.
 *
 * @param nClusters    number of clusters
 * @param nClusterID   list of instance IDs for each cluster
 * @param clusterNodes tree nodes corresponding to each cluster
 */
void doLinkClustering(int nClusters, final List<Integer>[] nClusterID, final NodeX[] clusterNodes) {
    Log.warning.print("Calculating distance");
    final int nInstances = taxaNames.size();
    final PriorityQueue<Tuple> queue = new PriorityQueue<Tuple>(nClusters * nClusters / 2,
            new TupleComparator());
    final double[][] fDistance0 = new double[nClusters][nClusters];
    for (int i = 0; i < nClusters; i++) {
        fDistance0[i][i] = 0;
        for (int j = i + 1; j < nClusters; j++) {
            fDistance0[i][j] = getDistance0(nClusterID[i], nClusterID[j]);
            fDistance0[j][i] = fDistance0[i][j];
            if (isCompatible(i, j, nClusterID)) {
                queue.add(new Tuple(fDistance0[i][j], i, j, 1, 1));
            }
        }
        // feedback on progress
        if ((i + 1) % 100 == 0) {
            if ((i + 1) % 1000 == 0) {
                Log.warning.print('|');
            } else {
                Log.warning.print('.');
            }
        }
    }
    Log.warning.print("\nClustering: ");
    while (nClusters > 1) {
        int iMin1 = -1;
        int iMin2 = -1;
        // use priority queue to find next best pair to cluster
        Tuple t;
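        // lazy deletion: discard stale tuples whose clusters changed size after they were queued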
        do {
            t = queue.poll();
        } while (t != null && (nClusterID[t.m_iCluster1].size() != t.m_nClusterSize1
                || nClusterID[t.m_iCluster2].size() != t.m_nClusterSize2));
        iMin1 = t.m_iCluster1;
        iMin2 = t.m_iCluster2;
        merge(iMin1, iMin2, t.m_fDist / 2.0, t.m_fDist / 2.0, nClusterID, clusterNodes);
        updateConstraints(nClusterID[iMin1]);

        // update distances & queue
        for (int i = 0; i < nInstances; i++) {
            if (i != iMin1 && nClusterID[i].size() != 0) {
                final int i1 = Math.min(iMin1, i);
                final int i2 = Math.max(iMin1, i);
                if (isCompatible(i1, i2, nClusterID)) {
                    final double fDistance = getDistance(fDistance0, nClusterID[i1], nClusterID[i2]);
                    queue.add(new Tuple(fDistance, i1, i2, nClusterID[i1].size(), nClusterID[i2].size()));
                }
            }
        }

        nClusters--;

        // feedback on progress
        if (nClusters % 100 == 0) {
            if (nClusters % 1000 == 0) {
                Log.warning.print('|');
            } else {
                Log.warning.print('.');
            }
        }
    }
    Log.warning.println(" done.");
}

From source file:org.mskcc.cbio.portal.servlet.NetworkServlet.java

/**
 * Selects nodes to drop from the network, keeping the top n altered nodes.
 *
 * @param network   the network to prune
 * @param diffusion diffusion factor used when computing altered percentages
 * @param n         number of top altered nodes to keep
 * @return the list of nodes to remove
 */
private List<Node> getNodesToRemove(final Network network, final double diffusion, final int n) {
    final Map<Node, Double> mapDiffusion = getMapDiffusedTotalAlteredPercentage(network, diffusion);

    // keep track of the top n altered nodes in a min-heap
    PriorityQueue<Node> topAlteredNodes = new PriorityQueue<Node>(Math.max(1, n), new Comparator<Node>() { // capacity must be >= 1
        public int compare(Node n1, Node n2) {
            int ret = mapDiffusion.get(n1).compareTo(mapDiffusion.get(n2));
            if (diffusion != 0 && ret == 0) { // if the same diffused perc, use own perc
                ret = Double.compare(getTotalAlteredPercentage(n1), getTotalAlteredPercentage(n2));
            }

            if (ret == 0) { // if the same, rank according to degree
                ret = network.getDegree(n1) - network.getDegree(n2);
            }

            return ret;
        }
    });

    List<Node> nodesToRemove = new ArrayList<Node>();
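    // topAlteredNodes' head is the least altered of the current top n,
    // so a node must beat the head to stay in the queue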
    for (Node node : network.getNodes()) {
        if (isInQuery(node) || node.getType().equals(NodeType.DRUG)) {
            continue;
        }

        if (topAlteredNodes.size() < n) {
            topAlteredNodes.add(node);
        } else {
            if (n == 0) {
                nodesToRemove.add(node);
            } else {
                if (mapDiffusion.get(node) > mapDiffusion.get(topAlteredNodes.peek())) {
                    nodesToRemove.add(topAlteredNodes.poll());
                    topAlteredNodes.add(node);
                } else {
                    nodesToRemove.add(node);
                }
            }
        }
    }

    return nodesToRemove;
}

From source file:org.apache.sysml.runtime.compress.CompressedMatrixBlock.java

private static ColGroup compressColGroup(MatrixBlock in, CompressedSizeEstimator estim,
        HashMap<Integer, Double> compRatios, int rlen, double sp, int[] colIndexes) {
    int[] allGroupIndices = null;
    int allColsCount = colIndexes.length;
    CompressedSizeInfo sizeInfo;
    // The compression type is decided based on a full bitmap since it
    // will be reused for the actual compression step.
    UncompressedBitmap ubm = null;
    PriorityQueue<CompressedColumn> compRatioPQ = null;
    boolean skipGroup = false;
    while (true) {
        //extract the exact bitmap for the group and observe the compression ratio
        ubm = BitmapEncoder.extractBitmap(colIndexes, in);
        sizeInfo = estim.estimateCompressedColGroupSize(ubm);
        double compRatio = getUncompressedSize(rlen, colIndexes.length, sp) / sizeInfo.getMinSize();

        if (compRatio > 1) {
            break; // we have a good group
        }

        // modify the group
        if (compRatioPQ == null) {
            // first modification
            allGroupIndices = colIndexes.clone();
            compRatioPQ = new PriorityQueue<CompressedMatrixBlock.CompressedColumn>();
            for (int i = 0; i < colIndexes.length; i++)
                compRatioPQ.add(new CompressedColumn(i, compRatios.get(colIndexes[i])));
        }

        // index in allGroupIndices
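        // the poll below removes the column with the lowest individual compression ratio (head of the min-heap)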
        int removeIx = compRatioPQ.poll().colIx;
        allGroupIndices[removeIx] = -1;
        allColsCount--;
        if (allColsCount == 0) {
            skipGroup = true;
            break;
        }
        colIndexes = new int[allColsCount];
        // copying the values that do not equal -1
        int ix = 0;
        for (int col : allGroupIndices)
            if (col != -1)
                colIndexes[ix++] = col;
    }

    //add group to uncompressed fallback
    if (skipGroup)
        return null;

    //create compressed column group
    long rleSize = sizeInfo.getRLESize();
    long oleSize = sizeInfo.getOLESize();
    if (rleSize < oleSize)
        return new ColGroupRLE(colIndexes, rlen, ubm);
    else
        return new ColGroupOLE(colIndexes, rlen, ubm);
}

From source file:org.apache.hama.ml.recommendation.cf.OnlineCF.java

@Override
public List<Preference<Long, Long>> getMostPreferredItems(long userId, int count) {
    Comparator<Preference<Long, Long>> scoreComparator = new Comparator<Preference<Long, Long>>() {

        @Override
        public int compare(Preference<Long, Long> arg0, Preference<Long, Long> arg1) {
            double difference = arg0.getValue().get() - arg1.getValue().get();
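            // note: the int cast below collapses differences smaller than 1e-5 to zero (treated as equal)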
            return (int) (100000 * difference);
        }
    };
    PriorityQueue<Preference<Long, Long>> queue = new PriorityQueue<Preference<Long, Long>>(count,
            scoreComparator);
    LinkedList<Preference<Long, Long>> results = new LinkedList<Preference<Long, Long>>();

    if (function == null) {
        Class<?> cls = conf.getClass(OnlineCF.Settings.CONF_ONLINE_UPDATE_FUNCTION, null);
        try {
            function = (OnlineUpdate.Function) (cls.newInstance());
        } catch (Exception e) {
            // set default function
        }
    }

    InputStructure e = new InputStructure();
    e.user = this.modelUserFactorizedValues.get(Long.valueOf(userId));
    e.userFeatureFactorized = this.modelUserFeatureFactorizedValues;
    e.userFeatures = this.modelUserFeatures.get(Long.valueOf(userId));
    e.itemFeatureFactorized = this.modelItemFeatureFactorizedValues;
    if (e.user == null) {
        return null;
    }

    double score = 0.0;
    for (Entry<Long, VectorWritable> item : modelItemFactorizedValues.entrySet()) {
        e.item = item.getValue();
        e.itemFeatures = this.modelItemFeatures.get(item.getKey());
        score = function.predict(e);
        queue.add(new Preference<Long, Long>(userId, item.getKey(), score));
    }
    results.addAll(queue);
    return results;
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.FifoIntraQueuePreemptionPlugin.java

private PriorityQueue<TempAppPerPartition> createTempAppForResCalculation(String partition,
        Collection<FiCaSchedulerApp> apps, TAPriorityComparator taComparator) {
    PriorityQueue<TempAppPerPartition> orderedByPriority = new PriorityQueue<>(100, taComparator);

    // have an internal temp app structure to store intermediate data (priority)
    for (FiCaSchedulerApp app : apps) {

        Resource used = app.getAppAttemptResourceUsage().getUsed(partition);
        Resource amUsed = null;
        if (!app.isWaitingForAMContainer()) {
            amUsed = app.getAMResource(partition);
        }
        Resource pending = app.getTotalPendingRequestsPerPartition().get(partition);
        Resource reserved = app.getAppAttemptResourceUsage().getReserved(partition);

        used = (used == null) ? Resources.createResource(0, 0) : used;
        amUsed = (amUsed == null) ? Resources.createResource(0, 0) : amUsed;
        pending = (pending == null) ? Resources.createResource(0, 0) : pending;
        reserved = (reserved == null) ? Resources.createResource(0, 0) : reserved;

        HashSet<String> partitions = new HashSet<String>(
                app.getAppAttemptResourceUsage().getNodePartitionsSet());
        partitions.addAll(app.getTotalPendingRequestsPerPartition().keySet());

        // Create TempAppPerPartition for further calculation.
        TempAppPerPartition tmpApp = new TempAppPerPartition(app, Resources.clone(used),
                Resources.clone(amUsed), Resources.clone(reserved), Resources.clone(pending));

        // Set ideal allocation of app as 0.
        tmpApp.idealAssigned = Resources.createResource(0, 0);

        orderedByPriority.add(tmpApp);
    }
    return orderedByPriority;
}

From source file:org.apache.hadoop.hbase.master.balancer.LocalityAwareLoadBalancer.java

/**
 * This implements the Locality Aware Load Balancer.
 * Information for the algorithm can be found here: https://issues.apache.org/jira/browse/HBASE-10075
 *
 * @param clusterMap Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterMap) {
    long startTime = System.currentTimeMillis();

    ClusterLoadState cs = new ClusterLoadState(clusterMap);

    float average = cs.getLoadAverage(); // for logging
    int ceiling = (int) Math.ceil(average * (1 + slop));
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = cs.getServersByLoad();

    if (!this.needsBalance(cs)) {
        /*LOG.info("Skipping load balancing because balanced cluster; " +
                "servers=" + cs.getNumServers() + " " +
                "regions=" + cs.getNumRegions() + " average=" + average + " " +
                "mostloaded=" + serversByLoad.lastKey().getLoad() +
                " leastloaded=" + serversByLoad.firstKey().getLoad());*/
        return null;
    }

    // Additional check for the locality-aware load balancer, since it
    // considers only the most loaded servers
    if (!(cs.getMaxLoad() > ceiling)) {
        return null;
    }

    Cluster cluster = new Cluster(clusterMap, new HashMap<String, Deque<RegionLoad>>(), regionLocationFinder);
    int numRegions = cs.getNumRegions();

    LOG.info(" ####################################################################################");
    LOG.info(" Before Locality-aware Balancing");
    LOG.info(" Average=" + average + " Ceiling=" + ceiling + " slop=" + slop);
    /* for (ServerAndLoad server : serversByLoad.keySet()) {
      LOG.info("---------------" + "Server Name: " + server.getServerName() + "---------------");
      List<HRegionInfo> hRegionInfos = serversByLoad.get(server);
      LOG.info("Number of Regions:" + hRegionInfos.size());
      for (HRegionInfo hRegionInfo : hRegionInfos){
        LOG.info(String.format("Name of Region: %s ", hRegionInfo.getRegionNameAsString()));
        //LOG.info(String.format("Size of Region in number of rows"+(Bytes.toInt(hRegionInfo.getStartKey())-Bytes.toInt(hRegionInfo.getEndKey()))));
        LOG.info("Start Key: " + Bytes.toString(hRegionInfo.getStartKey()));
        LOG.info("End Key: " + Bytes.toString(hRegionInfo.getEndKey()));
      }
      LOG.info("------------------------------------------------------------------------------");
    } */

    // calculate allTableRegionNumber = total number of regions per table.
    Map<Integer, Integer> allTableRegionNumberMap = new HashMap<Integer, Integer>();
    for (int i = 0; i < cluster.numServers; ++i) {
        for (int j = 0; j < cluster.numTables; ++j) {
            if (allTableRegionNumberMap.containsKey(j)) {
                Integer integer = allTableRegionNumberMap.get(j);
                integer = integer + cluster.numRegionsPerServerPerTable[i][j];
                allTableRegionNumberMap.put(j, integer);
            } else {
                allTableRegionNumberMap.put(j, cluster.numRegionsPerServerPerTable[i][j]);
            }
        }
    }

    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    for (ServerAndLoad server : serversByLoad.keySet()) {
        List<HRegionInfo> hRegionInfos = serversByLoad.get(server);
        // Skip servers whose region count is at or below the ceiling.
        if (hRegionInfos.size() <= ceiling) {
            LOG.debug("Number of HRegions <= ceiling (" + hRegionInfos.size() + " <= " + ceiling + ")");
            continue;
        }
        PriorityQueue<RegionServerRegionAffinity> queue = new PriorityQueue<RegionServerRegionAffinity>();
        int numberOfRegionsToMove = hRegionInfos.size() - ceiling;
        double regionAffinityNumber = (1 - (double) hRegionInfos.size() / numRegions) * SERVER_BALANCER_WEIGHT; // cast avoids integer division
        double tableRegionAffinityNumber = 0;
        // Compute a weighted affinity score for each region on this overloaded server
        for (HRegionInfo hRegionInfo : hRegionInfos) {
            // Do not move metaregion.
            if (hRegionInfo.isMetaRegion()) {
                continue;
            }
            TableName table = hRegionInfo.getTable();
            String tableName = table.getNameAsString();
            int tableIndex = cluster.tablesToIndex.get(tableName);
            int serverIndex = cluster.serversToIndex.get(server.getServerName().getHostAndPort());
            tableRegionAffinityNumber = (1 - (double) cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                    / allTableRegionNumberMap.get(tableIndex)) * TABLE_BALANCER_WEIGHT; // cast avoids integer division
            float localityIndex = getLocalityIndex(hRegionInfo, server) * LOCALITY_WEIGHT;
            LOG.info("tableRegionaffinity: " + tableRegionAffinityNumber);
            LOG.info("regionAffinityNUmber: " + regionAffinityNumber);
            LOG.info("localityIndex: " + localityIndex);
            double finalAffinity = regionAffinityNumber + tableRegionAffinityNumber + localityIndex
                    + getStickinessWeight(hRegionInfo);
            queue.add(new RegionServerRegionAffinity(server, hRegionInfo, finalAffinity));
            LOG.info("Affinity between server=" + server.getServerName() + " and region="
                    + hRegionInfo.getRegionNameAsString() + " is " + finalAffinity);
        }

        LOG.info("Number of regions to move=" + numberOfRegionsToMove + " All server and region affinities: "
                + queue);

        // Get top numberOfRegionsToMove
        List<RegionServerRegionAffinity> listOfRegionsToMove = new ArrayList<RegionServerRegionAffinity>();
        for (int i = 0; i < numberOfRegionsToMove; ++i) {
            if (queue.isEmpty()) {
                continue;
            }
            listOfRegionsToMove.add(queue.poll());
        }

        // Search for the most affine servers to these listOfRegionsToMove
        for (RegionServerRegionAffinity regionServerRegionAffinity : listOfRegionsToMove) {
            HRegionInfo hRegionInfoToMove = regionServerRegionAffinity.getHRegionInfo();
            ServerAndLoad serverToMove = null;
            double maxAffinity = Double.NEGATIVE_INFINITY; // Double.MIN_VALUE is the smallest positive double, not the most negative
            // Get the most affine server to hRegionInfoToMove
            for (ServerAndLoad activeServer : serversByLoad.keySet()) {
                hRegionInfos = serversByLoad.get(activeServer);
                if (activeServer.equals(regionServerRegionAffinity.getServer())) {
                    continue;
                }
                if (hRegionInfos.size() >= ceiling) {
                    LOG.debug("Number of HRegions >= ceiling (" + hRegionInfos.size() + " >= " + ceiling + ")");
                    continue;
                }
                regionAffinityNumber = (1 - (double) hRegionInfos.size() / numRegions) * SERVER_BALANCER_WEIGHT;
                TableName table = hRegionInfoToMove.getTable();
                String tableNameAsString = table.getNameAsString();
                int serverIndex = cluster.serversToIndex.get(activeServer.getServerName().getHostAndPort());
                tableRegionAffinityNumber = 0;
                if (cluster.tablesToIndex.containsKey(tableNameAsString)) {
                    Integer tableIndex = cluster.tablesToIndex.get(tableNameAsString);
                    tableRegionAffinityNumber = (1
                            - (double) cluster.numRegionsPerServerPerTable[serverIndex][tableIndex]
                                    / allTableRegionNumberMap.get(tableIndex))
                            * TABLE_BALANCER_WEIGHT;
                } else {
                    LOG.error("Table " + tableNameAsString + "not present in cluster.tablesToIndex");
                }
                double finalAffinity = regionAffinityNumber + tableRegionAffinityNumber
                        + getLocalityIndex(hRegionInfoToMove, activeServer) * LOCALITY_WEIGHT
                        + getStickinessWeight(hRegionInfoToMove);
                if (finalAffinity > maxAffinity) {
                    maxAffinity = finalAffinity;
                    serverToMove = activeServer;
                }
            }
            regionsToReturn.add(new RegionPlan(hRegionInfoToMove,
                    regionServerRegionAffinity.getServer().getServerName(), serverToMove.getServerName()));
        }
    }

    LOG.info("Returning plan: " + regionsToReturn);

    // Reset previously moved regions and add new regions
    previouslyMovedRegions.clear();
    for (RegionPlan regionPlan : regionsToReturn) {
        previouslyMovedRegions.add(regionPlan.getRegionInfo());
    }

    long endTime = System.currentTimeMillis();
    LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving "
            + regionsToReturn.size() + " regions");
    return regionsToReturn;
}