Example usage for java.util NavigableMap put

Introduction

This page collects example usages of java.util.NavigableMap.put.

Prototype

V put(K key, V value);

Documentation

Associates the specified value with the specified key in this map (optional operation).
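
A minimal, self-contained sketch of the call itself (class and variable names here are invented for illustration): put inserts a mapping, returns the value previously associated with the key (or null if there was none), and a NavigableMap keeps its keys in sorted order.

import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapPutDemo {
    public static void main(String[] args) {
        NavigableMap<String, Integer> map = new TreeMap<>();
        map.put("b", 2);                      // no previous mapping: returns null
        Integer previous = map.put("b", 20);  // replaces the value: returns 2
        map.put("a", 1);
        System.out.println(previous);         // 2
        System.out.println(map);              // {a=1, b=20} -- keys in sorted order
    }
}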

Usage

From source file: org.apache.hadoop.hbase.coprocessor.example.WriteHeavyIncrementObserver.java

@Override
public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c, Get get, List<Cell> result)
        throws IOException {
    Scan scan = new Scan().withStartRow(get.getRow()).withStopRow(get.getRow(), true).readAllVersions();
    NavigableMap<byte[], NavigableMap<byte[], MutableLong>> sums = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    get.getFamilyMap().forEach((cf, cqs) -> {
        NavigableMap<byte[], MutableLong> ss = new TreeMap<>(Bytes.BYTES_COMPARATOR);
        sums.put(cf, ss);
        cqs.forEach(cq -> {
            ss.put(cq, new MutableLong(0));
            scan.addColumn(cf, cq);
        });
    });
    List<Cell> cells = new ArrayList<>();
    try (RegionScanner scanner = c.getEnvironment().getRegion().getScanner(scan)) {
        boolean moreRows;
        do {
            moreRows = scanner.next(cells);
            for (Cell cell : cells) {
                byte[] family = CellUtil.cloneFamily(cell);
                byte[] qualifier = CellUtil.cloneQualifier(cell);
                long value = Bytes.toLong(cell.getValueArray(), cell.getValueOffset());
                sums.get(family).get(qualifier).add(value);
            }
            cells.clear();
        } while (moreRows);
    }
    sums.forEach((cf, m) -> m.forEach((cq, s) -> result
            .add(createCell(get.getRow(), cf, cq, HConstants.LATEST_TIMESTAMP, s.longValue()))));
    c.bypass();
}
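
The observer above layers two TreeMaps keyed by raw byte[]. Since byte[] has no natural ordering and compares by identity, a sorted map over such keys needs an explicit comparator like HBase's Bytes.BYTES_COMPARATOR. A stripped-down sketch of the nested pattern (names invented, assuming the HBase client library on the classpath):

import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.util.Bytes;

public class NestedByteMapDemo {
    public static void main(String[] args) {
        // Outer map: column family -> (column qualifier -> counter).
        NavigableMap<byte[], NavigableMap<byte[], Long>> sums =
                new TreeMap<>(Bytes.BYTES_COMPARATOR);
        byte[] cf = Bytes.toBytes("cf");
        byte[] cq = Bytes.toBytes("cq");
        sums.put(cf, new TreeMap<>(Bytes.BYTES_COMPARATOR));
        sums.get(cf).put(cq, 42L);
        System.out.println(sums.get(cf).get(cq)); // 42
    }
}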

From source file: org.apache.hadoop.hbase.coprocessor.transactional.SsccTableClientUtils.java

static public void testSsccDelete(final int value) throws IOException {

    log.info("  " + Thread.currentThread().getName() + "  Starting testSsccDelete");
    final long id = getTransId();
    Batch.Call<SsccRegionService, SsccDeleteTransactionalResponse> callable = new Batch.Call<SsccRegionService, SsccDeleteTransactionalResponse>() {
        ServerRpcController controller = new ServerRpcController();
        BlockingRpcCallback<SsccDeleteTransactionalResponse> rpcCallback = new BlockingRpcCallback<SsccDeleteTransactionalResponse>();

        @Override
        public SsccDeleteTransactionalResponse call(SsccRegionService instance) throws IOException {
            org.apache.hadoop.hbase.coprocessor.transactional.generated.SsccRegionProtos.SsccDeleteTransactionalRequest.Builder builder = SsccDeleteTransactionalRequest
                    .newBuilder();
            builder.setTransactionId(id);
            builder.setRegionName(ByteString.copyFromUtf8(regionname));

            Delete d = new Delete(Bytes.toBytes(ROW1));

            // d.deleteColumn(FAMILY, QUAL_A, System.currentTimeMillis());
            NavigableMap<byte[], List<Cell>> map = d.getFamilyCellMap();
            List<Cell> list = new ArrayList<Cell>();
            Cell c = new KeyValue(Bytes.toBytes(ROW1), FAMILY, QUAL_A, System.currentTimeMillis(), Type.Delete);

            list.add(KeyValueUtil.ensureKeyValue(c));
            map.put(FAMILY, list);
            d.setFamilyCellMap(map);

            MutationProto m1 = ProtobufUtil.toMutation(MutationType.DELETE, d);
            builder.setDelete(m1);

            instance.delete(controller, builder.build(), rpcCallback);
            return rpcCallback.get();
        }
    };

    Map<byte[], SsccDeleteTransactionalResponse> result = null;
    try {
        result = ht.coprocessorService(SsccRegionService.class, null, null, callable);
    } catch (Throwable e) {
        e.printStackTrace();
        return; // without a result there are no responses to inspect
    }

    for (SsccDeleteTransactionalResponse dresponse : result.values()) {
        boolean hasException = dresponse.getHasException();
        if (hasException) {
            String exception = dresponse.getException();
            log.info("  " + Thread.currentThread().getName() + "  testSsccDelete exception " + exception);
        } else {
            returnStatus = dresponse.getStatus();
            String returnString;

            switch (returnStatus) {
            case STATEFUL_UPDATE_OK:
                returnString = "STATEFUL_UPDATE_OK";
                break;
            case STATEFUL_UPDATE_CONFLICT:
                returnString = "STATEFUL_UPDATE_CONFLICT";
                break;
            case STATELESS_UPDATE_OK:
                returnString = "STATELESS_UPDATE_OK";
                break;
            case STATELESS_UPDATE_CONFLICT:
                returnString = "STATELESS_UPDATE_CONFLICT";
                break;
            default:
                returnString = "Unknown return value: " + Integer.toString(returnStatus);
                break;
            }
            log.info("  " + Thread.currentThread().getName() + "  testSsccDelete returnStatus is  "
                    + returnString);
        }

    }

    log.info("  " + Thread.currentThread().getName() + "  Finished testSsccDelete");
}

From source file: org.apache.hadoop.hbase.extended.loadbalance.strategies.hotspot.HotSpotLoadBalancer.java

private double addRegionsToCompleteMap(Map<String, HRegionInfo> regionNameRegionInfoMap, ServerName serverName,
        NavigableMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>> serversByLoad,
        Map<String, RegionLoad> regionalLoadMapforServer,
        HashBiMap<HRegionInfo, HotSpotRegionLoad> allRegionsLoadBiMap) {
    double loadAccumulator = 0.0;
    List<HotSpotRegionLoad> modRegionLoadList = new ArrayList<HotSpotRegionLoad>();
    boolean isHotspot = false;
    HRegionInfo regionInfo = null;
    TreeMap<HotSpotRegionLoad, HRegionInfo> regionLoadMap = new TreeMap<HotSpotRegionLoad, HRegionInfo>(
            HotSpotRegionLoad.DESC_LOAD);

    for (Map.Entry<String, RegionLoad> loadItem : regionalLoadMapforServer.entrySet()) {

        regionInfo = regionNameRegionInfoMap.get(loadItem.getKey());
        if (regionInfo == null) {
            String message = "######################## as regionInfo is null from regionNameRegionInfoMap for the region name ="
                    + loadItem.getKey()
                    + " determined from  regionNameRegionInfoMap. The rest of the balancing is useless. "
                    + "We need to return to the assignment manager.";
            LOG.warn(message);
        } else {
            HotSpotRegionLoad readHotSpotRegionLoad = getHotSpotRegionLoadInstance(loadItem, this.divideFactor);
            LOG.debug("######################## loadItem = " + loadItem + "\n readHotSpotRegionLoad= "
                    + readHotSpotRegionLoad);
            regionLoadMap.put(readHotSpotRegionLoad, regionInfo);
            loadAccumulator += readHotSpotRegionLoad.getLoad();
            LOG.debug("######################## current loadAccumulator=" + loadAccumulator);
            modRegionLoadList.add(readHotSpotRegionLoad);
            allRegionsLoadBiMap.put(regionInfo, readHotSpotRegionLoad);
        }

    }
    // iterate over regionLoadMap and find if the top x% have y% load
    isHotspot = isHotSpot(regionLoadMap, modRegionLoadList, loadAccumulator);
    HotSpotServerAndLoad msl = new HotSpotServerAndLoad(serverName, loadAccumulator, isHotspot);
    Collections.sort(modRegionLoadList, HotSpotRegionLoad.DESC_LOAD);
    serversByLoad.put(msl, modRegionLoadList);

    return loadAccumulator;

}
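
Here put feeds a TreeMap ordered by HotSpotRegionLoad.DESC_LOAD, so iteration visits the heaviest regions first. The same idea in miniature, with a reversed comparator over invented keys:

import java.util.Comparator;
import java.util.NavigableMap;
import java.util.TreeMap;

public class DescendingLoadDemo {
    public static void main(String[] args) {
        // A reversed comparator makes put-then-iterate yield the highest load first.
        NavigableMap<Integer, String> byLoadDesc = new TreeMap<>(Comparator.reverseOrder());
        byLoadDesc.put(10, "region-a");
        byLoadDesc.put(30, "region-b");
        byLoadDesc.put(20, "region-c");
        System.out.println(byLoadDesc.firstKey()); // 30 -- heaviest first
        // A naturally ordered map offers the same view via descendingMap().
    }
}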

From source file: org.apache.hadoop.hbase.master.DefaultLoadBalancer.java

/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded regions of each server.
 *
 * The load balancing invariant is that all servers are within 1 region of the
 * average number of regions per server.  If the average is an integer number,
 * all servers will be balanced to the average.  Otherwise, all servers will
 * have either floor(average) or ceiling(average) regions.
 *
 * HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that
 *   we can fetch from both ends of the queue. 
 * At the beginning, we check whether an empty region server was
 *   just discovered by the Master. If so, we alternately choose new / old
 *   regions from the head / tail of regionsToMove, respectively. This alternation
 *   avoids clustering young regions on the newly discovered region server.
 *   Otherwise, we choose new regions from the head of regionsToMove.
 *
 * Another improvement from HBASE-3609 is that we assign regions from
 *   regionsToMove to underloaded servers in round-robin fashion.
 *   Previously one underloaded server would be filled before we move onto
 *   the next underloaded server, leading to clustering of young regions.
 *   
 * Finally, we randomly shuffle underloaded servers so that they receive
 *   offloaded regions relatively evenly across calls to balanceCluster().
 *         
 * The algorithm is currently implemented as such:
 *
 * <ol>
 * <li>Determine the two valid numbers of regions each server should have,
 *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 *
 * <li>Iterate down the most loaded servers, shedding regions from each so
 *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
 *     server that already has &lt;= <b>MAX</b> regions.
 *     <p>
 *     Order the regions to move from most recent to least.
 *
 * <li>Iterate down the least loaded servers, assigning regions so each server
 *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
 *     already has &gt;= <b>MIN</b> regions.
 *
 *     Regions being assigned to underloaded servers are those that were shed
 *     in the previous step.  It is possible that there were not enough
 *     regions shed to fill each underloaded server to <b>MIN</b>.  If so we
 *     end up with a number of regions required to do so, <b>neededRegions</b>.
 *
 *     It is also possible that we were able to fill each underloaded server but
 *     ended up with regions that were unassigned from overloaded servers and
 *     still do not have an assignment.
 *
 *     If neither of these conditions hold (no regions needed to fill the
 *     underloaded servers, no regions leftover from overloaded servers),
 *     we are done and return.  Otherwise we handle these cases below.
 *
 * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
 *     we iterate the most loaded servers again, shedding a single region from
 *     each (this brings them from having <b>MAX</b> regions to having
 *     <b>MIN</b> regions).
 *
 * <li>We now definitely have more regions that need assignment, either from
 *     the previous step or from the original shedding from overloaded servers.
 *     Iterate the least loaded servers filling each to <b>MIN</b>.
 *
 * <li>If we still have more regions that need assignment, again iterate the
 *     least loaded servers, this time giving each one (filling them to
 *     <b>MAX</b>) until we run out.
 *
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
 *
 *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
 *     to end up with <b>MAX</b> regions at the end of the balancing.  This
 *     ensures the minimal number of regions possible are moved.
 * </ol>
 *
 * TODO: We can at-most reassign the number of regions away from a particular
 *       server to be how many they report as most loaded.
 *       Should we just keep all assignment in memory?  Any objections?
 *       Does this mean we need HeapSize on HMaster?  Or just careful monitor?
 *       (current thinking is we will hold all assignments in memory)
 *
 * @param clusterState Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    boolean emptyRegionServerPresent = false;
    long startTime = System.currentTimeMillis();

    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.debug("numServers=0 so skipping load balancing");
        return null;
    }
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = new TreeMap<ServerAndLoad, List<HRegionInfo>>();
    int numRegions = 0;
    // Iterate so we can count regions as we build the map
    for (Map.Entry<ServerName, List<HRegionInfo>> server : clusterState.entrySet()) {
        List<HRegionInfo> regions = server.getValue();
        int sz = regions.size();
        if (sz == 0)
            emptyRegionServerPresent = true;
        numRegions += sz;
        serversByLoad.put(new ServerAndLoad(server.getKey(), sz), regions);
    }
    // Check if we even need to do any load balancing
    float average = (float) numRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
    if (serversByLoad.lastKey().getLoad() <= ceiling && serversByLoad.firstKey().getLoad() >= floor) {
        // Skipped because no server outside (min,max) range
        LOG.info("Skipping load balancing because balanced cluster; " + "servers=" + numServers + " "
                + "regions=" + numRegions + " average=" + average + " " + "mostloaded="
                + serversByLoad.lastKey().getLoad() + " leastloaded=" + serversByLoad.firstKey().getLoad());
        return null;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(numRegions).append(", numServers=")
            .append(numServers).append(", max=").append(max).append(", min=").append(min);
    LOG.debug(strBalanceParam.toString());

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    MinMaxPriorityQueue<RegionPlan> regionsToMove = MinMaxPriorityQueue.orderedBy(rpComparator).create();
    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    // flag used to fetch regions from head and tail of list, alternately
    boolean fetchFromTail = false;
    Map<ServerName, BalanceInfo> serverBalanceInfo = new TreeMap<ServerName, BalanceInfo>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        ServerAndLoad sal = server.getKey();
        int regionCount = sal.getLoad();
        if (regionCount <= max) {
            serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = server.getValue();
        int numToOffload = Math.min(regionCount - max, regions.size());
        // account for the out-of-band regions which were assigned to this server
        // after some other region server crashed 
        Collections.sort(regions, riComparator);
        int numTaken = 0;
        for (int i = 0; i <= numToOffload;) {
            HRegionInfo hri = regions.get(i); // fetch from head
            if (fetchFromTail) {
                hri = regions.get(regions.size() - 1 - i);
            }
            i++;
            // Don't rebalance meta regions.
            if (hri.isMetaRegion())
                continue;
            regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
            // fetch in alternate order if there is new region server
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
        serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(numToOffload, (-1) * numTaken));
    }
    int totalNumMoved = regionsToMove.size();

    // Walk down least loaded, filling each to the min
    int neededRegions = 0; // number of regions needed to bring all up to min
    fetchFromTail = false;

    Map<ServerName, Integer> underloadedServers = new HashMap<ServerName, Integer>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min) {
            break;
        }
        underloadedServers.put(server.getKey().getServerName(), min - regionCount);
    }
    // number of servers that get new regions
    int serversUnderloaded = underloadedServers.size();
    int incr = 1;
    List<ServerName> sns = Arrays
            .asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded]));
    Collections.shuffle(sns, RANDOM);
    while (regionsToMove.size() > 0) {
        int cnt = 0;
        int i = incr > 0 ? 0 : underloadedServers.size() - 1;
        for (; i >= 0 && i < underloadedServers.size(); i += incr) {
            if (regionsToMove.isEmpty())
                break;
            ServerName si = sns.get(i);
            int numToTake = underloadedServers.get(si);
            if (numToTake == 0)
                continue;

            addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }

            underloadedServers.put(si, numToTake - 1);
            cnt++;
            BalanceInfo bi = serverBalanceInfo.get(si);
            if (bi == null) {
                bi = new BalanceInfo(0, 0);
                serverBalanceInfo.put(si, bi);
            }
            bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1);
        }
        if (cnt == 0)
            break;
        // iterates underloadedServers in the other direction
        incr = -incr;
    }
    for (Integer i : underloadedServers.values()) {
        // If we still want to take some, increment needed
        neededRegions += i;
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionsToMove.isEmpty()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
                + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
                + " less loaded servers");
        return regionsToReturn;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            if (region.isMetaRegion())
                continue; // Don't move meta regions.
            regionsToMove.add(new RegionPlan(region, server.getKey().getServerName(), null));
            totalNumMoved++;
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && 0 < regionsToMove.size()) {
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            numTaken++;
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (0 < regionsToMove.size()) {
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
            int regionCount = server.getKey().getLoad();
            if (regionCount >= max) {
                break;
            }
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
            if (regionsToMove.isEmpty()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (!regionsToMove.isEmpty() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded="
                + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ServerName, List<HRegionInfo>> e : clusterState.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().toString());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
            + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
            + " less loaded servers");

    return regionsToReturn;
}
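
The javadoc above credits HBASE-3609 with modeling regionsToMove as a Guava MinMaxPriorityQueue precisely so regions can be fetched from both ends. A minimal sketch of that two-ended access, detached from HBase (names invented, assuming Guava on the classpath):

import com.google.common.collect.MinMaxPriorityQueue;

public class TwoEndedQueueDemo {
    public static void main(String[] args) {
        MinMaxPriorityQueue<Integer> regionsToMove = MinMaxPriorityQueue.create();
        regionsToMove.add(3);
        regionsToMove.add(1);
        regionsToMove.add(2);
        System.out.println(regionsToMove.pollFirst()); // 1 -- least element
        System.out.println(regionsToMove.pollLast());  // 3 -- greatest element
    }
}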

From source file: org.apache.hadoop.hbase.master.procedure.MasterDDLOperationHelper.java

/**
 * Reopen all regions from a table after a schema change operation.
 **/
public static boolean reOpenAllRegions(final MasterProcedureEnv env, final TableName tableName,
        final List<HRegionInfo> regionInfoList) throws IOException {
    boolean done = false;
    LOG.info("Bucketing regions by region server...");
    List<HRegionLocation> regionLocations = null;
    Connection connection = env.getMasterServices().getConnection();
    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
        regionLocations = locator.getAllRegionLocations();
    }
    // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
    NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
    for (HRegionLocation location : regionLocations) {
        hri2Sn.put(location.getRegionInfo(), location.getServerName());
    }
    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
    List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
    for (HRegionInfo hri : regionInfoList) {
        ServerName sn = hri2Sn.get(hri);
        // Skip the offlined split parent region
        // See HBASE-4578 for more information.
        if (null == sn) {
            LOG.info("Skip " + hri);
            continue;
        }
        if (!serverToRegions.containsKey(sn)) {
            LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
            serverToRegions.put(sn, hriList);
        }
        reRegions.add(hri);
        serverToRegions.get(sn).add(hri);
    }

    LOG.info("Reopening " + reRegions.size() + " regions on " + serverToRegions.size() + " region servers.");
    AssignmentManager am = env.getMasterServices().getAssignmentManager();
    am.setRegionsToReopen(reRegions);
    BulkReOpen bulkReopen = new BulkReOpen(env.getMasterServices(), serverToRegions, am);
    while (true) {
        try {
            if (bulkReopen.bulkReOpen()) {
                done = true;
                break;
            } else {
                LOG.warn("Timeout before reopening all regions");
            }
        } catch (InterruptedException e) {
            LOG.warn("Reopen was interrupted");
            // Preserve the interrupt.
            Thread.currentThread().interrupt();
            break;
        }
    }
    return done;
}
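
The containsKey/put/get sequence above is the classic group-by-key idiom; on Java 8+ the same grouping collapses into a single computeIfAbsent call. A small sketch under invented names:

import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

public class GroupByServerDemo {
    public static void main(String[] args) {
        NavigableMap<String, List<String>> serverToRegions = new TreeMap<>();
        // computeIfAbsent creates and puts the list the first time a key is seen.
        serverToRegions.computeIfAbsent("server-1", k -> new ArrayList<>()).add("region-a");
        serverToRegions.computeIfAbsent("server-1", k -> new ArrayList<>()).add("region-b");
        System.out.println(serverToRegions); // {server-1=[region-a, region-b]}
    }
}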

From source file: org.apache.hadoop.hbase.master.RegionManager.java

/**
 * @return Snapshot of regionsInTransition as a sorted Map.
 */
NavigableMap<String, String> getRegionsInTransition() {
    NavigableMap<String, String> result = new TreeMap<String, String>();
    synchronized (this.regionsInTransition) {
        if (this.regionsInTransition.isEmpty())
            return result;
        for (Map.Entry<String, RegionState> e : this.regionsInTransition.entrySet()) {
            result.put(e.getKey(), e.getValue().toString());
        }
    }
    return result;
}

From source file: org.apache.hadoop.hbase.MetaTableAccessor.java

/**
 * Lists all of the table regions currently in META.
 * Deprecated; kept until the remaining tests that use it are removed.
 * @param connection connection we're using
 * @param tableName table to list
 * @return Map of all user-space regions to servers
 * @throws java.io.IOException
 * @deprecated use {@link #getTableRegionsAndLocations}, region can have multiple locations
 */
@Deprecated
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(Connection connection,
        final TableName tableName) throws IOException {
    final NavigableMap<HRegionInfo, ServerName> regions = new TreeMap<HRegionInfo, ServerName>();
    Visitor visitor = new TableVisitorBase(tableName) {
        @Override
        public boolean visitInternal(Result result) throws IOException {
            RegionLocations locations = getRegionLocations(result);
            if (locations == null)
                return true;
            for (HRegionLocation loc : locations.getRegionLocations()) {
                if (loc != null) {
                    HRegionInfo regionInfo = loc.getRegionInfo();
                    regions.put(regionInfo, loc.getServerName());
                }
            }
            return true;
        }
    };
    scanMetaForTableRegions(connection, visitor, tableName);
    return regions;
}

From source file: org.apache.hadoop.hbase.MetaTableAccessor.java

/**
 * @param connection connection we're using
 * @param serverName server whose regions we're interested in
 * @return Map of user regions installed on this server (does not include
 * catalog regions).
 * @throws IOException
 */
public static NavigableMap<HRegionInfo, Result> getServerUserRegions(Connection connection,
        final ServerName serverName) throws IOException {
    final NavigableMap<HRegionInfo, Result> hris = new TreeMap<HRegionInfo, Result>();
    // Fill the above hris map with entries from hbase:meta that have the passed
    // servername.
    CollectingVisitor<Result> v = new CollectingVisitor<Result>() {
        @Override
        void add(Result r) {
            if (r == null || r.isEmpty())
                return;
            RegionLocations locations = getRegionLocations(r);
            if (locations == null)
                return;
            for (HRegionLocation loc : locations.getRegionLocations()) {
                if (loc != null) {
                    if (loc.getServerName() != null && loc.getServerName().equals(serverName)) {
                        hris.put(loc.getRegionInfo(), r);
                    }
                }
            }
        }
    };
    scanMeta(connection, null, null, QueryType.REGION, v);
    return hris;
}

From source file: org.apache.hadoop.hbase.regionserver.ccindex.IndexedRegion.java

private NavigableMap<byte[], byte[]> getColumnsFromPut(Put put) {
    NavigableMap<byte[], byte[]> columnValues = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
    for (List<KeyValue> familyPuts : put.getFamilyMap().values()) {
        for (KeyValue kv : familyPuts) {
            columnValues.put(kv.getColumn(), kv.getValue());
        }
    }
    return columnValues;
}

From source file: org.apache.hadoop.hbase.regionserver.HRegion.java

/**
 * Add updates first to the HLog and then add values to memstore.
 * Warning: the caller is assumed to hold the lock on the passed-in row.
 * @param edits Cell updates by column
 * @throws IOException
 */
private void put(final byte[] row, byte[] family, List<Cell> edits) throws IOException {
    NavigableMap<byte[], List<Cell>> familyMap;
    familyMap = new TreeMap<byte[], List<Cell>>(Bytes.BYTES_COMPARATOR);

    familyMap.put(family, edits);
    Put p = new Put(row);
    p.setFamilyCellMap(familyMap);
    doBatchMutate(p);
}