Example usage for java.util NavigableMap entrySet

List of usage examples for java.util NavigableMap entrySet

Introduction

On this page you can find example usages of java.util.NavigableMap.entrySet().

Prototype

Set<Map.Entry<K, V>> entrySet();

Document

Returns a Set view of the mappings contained in this map.
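
Before the project examples, here is a minimal, self-contained sketch of the basic entrySet() iteration pattern on a NavigableMap (the class and variable names below are illustrative only):

import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapEntrySetExample {
    public static void main(String[] args) {
        // TreeMap is the standard NavigableMap implementation in java.util.
        NavigableMap<String, Integer> map = new TreeMap<String, Integer>();
        map.put("b", 2);
        map.put("a", 1);
        map.put("c", 3);

        // entrySet() returns a view whose iterator follows the map's key ordering.
        for (Map.Entry<String, Integer> entry : map.entrySet()) {
            System.out.println(entry.getKey() + " = " + entry.getValue()); // a = 1, b = 2, c = 3
        }

        // descendingMap().entrySet() visits the same mappings in reverse key order.
        for (Map.Entry<String, Integer> entry : map.descendingMap().entrySet()) {
            System.out.println(entry.getKey() + " = " + entry.getValue()); // c = 3, b = 2, a = 1
        }
    }
}

Because entrySet() is a view backed by the map, structural changes made through the set (or its iterator) are reflected in the map, and vice versa.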

Usage

From source file: org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Processes a transactional delete.
 * @param transactionId the transaction id
 * @param startId the start id of the transaction
 * @param delete the Delete to apply
 * @return int status code
 * @throws IOException
 */
public int delete(final long transactionId, final long startId, final Delete delete) throws IOException {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: delete -- ENTRY txId: " + transactionId + ", startId:"
                + startId);
    checkClosing(transactionId);
    SsccTransactionState state = this.beginTransIfNotExist(transactionId, startId);

    // Clone the delete just to change the timestamp; the overhead should not be a big concern here.
    byte[] rowkey = delete.getRow();
    Delete newDelete = new Delete(rowkey, startId);
    NavigableMap<byte[], List<Cell>> familyCellMap = delete.getFamilyCellMap();
    byte[] mergedColsV = null;
    byte[] cv = null;
    for (Entry<byte[], List<Cell>> entry : familyCellMap.entrySet()) {
        for (Iterator<Cell> iterator = entry.getValue().iterator(); iterator.hasNext();) {
            Cell cell = iterator.next();
            byte[] family = CellUtil.cloneFamily(cell);
            byte[] qualifier = CellUtil.cloneQualifier(cell);
            cv = null;
            cv = byteMerger("|".getBytes(), null);
            cv = byteMerger(cv, qualifier);
            cv = byteMerger(cv, "|".getBytes());
            byte[] currentCollist = state.getColList(rowkey);
            newDelete.deleteColumns(family, qualifier, startId); //NOTE: HBase 1.0 this API will change ...
            //use deleteColumns with a timestamp here, so it deletes all historical versions of this column
            //but the real delete is not done at this point; it happens in doCommit()
            //Only when it goes to doCommit() is it safe to delete all versions
            //Otherwise, SSCC uses HBase MVCC to keep historical versions, and other transactions may need those
            //  versions to get an earlier snapshot value.
            //Another choice here is to use deleteColumn instead of deleteColumns, so only the specific version
            //  identified by startId would be deleted. But these two approaches should be equivalent here; more testing may be needed.

            if (indexOf(currentCollist, cv) != -1) //already in this list
            {
                mergedColsV = byteMerger(currentCollist, null);
                continue;
            }
            mergedColsV = byteMerger(currentCollist, cv);
            state.addToColList(rowkey, mergedColsV);
        }
    }
    Get statusGet = new Get(rowkey);
    statusGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.STATUS_COL);
    statusGet.setMaxVersions();
    Result statusResult = m_Region.get(statusGet);
    List<Cell> sl = null;
    if (statusResult != null)
        sl = statusResult.listCells();

    // All deletes are treated as stateless, so no need to retrieve the versions
    if (state.hasConflict(sl, null, true, startId, transactionId) == false) {
        state.addToDelList(newDelete);
        /*update the status metadata*/
        Put statusPut = new Put(rowkey, startId);
        byte[] statusValue;
        statusValue = SsccConst.generateStatusValue(SsccConst.S_DELETE_BYTE, transactionId); //stateless delete
        statusPut.add(DtmConst.TRANSACTION_META_FAMILY, SsccConst.STATUS_COL, startId, statusValue);
        statusPut.add(DtmConst.TRANSACTION_META_FAMILY, SsccConst.COLUMNS_COL, startId, mergedColsV);
        m_Region.put(statusPut);
        if (LOG.isTraceEnabled())
            LOG.trace("SsccRegionEndpoint coprocessor: delete: STATELESS_UPDATE_OK");
        return STATELESS_UPDATE_OK;
    } else {
        // Return conflict, but don't trigger an abort.  That needs to be triggered from the client, if desired.
        if (LOG.isTraceEnabled())
            LOG.trace("SsccRegionEndpoint coprocessor: delete: STATELESS_UPDATE_CONFLICT");
        return STATELESS_UPDATE_CONFLICT;
    }
}

From source file: org.apache.hadoop.hbase.master.DefaultLoadBalancer.java

/**
 * Generate a global load balancing plan according to the specified map of
 * server information to the most loaded regions of each server.
 *
 * The load balancing invariant is that all servers are within 1 region of the
 * average number of regions per server.  If the average is an integer number,
 * all servers will be balanced to the average.  Otherwise, all servers will
 * have either floor(average) or ceiling(average) regions.
 *
 * HBASE-3609 Modeled regionsToMove using Guava's MinMaxPriorityQueue so that
 *   we can fetch from both ends of the queue. 
 * At the beginning, we check whether there is an empty region server
 *   just discovered by the Master. If so, we alternately choose new / old
 *   regions from the head / tail of regionsToMove, respectively. This alternation
 *   avoids clustering young regions on the newly discovered region server.
 *   Otherwise, we choose new regions from the head of regionsToMove.
 *
 * Another improvement from HBASE-3609 is that we assign regions from
 *   regionsToMove to underloaded servers in round-robin fashion.
 *   Previously one underloaded server would be filled before we move onto
 *   the next underloaded server, leading to clustering of young regions.
 *   
 * Finally, we randomly shuffle underloaded servers so that they receive
 *   offloaded regions relatively evenly across calls to balanceCluster().
 *         
 * The algorithm is currently implemented as such:
 *
 * <ol>
 * <li>Determine the two valid numbers of regions each server should have,
 *     <b>MIN</b>=floor(average) and <b>MAX</b>=ceiling(average).
 *
 * <li>Iterate down the most loaded servers, shedding regions from each so
 *     each server hosts exactly <b>MAX</b> regions.  Stop once you reach a
 *     server that already has &lt;= <b>MAX</b> regions.
 *     <p>
 *     Order the regions to move from most recent to least.
 *
 * <li>Iterate down the least loaded servers, assigning regions so each server
 *     has exactly <b>MIN</b> regions.  Stop once you reach a server that
 *     already has &gt;= <b>MIN</b> regions.
 *
 *     Regions being assigned to underloaded servers are those that were shed
 *     in the previous step.  It is possible that there were not enough
 *     regions shed to fill each underloaded server to <b>MIN</b>.  If so we
 *     end up with a number of regions required to do so, <b>neededRegions</b>.
 *
 *     It is also possible that we were able to fill each underloaded server
 *     but still ended up with regions that were shed from overloaded servers
 *     and remain unassigned.
 *
 *     If neither of these conditions hold (no regions needed to fill the
 *     underloaded servers, no regions leftover from overloaded servers),
 *     we are done and return.  Otherwise we handle these cases below.
 *
 * <li>If <b>neededRegions</b> is non-zero (still have underloaded servers),
 *     we iterate the most loaded servers again, shedding a single server from
 *     each (this brings them from having <b>MAX</b> regions to having
 *     <b>MIN</b> regions).
 *
 * <li>We now definitely have more regions that need assignment, either from
 *     the previous step or from the original shedding from overloaded servers.
 *     Iterate the least loaded servers filling each to <b>MIN</b>.
 *
 * <li>If we still have more regions that need assignment, again iterate the
 *     least loaded servers, this time giving each one (filling them to
 *     <b>MAX</b>) until we run out.
 *
 * <li>All servers will now either host <b>MIN</b> or <b>MAX</b> regions.
 *
 *     In addition, any server hosting &gt;= <b>MAX</b> regions is guaranteed
 *     to end up with <b>MAX</b> regions at the end of the balancing.  This
 *     ensures the minimal number of regions possible are moved.
 * </ol>
 *
 * TODO: We can reassign at most as many regions away from a particular
 *       server as it reports in its most-loaded list.
 *       Should we just keep all assignment in memory?  Any objections?
 *       Does this mean we need HeapSize on HMaster?  Or just careful monitor?
 *       (current thinking is we will hold all assignments in memory)
 *
 * @param clusterState Map of regionservers and their load/region information to
 *                   a list of their most loaded regions
 * @return a list of regions to be moved, including source and destination,
 *         or null if cluster is already balanced
 */
public List<RegionPlan> balanceCluster(Map<ServerName, List<HRegionInfo>> clusterState) {
    boolean emptyRegionServerPresent = false;
    long startTime = System.currentTimeMillis();

    int numServers = clusterState.size();
    if (numServers == 0) {
        LOG.debug("numServers=0 so skipping load balancing");
        return null;
    }
    NavigableMap<ServerAndLoad, List<HRegionInfo>> serversByLoad = new TreeMap<ServerAndLoad, List<HRegionInfo>>();
    int numRegions = 0;
    // Iterate so we can count regions as we build the map
    for (Map.Entry<ServerName, List<HRegionInfo>> server : clusterState.entrySet()) {
        List<HRegionInfo> regions = server.getValue();
        int sz = regions.size();
        if (sz == 0)
            emptyRegionServerPresent = true;
        numRegions += sz;
        serversByLoad.put(new ServerAndLoad(server.getKey(), sz), regions);
    }
    // Check if we even need to do any load balancing
    float average = (float) numRegions / numServers; // for logging
    // HBASE-3681 check sloppiness first
    int floor = (int) Math.floor(average * (1 - slop));
    int ceiling = (int) Math.ceil(average * (1 + slop));
    if (serversByLoad.lastKey().getLoad() <= ceiling && serversByLoad.firstKey().getLoad() >= floor) {
        // Skipped because no server outside (min,max) range
        LOG.info("Skipping load balancing because balanced cluster; " + "servers=" + numServers + " "
                + "regions=" + numRegions + " average=" + average + " " + "mostloaded="
                + serversByLoad.lastKey().getLoad() + " leastloaded=" + serversByLoad.firstKey().getLoad());
        return null;
    }
    int min = numRegions / numServers;
    int max = numRegions % numServers == 0 ? min : min + 1;

    // Using to check balance result.
    StringBuilder strBalanceParam = new StringBuilder();
    strBalanceParam.append("Balance parameter: numRegions=").append(numRegions).append(", numServers=")
            .append(numServers).append(", max=").append(max).append(", min=").append(min);
    LOG.debug(strBalanceParam.toString());

    // Balance the cluster
    // TODO: Look at data block locality or a more complex load to do this
    MinMaxPriorityQueue<RegionPlan> regionsToMove = MinMaxPriorityQueue.orderedBy(rpComparator).create();
    List<RegionPlan> regionsToReturn = new ArrayList<RegionPlan>();

    // Walk down most loaded, pruning each to the max
    int serversOverloaded = 0;
    // flag used to fetch regions from head and tail of list, alternately
    boolean fetchFromTail = false;
    Map<ServerName, BalanceInfo> serverBalanceInfo = new TreeMap<ServerName, BalanceInfo>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
        ServerAndLoad sal = server.getKey();
        int regionCount = sal.getLoad();
        if (regionCount <= max) {
            serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(0, 0));
            break;
        }
        serversOverloaded++;
        List<HRegionInfo> regions = server.getValue();
        int numToOffload = Math.min(regionCount - max, regions.size());
        // account for the out-of-band regions which were assigned to this server
        // after some other region server crashed 
        Collections.sort(regions, riComparator);
        int numTaken = 0;
        for (int i = 0; i <= numToOffload;) {
            HRegionInfo hri = regions.get(i); // fetch from head
            if (fetchFromTail) {
                hri = regions.get(regions.size() - 1 - i);
            }
            i++;
            // Don't rebalance meta regions.
            if (hri.isMetaRegion())
                continue;
            regionsToMove.add(new RegionPlan(hri, sal.getServerName(), null));
            numTaken++;
            if (numTaken >= numToOffload)
                break;
            // fetch in alternate order if there is new region server
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
        serverBalanceInfo.put(sal.getServerName(), new BalanceInfo(numToOffload, (-1) * numTaken));
    }
    int totalNumMoved = regionsToMove.size();

    // Walk down least loaded, filling each to the min
    int neededRegions = 0; // number of regions needed to bring all up to min
    fetchFromTail = false;

    Map<ServerName, Integer> underloadedServers = new HashMap<ServerName, Integer>();
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min) {
            break;
        }
        underloadedServers.put(server.getKey().getServerName(), min - regionCount);
    }
    // number of servers that get new regions
    int serversUnderloaded = underloadedServers.size();
    int incr = 1;
    List<ServerName> sns = Arrays
            .asList(underloadedServers.keySet().toArray(new ServerName[serversUnderloaded]));
    Collections.shuffle(sns, RANDOM);
    while (regionsToMove.size() > 0) {
        int cnt = 0;
        int i = incr > 0 ? 0 : underloadedServers.size() - 1;
        for (; i >= 0 && i < underloadedServers.size(); i += incr) {
            if (regionsToMove.isEmpty())
                break;
            ServerName si = sns.get(i);
            int numToTake = underloadedServers.get(si);
            if (numToTake == 0)
                continue;

            addRegionPlan(regionsToMove, fetchFromTail, si, regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }

            underloadedServers.put(si, numToTake - 1);
            cnt++;
            BalanceInfo bi = serverBalanceInfo.get(si);
            if (bi == null) {
                bi = new BalanceInfo(0, 0);
                serverBalanceInfo.put(si, bi);
            }
            bi.setNumRegionsAdded(bi.getNumRegionsAdded() + 1);
        }
        if (cnt == 0)
            break;
        // iterates underloadedServers in the other direction
        incr = -incr;
    }
    for (Integer i : underloadedServers.values()) {
        // If we still want to take some, increment needed
        neededRegions += i;
    }

    // If none needed to fill all to min and none left to drain all to max,
    // we are done
    if (neededRegions == 0 && regionsToMove.isEmpty()) {
        long endTime = System.currentTimeMillis();
        LOG.info("Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
                + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
                + " less loaded servers");
        return regionsToReturn;
    }

    // Need to do a second pass.
    // Either more regions to assign out or servers that are still underloaded

    // If we need more to fill min, grab one from each most loaded until enough
    if (neededRegions != 0) {
        // Walk down most loaded, grabbing one from each until we get enough
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.descendingMap().entrySet()) {
            BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
            int idx = balanceInfo == null ? 0 : balanceInfo.getNextRegionForUnload();
            if (idx >= server.getValue().size())
                break;
            HRegionInfo region = server.getValue().get(idx);
            if (region.isMetaRegion())
                continue; // Don't move meta regions.
            regionsToMove.add(new RegionPlan(region, server.getKey().getServerName(), null));
            totalNumMoved++;
            if (--neededRegions == 0) {
                // No more regions needed, done shedding
                break;
            }
        }
    }

    // Now we have a set of regions that must be all assigned out
    // Assign each underloaded up to the min, then if leftovers, assign to max

    // Walk down least loaded, assigning to each to fill up to min
    for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
        int regionCount = server.getKey().getLoad();
        if (regionCount >= min)
            break;
        BalanceInfo balanceInfo = serverBalanceInfo.get(server.getKey().getServerName());
        if (balanceInfo != null) {
            regionCount += balanceInfo.getNumRegionsAdded();
        }
        if (regionCount >= min) {
            continue;
        }
        int numToTake = min - regionCount;
        int numTaken = 0;
        while (numTaken < numToTake && 0 < regionsToMove.size()) {
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            numTaken++;
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
        }
    }

    // If we still have regions to dish out, assign underloaded to max
    if (0 < regionsToMove.size()) {
        for (Map.Entry<ServerAndLoad, List<HRegionInfo>> server : serversByLoad.entrySet()) {
            int regionCount = server.getKey().getLoad();
            if (regionCount >= max) {
                break;
            }
            addRegionPlan(regionsToMove, fetchFromTail, server.getKey().getServerName(), regionsToReturn);
            if (emptyRegionServerPresent) {
                fetchFromTail = !fetchFromTail;
            }
            if (regionsToMove.isEmpty()) {
                break;
            }
        }
    }

    long endTime = System.currentTimeMillis();

    if (!regionsToMove.isEmpty() || neededRegions != 0) {
        // Emit data so can diagnose how balancer went astray.
        LOG.warn("regionsToMove=" + totalNumMoved + ", numServers=" + numServers + ", serversOverloaded="
                + serversOverloaded + ", serversUnderloaded=" + serversUnderloaded);
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<ServerName, List<HRegionInfo>> e : clusterState.entrySet()) {
            if (sb.length() > 0)
                sb.append(", ");
            sb.append(e.getKey().toString());
            sb.append(" ");
            sb.append(e.getValue().size());
        }
        LOG.warn("Input " + sb.toString());
    }

    // All done!
    LOG.info("Done. Calculated a load balance in " + (endTime - startTime) + "ms. " + "Moving " + totalNumMoved
            + " regions off of " + serversOverloaded + " overloaded servers onto " + serversUnderloaded
            + " less loaded servers");

    return regionsToReturn;
}
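
The balancer above relies on serversByLoad being a NavigableMap ordered by load: it walks serversByLoad.descendingMap().entrySet() when shedding from the most loaded servers and serversByLoad.entrySet() when filling the least loaded ones. Below is a reduced sketch of that traversal pattern, using a hypothetical Integer-keyed load map instead of HBase's ServerAndLoad (all names are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ServersByLoadSketch {
    public static void main(String[] args) {
        // Hypothetical load map: region count -> names of servers carrying that load.
        NavigableMap<Integer, List<String>> serversByLoad = new TreeMap<Integer, List<String>>();
        serversByLoad.put(12, Arrays.asList("rs1"));
        serversByLoad.put(7, Arrays.asList("rs4"));
        serversByLoad.put(3, Arrays.asList("rs2", "rs3"));

        // Shedding pass: walk from most loaded to least loaded.
        for (Map.Entry<Integer, List<String>> e : serversByLoad.descendingMap().entrySet()) {
            System.out.println("consider shedding from load=" + e.getKey() + " " + e.getValue());
        }

        // Filling pass: walk from least loaded to most loaded.
        for (Map.Entry<Integer, List<String>> e : serversByLoad.entrySet()) {
            System.out.println("consider filling at load=" + e.getKey() + " " + e.getValue());
        }
    }
}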

From source file: com.google.gwt.emultest.java.util.TreeMapTest.java

public void testSubMap_entrySet() {
    K[] keys = getSortedKeys();
    V[] values = getSortedValues();
    NavigableMap<K, V> map = createNavigableMap();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    map.put(keys[3], values[3]);

    SortedMap<K, V> subMap = map.subMap(keys[1], keys[3]);
    Set<Entry<K, V>> entries = subMap.entrySet();
    assertEquals(2, subMap.size());
    assertEquals(subMap.size(), entries.size());
    assertFalse(entries.contains(new SimpleEntry<K, V>(keys[0], values[0])));
    assertTrue(entries.contains(new SimpleEntry<K, V>(keys[1], values[1])));
    assertTrue(entries.contains(new SimpleEntry<K, V>(keys[2], values[2])));
    assertFalse(entries.contains(new SimpleEntry<K, V>(keys[3], values[3])));

    entries.remove(new SimpleEntry<K, V>(keys[1], values[1]));
    assertEquals(3, map.size());
    assertEquals(subMap.size(), entries.size());
    assertFalse(entries.contains(new SimpleEntry<K, V>(keys[1], values[1])));
    assertFalse(subMap.containsKey(keys[1]));
    assertFalse(subMap.containsValue(values[1]));

    entries.clear();
    assertEquals(2, map.size());
    assertEquals(subMap.size(), entries.size());
    assertTrue(entries.isEmpty());
    assertTrue(subMap.isEmpty());

    subMap.put(keys[2], values[2]);
    assertEquals(1, subMap.size());
    assertEquals(subMap.size(), entries.size());

    subMap.put(keys[1], values[1]);
    Iterator<Entry<K, V>> it = entries.iterator();
    while (it.hasNext()) {
        Map.Entry<K, V> entry = it.next();
        subMap.containsKey(entry.getKey());
        subMap.containsValue(entry.getValue());
        it.remove();
    }
    try {
        it.next();
        fail("should throw NoSuchElementException");
    } catch (NoSuchElementException expected) {
    }
    assertEquals(2, map.size());
    assertEquals(0, subMap.size());
    assertEquals(subMap.size(), entries.size());

    map = createNavigableMap();
    Set<Entry<K, V>> entrySet = map.entrySet();
    map.put(keys[0], values[0]);
    map.put(keys[1], values[1]);
    map.put(keys[2], values[2]);
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
    map.clear();
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
    map.put(keys[0], values[0]);
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
    entrySet.clear();
    assertEquals(map.size(), entrySet.size());
    _assertEquals(entrySet, map.entrySet());
}
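
The test above depends on entrySet() (including the entry set of a subMap() view) being a live view backed by the map: removals through the set remove the mappings from the map, and new mappings in the sub-map's range appear in the view. A small stand-alone sketch of that behaviour, using illustrative concrete types rather than the test's generic K/V fixtures:

import java.util.AbstractMap.SimpleEntry;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

public class SubMapEntrySetViewSketch {
    public static void main(String[] args) {
        NavigableMap<Integer, String> map = new TreeMap<Integer, String>();
        map.put(1, "one");
        map.put(2, "two");
        map.put(3, "three");
        map.put(4, "four");

        // subMap(2, 4) is a live view of keys 2..3; its entrySet() is a view of that view.
        SortedMap<Integer, String> subMap = map.subMap(2, 4);
        Set<Map.Entry<Integer, String>> entries = subMap.entrySet();
        System.out.println(entries.size()); // 2

        // Removing through the entry-set view also removes from the backing map.
        entries.remove(new SimpleEntry<Integer, String>(2, "two"));
        System.out.println(map.containsKey(2)); // false
        System.out.println(map.size());         // 3

        // Changes to the backing map inside the sub-map's range show up in the view.
        map.put(2, "two again");
        System.out.println(entries.size()); // 2
    }
}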

From source file: org.apache.hadoop.hbase.client.TestFromClientSide.java

@Test
public void testClientPoolRoundRobin() throws IOException {
    final byte[] tableName = Bytes.toBytes("testClientPoolRoundRobin");

    int poolSize = 3;
    int numVersions = poolSize * 2;
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "round-robin");
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);

    HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, Integer.MAX_VALUE);

    final long ts = EnvironmentEdgeManager.currentTimeMillis();
    Get get = new Get(ROW);
    get.addColumn(FAMILY, QUALIFIER);
    get.setMaxVersions();

    for (int versions = 1; versions <= numVersions; versions++) {
        Put put = new Put(ROW);
        put.add(FAMILY, QUALIFIER, ts + versions, VALUE);
        table.put(put);

        Result result = table.get(get);
        NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);

        assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match " + versions,
                versions, navigableMap.size());
        for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
            assertTrue("The value at time " + entry.getKey() + " did not match what was put",
                    Bytes.equals(VALUE, entry.getValue()));
        }
    }
}
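
The version check above digs into Result.getMap(), which returns the row as nested NavigableMaps keyed by family, then qualifier, then timestamp. As a rough sketch (assuming the HBase client library is on the classpath; the class and method names are illustrative, not part of the test above), the whole structure can be walked with entrySet() at each level:

import java.util.Map;
import java.util.NavigableMap;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class ResultMapSketch {
    /** Prints every family:qualifier cell version held in a Result. */
    public static void dump(Result result) {
        NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = result.getMap();
        if (map == null) {
            return; // nothing stored for this row
        }
        for (Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> family : map.entrySet()) {
            for (Map.Entry<byte[], NavigableMap<Long, byte[]>> qualifier : family.getValue().entrySet()) {
                // One entry per stored version, keyed by timestamp.
                for (Map.Entry<Long, byte[]> version : qualifier.getValue().entrySet()) {
                    System.out.println(Bytes.toString(family.getKey()) + ":"
                            + Bytes.toString(qualifier.getKey()) + " @ " + version.getKey() + " = "
                            + Bytes.toString(version.getValue()));
                }
            }
        }
    }
}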

From source file: org.apache.hadoop.hbase.client.TestFromClientSide.java

@Ignore("Flakey: HBASE-8989")
@Test
public void testClientPoolThreadLocal() throws IOException {
    final byte[] tableName = Bytes.toBytes("testClientPoolThreadLocal");

    int poolSize = Integer.MAX_VALUE;
    int numVersions = 3;
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "thread-local");
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);

    final HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, 3);

    final long ts = EnvironmentEdgeManager.currentTimeMillis();
    final Get get = new Get(ROW);
    get.addColumn(FAMILY, QUALIFIER);
    get.setMaxVersions();

    for (int versions = 1; versions <= numVersions; versions++) {
        Put put = new Put(ROW);
        put.add(FAMILY, QUALIFIER, ts + versions, VALUE);
        table.put(put);

        Result result = table.get(get);
        NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);

        assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match " + versions
                + "; " + put.toString() + ", " + get.toString(), versions, navigableMap.size());
        for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
            assertTrue("The value at time " + entry.getKey() + " did not match what was put",
                    Bytes.equals(VALUE, entry.getValue()));
        }
    }

    final Object waitLock = new Object();
    ExecutorService executorService = Executors.newFixedThreadPool(numVersions);
    final AtomicReference<AssertionError> error = new AtomicReference<AssertionError>(null);
    for (int versions = numVersions; versions < numVersions * 2; versions++) {
        final int versionsCopy = versions;
        executorService.submit(new Callable<Void>() {
            @Override
            public Void call() {
                try {
                    Put put = new Put(ROW);
                    put.add(FAMILY, QUALIFIER, ts + versionsCopy, VALUE);
                    table.put(put);

                    Result result = table.get(get);
                    NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);

                    assertEquals(
                            "The number of versions of '" + Bytes.toString(FAMILY) + ":"
                                    + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy,
                            versionsCopy, navigableMap.size());
                    for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
                        assertTrue("The value at time " + entry.getKey() + " did not match what was put",
                                Bytes.equals(VALUE, entry.getValue()));
                    }
                    synchronized (waitLock) {
                        waitLock.wait();
                    }
                } catch (Exception e) {
                    // ignored: IOException from the client calls, or InterruptedException
                    // when the executor is shut down while this task is still waiting
                } catch (AssertionError e) {
                    // the error happens in a thread, it won't fail the test,
                    // need to pass it to the caller for proper handling.
                    error.set(e);
                    LOG.error(e);
                }

                return null;
            }
        });
    }
    synchronized (waitLock) {
        waitLock.notifyAll();
    }
    executorService.shutdownNow();
    assertNull(error.get());
}