Example usage for java.util NavigableMap size

List of usage examples for java.util NavigableMap size

Introduction

This page shows example usage of java.util.NavigableMap.size().

Prototype

int size();

Document

Returns the number of key-value mappings in this map.
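
As a quick illustration of the prototype above, here is a minimal standalone sketch (not taken from any of the usage examples below) that calls size() on a TreeMap, the most common NavigableMap implementation:

import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapSizeDemo {
    public static void main(String[] args) {
        NavigableMap<String, Integer> map = new TreeMap<>();
        map.put("a", 1);
        map.put("b", 2);
        // size() reports the number of key-value mappings currently in the map
        System.out.println(map.size()); // prints 2
        map.remove("a");
        System.out.println(map.size()); // prints 1
    }
}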

Usage

From source file:org.apache.hadoop.hbase.client.TestFromClientSide.java

@Ignore("Flakey: HBASE-8989")
@Test
public void testClientPoolThreadLocal() throws IOException {
    final byte[] tableName = Bytes.toBytes("testClientPoolThreadLocal");

    int poolSize = Integer.MAX_VALUE;
    int numVersions = 3;
    Configuration conf = TEST_UTIL.getConfiguration();
    conf.set(HConstants.HBASE_CLIENT_IPC_POOL_TYPE, "thread-local");
    conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);

    final HTable table = TEST_UTIL.createTable(tableName, new byte[][] { FAMILY }, conf, 3);

    final long ts = EnvironmentEdgeManager.currentTimeMillis();
    final Get get = new Get(ROW);
    get.addColumn(FAMILY, QUALIFIER);
    get.setMaxVersions();

    for (int versions = 1; versions <= numVersions; versions++) {
        Put put = new Put(ROW);
        put.add(FAMILY, QUALIFIER, ts + versions, VALUE);
        table.put(put);

        Result result = table.get(get);
        NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);

        assertEquals("The number of versions of '" + FAMILY + ":" + QUALIFIER + " did not match " + versions
                + "; " + put.toString() + ", " + get.toString(), versions, navigableMap.size());
        for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
            assertTrue("The value at time " + entry.getKey() + " did not match what was put",
                    Bytes.equals(VALUE, entry.getValue()));
        }
    }

    final Object waitLock = new Object();
    ExecutorService executorService = Executors.newFixedThreadPool(numVersions);
    final AtomicReference<AssertionError> error = new AtomicReference<AssertionError>(null);
    for (int versions = numVersions; versions < numVersions * 2; versions++) {
        final int versionsCopy = versions;
        executorService.submit(new Callable<Void>() {
            @Override
            public Void call() {
                try {
                    Put put = new Put(ROW);
                    put.add(FAMILY, QUALIFIER, ts + versionsCopy, VALUE);
                    table.put(put);

                    Result result = table.get(get);
                    NavigableMap<Long, byte[]> navigableMap = result.getMap().get(FAMILY).get(QUALIFIER);

                    assertEquals(
                            "The number of versions of '" + Bytes.toString(FAMILY) + ":"
                                    + Bytes.toString(QUALIFIER) + " did not match " + versionsCopy,
                            versionsCopy, navigableMap.size());
                    for (Map.Entry<Long, byte[]> entry : navigableMap.entrySet()) {
                        assertTrue("The value at time " + entry.getKey() + " did not match what was put",
                                Bytes.equals(VALUE, entry.getValue()));
                    }
                    synchronized (waitLock) {
                        waitLock.wait();
                    }
                } catch (Exception e) {
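                    // likely an InterruptedException when shutdownNow() interrupts the wait() above; ignored by this test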
                } catch (AssertionError e) {
                    // the error happens in a thread, it won't fail the test,
                    // need to pass it to the caller for proper handling.
                    error.set(e);
                    LOG.error(e);
                }

                return null;
            }
        });
    }
    synchronized (waitLock) {
        waitLock.notifyAll();
    }
    executorService.shutdownNow();
    assertNull(error.get());
}

From source file:org.apache.hadoop.hbase.client.TestGetRowVersions.java

/** @throws Exception */
public void testGetRowMultipleVersions() throws Exception {
    Put put = new Put(ROW, TIMESTAMP1, null);
    put.add(CONTENTS, CONTENTS, VALUE1);
    HTable table = new HTable(new Configuration(conf), TABLE_NAME);
    table.put(put);
    // Shut down and restart the HBase cluster
    this.cluster.shutdown();
    this.zooKeeperCluster.shutdown();
    LOG.debug("HBase cluster shut down -- restarting");
    this.hBaseClusterSetup();
    // Make a new connection.  Use new Configuration instance because old one
    // is tied to an HConnection that has since gone stale.
    table = new HTable(new Configuration(conf), TABLE_NAME);
    // Overwrite previous value
    put = new Put(ROW, TIMESTAMP2, null);
    put.add(CONTENTS, CONTENTS, VALUE2);
    table.put(put);
    // Now verify that getRow(row, column, latest) works
    Get get = new Get(ROW);
    // Should get one version by default
    Result r = table.get(get);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    assertTrue(r.size() == 1);
    byte[] value = r.getValue(CONTENTS, CONTENTS);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, VALUE2));
    // Now check getRow with multiple versions
    get = new Get(ROW);
    get.setMaxVersions();
    r = table.get(get);
    assertTrue(r.size() == 2);
    value = r.getValue(CONTENTS, CONTENTS);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, VALUE2));
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(CONTENTS);
    NavigableMap<Long, byte[]> versionMap = familyMap.get(CONTENTS);
    assertTrue(versionMap.size() == 2);
    assertTrue(Bytes.equals(VALUE1, versionMap.get(TIMESTAMP1)));
    assertTrue(Bytes.equals(VALUE2, versionMap.get(TIMESTAMP2)));
}

From source file:org.apache.hadoop.hbase.extended.loadbalance.strategies.hotspot.HotSpotLoadBalancer.java

private boolean loadBalancingNeeded(int numServers,
        NavigableMap<HotSpotServerAndLoad, List<HotSpotRegionLoad>> serversByLoad,
        double normalisedTotalLoadOfAllRegions, double average) {
    double floor = Math.floor(average * (1 - slop));
    double ceiling = Math.ceil(average * (1 + slop));
    if (serversByLoad.size() > 0) {
        if (serversByLoad.lastKey().getLoad() <= ceiling && serversByLoad.firstKey().getLoad() >= floor) {
            // As it is sorted ascending we know that the lastKey has the most load.
            // Skipped because no server is outside the (min, max) range.
            LOG.info("##########Skipping load balancing because balanced cluster; " + "servers=" + numServers
                    + " " + "regions=" + normalisedTotalLoadOfAllRegions + " average=" + average + " "
                    + "mostloaded=" + serversByLoad.lastKey().getLoad() + " leastloaded="
                    + serversByLoad.firstKey().getLoad());
            return false;
        } else {
            // only case where load balancing is required
            return true;
        }
    }
    return false;
}

From source file:org.apache.hadoop.hbase.master.handler.ServerShutdownHandler.java

@Override
public void process() throws IOException {
    boolean hasLogReplayWork = false;
    final ServerName serverName = this.serverName;
    try {

        // We don't want worker thread in the MetaServerShutdownHandler
        // executor pool to block by waiting availability of hbase:meta
        // Otherwise, it could run into the following issue:
        // 1. The current MetaServerShutdownHandler instance for RS1 waits for the hbase:meta
        //    to come online.
        // 2. The newly assigned hbase:meta region server RS2 was shutdown right after
        //    it opens the hbase:meta region. So the MetaServerShutdownHandler
        //    instance for RS1 will still be blocked.
        // 3. The new instance of MetaServerShutdownHandler for RS2 is queued.
        // 4. The newly assigned hbase:meta region server RS3 was shutdown right after
        //    it opens the hbase:meta region. So the MetaServerShutdownHandler
        //    instances for RS1 and RS2 will still be blocked.
        // 5. The new instance of MetaServerShutdownHandler for RS3 is queued.
        // 6. Repeat until we run out of MetaServerShutdownHandler worker threads
        // The solution here is to resubmit a ServerShutdownHandler request to process
        // user regions on that server so that MetaServerShutdownHandler
        // executor pool is always available.
        //
        // If AssignmentManager hasn't finished rebuilding user regions,
        // we are not ready to assign dead regions either. So we re-queue up
        // the dead server for further processing too.
        AssignmentManager am = services.getAssignmentManager();
        if (isCarryingMeta() // hbase:meta
                || !am.isFailoverCleanupDone()) {
            this.services.getServerManager().processDeadServer(serverName, this.shouldSplitHlog);
            return;
        }

        // Wait on meta to come online; we need it to progress.
        // TODO: Best way to hold strictly here?  We should build this retry logic
        // into the MetaReader operations themselves.
        // TODO: Is the reading of hbase:meta necessary when the Master has state of
        // cluster in its head?  It should be possible to do without reading hbase:meta
        // in all but one case. On split, the RS updates the hbase:meta
        // table and THEN informs the master of the split via zk nodes in
        // 'unassigned' dir.  Currently the RS puts ephemeral nodes into zk so if
        // the regionserver dies, these nodes do not stick around and this server
        // shutdown processing does fixup (see the fixupDaughters method below).
        // If we wanted to skip the hbase:meta scan, we'd have to change at least the
        // final SPLIT message to be permanent in zk so in here we'd know a SPLIT
        // completed (zk is updated after edits to hbase:meta have gone in).  See
        // {@link SplitTransaction}.  We'd also have to figure out another way of
        // doing the below hbase:meta daughters fixup.
        NavigableMap<HRegionInfo, Result> hris = null;
        while (!this.server.isStopped()) {
            try {
                this.server.getCatalogTracker().waitForMeta();
                // Skip getting user regions if the server is stopped.
                if (!this.server.isStopped()) {
                    hris = MetaReader.getServerUserRegions(this.server.getCatalogTracker(), this.serverName);
                }
                break;
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw (InterruptedIOException) new InterruptedIOException().initCause(e);
            } catch (IOException ioe) {
                LOG.info("Received exception accessing hbase:meta during server shutdown of " + serverName
                        + ", retrying hbase:meta read", ioe);
            }
        }
        if (this.server.isStopped()) {
            throw new IOException("Server is stopped");
        }

        try {
            if (this.shouldSplitHlog) {
                LOG.info("Splitting logs for " + serverName + " before assignment.");
                if (this.distributedLogReplay) {
                    LOG.info("Mark regions in recovery before assignment.");
                    Set<ServerName> serverNames = new HashSet<ServerName>();
                    serverNames.add(serverName);
                    this.services.getMasterFileSystem().prepareLogReplay(serverNames);
                } else {
                    this.services.getMasterFileSystem().splitLog(serverName);
                }
                am.getRegionStates().logSplit(serverName);
            } else {
                LOG.info("Skipping log splitting for " + serverName);
            }
        } catch (IOException ioe) {
            resubmit(serverName, ioe);
        }

        // Clean out anything in regions in transition.  Being conservative and
        // doing after log splitting.  Could do some states before -- OPENING?
        // OFFLINE? -- and then others after like CLOSING that depend on log
        // splitting.
        List<HRegionInfo> regionsInTransition = am.processServerShutdown(serverName);
        LOG.info("Reassigning " + ((hris == null) ? 0 : hris.size()) + " region(s) that "
                + (serverName == null ? "null" : serverName) + " was carrying (and "
                + regionsInTransition.size() + " region(s) that were opening on this server)");

        List<HRegionInfo> toAssignRegions = new ArrayList<HRegionInfo>();
        toAssignRegions.addAll(regionsInTransition);

        // Iterate regions that were on this server and assign them
        if (hris != null) {
            RegionStates regionStates = am.getRegionStates();
            for (Map.Entry<HRegionInfo, Result> e : hris.entrySet()) {
                HRegionInfo hri = e.getKey();
                if (regionsInTransition.contains(hri)) {
                    continue;
                }
                String encodedName = hri.getEncodedName();
                Lock lock = am.acquireRegionLock(encodedName);
                try {
                    RegionState rit = regionStates.getRegionTransitionState(hri);
                    if (processDeadRegion(hri, e.getValue(), am, server.getCatalogTracker())) {
                        ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
                        if (addressFromAM != null && !addressFromAM.equals(this.serverName)) {
                            // If this region is in transition on the dead server, it must be
                            // opening or pending_open, which should have been covered by AM#processServerShutdown
                            LOG.info("Skip assigning region " + hri.getRegionNameAsString()
                                    + " because it has been opened in " + addressFromAM.getServerName());
                            continue;
                        }
                        if (rit != null) {
                            if (rit.getServerName() != null && !rit.isOnServer(serverName)) {
                                // Skip regions that are in transition on other server
                                LOG.info("Skip assigning region in transition on other server" + rit);
                                continue;
                            }
                            try {
                                //clean zk node
                                LOG.info("Reassigning region with rs = " + rit
                                        + " and deleting zk node if exists");
                                ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
                                regionStates.updateRegionState(hri, State.OFFLINE);
                            } catch (KeeperException ke) {
                                this.server.abort("Unexpected ZK exception deleting unassigned node " + hri,
                                        ke);
                                return;
                            }
                        } else if (regionStates.isRegionInState(hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                            regionStates.regionOffline(hri);
                        }
                        toAssignRegions.add(hri);
                    } else if (rit != null) {
                        if (rit.isPendingCloseOrClosing() && am.getTableStateManager().isTableState(
                                hri.getTable(), ZooKeeperProtos.Table.State.DISABLED,
                                ZooKeeperProtos.Table.State.DISABLING)) {
                            // If the table was partially disabled and the RS went down, we should clear the RIT
                            // and remove the node for the region.
                            // The rit that we use may be stale in case the table was in DISABLING state
                            // but though we did assign we will not be clearing the znode in CLOSING state.
                            // Doing this will have no harm. See HBASE-5927
                            regionStates.updateRegionState(hri, State.OFFLINE);
                            am.deleteClosingOrClosedNode(hri, rit.getServerName());
                            am.offlineDisabledRegion(hri);
                        } else {
                            LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition " + rit
                                    + " not to be assigned by SSH of server " + serverName);
                        }
                    }
                } finally {
                    lock.unlock();
                }
            }
        }

        try {
            am.assign(toAssignRegions);
        } catch (InterruptedException ie) {
            LOG.error("Caught " + ie + " during round-robin assignment");
            throw (InterruptedIOException) new InterruptedIOException().initCause(ie);
        }

        if (this.shouldSplitHlog && this.distributedLogReplay) {
            // wait for region assignment completes
            for (HRegionInfo hri : toAssignRegions) {
                try {
                    if (!am.waitOnRegionToClearRegionsInTransition(hri, regionAssignmentWaitTimeout)) {
                        // Wait here is to avoid log replay hits current dead server and incur a RPC timeout
                        // when replay happens before region assignment completes.
                        LOG.warn("Region " + hri.getEncodedName() + " didn't complete assignment in time");
                    }
                } catch (InterruptedException ie) {
                    throw new InterruptedIOException(
                            "Caught " + ie + " during waitOnRegionToClearRegionsInTransition");
                }
            }
            // submit logReplay work
            this.services.getExecutorService().submit(
                    new LogReplayHandler(this.server, this.services, this.deadServers, this.serverName));
            hasLogReplayWork = true;
        }
    } finally {
        this.deadServers.finish(serverName);
    }

    if (!hasLogReplayWork) {
        LOG.info("Finished processing of shutdown of " + serverName);
    }
}

From source file:org.apache.hadoop.hbase.master.RegionManager.java

static MetaRegion getMetaRegionPointingTo(NavigableMap<byte[], MetaRegion> metaRegions, HRegionInfo newRegion) {
    if (metaRegions.isEmpty()) {
        return null;
    } else if (metaRegions.size() == 1) {
        return metaRegions.get(metaRegions.firstKey());
    } else {
        if (metaRegions.containsKey(newRegion.getRegionName())) {
            return metaRegions.get(newRegion.getRegionName());
        }
        return metaRegions.get(metaRegions.headMap(newRegion.getRegionName()).lastKey());
    }
}

From source file:org.apache.hadoop.hbase.regionserver.wal.HLogKey.java

public void readOlderScopes(NavigableMap<byte[], Integer> scopes) {
    if (scopes != null) {
        Iterator<Map.Entry<byte[], Integer>> iterator = scopes.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<byte[], Integer> scope = iterator.next();
            String key = Bytes.toString(scope.getKey());
            if (key.startsWith(PREFIX_CLUSTER_KEY)) {
                addClusterId(UUID.fromString(key.substring(PREFIX_CLUSTER_KEY.length())));
                iterator.remove();
            }
        }
        if (scopes.size() > 0) {
            this.scopes = scopes;
        }
    }
}

From source file:org.apache.hadoop.hbase.TestMetaTableAccessorNoCluster.java

/**
 * Test that MetaTableAccessor will ride over server throwing
 * "Server not running" IOEs./*from  www.j a  v a  2s .c  o  m*/
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-3446">HBASE-3446</a>
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testRideOverServerNotRunning() throws IOException, InterruptedException, ServiceException {
    // Need a zk watcher.
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(),
            ABORTABLE, true);
    // This is a servername we use in a few places below.
    ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());

    ClusterConnection connection = null;
    try {
        // Mock an ClientProtocol. Our mock implementation will fail a few
        // times when we go to open a scanner.
        final ClientProtos.ClientService.BlockingInterface implementation = Mockito
                .mock(ClientProtos.ClientService.BlockingInterface.class);
        // When scan called throw IOE 'Server not running' a few times
        // before we return a scanner id.  What's WEIRD is that these
        // exceptions do not show in the log because they are caught and only
        // printed if we FAIL.  We eventually succeed after retry so these don't
        // show.  We will know if they happened or not because we will ask
        // mockito at the end of this test to verify that scan was indeed
        // called the wanted number of times.
        List<Cell> kvs = new ArrayList<Cell>();
        final byte[] rowToVerify = Bytes.toBytes("rowToVerify");
        kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
                HRegionInfo.FIRST_META_REGIONINFO.toByteArray()));
        kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
                Bytes.toBytes(sn.getHostAndPort())));
        kvs.add(new KeyValue(rowToVerify, HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
                Bytes.toBytes(sn.getStartcode())));
        final List<CellScannable> cellScannables = new ArrayList<CellScannable>(1);
        cellScannables.add(Result.create(kvs));
        final ScanResponse.Builder builder = ScanResponse.newBuilder();
        for (CellScannable result : cellScannables) {
            builder.addCellsPerResult(((Result) result).size());
        }
        Mockito.when(implementation.scan((RpcController) Mockito.any(), (ScanRequest) Mockito.any()))
                .thenThrow(new ServiceException("Server not running (1 of 3)"))
                .thenThrow(new ServiceException("Server not running (2 of 3)"))
                .thenThrow(new ServiceException("Server not running (3 of 3)"))
                .thenReturn(ScanResponse.newBuilder().setScannerId(1234567890L).build())
                .thenAnswer(new Answer<ScanResponse>() {
                    public ScanResponse answer(InvocationOnMock invocation) throws Throwable {
                        ((PayloadCarryingRpcController) invocation.getArguments()[0])
                                .setCellScanner(CellUtil.createCellScanner(cellScannables));
                        return builder.build();
                    }
                }).thenReturn(ScanResponse.newBuilder().setMoreResults(false).build());
        // Associate a spied-upon HConnection with UTIL.getConfiguration.  Need
        // to shove this in here first so it gets picked up all over; e.g. by
        // HTable.
        connection = HConnectionTestingUtility.getSpiedConnection(UTIL.getConfiguration());

        // Fix the location lookup so it 'works' though no network.  First
        // make an 'any location' object.
        final HRegionLocation anyLocation = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, sn);
        final RegionLocations rl = new RegionLocations(anyLocation);
        // Return the RegionLocations object when locateRegion
        // The ugly format below comes of 'Important gotcha on spying real objects!' from
        // http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html
        Mockito.doReturn(rl).when(connection).locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any(),
                Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt());

        // Now shove our HRI implementation into the spied-upon connection.
        Mockito.doReturn(implementation).when(connection).getClient(Mockito.any(ServerName.class));

        // Scan meta for user tables and verify we got back expected answer.
        NavigableMap<HRegionInfo, Result> hris = MetaTableAccessor.getServerUserRegions(connection, sn);
        assertEquals(1, hris.size());
        assertTrue(hris.firstEntry().getKey().equals(HRegionInfo.FIRST_META_REGIONINFO));
        assertTrue(Bytes.equals(rowToVerify, hris.firstEntry().getValue().getRow()));
        // Finally verify that scan was called six times -- three times
        // with exception and then on the 4th, 5th and 6th attempts we succeed
        Mockito.verify(implementation, Mockito.times(6)).scan((RpcController) Mockito.any(),
                (ScanRequest) Mockito.any());
    } finally {
        if (connection != null && !connection.isClosed())
            connection.close();
        zkw.close();
    }
}

From source file:org.apache.hadoop.hbase.TestMultiVersions.java

/**
 * Verifies versions across a cluster restart.
 * Port of the old TestGetRowVersions test, moved here so it can better utilize
 * the spun up cluster and run more than a single test per spin up.  Keeps the
 * old tests' craziness.
 */
@Test
public void testGetRowVersions() throws Exception {
    final String tableName = "testGetRowVersions";
    final byte[] contents = Bytes.toBytes("contents");
    final byte[] row = Bytes.toBytes("row");
    final byte[] value1 = Bytes.toBytes("value1");
    final byte[] value2 = Bytes.toBytes("value2");
    final long timestamp1 = 100L;
    final long timestamp2 = 200L;
    final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor hcd = new HColumnDescriptor(contents);
    hcd.setMaxVersions(3);
    desc.addFamily(hcd);
    this.admin.createTable(desc);
    Put put = new Put(row, timestamp1);
    put.add(contents, contents, value1);
    HTable table = new HTable(UTIL.getConfiguration(), tableName);
    table.put(put);
    // Shut down and restart the HBase cluster
    table.close();
    UTIL.shutdownMiniHBaseCluster();
    LOG.debug("HBase cluster shut down -- restarting");
    UTIL.startMiniHBaseCluster(1, NUM_SLAVES);
    // Make a new connection.  Use new Configuration instance because old one
    // is tied to an HConnection that has since gone stale.
    table = new HTable(new Configuration(UTIL.getConfiguration()), tableName);
    // Overwrite previous value
    put = new Put(row, timestamp2);
    put.add(contents, contents, value2);
    table.put(put);
    // Now verify that getRow(row, column, latest) works
    Get get = new Get(row);
    // Should get one version by default
    Result r = table.get(get);
    assertNotNull(r);
    assertFalse(r.isEmpty());
    assertTrue(r.size() == 1);
    byte[] value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
    // Now check getRow with multiple versions
    get = new Get(row);
    get.setMaxVersions();
    r = table.get(get);
    assertTrue(r.size() == 2);
    value = r.getValue(contents, contents);
    assertTrue(value.length != 0);
    assertTrue(Bytes.equals(value, value2));
    NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = r.getMap();
    NavigableMap<byte[], NavigableMap<Long, byte[]>> familyMap = map.get(contents);
    NavigableMap<Long, byte[]> versionMap = familyMap.get(contents);
    assertTrue(versionMap.size() == 2);
    assertTrue(Bytes.equals(value1, versionMap.get(timestamp1)));
    assertTrue(Bytes.equals(value2, versionMap.get(timestamp2)));
    table.close();
}

From source file:org.apache.hadoop.hbase.TestMultiVersions.java

/**
 * Port of the old TestScanMultipleVersions test, moved here so it can better
 * utilize the spun up cluster and run more than just a single test.  Keeps the
 * old tests' craziness.
 * 
 * <p>Tests five cases of scans and timestamps.
 * @throws Exception
 */
@Test
public void testScanMultipleVersions() throws Exception {
    final byte[] tableName = Bytes.toBytes("testScanMultipleVersions");
    final HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
    desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
    final byte[][] rows = new byte[][] { Bytes.toBytes("row_0200"), Bytes.toBytes("row_0800") };
    final byte[][] splitRows = new byte[][] { Bytes.toBytes("row_0500") };
    final long[] timestamp = new long[] { 100L, 1000L };
    this.admin.createTable(desc, splitRows);
    HTable table = new HTable(UTIL.getConfiguration(), tableName);
    // Assert we got the region layout wanted.
    NavigableMap<HRegionInfo, ServerName> locations = table.getRegionLocations();
    assertEquals(2, locations.size());
    int index = 0;
    for (Map.Entry<HRegionInfo, ServerName> e : locations.entrySet()) {
        HRegionInfo hri = e.getKey();
        if (index == 0) {
            assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey()));
            assertTrue(Bytes.equals(hri.getEndKey(), splitRows[0]));
        } else if (index == 1) {
            assertTrue(Bytes.equals(splitRows[0], hri.getStartKey()));
            assertTrue(Bytes.equals(hri.getEndKey(), HConstants.EMPTY_END_ROW));
        }
        index++;
    }
    // Insert data
    for (int i = 0; i < locations.size(); i++) {
        for (int j = 0; j < timestamp.length; j++) {
            Put put = new Put(rows[i], timestamp[j]);
            put.add(HConstants.CATALOG_FAMILY, null, timestamp[j], Bytes.toBytes(timestamp[j]));
            table.put(put);
        }
    }
    // There are 5 cases we have to test. Each is described below.
    for (int i = 0; i < rows.length; i++) {
        for (int j = 0; j < timestamp.length; j++) {
            Get get = new Get(rows[i]);
            get.addFamily(HConstants.CATALOG_FAMILY);
            get.setTimeStamp(timestamp[j]);
            Result result = table.get(get);
            int cellCount = 0;
            for (@SuppressWarnings("unused") Cell kv : result.listCells()) {
                cellCount++;
            }
            assertTrue(cellCount == 1);
        }
    }

    // Case 1: scan with LATEST_TIMESTAMP. Should get two rows
    int count = 0;
    Scan scan = new Scan();
    scan.addFamily(HConstants.CATALOG_FAMILY);
    ResultScanner s = table.getScanner(scan);
    try {
        for (Result rr = null; (rr = s.next()) != null;) {
            System.out.println(rr.toString());
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }

    // Case 2: Scan with a timestamp greater than most recent timestamp
    // (in this case > 1000 and < LATEST_TIMESTAMP). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeRange(1000L, Long.MAX_VALUE);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }

    // Case 3: scan with timestamp equal to most recent timestamp
    // (in this case == 1000). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeStamp(1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }

    // Case 4: scan with timestamp greater than first timestamp but less than
    // second timestamp (100 < timestamp < 1000). Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeRange(100L, 1000L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }

    // Case 5: scan with timestamp equal to first timestamp (100)
    // Should get 2 rows.

    count = 0;
    scan = new Scan();
    scan.setTimeStamp(100L);
    scan.addFamily(HConstants.CATALOG_FAMILY);

    s = table.getScanner(scan);
    try {
        while (s.next() != null) {
            count += 1;
        }
        assertEquals("Number of rows should be 2", 2, count);
    } finally {
        s.close();
    }
    // Close the table only after all scans above are done.
    table.close();
}

From source file:org.apache.metron.indexing.dao.UpdateIntegrationTest.java

@Test
public void test() throws Exception {
    List<Map<String, Object>> inputData = new ArrayList<>();
    for (int i = 0; i < 10; ++i) {
        final String name = "message" + i;
        inputData.add(new HashMap<String, Object>() {
            {
                put("source.type", SENSOR_NAME);
                put("name", name);
                put("timestamp", System.currentTimeMillis());
                put(Constants.GUID, name);
            }
        });
    }
    addTestData(getIndexName(), SENSOR_NAME, inputData);
    List<Map<String, Object>> docs = null;
    for (int t = 0; t < MAX_RETRIES; ++t, Thread.sleep(SLEEP_MS)) {
        docs = getIndexedTestData(getIndexName(), SENSOR_NAME);
        if (docs.size() >= 10) {
            break;
        }
    }
    Assert.assertEquals(10, docs.size());
    //modify the first message and add a new field
    {
        Map<String, Object> message0 = new HashMap<String, Object>(inputData.get(0)) {
            {
                put("new-field", "metron");
            }
        };
        String guid = "" + message0.get(Constants.GUID);
        getDao().replace(new ReplaceRequest() {
            {
                setReplacement(message0);
                setGuid(guid);
                setSensorType(SENSOR_NAME);
                setIndex(getIndexName());
            }
        }, Optional.empty());

        Assert.assertEquals(1, getMockHTable().size());
        findUpdatedDoc(message0, guid, SENSOR_NAME);
        {
            //ensure hbase is up to date
            Get g = new Get(HBaseDao.Key.toBytes(new HBaseDao.Key(guid, SENSOR_NAME)));
            Result r = getMockHTable().get(g);
            NavigableMap<byte[], byte[]> columns = r.getFamilyMap(CF.getBytes());
            Assert.assertEquals(1, columns.size());
            Assert.assertEquals(message0, JSONUtils.INSTANCE.load(new String(columns.lastEntry().getValue()),
                    JSONUtils.MAP_SUPPLIER));
        }
        {
            //ensure ES is up-to-date
            long cnt = 0;
            for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
                docs = getIndexedTestData(getIndexName(), SENSOR_NAME);
                cnt = docs.stream().filter(d -> message0.get("new-field").equals(d.get("new-field"))).count();
            }
            Assert.assertNotEquals("Data store is not updated!", cnt, 0);
        }
    }
    //modify the same message and modify the new field
    {
        Map<String, Object> message0 = new HashMap<String, Object>(inputData.get(0)) {
            {
                put("new-field", "metron2");
            }
        };
        String guid = "" + message0.get(Constants.GUID);
        getDao().replace(new ReplaceRequest() {
            {
                setReplacement(message0);
                setGuid(guid);
                setSensorType(SENSOR_NAME);
                setIndex(getIndexName());
            }
        }, Optional.empty());
        Assert.assertEquals(1, getMockHTable().size());
        Document doc = getDao().getLatest(guid, SENSOR_NAME);
        Assert.assertEquals(message0, doc.getDocument());
        findUpdatedDoc(message0, guid, SENSOR_NAME);
        {
            //ensure hbase is up to date
            Get g = new Get(HBaseDao.Key.toBytes(new HBaseDao.Key(guid, SENSOR_NAME)));
            Result r = getMockHTable().get(g);
            NavigableMap<byte[], byte[]> columns = r.getFamilyMap(CF.getBytes());
            Assert.assertEquals(2, columns.size());
            Assert.assertEquals(message0, JSONUtils.INSTANCE.load(new String(columns.lastEntry().getValue()),
                    JSONUtils.MAP_SUPPLIER));
            Assert.assertNotEquals(message0, JSONUtils.INSTANCE
                    .load(new String(columns.firstEntry().getValue()), JSONUtils.MAP_SUPPLIER));
        }
        {
            //ensure ES is up-to-date
            long cnt = 0;
            for (int t = 0; t < MAX_RETRIES && cnt == 0; ++t, Thread.sleep(SLEEP_MS)) {
                docs = getIndexedTestData(getIndexName(), SENSOR_NAME);
                cnt = docs.stream().filter(d -> message0.get("new-field").equals(d.get("new-field"))).count();
            }

            Assert.assertNotEquals("Data store is not updated!", cnt, 0);
        }
    }
}