Example usage for java.util NavigableMap put

Introduction

This page lists usage examples for java.util.NavigableMap.put; all of the examples below come from the Apache HBase codebase.

Prototype

V put(K key, V value);

Document

Associates the specified value with the specified key in this map (optional operation).
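
Before the project examples below, here is a minimal, self-contained sketch of the call (all names in it are illustrative, not taken from the examples). Note that put returns the value previously associated with the key, or null if there was none, and that a NavigableMap keeps its entries sorted, so navigation methods reflect every put immediately.

import java.util.NavigableMap;
import java.util.TreeMap;

public class NavigableMapPutDemo {
    public static void main(String[] args) {
        // TreeMap is the standard NavigableMap implementation.
        NavigableMap<String, Integer> scores = new TreeMap<>();

        // put returns null when the key was absent...
        Integer previous = scores.put("alice", 1); // null
        // ...and the replaced value when it was present.
        previous = scores.put("alice", 2); // 1

        scores.put("bob", 3);

        // Entries stay in key order, so navigation works right after put.
        System.out.println(scores.firstKey());      // alice
        System.out.println(scores.floorEntry("b")); // alice=2
    }
}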

Usage

From source file: org.apache.hadoop.hbase.regionserver.tableindexed.IndexedRegion.java

private void updateIndexes(BatchUpdate batchUpdate) throws IOException {
    List<IndexSpecification> indexesToUpdate = new LinkedList<IndexSpecification>();

    // Find the indexes we need to update
    for (IndexSpecification index : getIndexes()) {
        if (possiblyAppliesToIndex(index, batchUpdate)) {
            indexesToUpdate.add(index);
        }
    }

    if (indexesToUpdate.size() == 0) {
        return;
    }

    NavigableSet<byte[]> neededColumns = getColumnsForIndexes(indexesToUpdate);

    NavigableMap<byte[], byte[]> newColumnValues = getColumnsFromBatchUpdate(batchUpdate);
    Map<byte[], Cell> oldColumnCells = super.getFull(batchUpdate.getRow(), neededColumns,
            HConstants.LATEST_TIMESTAMP, 1, null);

    // Handle delete batch updates. Go back and get the next older values
    for (BatchOperation op : batchUpdate) {
        if (!op.isPut()) {
            Cell current = oldColumnCells.get(op.getColumn());
            if (current != null) {
                // TODO: Fix this profligacy!!! St.Ack
                Cell[] older = Cell.createSingleCellArray(
                        super.get(batchUpdate.getRow(), op.getColumn(), current.getTimestamp(), 1));
                if (older != null && older.length > 0) {
                    newColumnValues.put(op.getColumn(), older[0].getValue());
                }
            }
        }
    }

    // Add the old values to the new if they are not there
    for (Entry<byte[], Cell> oldEntry : oldColumnCells.entrySet()) {
        if (!newColumnValues.containsKey(oldEntry.getKey())) {
            newColumnValues.put(oldEntry.getKey(), oldEntry.getValue().getValue());
        }
    }

    Iterator<IndexSpecification> indexIterator = indexesToUpdate.iterator();
    while (indexIterator.hasNext()) {
        IndexSpecification indexSpec = indexIterator.next();
        if (!doesApplyToIndex(indexSpec, newColumnValues)) {
            indexIterator.remove();
        }
    }

    SortedMap<byte[], byte[]> oldColumnValues = convertToValueMap(oldColumnCells);

    for (IndexSpecification indexSpec : indexesToUpdate) {
        removeOldIndexEntry(indexSpec, batchUpdate.getRow(), oldColumnValues);
        updateIndex(indexSpec, batchUpdate.getRow(), newColumnValues);
    }
}
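
A side note on the merge loop at the end of updateIndexes: the containsKey-then-put pair is the classic insert-only-if-absent idiom, which since Java 8 can also be written with Map.putIfAbsent. A standalone sketch of the equivalence, using String keys instead of byte[] for brevity (the names here are made up):

import java.util.NavigableMap;
import java.util.TreeMap;

public class PutIfAbsentSketch {
    public static void main(String[] args) {
        NavigableMap<String, String> newValues = new TreeMap<>();
        newValues.put("col1", "new");

        NavigableMap<String, String> oldValues = new TreeMap<>();
        oldValues.put("col1", "old");
        oldValues.put("col2", "old");

        // Same effect as the containsKey()/put() pair above: entries already
        // in newValues win, old values only fill the gaps.
        oldValues.forEach(newValues::putIfAbsent);

        System.out.println(newValues); // {col1=new, col2=old}
    }
}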

From source file: org.apache.hadoop.hbase.regionserver.tableindexed.IndexedRegion.java

private NavigableMap<byte[], byte[]> getColumnsFromBatchUpdate(BatchUpdate b) {
    NavigableMap<byte[], byte[]> columnValues = new TreeMap<byte[], byte[]>(Bytes.BYTES_COMPARATOR);
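    // Collect column -> value pairs from Put operations only; delete
    // operations are handled separately by the caller (updateIndexes).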
    for (BatchOperation op : b) {
        if (op.isPut()) {
            columnValues.put(op.getColumn(), op.getValue());
        }
    }
    return columnValues;
}
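
Note the explicit Bytes.BYTES_COMPARATOR above: byte[] does not implement Comparable, so a TreeMap keyed on byte arrays must be given a comparator, otherwise the very first put throws ClassCastException. Outside HBase, the same ordering (lexicographic over unsigned bytes) is available from Arrays.compareUnsigned since Java 9; a small sketch under that assumption:

import java.util.Arrays;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ByteArrayKeySketch {
    public static void main(String[] args) {
        // Arrays.compareUnsigned orders byte arrays lexicographically over
        // unsigned byte values, the same ordering Bytes.BYTES_COMPARATOR uses.
        NavigableMap<byte[], String> columns = new TreeMap<>(Arrays::compareUnsigned);

        columns.put(new byte[] { 2 }, "second");
        columns.put(new byte[] { 1 }, "first");

        // Keys come back in byte order, regardless of insertion order.
        System.out.println(columns.firstEntry().getValue()); // first
    }
}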

From source file: org.apache.hadoop.hbase.regionserver.TestHRegion.java

@Test
public void testDelete_CheckFamily() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] fam2 = Bytes.toBytes("fam2");
    byte[] fam3 = Bytes.toBytes("fam3");
    byte[] fam4 = Bytes.toBytes("fam4");

    // Setting up region
    String method = this.getName();
    this.region = initHRegion(tableName, method, CONF, fam1, fam2, fam3);
    try {
        List<Cell> kvs = new ArrayList<Cell>();
        kvs.add(new KeyValue(row1, fam4, null, null));

        // testing existing family
        byte[] family = fam2;
        try {
            NavigableMap<byte[], List<Cell>> deleteMap = new TreeMap<byte[], List<Cell>>(
                    Bytes.BYTES_COMPARATOR);
            deleteMap.put(family, kvs);
            region.delete(deleteMap, Durability.SYNC_WAL);
        } catch (Exception e) {
            assertTrue("Family " + new String(family) + " does not exist", false);
        }

        // testing non existing family
        boolean ok = false;
        family = fam4;
        try {
            NavigableMap<byte[], List<Cell>> deleteMap = new TreeMap<byte[], List<Cell>>(
                    Bytes.BYTES_COMPARATOR);
            deleteMap.put(family, kvs);
            region.delete(deleteMap, Durability.SYNC_WAL);
        } catch (Exception e) {
            ok = true;
        }
        assertEquals("Family " + new String(family) + " does exist", true, ok);
    } finally {
        HRegion.closeHRegion(this.region);
        this.region = null;
    }
}

From source file: org.apache.hadoop.hbase.regionserver.TestHRegion.java

@Test
public void testDelete_CheckTimestampUpdated() throws IOException {
    TableName tableName = TableName.valueOf(name.getMethodName());
    byte[] row1 = Bytes.toBytes("row1");
    byte[] col1 = Bytes.toBytes("col1");
    byte[] col2 = Bytes.toBytes("col2");
    byte[] col3 = Bytes.toBytes("col3");

    // Setting up region
    String method = this.getName();
    this.region = initHRegion(tableName, method, CONF, fam1);
    try {
        // Building checkerList
        List<Cell> kvs = new ArrayList<Cell>();
        kvs.add(new KeyValue(row1, fam1, col1, null));
        kvs.add(new KeyValue(row1, fam1, col2, null));
        kvs.add(new KeyValue(row1, fam1, col3, null));

        NavigableMap<byte[], List<Cell>> deleteMap = new TreeMap<byte[], List<Cell>>(Bytes.BYTES_COMPARATOR);
        deleteMap.put(fam1, kvs);
        region.delete(deleteMap, Durability.SYNC_WAL);

        // extract the key values out the memstore:
        // This is kinda hacky, but better than nothing...
        long now = System.currentTimeMillis();
        DefaultMemStore memstore = (DefaultMemStore) ((HStore) region.getStore(fam1)).memstore;
        KeyValue firstKv = memstore.kvset.first();
        assertTrue(firstKv.getTimestamp() <= now);
        now = firstKv.getTimestamp();
        for (Cell cell : memstore.kvset) {
            assertTrue(cell.getTimestamp() <= now);
            now = cell.getTimestamp();
        }
    } finally {
        HRegion.closeHRegion(this.region);
        this.region = null;
    }
}

From source file: org.apache.hadoop.hbase.regionserver.wal.AbstractTestFSWAL.java

/**
 * On rolling a wal after reaching the threshold, {@link WAL#rollWriter()} returns the list of
 * regions which should be flushed in order to archive the oldest wal file.
 * <p>
 * This method tests this behavior by inserting edits and rolling the wal enough times to reach
 * the max number of logs threshold. It checks whether we get the "right regions" for flush on
 * rolling the wal.
 * @throws Exception
 */
@Test
public void testFindMemStoresEligibleForFlush() throws Exception {
    LOG.debug("testFindMemStoresEligibleForFlush");
    Configuration conf1 = HBaseConfiguration.create(CONF);
    conf1.setInt("hbase.regionserver.maxlogs", 1);
    AbstractFSWAL<?> wal = newWAL(FS, FSUtils.getRootDir(conf1), DIR.toString(),
            HConstants.HREGION_OLDLOGDIR_NAME, conf1, null, true, null, null);
    HTableDescriptor t1 = new HTableDescriptor(TableName.valueOf("t1")).addFamily(new HColumnDescriptor("row"));
    HTableDescriptor t2 = new HTableDescriptor(TableName.valueOf("t2")).addFamily(new HColumnDescriptor("row"));
    HRegionInfo hri1 = new HRegionInfo(t1.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(t2.getTableName(), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    // add edits and roll the wal
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    NavigableMap<byte[], Integer> scopes1 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : t1.getFamiliesKeys()) {
        scopes1.put(fam, 0);
    }
    NavigableMap<byte[], Integer> scopes2 = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : t2.getFamiliesKeys()) {
        scopes2.put(fam, 0);
    }
    try {
        addEdits(wal, hri1, t1, 2, mvcc, scopes1);
        wal.rollWriter();
        // add some more edits and roll the wal. This would reach the log number threshold
        addEdits(wal, hri1, t1, 2, mvcc, scopes1);
        wal.rollWriter();
        // with above rollWriter call, the max logs limit is reached.
        assertTrue(wal.getNumRolledLogFiles() == 2);

        // get the regions to flush; since there is only one region in the oldest wal, it should
        // return only one region.
        byte[][] regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(1, regionsToFlush.length);
        assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
        // insert edits in second region
        addEdits(wal, hri2, t2, 2, mvcc, scopes2);
        // get the regions to flush, it should still read region1.
        regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(1, regionsToFlush.length);
        assertEquals(hri1.getEncodedNameAsBytes(), regionsToFlush[0]);
        // flush region 1, and roll the wal file. Only last wal which has entries for region1 should
        // remain.
        flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
        wal.rollWriter();
        // only one wal should remain now (that is for the second region).
        assertEquals(1, wal.getNumRolledLogFiles());
        // flush the second region
        flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
        wal.rollWriter(true);
        // no wal should remain now.
        assertEquals(0, wal.getNumRolledLogFiles());
        // add edits both to region 1 and region 2, and roll.
        addEdits(wal, hri1, t1, 2, mvcc, scopes1);
        addEdits(wal, hri2, t2, 2, mvcc, scopes2);
        wal.rollWriter();
        // add edits and roll the writer, to reach the max logs limit.
        assertEquals(1, wal.getNumRolledLogFiles());
        addEdits(wal, hri1, t1, 2, mvcc, scopes1);
        wal.rollWriter();
        // it should return two regions to flush, as the oldest wal file has entries
        // for both regions.
        regionsToFlush = wal.findRegionsToForceFlush();
        assertEquals(2, regionsToFlush.length);
        // flush both regions
        flushRegion(wal, hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
        flushRegion(wal, hri2.getEncodedNameAsBytes(), t2.getFamiliesKeys());
        wal.rollWriter(true);
        assertEquals(0, wal.getNumRolledLogFiles());
        // Add an edit to region1, and roll the wal.
        addEdits(wal, hri1, t1, 2, mvcc, scopes1);
        // tests partial flush: roll on a partial flush, and ensure that wal is not archived.
        wal.startCacheFlush(hri1.getEncodedNameAsBytes(), t1.getFamiliesKeys());
        wal.rollWriter();
        wal.completeCacheFlush(hri1.getEncodedNameAsBytes());
        assertEquals(1, wal.getNumRolledLogFiles());
    } finally {
        if (wal != null) {
            wal.close();
        }
    }
}
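
The scopes1/scopes2 maps above show a pattern that recurs in the remaining examples: every column family is mapped to a replication scope, with 0 meaning local-only (HConstants.REPLICATION_SCOPE_LOCAL in HBase). A reduced, standalone sketch of that idiom, with made-up family names standing in for htd.getFamiliesKeys():

import java.util.Arrays;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;

public class ScopeMapSketch {
    // Local-only replication; HBase exposes this as HConstants.REPLICATION_SCOPE_LOCAL.
    private static final int REPLICATION_SCOPE_LOCAL = 0;

    public static void main(String[] args) {
        // Stand-in for htd.getFamiliesKeys(), which yields the table's
        // column-family names as byte[].
        List<byte[]> familyKeys = List.of("f1".getBytes(), "f2".getBytes());

        NavigableMap<byte[], Integer> scopes = new TreeMap<>(Arrays::compareUnsigned);
        for (byte[] fam : familyKeys) {
            scopes.put(fam, REPLICATION_SCOPE_LOCAL);
        }
        System.out.println(scopes.size()); // 2
    }
}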

From source file: org.apache.hadoop.hbase.regionserver.wal.AbstractTestFSWAL.java

/**
 * Test flush for sure has a sequence id that is beyond the last edit appended. We do this by
 * slowing appends in the background ring buffer thread while in foreground we call flush. The
 * addition of the sync over HRegion in flush should fix an issue where flush was returning before
 * all of its appends had made it out to the WAL (HBASE-11109).
 * @throws IOException
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-11109">HBASE-11109</a>
 */
@Test
public void testFlushSequenceIdIsGreaterThanAllEditsInHFile() throws IOException {
    String testName = currentTest.getMethodName();
    final TableName tableName = TableName.valueOf(testName);
    final HRegionInfo hri = new HRegionInfo(tableName);
    final byte[] rowName = tableName.getName();
    final HTableDescriptor htd = new HTableDescriptor(tableName);
    htd.addFamily(new HColumnDescriptor("f"));
    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(),
            TEST_UTIL.getConfiguration(), htd);
    HBaseTestingUtility.closeRegionAndWAL(r);
    final int countPerFamily = 10;
    final AtomicBoolean goslow = new AtomicBoolean(false);
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    // subclass and doctor a method.
    AbstractFSWAL<?> wal = newSlowWAL(FS, FSUtils.getRootDir(CONF), DIR.toString(), testName, CONF, null, true,
            null, null, new Runnable() {

                @Override
                public void run() {
                    if (goslow.get()) {
                        Threads.sleep(100);
                        LOG.debug("Sleeping before appending 100ms");
                    }
                }
            });
    HRegion region = HRegion.openHRegion(TEST_UTIL.getConfiguration(), TEST_UTIL.getTestFileSystem(),
            TEST_UTIL.getDefaultRootDirPath(), hri, htd, wal);
    EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
    try {
        List<Put> puts = null;
        for (HColumnDescriptor hcd : htd.getFamilies()) {
            puts = TestWALReplay.addRegionEdits(rowName, hcd.getName(), countPerFamily, ee, region, "x");
        }

        // Now assert edits made it in.
        final Get g = new Get(rowName);
        Result result = region.get(g);
        assertEquals(countPerFamily * htd.getFamilies().size(), result.size());

        // Construct a WALEdit and add it a few times to the WAL.
        WALEdit edits = new WALEdit();
        for (Put p : puts) {
            CellScanner cs = p.cellScanner();
            while (cs.advance()) {
                edits.add(cs.current());
            }
        }
        // Add any old cluster id.
        List<UUID> clusterIds = new ArrayList<UUID>();
        clusterIds.add(UUID.randomUUID());
        // Now make appends run slow.
        goslow.set(true);
        for (int i = 0; i < countPerFamily; i++) {
            final HRegionInfo info = region.getRegionInfo();
            final WALKey logkey = new WALKey(info.getEncodedNameAsBytes(), tableName,
                    System.currentTimeMillis(), clusterIds, -1, -1, region.getMVCC(), scopes);
            wal.append(info, logkey, edits, true);
        }
        region.flush(true);
        // FlushResult.flushSequenceId is not visible here so go get the current sequence id.
        long currentSequenceId = region.getReadPoint(null);
        // Now release the appends
        goslow.set(false);
        assertTrue(currentSequenceId >= region.getReadPoint(null));
    } finally {
        region.close(true);
        wal.close();
    }
}

From source file: org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * Tests for hbase-2727.
 * @throws Exception
 * @see <a href="https://issues.apache.org/jira/browse/HBASE-2727">HBASE-2727</a>
 */
@Test
public void test2727() throws Exception {
    // Test being able to have > 1 set of edits in the recovered.edits directory.
    // Ensure edits are replayed properly.
    final TableName tableName = TableName.valueOf("test2727");

    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    Region region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    final byte[] rowName = tableName.getName();

    WAL wal1 = createWAL(this.conf, hbaseRootDir, logName);
    // Add 1k to each family.
    final int countPerFamily = 1000;

    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal1, htd, mvcc, scopes);
    }
    wal1.shutdown();
    runWALSplit(this.conf);

    WAL wal2 = createWAL(this.conf, hbaseRootDir, logName);
    // Add 1k to each family.
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal2, htd, mvcc, scopes);
    }
    wal2.shutdown();
    runWALSplit(this.conf);

    WAL wal3 = createWAL(this.conf, hbaseRootDir, logName);
    try {
        HRegion region = HRegion.openHRegion(this.conf, this.fs, hbaseRootDir, hri, htd, wal3);
        long seqid = region.getOpenSeqNum();
        // The region opens with sequenceId 1. With 6k edits, its sequence number reaches 6k + 1.
        // When opened, this region replays the 6k edits and increments the sequenceId by 1.
        assertTrue(seqid > mvcc.getWritePoint());
        assertEquals(seqid - 1, mvcc.getWritePoint());
        LOG.debug("region.getOpenSeqNum(): " + region.getOpenSeqNum() + ", wal3.id: " + mvcc.getReadPoint());

        // TODO: Scan all.
        region.close();
    } finally {
        wal3.close();
    }
}

From source file: org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * Create an HRegion with the result of a WAL split and test we only see the
 * good edits.
 * @throws Exception
 */
@Test
public void testReplayEditsWrittenIntoWAL() throws Exception {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic3FamilyHTD(tableName);
    HRegion region2 = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region2);
    final WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    final byte[] rowName = tableName.getName();
    final byte[] regionName = hri.getEncodedNameAsBytes();

    // Add 1k to each family.
    final int countPerFamily = 1000;
    Set<byte[]> familyNames = new HashSet<byte[]>();
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    for (HColumnDescriptor hcd : htd.getFamilies()) {
        addWALEdits(tableName, hri, rowName, hcd.getName(), countPerFamily, ee, wal, htd, mvcc, scopes);
        familyNames.add(hcd.getName());
    }

    // Add a cache flush, shouldn't have any effect
    wal.startCacheFlush(regionName, familyNames);
    wal.completeCacheFlush(regionName);

    // Add an edit to another family, should be skipped.
    WALEdit edit = new WALEdit();
    long now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("another family"), rowName, now, rowName));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Delete the c family to verify deletes make it over.
    edit = new WALEdit();
    now = ee.currentTime();
    edit.add(new KeyValue(rowName, Bytes.toBytes("c"), null, now, KeyValue.Type.DeleteFamily));
    wal.append(hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, now, mvcc, scopes), edit, true);

    // Sync.
    wal.sync();
    // Make a new conf and a new fs for the splitter to run on so we can take
    // over old wal.
    final Configuration newConf = HBaseConfiguration.create(this.conf);
    User user = HBaseTestingUtility.getDifferentUser(newConf, ".replay.wal.secondtime");
    user.runAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            runWALSplit(newConf);
            FileSystem newFS = FileSystem.get(newConf);
            // 100k seems to make for about 4 flushes during HRegion#initialize.
            newConf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 100);
            // Make a new wal for new region.
            WAL newWal = createWAL(newConf, hbaseRootDir, logName);
            final AtomicInteger flushcount = new AtomicInteger(0);
            try {
                final HRegion region = new HRegion(basedir, newWal, newFS, newConf, hri, htd, null) {
                    @Override
                    protected FlushResult internalFlushcache(final WAL wal, final long myseqid,
                            final Collection<Store> storesToFlush, MonitoredTask status,
                            boolean writeFlushWalMarker) throws IOException {
                        LOG.info("InternalFlushCache Invoked");
                        FlushResult fs = super.internalFlushcache(wal, myseqid, storesToFlush,
                                Mockito.mock(MonitoredTask.class), writeFlushWalMarker);
                        flushcount.incrementAndGet();
                        return fs;
                    }
                };
                // The seq id this region has opened up with
                long seqid = region.initialize();

                // The mvcc readpoint from inserting the data.
                long writePoint = mvcc.getWritePoint();

                // We flushed during init.
                assertTrue("Flushcount=" + flushcount.get(), flushcount.get() > 0);
                assertTrue((seqid - 1) == writePoint);

                Get get = new Get(rowName);
                Result result = region.get(get);
                // Make sure we only see the good edits
                assertEquals(countPerFamily * (htd.getFamilies().size() - 1), result.size());
                region.close();
            } finally {
                newWal.close();
            }
            return null;
        }
    });
}

From source file: org.apache.hadoop.hbase.regionserver.wal.AbstractTestWALReplay.java

/**
 * testcase for https://issues.apache.org/jira/browse/HBASE-14949.
 */
private void testNameConflictWhenSplit(boolean largeFirst) throws IOException {
    final TableName tableName = TableName.valueOf("testReplayEditsWrittenIntoWAL");
    final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl();
    final HRegionInfo hri = createBasic3FamilyHRegionInfo(tableName);
    final Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    deleteDir(basedir);

    final HTableDescriptor htd = createBasic1FamilyHTD(tableName);
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    for (byte[] fam : htd.getFamiliesKeys()) {
        scopes.put(fam, 0);
    }
    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, hbaseRootDir, this.conf, htd);
    HBaseTestingUtility.closeRegionAndWAL(region);
    final byte[] family = htd.getColumnFamilies()[0].getName();
    final byte[] rowName = tableName.getName();
    FSWALEntry entry1 = createFSWALEntry(htd, hri, 1L, rowName, family, ee, mvcc, 1, scopes);
    FSWALEntry entry2 = createFSWALEntry(htd, hri, 2L, rowName, family, ee, mvcc, 2, scopes);

    Path largeFile = new Path(logDir, "wal-1");
    Path smallFile = new Path(logDir, "wal-2");
    writerWALFile(largeFile, Arrays.asList(entry1, entry2));
    writerWALFile(smallFile, Arrays.asList(entry2));
    FileStatus first, second;
    if (largeFirst) {
        first = fs.getFileStatus(largeFile);
        second = fs.getFileStatus(smallFile);
    } else {
        first = fs.getFileStatus(smallFile);
        second = fs.getFileStatus(largeFile);
    }
    WALSplitter.splitLogFile(hbaseRootDir, first, fs, conf, null, null, null, RecoveryMode.LOG_SPLITTING, wals);
    WALSplitter.splitLogFile(hbaseRootDir, second, fs, conf, null, null, null, RecoveryMode.LOG_SPLITTING,
            wals);
    WAL wal = createWAL(this.conf, hbaseRootDir, logName);
    region = HRegion.openHRegion(conf, this.fs, hbaseRootDir, hri, htd, wal);
    assertTrue(region.getOpenSeqNum() > mvcc.getWritePoint());
    assertEquals(2, region.get(new Get(rowName)).size());
}

From source file: org.apache.hadoop.hbase.replication.regionserver.Replication.java

/**
 * Utility method used to set the correct scopes on each log key. Does not set a scope on keys
 * from compaction WAL edits, or when the scope is local.
 * @param htd Descriptor used to find the scope to use
 * @param logKey Key that may get scoped according to its edits
 * @param logEdit Edits used to lookup the scopes
 */
public static void scopeWALEdits(HTableDescriptor htd, HLogKey logKey, WALEdit logEdit) {
    NavigableMap<byte[], Integer> scopes = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
    byte[] family;
    for (KeyValue kv : logEdit.getKeyValues()) {
        family = kv.getFamily();
        // This is expected and the KV should not be replicated
        if (CellUtil.matchingFamily(kv, WALEdit.METAFAMILY))
            continue;
        // Unexpected, has a tendency to happen in unit tests
        assert htd.getFamily(family) != null;

        int scope = htd.getFamily(family).getScope();
        if (scope != REPLICATION_SCOPE_LOCAL && !scopes.containsKey(family)) {
            scopes.put(family, scope);
        }
    }
    if (!scopes.isEmpty()) {
        logKey.setScopes(scopes);
    }
}
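
One last observation on scopeWALEdits: the map is filled only for non-local scopes not yet recorded, and attached to the key only when non-empty. The same guard-then-attach shape, boiled down to plain Java (all names here are illustrative):

import java.util.NavigableMap;
import java.util.TreeMap;

public class GuardedScopesSketch {
    public static void main(String[] args) {
        String[] families = { "a", "b", "a", "c" };
        int[] familyScopes = { 0, 1, 0, 1 }; // 0 = local, 1 = global

        NavigableMap<String, Integer> scopes = new TreeMap<>();
        for (int i = 0; i < families.length; i++) {
            // Skip local scopes and families already recorded, as scopeWALEdits does.
            if (familyScopes[i] != 0 && !scopes.containsKey(families[i])) {
                scopes.put(families[i], familyScopes[i]);
            }
        }
        // Attach only if anything survived the filter.
        if (!scopes.isEmpty()) {
            System.out.println("setScopes(" + scopes + ")"); // setScopes({b=1, c=1})
        }
    }
}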