Example usage for io.netty.buffer ByteBuf readLong

List of usage examples for io.netty.buffer ByteBuf readLong

Introduction

In this page you can find the example usage for io.netty.buffer ByteBuf readLong.

Prototype

public abstract long readLong();

Source Link

Document

Gets a 64-bit integer at the current readerIndex and increases the readerIndex by 8 in this buffer.

Usage

From source file:org.apache.bookkeeper.bookie.EntryLogTest.java

License:Apache License

/**
 * Verifies that entries written while the EntryLogger is configured with one
 * entryLogPerLedger setting remain readable after a new EntryLogger is created
 * with the other setting — i.e. the two EntryLogManager implementations produce
 * compatible on-disk entry logs.
 *
 * @param initialEntryLogPerLedgerEnabled entryLogPerLedger setting used while writing
 * @param laterEntryLogPerLedgerEnabled   entryLogPerLedger setting used while reading back
 */
public void testSwappingEntryLogManager(boolean initialEntryLogPerLedgerEnabled,
        boolean laterEntryLogPerLedgerEnabled) throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setEntryLogPerLedgerEnabled(initialEntryLogPerLedgerEnabled);
    conf.setLedgerDirNames(createAndGetLedgerDirs(2));
    // pre allocation enabled
    conf.setEntryLogFilePreAllocationEnabled(true);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));

    EntryLogger entryLogger = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerBase entryLogManager = (EntryLogManagerBase) entryLogger.getEntryLogManager();
    Assert.assertEquals("EntryLogManager class type",
            initialEntryLogPerLedgerEnabled ? EntryLogManagerForEntryLogPerLedger.class
                    : EntryLogManagerForSingleEntryLog.class,
            entryLogManager.getClass());

    int numOfActiveLedgers = 10;
    int numEntries = 10;
    long[][] positions = new long[numOfActiveLedgers][];
    for (int i = 0; i < numOfActiveLedgers; i++) {
        positions[i] = new long[numEntries];
    }

    /*
     * add entries to the ledgers
     */
    for (int j = 0; j < numEntries; j++) {
        for (int i = 0; i < numOfActiveLedgers; i++) {
            positions[i][j] = entryLogger.addEntry((long) i, generateEntry(i, j));
            // the entry log id is encoded in the high 32 bits of the returned position
            long entryLogId = (positions[i][j] >> 32L);
            if (initialEntryLogPerLedgerEnabled) {
                Assert.assertEquals("EntryLogId for ledger: " + i, i, entryLogId);
            } else {
                Assert.assertEquals("EntryLogId for ledger: " + i, 0, entryLogId);
            }
        }
    }

    for (long i = 0; i < numOfActiveLedgers; i++) {
        entryLogManager.createNewLog(i);
    }

    /*
     * since a new entrylog is created for all the ledgers, the previous
     * entrylogs must be rotated and with the following flushRotatedLogs
     * call they should be force-written and the files should be closed.
     */
    entryLogManager.flushRotatedLogs();

    /*
     * new entrylogger and entryLogManager are created with
     * 'laterEntryLogPerLedgerEnabled' conf
     */
    conf.setEntryLogPerLedgerEnabled(laterEntryLogPerLedgerEnabled);
    LedgerDirsManager newLedgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger newEntryLogger = new EntryLogger(conf, newLedgerDirsManager);
    EntryLogManager newEntryLogManager = newEntryLogger.getEntryLogManager();
    Assert.assertEquals("EntryLogManager class type",
            laterEntryLogPerLedgerEnabled ? EntryLogManagerForEntryLogPerLedger.class
                    : EntryLogManagerForSingleEntryLog.class,
            newEntryLogManager.getClass());

    /*
     * read the entries (which are written with previous entrylogger) with
     * new entrylogger
     */
    for (int j = 0; j < numEntries; j++) {
        for (int i = 0; i < numOfActiveLedgers; i++) {
            String expectedValue = "ledger-" + i + "-" + j;
            ByteBuf buf = newEntryLogger.readEntry(i, j, positions[i][j]);
            // entry layout: 8-byte ledger id, 8-byte entry id, then payload
            long ledgerId = buf.readLong();
            long entryId = buf.readLong();
            byte[] data = new byte[buf.readableBytes()];
            buf.readBytes(data);
            assertEquals("LedgerId ", i, ledgerId);
            assertEquals("EntryId ", j, entryId);
            assertEquals("Entry Data ", expectedValue, new String(data));
        }
    }
}

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorage.java

License:Apache License

/**
 * Returns the last-add-confirmed (LAC) for the given ledger, consulting the
 * ledger cache first and falling back to the on-disk LAC entry.
 */
@Override
public long getLastAddConfirmed(long ledgerId) throws IOException {
    // Fast path: the LAC may already be cached.
    Long cachedLac = ledgerCache.getLastAddConfirmed(ledgerId);
    if (cachedLac != null) {
        return cachedLac;
    }

    // Slow path: read the LAC from the special last-add-confirmed entry.
    ByteBuf entry = getEntry(ledgerId, BookieProtocol.LAST_ADD_CONFIRMED);
    if (entry == null) {
        return BookieProtocol.INVALID_ENTRY_ID;
    }
    try {
        entry.skipBytes(2 * Long.BYTES); // skip ledger & entry id
        long lac = entry.readLong();
        return ledgerCache.updateLastAddConfirmed(ledgerId, lac);
    } finally {
        entry.release();
    }
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.LocationsIndexRebuildTest.java

License:Apache License

/**
 * Writes ledgers/entries through DbLedgerStorage, rebuilds the entry-location
 * index with the "rebuild-db-ledger-locations-index" shell command, and then
 * verifies the rebuilt index serves exactly the same ledgers and entries.
 */
@Test
public void test() throws Exception {
    // createTempFile + delete + mkdir: turn a unique temp file name into a temp directory
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);

    System.out.println(tmpDir);

    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    conf.setLedgerStorageClass(DbLedgerStorage.class.getName());
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));

    DbLedgerStorage ledgerStorage = new DbLedgerStorage();
    ledgerStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    // Insert some ledger & entries in the storage
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        ledgerStorage.setMasterKey(ledgerId, ("ledger-" + ledgerId).getBytes());
        ledgerStorage.setFenced(ledgerId);

        for (long entryId = 0; entryId < 100; entryId++) {
            // entry layout: 8-byte ledger id, 8-byte entry id, then payload
            ByteBuf entry = Unpooled.buffer(128);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            ledgerStorage.addEntry(entry);
        }
    }

    ledgerStorage.flush();
    ledgerStorage.shutdown();

    // Rebuild index through the tool
    BookieShell shell = new BookieShell();
    shell.setConf(conf);
    int res = shell.run(new String[] { "rebuild-db-ledger-locations-index" });

    Assert.assertEquals(0, res);

    // Verify that db index has the same entries
    ledgerStorage = new DbLedgerStorage();
    ledgerStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    Set<Long> ledgers = Sets.newTreeSet(ledgerStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)), ledgers);

    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        Assert.assertEquals(true, ledgerStorage.isFenced(ledgerId));
        Assert.assertEquals("ledger-" + ledgerId, new String(ledgerStorage.readMasterKey(ledgerId)));

        // last entry of each ledger must be entry id 99
        ByteBuf lastEntry = ledgerStorage.getLastEntry(ledgerId);
        assertEquals(ledgerId, lastEntry.readLong());
        long lastEntryId = lastEntry.readLong();
        assertEquals(99, lastEntryId);

        for (long entryId = 0; entryId < 100; entryId++) {
            ByteBuf entry = Unpooled.buffer(1024);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            ByteBuf result = ledgerStorage.getEntry(ledgerId, entryId);
            Assert.assertEquals(entry, result);
        }
    }

    ledgerStorage.shutdown();
    FileUtils.forceDelete(tmpDir);
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.SingleDirectoryDbLedgerStorage.java

License:Apache License

/**
 * Returns the last (highest entry id) entry stored for the given ledger.
 * Looks in the active write cache first, then in the cache currently being
 * flushed, and finally falls back to the entry location index + entry log.
 *
 * @param ledgerId ledger to look up
 * @return the last entry's buffer (caller owns the returned ByteBuf)
 * @throws IOException on storage read failure
 */
public ByteBuf getLastEntry(long ledgerId) throws IOException {
    long startTime = MathUtils.nowInNano();

    long stamp = writeCacheRotationLock.readLock();
    try {
        // First try to read from the write cache of recent entries
        ByteBuf entry = writeCache.getLastEntry(ledgerId);
        if (entry != null) {
            // Fixed: the original double-checked log.isDebugEnabled() in a
            // redundant nested block; a single check suffices.
            if (log.isDebugEnabled()) {
                long foundLedgerId = entry.readLong(); // ledgerId
                long entryId = entry.readLong();
                entry.resetReaderIndex(); // rewind so the caller reads from the start
                log.debug("Found last entry for ledger {} in write cache: {}@{}", ledgerId, foundLedgerId,
                        entryId);
            }

            recordSuccessfulEvent(dbLedgerStorageStats.getReadCacheHitStats(), startTime);
            recordSuccessfulEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
            return entry;
        }

        // If there's a flush going on, the entry might be in the flush buffer
        entry = writeCacheBeingFlushed.getLastEntry(ledgerId);
        if (entry != null) {
            if (log.isDebugEnabled()) {
                entry.readLong(); // skip ledgerId
                long entryId = entry.readLong();
                entry.resetReaderIndex();
                log.debug("Found last entry for ledger {} in write cache being flushed: {}", ledgerId,
                        entryId);
            }

            recordSuccessfulEvent(dbLedgerStorageStats.getReadCacheHitStats(), startTime);
            recordSuccessfulEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
            return entry;
        }
    } finally {
        writeCacheRotationLock.unlockRead(stamp);
    }

    // Search the last entry in storage
    long lastEntryId = entryLocationIndex.getLastEntryInLedger(ledgerId);
    if (log.isDebugEnabled()) {
        log.debug("Found last entry for ledger {} in db: {}", ledgerId, lastEntryId);
    }

    long entryLocation = entryLocationIndex.getLocation(ledgerId, lastEntryId);
    ByteBuf content = entryLogger.readEntry(ledgerId, lastEntryId, entryLocation);

    recordSuccessfulEvent(dbLedgerStorageStats.getReadCacheMissStats(), startTime);
    recordSuccessfulEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
    return content;
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.SingleDirectoryDbLedgerStorage.java

License:Apache License

/**
 * Returns the last-add-confirmed (LAC) for the given ledger, preferring the
 * transient in-memory ledger info and falling back to the on-disk LAC entry.
 */
@Override
public long getLastAddConfirmed(long ledgerId) throws IOException {
    // Fast path: the transient ledger info may already hold an assigned LAC.
    TransientLedgerInfo cachedInfo = transientLedgerInfoCache.get(ledgerId);
    if (cachedInfo != null) {
        long cachedLac = cachedInfo.getLastAddConfirmed();
        if (cachedLac != TransientLedgerInfo.NOT_ASSIGNED_LAC) {
            return cachedLac;
        }
    }

    // Slow path: read the LAC from the special last-add-confirmed entry.
    ByteBuf entry = getEntry(ledgerId, BookieProtocol.LAST_ADD_CONFIRMED);
    try {
        entry.skipBytes(2 * Long.BYTES); // skip ledger id and entry id
        long lac = entry.readLong();
        return getOrAddLedgerInfo(ledgerId).setLastAddConfirmed(lac);
    } finally {
        entry.release();
    }
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.OffloadIndexTest.java

License:Apache License

/**
 * End-to-end test of OffloadIndexBlockImpl: builds an index block, verifies
 * entry-id lookups resolve to the covering block, round-trips the block
 * through its serialized stream form, and checks error handling for junk and
 * truncated streams.
 */
@Test
public void offloadIndexBlockImplTest() throws Exception {
    OffloadIndexBlockBuilder blockBuilder = OffloadIndexBlockBuilder.create();
    LedgerMetadata metadata = createLedgerMetadata();
    log.debug("created metadata: {}", metadata.toString());

    blockBuilder.withLedgerMetadata(metadata).withDataObjectLength(1).withDataBlockHeaderLength(23455);

    blockBuilder.addBlock(0, 2, 64 * 1024 * 1024);
    blockBuilder.addBlock(1000, 3, 64 * 1024 * 1024);
    blockBuilder.addBlock(2000, 4, 64 * 1024 * 1024);
    OffloadIndexBlock indexBlock = blockBuilder.build();

    // verify getEntryCount and getLedgerMetadata
    assertEquals(indexBlock.getEntryCount(), 3);
    assertEquals(indexBlock.getLedgerMetadata(), metadata);

    // verify getIndexEntryForEntry: a lookup inside a block's range resolves
    // to that block's index entry
    OffloadIndexEntry entry1 = indexBlock.getIndexEntryForEntry(0);
    assertEquals(entry1.getEntryId(), 0);
    assertEquals(entry1.getPartId(), 2);
    assertEquals(entry1.getOffset(), 0);

    OffloadIndexEntry entry11 = indexBlock.getIndexEntryForEntry(500);
    assertEquals(entry11, entry1);

    OffloadIndexEntry entry2 = indexBlock.getIndexEntryForEntry(1000);
    assertEquals(entry2.getEntryId(), 1000);
    assertEquals(entry2.getPartId(), 3);
    assertEquals(entry2.getOffset(), 64 * 1024 * 1024);

    OffloadIndexEntry entry22 = indexBlock.getIndexEntryForEntry(1300);
    assertEquals(entry22, entry2);

    OffloadIndexEntry entry3 = indexBlock.getIndexEntryForEntry(2000);

    assertEquals(entry3.getEntryId(), 2000);
    assertEquals(entry3.getPartId(), 4);
    assertEquals(entry3.getOffset(), 2 * 64 * 1024 * 1024);

    OffloadIndexEntry entry33 = indexBlock.getIndexEntryForEntry(3000);
    assertEquals(entry33, entry3);

    // lookups beyond the last entry id must fail
    try {
        indexBlock.getIndexEntryForEntry(6000);
        fail("Should throw IndexOutOfBoundsException.");
    } catch (Exception e) {
        assertTrue(e instanceof IndexOutOfBoundsException);
        assertEquals(e.getMessage(), "Entry index: 6000 beyond lastEntryId: 5000");
    }

    // verify toStream: header is magic word, block length, data object length,
    // data header length, entry count, segment metadata length
    InputStream out = indexBlock.toStream();
    byte[] b = new byte[1024];
    int readoutLen = out.read(b);
    out.close();
    ByteBuf wrapper = Unpooled.wrappedBuffer(b);
    int magic = wrapper.readInt();
    int indexBlockLength = wrapper.readInt();
    long dataObjectLength = wrapper.readLong();
    long dataHeaderLength = wrapper.readLong();
    int indexEntryCount = wrapper.readInt();
    int segmentMetadataLength = wrapper.readInt();

    // verify counter
    assertEquals(magic, OffloadIndexBlockImpl.getIndexMagicWord());
    assertEquals(indexBlockLength, readoutLen);
    assertEquals(indexEntryCount, 3);
    assertEquals(dataObjectLength, 1);
    assertEquals(dataHeaderLength, 23455);

    // skip over the serialized segment (ledger) metadata
    wrapper.readBytes(segmentMetadataLength);
    log.debug("magic: {}, blockLength: {}, metadataLength: {}, indexCount: {}", magic, indexBlockLength,
            segmentMetadataLength, indexEntryCount);

    // verify each serialized entry matches what the lookup API returned
    OffloadIndexEntry e1 = OffloadIndexEntryImpl.of(wrapper.readLong(), wrapper.readInt(), wrapper.readLong(),
            dataHeaderLength);
    OffloadIndexEntry e2 = OffloadIndexEntryImpl.of(wrapper.readLong(), wrapper.readInt(), wrapper.readLong(),
            dataHeaderLength);
    OffloadIndexEntry e3 = OffloadIndexEntryImpl.of(wrapper.readLong(), wrapper.readInt(), wrapper.readLong(),
            dataHeaderLength);

    assertEquals(e1.getEntryId(), entry1.getEntryId());
    assertEquals(e1.getPartId(), entry1.getPartId());
    assertEquals(e1.getOffset(), entry1.getOffset());
    assertEquals(e1.getDataOffset(), entry1.getDataOffset());
    assertEquals(e2.getEntryId(), entry2.getEntryId());
    assertEquals(e2.getPartId(), entry2.getPartId());
    assertEquals(e2.getOffset(), entry2.getOffset());
    assertEquals(e2.getDataOffset(), entry2.getDataOffset());
    assertEquals(e3.getEntryId(), entry3.getEntryId());
    assertEquals(e3.getPartId(), entry3.getPartId());
    assertEquals(e3.getOffset(), entry3.getOffset());
    assertEquals(e3.getDataOffset(), entry3.getDataOffset());
    wrapper.release();

    // verify build OffloadIndexBlock from InputStream
    InputStream out2 = indexBlock.toStream();
    int streamLength = out2.available();
    out2.mark(0);
    OffloadIndexBlock indexBlock2 = blockBuilder.fromStream(out2);
    // 1. verify metadata that got from inputstream success.
    LedgerMetadata metadata2 = indexBlock2.getLedgerMetadata();
    log.debug("built metadata: {}", metadata2.toString());
    assertEquals(metadata2.getAckQuorumSize(), metadata.getAckQuorumSize());
    assertEquals(metadata2.getEnsembleSize(), metadata.getEnsembleSize());
    assertEquals(metadata2.getDigestType(), metadata.getDigestType());
    assertEquals(metadata2.getAllEnsembles().entrySet(), metadata.getAllEnsembles().entrySet());
    // 2. verify set all the entries
    assertEquals(indexBlock2.getEntryCount(), indexBlock.getEntryCount());
    // 3. verify reach end
    assertEquals(out2.read(), -1);

    out2.reset();
    byte[] streamContent = new byte[streamLength];
    // stream with all 0, simulate junk data, should throw exception for header magic not match.
    try (InputStream stream3 = new ByteArrayInputStream(streamContent, 0, streamLength)) {
        blockBuilder.fromStream(stream3);
        fail("Should throw IOException");
    } catch (Exception e) {
        assertTrue(e instanceof IOException);
        assertTrue(e.getMessage().contains("Invalid MagicWord"));
    }

    // simulate read header too small, throw EOFException.
    out2.read(streamContent);
    try (InputStream stream4 = new ByteArrayInputStream(streamContent, 0, streamLength - 1)) {
        blockBuilder.fromStream(stream4);
        fail("Should throw EOFException");
    } catch (Exception e) {
        assertTrue(e instanceof java.io.EOFException);
    }

    out2.close();
    indexBlock.close();
}

From source file:org.apache.bookkeeper.proto.checksum.DigestManager.java

License:Apache License

/**
 * Verifies the digest (MAC/checksum) embedded in a received entry and the
 * ledger/entry ids carried in its metadata.
 *
 * <p>Note: the two readLong() calls advance the buffer's readerIndex past the
 * ledger id and entry id, so on success the caller reads from there onward.
 *
 * @param entryId          expected entry id
 * @param dataReceived     buffer holding metadata, digest and payload
 * @param skipEntryIdCheck if true, do not validate the entry id
 * @throws BKDigestMatchException if the packet is too short, the digest does
 *                                not match, or an id mismatches
 */
private void verifyDigest(long entryId, ByteBuf dataReceived, boolean skipEntryIdCheck)
        throws BKDigestMatchException {

    if ((METADATA_LENGTH + macCodeLength) > dataReceived.readableBytes()) {
        // Fixed typo in the original message: "the packet it corrupt"
        logger.error(
                "Data received is smaller than the minimum for this digest type. "
                        + " Either the packet is corrupt, or the wrong digest is configured. "
                        + " Digest type: {}, Packet Length: {}",
                this.getClass().getName(), dataReceived.readableBytes());
        throw new BKDigestMatchException();
    }
    // The digest covers the metadata and the payload, but not the digest bytes
    // themselves, which sit between them.
    update(dataReceived.slice(0, METADATA_LENGTH));

    int offset = METADATA_LENGTH + macCodeLength;
    update(dataReceived.slice(offset, dataReceived.readableBytes() - offset));

    ByteBuf digest = allocator.buffer(macCodeLength);
    populateValueAndReset(digest);

    try {
        if (digest.compareTo(dataReceived.slice(METADATA_LENGTH, macCodeLength)) != 0) {
            logger.error("Mac mismatch for ledger-id: " + ledgerId + ", entry-id: " + entryId);
            throw new BKDigestMatchException();
        }
    } finally {
        digest.release();
    }

    long actualLedgerId = dataReceived.readLong();
    long actualEntryId = dataReceived.readLong();

    if (actualLedgerId != ledgerId) {
        logger.error("Ledger-id mismatch in authenticated message, expected: " + ledgerId + " , actual: "
                + actualLedgerId);
        throw new BKDigestMatchException();
    }

    if (!skipEntryIdCheck && actualEntryId != entryId) {
        logger.error("Entry-id mismatch in authenticated message, expected: " + entryId + " , actual: "
                + actualEntryId);
        throw new BKDigestMatchException();
    }

}

From source file:org.apache.bookkeeper.proto.checksum.DigestManager.java

License:Apache License

/**
 * Verifies the digest of a received LAC message and extracts the
 * last-add-confirmed value from it.
 *
 * @param dataReceived buffer holding LAC metadata followed by the digest
 * @return the last-add-confirmed value read from the message
 * @throws BKDigestMatchException if the packet is too short, the digest does
 *                                not match, or the ledger id mismatches
 */
public long verifyDigestAndReturnLac(ByteBuf dataReceived) throws BKDigestMatchException {
    if ((LAC_METADATA_LENGTH + macCodeLength) > dataReceived.readableBytes()) {
        // Fixed typo in the original message: "the packet it corrupt"
        logger.error(
                "Data received is smaller than the minimum for this digest type."
                        + " Either the packet is corrupt, or the wrong digest is configured. "
                        + " Digest type: {}, Packet Length: {}",
                this.getClass().getName(), dataReceived.readableBytes());
        throw new BKDigestMatchException();
    }

    // The digest covers the LAC metadata only.
    update(dataReceived.slice(0, LAC_METADATA_LENGTH));

    ByteBuf digest = allocator.buffer(macCodeLength);
    try {
        populateValueAndReset(digest);

        if (digest.compareTo(dataReceived.slice(LAC_METADATA_LENGTH, macCodeLength)) != 0) {
            logger.error("Mac mismatch for ledger-id LAC: " + ledgerId);
            throw new BKDigestMatchException();
        }
    } finally {
        digest.release();
    }

    long actualLedgerId = dataReceived.readLong();
    long lac = dataReceived.readLong();
    if (actualLedgerId != ledgerId) {
        logger.error("Ledger-id mismatch in authenticated message, expected: " + ledgerId + " , actual: "
                + actualLedgerId);
        throw new BKDigestMatchException();
    }
    return lac;
}

From source file:org.apache.bookkeeper.proto.checksum.DigestManager.java

License:Apache License

/**
 * Verifies the digest of a received entry and extracts the recovery data
 * (last add confirmed and ledger length) from its metadata.
 *
 * @param dataReceived buffer holding the authenticated entry
 * @return the lastAddConfirmed and length read from the entry metadata
 * @throws BKDigestMatchException if the digest verification fails
 */
public RecoveryData verifyDigestAndReturnLastConfirmed(ByteBuf dataReceived) throws BKDigestMatchException {
    verifyDigest(dataReceived);
    // position just past the 8-byte ledger id at the start of the metadata
    dataReceived.readerIndex(8);

    dataReceived.readLong(); // skip unused entryId
    long lastAddConfirmed = dataReceived.readLong();
    long length = dataReceived.readLong();
    return new RecoveryData(lastAddConfirmed, length);
}

From source file:org.apache.bookkeeper.test.ConcurrentLedgerTest.java

License:Apache License

/**
 * Reads back every previously written entry across all ledgers, asserting the
 * two payload longs match what the writer stored, and returns the elapsed
 * wall-clock time in milliseconds.
 */
private long doReads(int ledgers, int size, int totalwrites)
        throws IOException, InterruptedException, BookieException {
    final long startMillis = System.currentTimeMillis();
    final int entriesPerLedger = totalwrites / ledgers;
    for (int entryId = 1; entryId <= entriesPerLedger; entryId++) {
        for (int ledgerId = 1; ledgerId <= ledgers; ledgerId++) {
            ByteBuf entry = bookie.readEntry(ledgerId, entryId);
            // skip the ledger id and the entry id
            entry.skipBytes(2 * Long.BYTES);
            String ctx = ledgerId + "@" + entryId;
            assertEquals(ctx, ledgerId + 2, entry.readLong());
            assertEquals(ctx, entryId + 3, entry.readLong());
        }
    }
    return System.currentTimeMillis() - startMillis;
}