Example usage for io.netty.buffer ByteBuf capacity

Introduction

On this page you can find usage examples for io.netty.buffer ByteBuf.capacity().

Prototype

public abstract int capacity();

Document

Returns the number of bytes (octets) this buffer can contain.

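As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing how capacity() behaves; the buffer sizes are arbitrary:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class CapacityDemo {
    public static void main(String[] args) {
        // Allocate a buffer with an initial capacity of 16 bytes.
        ByteBuf buf = Unpooled.buffer(16);
        System.out.println(buf.capacity());       // 16
        System.out.println(buf.readableBytes());  // 0 - nothing written yet

        // capacity() does not change as data is written...
        buf.writeInt(42);
        System.out.println(buf.capacity());       // still 16

        // ...until a write overflows it and the buffer expands itself.
        buf.writeBytes(new byte[20]);
        System.out.println(buf.capacity() >= 24); // true - capacity has grown

        buf.release();
    }
}
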
Usage

From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java

License:Apache License

@Test
public void simple() throws Exception {
    WriteCache cache = new WriteCache(allocator, 10 * 1024);

    ByteBuf entry1 = allocator.buffer(1024);
    ByteBufUtil.writeUtf8(entry1, "entry-1");
    entry1.writerIndex(entry1.capacity());

    assertTrue(cache.isEmpty());
    assertEquals(0, cache.count());
    assertEquals(0, cache.size());

    cache.put(1, 1, entry1);

    assertFalse(cache.isEmpty());
    assertEquals(1, cache.count());
    assertEquals(entry1.readableBytes(), cache.size());

    assertEquals(entry1, cache.get(1, 1));
    assertNull(cache.get(1, 2));
    assertNull(cache.get(2, 1));

    assertEquals(entry1, cache.getLastEntry(1));
    assertNull(cache.getLastEntry(2));

    cache.clear();

    assertTrue(cache.isEmpty());
    assertEquals(0, cache.count());
    assertEquals(0, cache.size());

    entry1.release();
    cache.close();
}
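
Note the idiom entry1.writerIndex(entry1.capacity()) above: it marks every byte of the buffer as written, so readableBytes() equals capacity() and the size assertions line up. A minimal sketch of the idiom, assuming allocator is any ByteBufAllocator:

ByteBuf entry = allocator.buffer(1024);  // initial capacity 1024
entry.writerIndex(entry.capacity());     // declare all 1024 bytes written
// entry.readableBytes() == entry.capacity() == 1024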

From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java

License:Apache License

@Test
public void cacheFull() throws Exception {
    int cacheSize = 10 * 1024;
    int entrySize = 1024;
    int entriesCount = cacheSize / entrySize;

    WriteCache cache = new WriteCache(allocator, cacheSize);

    ByteBuf entry = allocator.buffer(entrySize);
    entry.writerIndex(entry.capacity());

    for (int i = 0; i < entriesCount; i++) {
        assertTrue(cache.put(1, i, entry));
    }

    assertFalse(cache.put(1, 11, entry));

    assertFalse(cache.isEmpty());
    assertEquals(entriesCount, cache.count());
    assertEquals(cacheSize, cache.size());

    AtomicInteger findCount = new AtomicInteger(0);
    cache.forEach((ledgerId, entryId, data) -> {
        findCount.incrementAndGet();
    });

    assertEquals(entriesCount, findCount.get());

    cache.deleteLedger(1);

    findCount.set(0);
    cache.forEach((ledgerId, entryId, data) -> {
        findCount.incrementAndGet();
    });

    assertEquals(0, findCount.get());

    entry.release();
    cache.close();
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java

License:Apache License

@Test
public void testMultipleSegments() {
    // Create cache with max size 1Mb and each segment is 16Kb
    WriteCache cache = new WriteCache(allocator, 1024 * 1024, 16 * 1024);

    ByteBuf entry = Unpooled.buffer(1024);
    entry.writerIndex(entry.capacity());

    for (int i = 0; i < 48; i++) {
        cache.put(1, i, entry);
    }

    assertEquals(48, cache.count());
    assertEquals(48 * 1024, cache.size());

    cache.close();
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java

License:Apache License

@Test
public void testMultipleWriters() throws Exception {
    // Create cache with max size 10Mb and each segment is 16Kb
    WriteCache cache = new WriteCache(allocator, 10 * 1024 * 1024, 16 * 1024);

    ExecutorService executor = Executors.newCachedThreadPool();

    int numThreads = 10;
    int entriesPerThread = 10 * 1024 / numThreads;

    CyclicBarrier barrier = new CyclicBarrier(numThreads);
    CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        int ledgerId = i;

        executor.submit(() -> {
            try {
                barrier.await();
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(ie);
            } catch (BrokenBarrierException e) {
                throw new RuntimeException(e);
            }

            ByteBuf entry = Unpooled.buffer(1024);
            entry.writerIndex(entry.capacity());

            for (int entryId = 0; entryId < entriesPerThread; entryId++) {
                assertTrue(cache.put(ledgerId, entryId, entry));
            }

            latch.countDown();
        });
    }

    // Wait for all tasks to be completed
    latch.await();

    // assertEquals(numThreads * entriesPerThread, cache.count());
    assertEquals(cache.count() * 1024, cache.size());

    // Verify entries by iterating over write cache
    AtomicLong currentLedgerId = new AtomicLong(0);
    AtomicLong currentEntryId = new AtomicLong(0);

    cache.forEach((ledgerId, entryId, entry) -> {
        assertEquals(currentLedgerId.get(), ledgerId);
        assertEquals(currentEntryId.get(), entryId);

        if (currentEntryId.incrementAndGet() == entriesPerThread) {
            currentLedgerId.incrementAndGet();
            currentEntryId.set(0);
        }
    });

    cache.close();
    executor.shutdown();
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java

License:Apache License

@Test
public void testLedgerDeletion() {
    WriteCache cache = new WriteCache(allocator, 1024 * 1024, 16 * 1024);

    ByteBuf entry = Unpooled.buffer(1024);
    entry.writerIndex(entry.capacity());

    for (long ledgerId = 0; ledgerId < 10; ledgerId++) {
        for (int entryId = 0; entryId < 10; entryId++) {
            cache.put(ledgerId, entryId, entry);
        }
    }

    assertEquals(100, cache.count());
    assertEquals(100 * 1024, cache.size());

    cache.deleteLedger(5);

    // Entries are not immediately deleted, just ignored on scan
    assertEquals(100, cache.count());
    assertEquals(100 * 1024, cache.size());

    // Verify entries by iterating over write cache
    AtomicLong currentLedgerId = new AtomicLong(0);
    AtomicLong currentEntryId = new AtomicLong(0);

    cache.forEach((ledgerId, entryId, e) -> {
        assertEquals(currentLedgerId.get(), ledgerId);
        assertEquals(currentEntryId.get(), entryId);

        if (currentEntryId.incrementAndGet() == 10) {
            currentLedgerId.incrementAndGet();
            currentEntryId.set(0);

            if (currentLedgerId.get() == 5) {
                // Ledger 5 was deleted
                currentLedgerId.incrementAndGet();
            }
        }
    });

    cache.close();
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlockAwareSegmentInputStreamTest.java

License:Apache License

@Test
public void testHaveEndPadding() throws Exception {
    int ledgerId = 1;
    int entrySize = 8;
    int lac = 160;
    ReadHandle readHandle = new MockReadHandle(ledgerId, entrySize, lac);

    // set block size slightly bigger than the (header + entries) size, leaving end padding.
    int blockSize = 3148 + 5;
    BlockAwareSegmentInputStreamImpl inputStream = new BlockAwareSegmentInputStreamImpl(readHandle, 0,
            blockSize);
    int expectedEntryCount = (blockSize - DataBlockHeaderImpl.getDataStartOffset()) / (entrySize + 4 + 8);

    // verify get methods
    assertEquals(inputStream.getLedger(), readHandle);
    assertEquals(inputStream.getStartEntryId(), 0);
    assertEquals(inputStream.getBlockSize(), blockSize);

    // verify read inputStream
    // 1. read the header (128 bytes)
    byte[] headerB = new byte[DataBlockHeaderImpl.getDataStartOffset()];
    ByteStreams.readFully(inputStream, headerB);
    DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB));
    assertEquals(headerRead.getBlockLength(), blockSize);
    assertEquals(headerRead.getFirstEntryId(), 0);

    byte[] entryData = new byte[entrySize];
    Arrays.fill(entryData, (byte) 0xB); // 0xB is MockLedgerEntry.blockPadding

    // 2. read ledger entries: expectedEntryCount entries of (4 + 8 + entrySize) bytes each
    IntStream.range(0, expectedEntryCount).forEach(i -> {
        try {
            byte[] lengthBuf = new byte[4];
            byte[] entryIdBuf = new byte[8];
            byte[] content = new byte[entrySize];
            inputStream.read(lengthBuf);
            inputStream.read(entryIdBuf);
            inputStream.read(content);

            assertEquals(entrySize, Ints.fromByteArray(lengthBuf));
            assertEquals(i, Longs.fromByteArray(entryIdBuf));
            assertArrayEquals(entryData, content);
        } catch (Exception e) {
            fail("meet exception", e);
        }
    });

    // 3. read padding
    int left = blockSize - DataBlockHeaderImpl.getDataStartOffset() - expectedEntryCount * (entrySize + 4 + 8);
    assertEquals(left, 5);
    byte[] padding = new byte[left];
    inputStream.read(padding);
    ByteBuf paddingBuf = Unpooled.wrappedBuffer(padding);
    IntStream.range(0, paddingBuf.capacity() / 4).forEach(
            i -> assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD)));

    // 4. reach end.
    assertEquals(inputStream.read(), -1);

    assertEquals(inputStream.getBlockEntryCount(), expectedEntryCount);
    assertEquals(inputStream.getBlockEntryBytesCount(), entrySize * expectedEntryCount);
    assertEquals(inputStream.getEndEntryId(), expectedEntryCount - 1);

    inputStream.close();
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlockAwareSegmentInputStreamTest.java

License:Apache License

@Test
public void testNoEntryPutIn() throws Exception {
    // Simulate the first entry being over the block size budget, so it shouldn't be added.
    // With 2 entries, each bigger than the block size, no entry should be added to the block.
    int ledgerId = 1;
    int entrySize = 1000;
    int lac = 1;
    ReadHandle readHandle = new MockReadHandle(ledgerId, entrySize, lac);

    // set a block size too small to hold even one entry
    int blockSize = DataBlockHeaderImpl.getDataStartOffset() + entrySize;
    BlockAwareSegmentInputStreamImpl inputStream = new BlockAwareSegmentInputStreamImpl(readHandle, 0,
            blockSize);
    int expectedEntryCount = 0;

    // verify get methods
    assertEquals(inputStream.getLedger(), readHandle);
    assertEquals(inputStream.getStartEntryId(), 0);
    assertEquals(inputStream.getBlockSize(), blockSize);

    // verify read inputStream
    // 1. read the header (128 bytes)
    byte[] headerB = new byte[DataBlockHeaderImpl.getDataStartOffset()];
    ByteStreams.readFully(inputStream, headerB);
    DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB));
    assertEquals(headerRead.getBlockLength(), blockSize);
    assertEquals(headerRead.getFirstEntryId(), 0);

    // 2. since no entry was put in, only padding follows the header.
    byte[] padding = new byte[blockSize - DataBlockHeaderImpl.getDataStartOffset()];
    inputStream.read(padding);
    ByteBuf paddingBuf = Unpooled.wrappedBuffer(padding);
    IntStream.range(0, paddingBuf.capacity() / 4).forEach(
            i -> assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD)));

    // 3. reach end.
    assertEquals(inputStream.read(), -1);

    assertEquals(inputStream.getBlockEntryCount(), 0);
    assertEquals(inputStream.getBlockEntryBytesCount(), 0);
    assertEquals(inputStream.getEndEntryId(), -1);

    inputStream.close();
}

From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.BlockAwareSegmentInputStreamTest.java

License:Apache License

@Test
public void testPaddingOnLastBlock() throws Exception {
    int ledgerId = 1;
    int entrySize = 1000;
    int lac = 0;
    ReadHandle readHandle = new MockReadHandle(ledgerId, entrySize, lac);

    // set block size big enough to hold exactly one entry, leaving padding at the end
    int blockSize = DataBlockHeaderImpl.getDataStartOffset() + entrySize * 2;
    BlockAwareSegmentInputStreamImpl inputStream = new BlockAwareSegmentInputStreamImpl(readHandle, 0,
            blockSize);
    int expectedEntryCount = 1;

    // verify get methods
    assertEquals(inputStream.getLedger(), readHandle);
    assertEquals(inputStream.getStartEntryId(), 0);
    assertEquals(inputStream.getBlockSize(), blockSize);

    // verify read inputStream
    // 1. read the header (128 bytes)
    byte[] headerB = new byte[DataBlockHeaderImpl.getDataStartOffset()];
    ByteStreams.readFully(inputStream, headerB);
    DataBlockHeader headerRead = DataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB));
    assertEquals(headerRead.getBlockLength(), blockSize);
    assertEquals(headerRead.getFirstEntryId(), 0);

    // 2. There should be a single entry
    byte[] entryData = new byte[entrySize];
    Arrays.fill(entryData, (byte) 0xB); // 0xB is MockLedgerEntry.blockPadding

    IntStream.range(0, expectedEntryCount).forEach(i -> {
        try {
            byte[] lengthBuf = new byte[4];
            byte[] entryIdBuf = new byte[8];
            byte[] content = new byte[entrySize];
            inputStream.read(lengthBuf);
            inputStream.read(entryIdBuf);
            inputStream.read(content);

            assertEquals(entrySize, Ints.fromByteArray(lengthBuf));
            assertEquals(i, Longs.fromByteArray(entryIdBuf));
            assertArrayEquals(entryData, content);
        } catch (Exception e) {
            fail("meet exception", e);
        }
    });

    // 3. Then padding
    int consumedBytes = DataBlockHeaderImpl.getDataStartOffset()
            + expectedEntryCount * (entrySize + BlockAwareSegmentInputStreamImpl.ENTRY_HEADER_SIZE);
    byte[] padding = new byte[blockSize - consumedBytes];
    inputStream.read(padding);
    ByteBuf paddingBuf = Unpooled.wrappedBuffer(padding);
    IntStream.range(0, paddingBuf.capacity() / 4).forEach(
            i -> assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD)));

    // 4. reach end.
    assertEquals(inputStream.read(), -1);

    assertEquals(inputStream.getBlockEntryCount(), 1);
    assertEquals(inputStream.getBlockEntryBytesCount(), entrySize);
    assertEquals(inputStream.getEndEntryId(), 0);

    inputStream.close();
}

From source file:org.apache.bookkeeper.proto.BookieProtoEncoding.java

License:Apache License

private static ByteBuf serializeProtobuf(MessageLite msg, ByteBufAllocator allocator) {
    int size = msg.getSerializedSize();
    ByteBuf buf = allocator.heapBuffer(size, size);

    try {
        msg.writeTo(CodedOutputStream.newInstance(buf.array(), buf.arrayOffset() + buf.writerIndex(), size));
    } catch (IOException e) {
        // This is in-memory serialization, should not fail
        throw new RuntimeException(e);
    }

    // Advance writer idx
    buf.writerIndex(buf.capacity());
    return buf;
}
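
Here capacity() is used to expose the serialized bytes without a copy: the buffer is allocated with both initial and maximum capacity equal to getSerializedSize(), the protobuf writer fills exactly that many bytes of the backing array, and writerIndex(buf.capacity()) then marks the whole buffer readable. A hedged usage sketch, where msg is any com.google.protobuf.MessageLite and alloc any ByteBufAllocator (both assumptions, not from the source):

ByteBuf serialized = serializeProtobuf(msg, alloc);
// fully written: readableBytes() == capacity() == msg.getSerializedSize()
serialized.release();  // release once the bytes have been consumed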

From source file:org.apache.bookkeeper.util.ByteBufListTest.java

License:Apache License

@Test
public void testSingle() throws Exception {
    ByteBuf b1 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b1.writerIndex(b1.capacity());
    ByteBufList buf = ByteBufList.get(b1);

    assertEquals(1, buf.size());
    assertEquals(128, buf.readableBytes());
    assertEquals(b1, buf.getBuffer(0));

    assertEquals(buf.refCnt(), 1);
    assertEquals(b1.refCnt(), 1);

    buf.release();

    assertEquals(buf.refCnt(), 0);
    assertEquals(b1.refCnt(), 0);
}