List of usage examples for io.netty.buffer.ByteBuf#writerIndex(int)
public abstract ByteBuf writerIndex(int writerIndex);
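writerIndex(int) repositions the buffer's writer index without writing any data; everything between readerIndex and the new writerIndex then counts as readable. Many of the examples below use entry.writerIndex(entry.capacity()) to mark a freshly allocated buffer as fully written. A minimal, self-contained sketch of this behavior (assuming Netty 4.x on the classpath; the class and variable names here are illustrative only):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class WriterIndexExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);       // capacity 16, readerIndex = writerIndex = 0
        System.out.println(buf.readableBytes()); // 0 - nothing is readable yet

        // Moving the writer index makes the first 16 bytes readable,
        // even though no bytes were actually written to the buffer.
        buf.writerIndex(buf.capacity());
        System.out.println(buf.readableBytes()); // 16

        // The new index must satisfy readerIndex <= writerIndex <= capacity,
        // otherwise writerIndex(int) throws IndexOutOfBoundsException.
        buf.release();
    }
}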
From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java
License:Apache License
@Test
public void testMultipleSegments() {
    // Create cache with max size 1Mb and each segment is 16Kb
    WriteCache cache = new WriteCache(allocator, 1024 * 1024, 16 * 1024);

    ByteBuf entry = Unpooled.buffer(1024);
    entry.writerIndex(entry.capacity());

    for (int i = 0; i < 48; i++) {
        cache.put(1, i, entry);
    }

    assertEquals(48, cache.count());
    assertEquals(48 * 1024, cache.size());

    cache.close();
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java
License:Apache License
@Test
public void testMultipleWriters() throws Exception {
    // Create cache with max size 10Mb and each segment is 16Kb
    WriteCache cache = new WriteCache(allocator, 10 * 1024 * 1024, 16 * 1024);

    ExecutorService executor = Executors.newCachedThreadPool();

    int numThreads = 10;
    int entriesPerThread = 10 * 1024 / numThreads;

    CyclicBarrier barrier = new CyclicBarrier(numThreads);
    CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        int ledgerId = i;

        executor.submit(() -> {
            try {
                barrier.await();
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(ie);
            } catch (BrokenBarrierException e) {
                throw new RuntimeException(e);
            }

            ByteBuf entry = Unpooled.buffer(1024);
            entry.writerIndex(entry.capacity());

            for (int entryId = 0; entryId < entriesPerThread; entryId++) {
                assertTrue(cache.put(ledgerId, entryId, entry));
            }

            latch.countDown();
        });
    }

    // Wait for all tasks to be completed
    latch.await();

    // assertEquals(numThreads * entriesPerThread, cache.count());
    assertEquals(cache.count() * 1024, cache.size());

    // Verify entries by iterating over write cache
    AtomicLong currentLedgerId = new AtomicLong(0);
    AtomicLong currentEntryId = new AtomicLong(0);

    cache.forEach((ledgerId, entryId, entry) -> {
        assertEquals(currentLedgerId.get(), ledgerId);
        assertEquals(currentEntryId.get(), entryId);

        if (currentEntryId.incrementAndGet() == entriesPerThread) {
            currentLedgerId.incrementAndGet();
            currentEntryId.set(0);
        }
    });

    cache.close();
    executor.shutdown();
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.WriteCacheTest.java
License:Apache License
@Test
public void testLedgerDeletion() {
    WriteCache cache = new WriteCache(allocator, 1024 * 1024, 16 * 1024);

    ByteBuf entry = Unpooled.buffer(1024);
    entry.writerIndex(entry.capacity());

    for (long ledgerId = 0; ledgerId < 10; ledgerId++) {
        for (int entryId = 0; entryId < 10; entryId++) {
            cache.put(ledgerId, entryId, entry);
        }
    }

    assertEquals(100, cache.count());
    assertEquals(100 * 1024, cache.size());

    cache.deleteLedger(5);

    // Entries are not immediately deleted, just ignored on scan
    assertEquals(100, cache.count());
    assertEquals(100 * 1024, cache.size());

    // Verify entries by iterating over write cache
    AtomicLong currentLedgerId = new AtomicLong(0);
    AtomicLong currentEntryId = new AtomicLong(0);

    cache.forEach((ledgerId, entryId, e) -> {
        assertEquals(currentLedgerId.get(), ledgerId);
        assertEquals(currentEntryId.get(), entryId);

        if (currentEntryId.incrementAndGet() == 10) {
            currentLedgerId.incrementAndGet();
            currentEntryId.set(0);

            if (currentLedgerId.get() == 5) {
                // Ledger 5 was deleted
                currentLedgerId.incrementAndGet();
            }
        }
    });

    cache.close();
}
From source file:org.apache.bookkeeper.statelib.impl.mvcc.MVCCRecordCoder.java
License:Apache License
@Override
public byte[] encode(MVCCRecord record) {
    KeyMeta meta = KeyMeta.newBuilder()
            .setCreateRevision(record.getCreateRev())
            .setModRevision(record.getModRev())
            .setVersion(record.getVersion())
            .setValueType(record.getValueType())
            .build();

    int metaLen = meta.getSerializedSize();
    int valLen = record.getValue().readableBytes();
    int totalLen = Integer.BYTES // meta len
            + metaLen            // meta bytes
            + Integer.BYTES      // val len
            + valLen;            // val bytes

    // NOTE: currently rocksdb jni only supports `byte[]`
    // we can improve this if rocksdb jni supports ByteBuffer or ByteBuf
    byte[] data = new byte[totalLen];
    ByteBuf buf = Unpooled.wrappedBuffer(data);
    // wrappedBuffer() starts with writerIndex at capacity; reset it before writing the length prefix
    buf.writerIndex(0);

    buf.writeInt(metaLen);
    CodedOutputStream out = CodedOutputStream.newInstance(data, Integer.BYTES, metaLen);
    try {
        meta.writeTo(out);
    } catch (IOException e) {
        throw new StateStoreRuntimeException("Failed to serialize key metadata", e);
    }
    buf.writerIndex(buf.writerIndex() + metaLen);

    buf.writeInt(valLen);
    buf.writeBytes(record.getValue().slice());
    buf.release();

    return data;
}
From source file:org.apache.bookkeeper.tools.perf.table.IncrementTask.java
License:Apache License
void incKey(long i) {
    ByteBuf keyBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(flags.keySize);
    getKey(keyBuf, i, keyRange);
    keyBuf.writerIndex(keyBuf.readerIndex() + keyBuf.writableBytes());

    final long startTime = System.nanoTime();
    table.increment(keyBuf, 100).whenComplete((result, cause) -> {
        if (null != semaphore) {
            semaphore.release();
        }
        if (null != cause) {
            log.error("Error at increment key/amount", cause);
        } else {
            long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - startTime);
            writeOpStats.recordOp(latencyMicros);
        }
        keyBuf.release();
    });
}
From source file:org.apache.bookkeeper.tools.perf.table.WriteTask.java
License:Apache License
void writeKey(long i, byte[] valueBytes) {
    final ByteBuf keyBuf = PooledByteBufAllocator.DEFAULT.heapBuffer(flags.keySize);
    getKey(keyBuf, i, keyRange);
    keyBuf.writerIndex(keyBuf.readerIndex() + keyBuf.writableBytes());

    final ByteBuf valBuf = Unpooled.wrappedBuffer(valueBytes);

    final long startTime = System.nanoTime();
    table.put(keyBuf, valBuf).whenComplete((result, cause) -> {
        if (null != semaphore) {
            semaphore.release();
        }
        if (null != cause) {
            log.error("Error at put key/value", cause);
        } else {
            long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - startTime);
            writeOpStats.recordOp(latencyMicros);
        }
        keyBuf.release();
        valBuf.release();
    });
}
From source file:org.apache.bookkeeper.util.ByteBufListTest.java
License:Apache License
@Test
public void testSingle() throws Exception {
    ByteBuf b1 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b1.writerIndex(b1.capacity());

    ByteBufList buf = ByteBufList.get(b1);

    assertEquals(1, buf.size());
    assertEquals(128, buf.readableBytes());
    assertEquals(b1, buf.getBuffer(0));

    assertEquals(buf.refCnt(), 1);
    assertEquals(b1.refCnt(), 1);

    buf.release();

    assertEquals(buf.refCnt(), 0);
    assertEquals(b1.refCnt(), 0);
}
From source file:org.apache.bookkeeper.util.ByteBufListTest.java
License:Apache License
@Test
public void testDouble() throws Exception {
    ByteBuf b1 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b1.writerIndex(b1.capacity());

    ByteBuf b2 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b2.writerIndex(b2.capacity());

    ByteBufList buf = ByteBufList.get(b1, b2);

    assertEquals(2, buf.size());
    assertEquals(256, buf.readableBytes());
    assertEquals(b1, buf.getBuffer(0));
    assertEquals(b2, buf.getBuffer(1));

    assertEquals(buf.refCnt(), 1);
    assertEquals(b1.refCnt(), 1);
    assertEquals(b2.refCnt(), 1);

    buf.release();

    assertEquals(buf.refCnt(), 0);
    assertEquals(b1.refCnt(), 0);
    assertEquals(b2.refCnt(), 0);
}
From source file:org.apache.bookkeeper.util.ByteBufListTest.java
License:Apache License
@Test
public void testClone() throws Exception {
    ByteBuf b1 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b1.writerIndex(b1.capacity());

    ByteBuf b2 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b2.writerIndex(b2.capacity());

    ByteBufList buf = ByteBufList.get(b1, b2);
    ByteBufList clone = ByteBufList.clone(buf);

    assertEquals(2, buf.size());
    assertEquals(256, buf.readableBytes());
    assertEquals(b1, buf.getBuffer(0));
    assertEquals(b2, buf.getBuffer(1));

    assertEquals(2, clone.size());
    assertEquals(256, clone.readableBytes());
    assertEquals(b1, clone.getBuffer(0));
    assertEquals(b2, clone.getBuffer(1));

    assertEquals(buf.refCnt(), 1);
    assertEquals(clone.refCnt(), 1);
    assertEquals(b1.refCnt(), 2);
    assertEquals(b2.refCnt(), 2);

    buf.release();

    assertEquals(buf.refCnt(), 0);
    assertEquals(clone.refCnt(), 1);
    assertEquals(b1.refCnt(), 1);
    assertEquals(b2.refCnt(), 1);

    clone.release();

    assertEquals(buf.refCnt(), 0);
    assertEquals(clone.refCnt(), 0);
    assertEquals(b1.refCnt(), 0);
    assertEquals(b2.refCnt(), 0);
}
From source file:org.apache.bookkeeper.util.ByteBufListTest.java
License:Apache License
@Test
public void testRetain() throws Exception {
    ByteBuf b1 = PooledByteBufAllocator.DEFAULT.heapBuffer(128, 128);
    b1.writerIndex(b1.capacity());

    ByteBufList buf = ByteBufList.get(b1);

    assertEquals(1, buf.size());
    assertEquals(128, buf.readableBytes());
    assertEquals(b1, buf.getBuffer(0));

    assertEquals(buf.refCnt(), 1);
    assertEquals(b1.refCnt(), 1);

    buf.retain();

    assertEquals(buf.refCnt(), 2);
    assertEquals(b1.refCnt(), 1);

    buf.release();

    assertEquals(buf.refCnt(), 1);
    assertEquals(b1.refCnt(), 1);

    buf.release();

    assertEquals(buf.refCnt(), 0);
    assertEquals(b1.refCnt(), 0);
}