List of usage examples for io.netty.buffer ByteBuf writeLong
public abstract ByteBuf writeLong(long value);
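Before the project examples below, here is a minimal, self-contained sketch (a hypothetical example, not taken from any of the source files listed) showing the basic contract: writeLong appends an 8-byte big-endian long at the current writerIndex and advances it by 8.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class WriteLongExample {
    public static void main(String[] args) {
        // Allocate an unpooled buffer with an initial capacity of 16 bytes
        ByteBuf buf = Unpooled.buffer(16);

        // Each writeLong call appends 8 bytes (big-endian) and advances writerIndex by 8
        buf.writeLong(1L);  // e.g. a ledger id
        buf.writeLong(42L); // e.g. an entry id

        System.out.println(buf.readableBytes()); // 16

        // readLong consumes the values in write order
        System.out.println(buf.readLong()); // 1
        System.out.println(buf.readLong()); // 42

        buf.release(); // release the buffer once it is no longer needed
    }
}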
From source file:org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorageTest.java
License:Apache License
@Test
public void testEntriesOutOfOrderWithFlush() throws Exception {
    storage.setMasterKey(1, "key".getBytes());

    ByteBuf entry2 = Unpooled.buffer(1024);
    entry2.writeLong(1); // ledger id
    entry2.writeLong(2); // entry id
    entry2.writeBytes("entry-2".getBytes());

    storage.addEntry(entry2);

    try {
        storage.getEntry(1, 1);
        fail("Entry doesn't exist");
    } catch (NoEntryException e) {
        // Ok, entry doesn't exist
    }

    ByteBuf res = storage.getEntry(1, 2);
    assertEquals(entry2, res);
    res.release();

    storage.flush();

    try {
        storage.getEntry(1, 1);
        fail("Entry doesn't exist");
    } catch (NoEntryException e) {
        // Ok, entry doesn't exist
    }

    res = storage.getEntry(1, 2);
    assertEquals(entry2, res);
    res.release();

    ByteBuf entry1 = Unpooled.buffer(1024);
    entry1.writeLong(1); // ledger id
    entry1.writeLong(1); // entry id
    entry1.writeBytes("entry-1".getBytes());

    storage.addEntry(entry1);

    res = storage.getEntry(1, 1);
    assertEquals(entry1, res);
    res.release();

    res = storage.getEntry(1, 2);
    assertEquals(entry2, res);
    res.release();

    storage.flush();

    res = storage.getEntry(1, 1);
    assertEquals(entry1, res);
    res.release();

    res = storage.getEntry(1, 2);
    assertEquals(entry2, res);
    res.release();
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorageTest.java
License:Apache License
@Test
public void testAddEntriesAfterDelete() throws Exception {
    storage.setMasterKey(1, "key".getBytes());

    ByteBuf entry0 = Unpooled.buffer(1024);
    entry0.writeLong(1); // ledger id
    entry0.writeLong(0); // entry id
    entry0.writeBytes("entry-0".getBytes());

    ByteBuf entry1 = Unpooled.buffer(1024);
    entry1.writeLong(1); // ledger id
    entry1.writeLong(1); // entry id
    entry1.writeBytes("entry-1".getBytes());

    storage.addEntry(entry0);
    storage.addEntry(entry1);

    storage.flush();

    storage.deleteLedger(1);

    storage.setMasterKey(1, "key".getBytes());

    entry0 = Unpooled.buffer(1024);
    entry0.writeLong(1); // ledger id
    entry0.writeLong(0); // entry id
    entry0.writeBytes("entry-0".getBytes());

    entry1 = Unpooled.buffer(1024);
    entry1.writeLong(1); // ledger id
    entry1.writeLong(1); // entry id
    entry1.writeBytes("entry-1".getBytes());

    storage.addEntry(entry0);
    storage.addEntry(entry1);

    assertEquals(entry0, storage.getEntry(1, 0));
    assertEquals(entry1, storage.getEntry(1, 1));

    storage.flush();
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorageWriteCacheTest.java
License:Apache License
@Test
public void writeCacheFull() throws Exception {
    storage.setMasterKey(4, "key".getBytes());
    assertEquals(false, storage.isFenced(4));
    assertEquals(true, storage.ledgerExists(4));
    assertEquals("key", new String(storage.readMasterKey(4)));

    // Add enough entries to fill the 1st write cache
    for (int i = 0; i < 5; i++) {
        ByteBuf entry = Unpooled.buffer(100 * 1024 + 2 * 8);
        entry.writeLong(4); // ledger id
        entry.writeLong(i); // entry id
        entry.writeZero(100 * 1024);
        storage.addEntry(entry);
    }

    for (int i = 0; i < 5; i++) {
        ByteBuf entry = Unpooled.buffer(100 * 1024 + 2 * 8);
        entry.writeLong(4); // ledger id
        entry.writeLong(5 + i); // entry id
        entry.writeZero(100 * 1024);
        storage.addEntry(entry);
    }

    // Next add should fail for cache full
    ByteBuf entry = Unpooled.buffer(100 * 1024 + 2 * 8);
    entry.writeLong(4); // ledger id
    entry.writeLong(22); // entry id
    entry.writeZero(100 * 1024);

    try {
        storage.addEntry(entry);
        fail("Should have thrown exception");
    } catch (OperationRejectedException e) {
        // Expected
    }
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.LocationsIndexRebuildTest.java
License:Apache License
@Test
public void test() throws Exception {
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);

    System.out.println(tmpDir);

    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    conf.setLedgerStorageClass(DbLedgerStorage.class.getName());
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    DbLedgerStorage ledgerStorage = new DbLedgerStorage();
    ledgerStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    // Insert some ledgers & entries in the storage
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        ledgerStorage.setMasterKey(ledgerId, ("ledger-" + ledgerId).getBytes());
        ledgerStorage.setFenced(ledgerId);

        for (long entryId = 0; entryId < 100; entryId++) {
            ByteBuf entry = Unpooled.buffer(128);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            ledgerStorage.addEntry(entry);
        }
    }

    ledgerStorage.flush();
    ledgerStorage.shutdown();

    // Rebuild index through the tool
    BookieShell shell = new BookieShell();
    shell.setConf(conf);
    int res = shell.run(new String[] { "rebuild-db-ledger-locations-index" });
    Assert.assertEquals(0, res);

    // Verify that db index has the same entries
    ledgerStorage = new DbLedgerStorage();
    ledgerStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    Set<Long> ledgers = Sets.newTreeSet(ledgerStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)), ledgers);

    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        Assert.assertEquals(true, ledgerStorage.isFenced(ledgerId));
        Assert.assertEquals("ledger-" + ledgerId, new String(ledgerStorage.readMasterKey(ledgerId)));

        ByteBuf lastEntry = ledgerStorage.getLastEntry(ledgerId);
        assertEquals(ledgerId, lastEntry.readLong());
        long lastEntryId = lastEntry.readLong();
        assertEquals(99, lastEntryId);

        for (long entryId = 0; entryId < 100; entryId++) {
            ByteBuf entry = Unpooled.buffer(1024);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            ByteBuf result = ledgerStorage.getEntry(ledgerId, entryId);
            Assert.assertEquals(entry, result);
        }
    }

    ledgerStorage.shutdown();
    FileUtils.forceDelete(tmpDir);
}
From source file:org.apache.bookkeeper.mledger.offload.jcloud.impl.OffloadIndexBlockImpl.java
License:Apache License
/**
 * Get the content of the index block as InputStream.
 * Read out in format:
 *   | index_magic_header | index_block_len | data_object_len | data_header_len |
 *   | index_entry_count | segment_metadata_len | segment metadata | index entries... |
 */
@Override
public OffloadIndexBlock.IndexInputStream toStream() throws IOException {
    int indexEntryCount = this.indexEntries.size();
    byte[] ledgerMetadataByte = buildLedgerMetadataFormat(this.segmentMetadata);
    int segmentMetadataLength = ledgerMetadataByte.length;

    int indexBlockLength = 4 /* magic header */
            + 4 /* index block length */
            + 8 /* data object length */
            + 8 /* data header length */
            + 4 /* index entry count */
            + 4 /* segment metadata length */
            + segmentMetadataLength
            + indexEntryCount * (8 + 4 + 8); /* messageEntryId + blockPartId + blockOffset */

    ByteBuf out = PooledByteBufAllocator.DEFAULT.buffer(indexBlockLength, indexBlockLength);

    out.writeInt(INDEX_MAGIC_WORD)
            .writeInt(indexBlockLength)
            .writeLong(dataObjectLength)
            .writeLong(dataHeaderLength)
            .writeInt(indexEntryCount)
            .writeInt(segmentMetadataLength);

    // write metadata
    out.writeBytes(ledgerMetadataByte);

    // write entries
    this.indexEntries.entrySet().forEach(entry ->
            out.writeLong(entry.getValue().getEntryId())
                    .writeInt(entry.getValue().getPartId())
                    .writeLong(entry.getValue().getOffset()));

    return new OffloadIndexBlock.IndexInputStream(new ByteBufInputStream(out, true), indexBlockLength);
}
From source file:org.apache.bookkeeper.proto.checksum.CRC32DigestManager.java
License:Apache License
@Override
void populateValueAndReset(ByteBuf buf) {
    buf.writeLong(crc.get().getValueAndReset());
}
From source file:org.apache.bookkeeper.proto.checksum.DigestManager.java
License:Apache License
/**
 * Computes the digest for an entry and puts the bytes together for sending.
 *
 * @param entryId
 * @param lastAddConfirmed
 * @param length
 * @param data
 * @return
 */
public ByteBufList computeDigestAndPackageForSending(long entryId, long lastAddConfirmed, long length,
        ByteBuf data) {
    ByteBuf headersBuffer;
    if (this.useV2Protocol) {
        headersBuffer = allocator.buffer(METADATA_LENGTH + macCodeLength);
    } else {
        headersBuffer = Unpooled.buffer(METADATA_LENGTH + macCodeLength);
    }
    headersBuffer.writeLong(ledgerId);
    headersBuffer.writeLong(entryId);
    headersBuffer.writeLong(lastAddConfirmed);
    headersBuffer.writeLong(length);

    update(headersBuffer);
    update(data);
    populateValueAndReset(headersBuffer);

    return ByteBufList.get(headersBuffer, data);
}
From source file:org.apache.bookkeeper.proto.checksum.DigestManager.java
License:Apache License
/**
 * Computes the digest for writeLac for sending.
 *
 * @param lac
 * @return
 */
public ByteBufList computeDigestAndPackageForSendingLac(long lac) {
    ByteBuf headersBuffer;
    if (this.useV2Protocol) {
        headersBuffer = allocator.buffer(LAC_METADATA_LENGTH + macCodeLength);
    } else {
        headersBuffer = Unpooled.buffer(LAC_METADATA_LENGTH + macCodeLength);
    }
    headersBuffer.writeLong(ledgerId);
    headersBuffer.writeLong(lac);

    update(headersBuffer);
    populateValueAndReset(headersBuffer);

    return ByteBufList.get(headersBuffer);
}
From source file:org.apache.bookkeeper.statelib.impl.kv.KVUtils.java
License:Apache License
static ByteBuf serialize(ByteBuf valBuf, long revision) {
    int serializedSize = valBuf.readableBytes() + Long.BYTES;
    ByteBuf buffer = PooledByteBufAllocator.DEFAULT.heapBuffer(serializedSize);
    buffer.writeLong(revision);
    buffer.writeBytes(valBuf);
    return buffer;
}
From source file:org.apache.bookkeeper.statelib.impl.kv.KVUtils.java
License:Apache License
static ByteBuf serialize(byte[] value, long revision) {
    int serializedSize = value.length + Long.BYTES;
    ByteBuf buffer = PooledByteBufAllocator.DEFAULT.heapBuffer(serializedSize);
    buffer.writeLong(revision);
    buffer.writeBytes(value);
    return buffer;
}
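Both serialize helpers above produce the layout | revision (8 bytes) | value bytes... |. For context, a hypothetical reader for that layout might look like the sketch below; these method names are assumptions for illustration and are not part of the KVUtils code shown here.

// Hypothetical counterpart to the serialize helpers above (method names are assumed)
static long readRevision(ByteBuf serialized) {
    return serialized.readLong(); // consumes the 8-byte revision prefix
}

static byte[] readValue(ByteBuf serialized) {
    byte[] value = new byte[serialized.readableBytes()];
    serialized.readBytes(value); // copies the remaining bytes after the revision
    return value;
}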