List of usage examples for io.netty.buffer.UnpooledByteBufAllocator.DEFAULT
UnpooledByteBufAllocator.DEFAULT
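UnpooledByteBufAllocator.DEFAULT is Netty's shared allocator instance that creates unpooled buffers, so every allocation gets its own backing memory rather than drawing from a buffer pool. Before the project-specific examples below, here is a minimal, self-contained sketch of the allocator used in isolation. It relies only on standard Netty API (ByteBufAllocator.buffer, ByteBuf.writeBytes, ByteBuf.toString, ByteBuf.release); the class name is purely illustrative and the snippet is not taken from any of the source files listed.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.util.CharsetUtil;

public class UnpooledAllocatorExample {
    public static void main(String[] args) {
        // DEFAULT is a shared, thread-safe allocator instance; no pooling is involved.
        ByteBuf buf = UnpooledByteBufAllocator.DEFAULT.buffer(64);
        buf.writeBytes("hello allocator".getBytes(CharsetUtil.UTF_8));
        System.out.println(buf.toString(CharsetUtil.UTF_8));
        // Unpooled or not, the buffer is reference-counted and should be released when done.
        buf.release();
    }
}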
From source file:org.apache.bookkeeper.bookie.EntryLogTest.java
License:Apache License
private EntryLogger.BufferedLogChannel createDummyBufferedLogChannel(EntryLogger entryLogger, long logid,
        ServerConfiguration servConf) throws IOException {
    File tmpFile = File.createTempFile("entrylog", logid + "");
    tmpFile.deleteOnExit();
    FileChannel fc = new RandomAccessFile(tmpFile, "rw").getChannel();
    EntryLogger.BufferedLogChannel logChannel = new BufferedLogChannel(UnpooledByteBufAllocator.DEFAULT, fc,
            10, 10, logid, tmpFile, servConf.getFlushIntervalInBytes());
    return logChannel;
}
From source file:org.apache.bookkeeper.bookie.IndexPersistenceMgrTest.java
License:Apache License
void validateFileInfo(IndexPersistenceMgr indexPersistenceMgr, long ledgerId, int headerVersion)
        throws IOException, GeneralSecurityException {
    BookKeeper.DigestType digestType = BookKeeper.DigestType.CRC32;
    boolean getUseV2WireProtocol = true;
    preCreateFileInfoForLedger(ledgerId, headerVersion);
    DigestManager digestManager = DigestManager.instantiate(ledgerId, masterKey,
            BookKeeper.DigestType.toProtoDigestType(digestType), UnpooledByteBufAllocator.DEFAULT,
            getUseV2WireProtocol);

    CachedFileInfo fileInfo = indexPersistenceMgr.getFileInfo(ledgerId, masterKey);
    fileInfo.readHeader();
    assertEquals("ExplicitLac should be null", null, fileInfo.getExplicitLac());
    assertEquals("Header Version should match with precreated fileinfos headerversion", headerVersion,
            fileInfo.headerVersion);
    assertTrue("Masterkey should match with precreated fileinfos masterkey",
            Arrays.equals(masterKey, fileInfo.masterKey));

    long explicitLac = 22;
    ByteBuf explicitLacByteBuf = digestManager.computeDigestAndPackageForSendingLac(explicitLac).getBuffer(0);
    explicitLacByteBuf.markReaderIndex();
    indexPersistenceMgr.setExplicitLac(ledgerId, explicitLacByteBuf);
    explicitLacByteBuf.resetReaderIndex();
    assertEquals("explicitLac ByteBuf contents should match", 0,
            ByteBufUtil.compare(explicitLacByteBuf, indexPersistenceMgr.getExplicitLac(ledgerId)));

    /*
     * release fileInfo until it is marked dead and closed, so that
     * its contents are persisted.
     */
    while (fileInfo.refCount.get() != FileInfoBackingCache.DEAD_REF) {
        fileInfo.release();
    }

    /*
     * reopen the fileinfo and readHeader, so that whatever was persisted
     * would be read.
     */
    fileInfo = indexPersistenceMgr.getFileInfo(ledgerId, masterKey);
    fileInfo.readHeader();
    assertEquals("Header Version should match with precreated fileinfos headerversion even after reopening",
            headerVersion, fileInfo.headerVersion);
    assertTrue("Masterkey should match with precreated fileinfos masterkey",
            Arrays.equals(masterKey, fileInfo.masterKey));

    if (headerVersion == FileInfo.V0) {
        assertEquals("Since it is V0 Header, explicitLac will not be persisted and should be null after reopening",
                null, indexPersistenceMgr.getExplicitLac(ledgerId));
    } else {
        explicitLacByteBuf.resetReaderIndex();
        assertEquals("Since it is V1 Header, explicitLac will be persisted and should not be null after reopening",
                0, ByteBufUtil.compare(explicitLacByteBuf, indexPersistenceMgr.getExplicitLac(ledgerId)));
    }
}
From source file:org.apache.bookkeeper.bookie.Journal.java
License:Apache License
public Journal(int journalIndex, File journalDirectory, ServerConfiguration conf,
        LedgerDirsManager ledgerDirsManager) {
    this(journalIndex, journalDirectory, conf, ledgerDirsManager, NullStatsLogger.INSTANCE,
            UnpooledByteBufAllocator.DEFAULT);
}
From source file:org.apache.bookkeeper.bookie.LedgerStorageTest.java
License:Apache License
public void testExplicitLacWriteToJournal(int journalFormatVersionToWrite, int fileInfoFormatVersionToWrite)
        throws Exception {
    ServerConfiguration bookieServerConfig = bsConfs.get(0);
    bookieServerConfig.setJournalFormatVersionToWrite(journalFormatVersionToWrite);
    bookieServerConfig.setFileInfoFormatVersionToWrite(fileInfoFormatVersionToWrite);
    restartBookies(bookieServerConfig);

    ClientConfiguration confWithExplicitLAC = new ClientConfiguration();
    confWithExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    /*
     * enable explicitLacFlush by setting a non-zero value for
     * explictLacInterval
     */
    int explictLacInterval = 100;
    BookKeeper.DigestType digestType = BookKeeper.DigestType.CRC32;
    byte[] passwdBytes = "testPasswd".getBytes();
    confWithExplicitLAC.setExplictLacInterval(explictLacInterval);

    BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithExplicitLAC);

    LedgerHandle wlh = bkcWithExplicitLAC.createLedger(1, 1, 1, digestType, passwdBytes);
    long ledgerId = wlh.getId();
    int numOfEntries = 5;
    for (int i = 0; i < numOfEntries; i++) {
        wlh.addEntry(("foobar" + i).getBytes());
    }

    LedgerHandle rlh = bkcWithExplicitLAC.openLedgerNoRecovery(ledgerId, digestType, passwdBytes);

    assertEquals("LAC of rlh", (long) numOfEntries - 2, rlh.getLastAddConfirmed());
    assertEquals("Read explicit LAC of rlh", (long) numOfEntries - 2, rlh.readExplicitLastConfirmed());

    /*
     * we need to wait for at least 2 explicitlacintervals, since in
     * writehandle for the first call lh.getExplicitLastAddConfirmed() will
     * be < lh.getPiggyBackedLastAddConfirmed(), so it won't make an explicit
     * writelac in the first run
     */
    long readExplicitLastConfirmed = TestUtils.waitUntilExplicitLacUpdated(rlh, numOfEntries - 1);
    assertEquals("Read explicit LAC of rlh after wait for explicitlacflush", (numOfEntries - 1),
            readExplicitLastConfirmed);

    ServerConfiguration newBookieConf = new ServerConfiguration(bsConfs.get(0));
    /*
     * by reusing bookieServerConfig and setting metadataServiceUri to null
     * we can create/start a new Bookie instance using the same data
     * (journal/ledger/index) of the existing BookieServer for our testing
     * purpose.
     */
    newBookieConf.setMetadataServiceUri(null);
    Bookie newbookie = new Bookie(newBookieConf);
    /*
     * since 'newbookie' uses the same data as the original Bookie, it should be
     * able to read the journal of the original bookie and hence the explicitLac
     * buf entry written to the Journal in the original bookie.
     */
    newbookie.readJournal();
    ByteBuf explicitLacBuf = newbookie.getExplicitLac(ledgerId);

    if ((journalFormatVersionToWrite >= 6) && (fileInfoFormatVersionToWrite >= 1)) {
        DigestManager digestManager = DigestManager.instantiate(ledgerId, passwdBytes,
                BookKeeper.DigestType.toProtoDigestType(digestType), UnpooledByteBufAllocator.DEFAULT,
                confWithExplicitLAC.getUseV2WireProtocol());
        long explicitLacPersistedInJournal = digestManager.verifyDigestAndReturnLac(explicitLacBuf);
        assertEquals("explicitLac persisted in journal", (numOfEntries - 1), explicitLacPersistedInJournal);
    } else {
        assertEquals("explicitLac is not expected to be persisted, so it should be null", null, explicitLacBuf);
    }
    bkcWithExplicitLAC.close();
}
From source file:org.apache.bookkeeper.bookie.LedgerStorageTest.java
License:Apache License
public void testExplicitLacWriteToFileInfo(int journalFormatVersionToWrite, int fileInfoFormatVersionToWrite)
        throws Exception {
    ServerConfiguration bookieServerConfig = bsConfs.get(0);
    bookieServerConfig.setJournalFormatVersionToWrite(journalFormatVersionToWrite);
    bookieServerConfig.setFileInfoFormatVersionToWrite(fileInfoFormatVersionToWrite);
    restartBookies(bookieServerConfig);

    ClientConfiguration confWithExplicitLAC = new ClientConfiguration();
    confWithExplicitLAC.setMetadataServiceUri(zkUtil.getMetadataServiceUri());
    /*
     * enable explicitLacFlush by setting a non-zero value for
     * explictLacInterval
     */
    int explictLacInterval = 100;
    BookKeeper.DigestType digestType = BookKeeper.DigestType.CRC32;
    byte[] passwdBytes = "testPasswd".getBytes();
    confWithExplicitLAC.setExplictLacInterval(explictLacInterval);

    BookKeeper bkcWithExplicitLAC = new BookKeeper(confWithExplicitLAC);

    LedgerHandle wlh = bkcWithExplicitLAC.createLedger(1, 1, 1, digestType, passwdBytes);
    long ledgerId = wlh.getId();
    int numOfEntries = 5;
    for (int i = 0; i < numOfEntries; i++) {
        wlh.addEntry(("foobar" + i).getBytes());
    }

    LedgerHandle rlh = bkcWithExplicitLAC.openLedgerNoRecovery(ledgerId, digestType, passwdBytes);

    assertEquals("LAC of rlh", (long) numOfEntries - 2, rlh.getLastAddConfirmed());
    assertEquals("Read explicit LAC of rlh", (long) numOfEntries - 2, rlh.readExplicitLastConfirmed());

    /*
     * we need to wait for at least 2 explicitlacintervals, since in
     * writehandle for the first call lh.getExplicitLastAddConfirmed() will
     * be < lh.getPiggyBackedLastAddConfirmed(), so it won't make an explicit
     * writelac in the first run
     */
    long readExplicitLastConfirmed = TestUtils.waitUntilExplicitLacUpdated(rlh, numOfEntries - 1);
    assertEquals("Read explicit LAC of rlh after wait for explicitlacflush", (numOfEntries - 1),
            readExplicitLastConfirmed);

    /*
     * flush ledgerStorage so that the header of fileinfo is flushed.
     */
    bs.get(0).getBookie().ledgerStorage.flush();

    ReadOnlyFileInfo fileInfo = getFileInfo(ledgerId,
            Bookie.getCurrentDirectories(bsConfs.get(0).getLedgerDirs()));
    fileInfo.readHeader();
    ByteBuf explicitLacBufReadFromFileInfo = fileInfo.getExplicitLac();

    if ((journalFormatVersionToWrite >= 6) && (fileInfoFormatVersionToWrite >= 1)) {
        DigestManager digestManager = DigestManager.instantiate(ledgerId, passwdBytes,
                BookKeeper.DigestType.toProtoDigestType(digestType), UnpooledByteBufAllocator.DEFAULT,
                confWithExplicitLAC.getUseV2WireProtocol());
        long explicitLacReadFromFileInfo = digestManager.verifyDigestAndReturnLac(explicitLacBufReadFromFileInfo);
        assertEquals("explicitLac persisted in FileInfo", (numOfEntries - 1), explicitLacReadFromFileInfo);
    } else {
        assertEquals("explicitLac is not expected to be persisted, so it should be null", null,
                explicitLacBufReadFromFileInfo);
    }
    bkcWithExplicitLAC.close();
}
From source file:org.apache.bookkeeper.bookie.SortedLedgerStorageCheckpointTest.java
License:Apache License
@Before
@Override
public void setUp() throws Exception {
    super.setUp();

    // initial checkpoint

    this.storage = new SortedLedgerStorage();
    this.checkpointer = new Checkpointer() {
        @Override
        public void startCheckpoint(Checkpoint checkpoint) {
            storage.getScheduler().submit(() -> {
                log.info("Checkpoint the storage at {}", checkpoint);
                try {
                    storage.checkpoint(checkpoint);
                    checkpoints.add(checkpoint);
                } catch (IOException e) {
                    log.error("Failed to checkpoint at {}", checkpoint, e);
                }
            });
        }

        @Override
        public void start() {
            // no-op
        }
    };

    // if the SortedLedgerStorage does not need to change the bookie's state,
    // passing StateManager == null is ok
    this.storage.initialize(conf, mock(LedgerManager.class), ledgerDirsManager, ledgerDirsManager, null,
            checkpointSrc, checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);
}
From source file:org.apache.bookkeeper.bookie.SortedLedgerStorageTest.java
License:Apache License
@Before
public void setUp() throws Exception {
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);

    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    sortedLedgerStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, statsProvider.getStatsLogger(BOOKIE_SCOPE), UnpooledByteBufAllocator.DEFAULT);
}
From source file:org.apache.bookkeeper.bookie.StateManagerTest.java
License:Apache License
@Test
public void testReadOnlyBookieTransitions() throws Exception {
    // ReadOnlyBookie, which uses an overriding StateManager implementation
    File tmpDir = createTempDir("stateManger", "test-readonly");
    final ServerConfiguration readOnlyConf = TestBKConfiguration.newServerConfiguration();
    readOnlyConf.setJournalDirName(tmpDir.getPath()).setLedgerDirNames(new String[] { tmpDir.getPath() })
            .setJournalDirName(tmpDir.toString()).setMetadataServiceUri(zkUtil.getMetadataServiceUri())
            .setForceReadOnlyBookie(true);
    ReadOnlyBookie readOnlyBookie = new ReadOnlyBookie(readOnlyConf, NullStatsLogger.INSTANCE,
            UnpooledByteBufAllocator.DEFAULT);
    readOnlyBookie.start();
    assertTrue(readOnlyBookie.isRunning());
    assertTrue(readOnlyBookie.isReadOnly());

    // the transition has no effect if the bookie started in readOnly mode
    readOnlyBookie.getStateManager().transitionToWritableMode().get();
    assertTrue(readOnlyBookie.isRunning());
    assertTrue(readOnlyBookie.isReadOnly());
    readOnlyBookie.shutdown();
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.ConversionRollbackTest.java
License:Apache License
@Test
public void convertFromDbStorageToInterleaved() throws Exception {
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);

    log.info("Using temp directory: {}", tmpDir);

    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));

    DbLedgerStorage dbStorage = new DbLedgerStorage();
    dbStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource, checkpointer,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    // Insert some ledger & entries in the dbStorage
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        dbStorage.setMasterKey(ledgerId, ("ledger-" + ledgerId).getBytes());
        dbStorage.setFenced(ledgerId);

        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(128);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            dbStorage.addEntry(entry);
        }
    }

    dbStorage.flush();
    dbStorage.shutdown();

    // Run conversion tool
    BookieShell shell = new BookieShell();
    shell.setConf(conf);
    int res = shell.run(new String[] { "convert-to-interleaved-storage" });

    Assert.assertEquals(0, res);

    // Verify that interleaved storage index has the same entries
    InterleavedLedgerStorage interleavedStorage = new InterleavedLedgerStorage();
    interleavedStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    Set<Long> ledgers = Sets.newTreeSet(interleavedStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)), ledgers);

    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        Assert.assertEquals(true, interleavedStorage.isFenced(ledgerId));
        Assert.assertEquals("ledger-" + ledgerId, new String(interleavedStorage.readMasterKey(ledgerId)));

        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(1024);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            ByteBuf result = interleavedStorage.getEntry(ledgerId, entryId);
            Assert.assertEquals(entry, result);
        }
    }

    interleavedStorage.shutdown();
    FileUtils.forceDelete(tmpDir);
}
From source file:org.apache.bookkeeper.bookie.storage.ldb.ConversionTest.java
License:Apache License
@Test
public void test() throws Exception {
    File tmpDir = File.createTempFile("bkTest", ".dir");
    tmpDir.delete();
    tmpDir.mkdir();
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);

    System.out.println(tmpDir);

    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));

    InterleavedLedgerStorage interleavedStorage = new InterleavedLedgerStorage();
    interleavedStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    // Insert some ledger & entries in the interleaved storage
    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        interleavedStorage.setMasterKey(ledgerId, ("ledger-" + ledgerId).getBytes());
        interleavedStorage.setFenced(ledgerId);

        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(128);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            interleavedStorage.addEntry(entry);
        }
    }

    interleavedStorage.flush();
    interleavedStorage.shutdown();

    // Run conversion tool
    BookieShell shell = new BookieShell();
    shell.setConf(conf);
    int res = shell.run(new String[] { "convert-to-db-storage" });

    Assert.assertEquals(0, res);

    // Verify that db index has the same entries
    DbLedgerStorage dbStorage = new DbLedgerStorage();
    dbStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource, checkpointer,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    interleavedStorage = new InterleavedLedgerStorage();
    interleavedStorage.initialize(conf, null, ledgerDirsManager, ledgerDirsManager, null, checkpointSource,
            checkpointer, NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    Set<Long> ledgers = Sets.newTreeSet(dbStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(Lists.newArrayList(0L, 1L, 2L, 3L, 4L)), ledgers);

    ledgers = Sets.newTreeSet(interleavedStorage.getActiveLedgersInRange(0, Long.MAX_VALUE));
    Assert.assertEquals(Sets.newTreeSet(), ledgers);

    for (long ledgerId = 0; ledgerId < 5; ledgerId++) {
        Assert.assertEquals(true, dbStorage.isFenced(ledgerId));
        Assert.assertEquals("ledger-" + ledgerId, new String(dbStorage.readMasterKey(ledgerId)));

        for (long entryId = 0; entryId < 10000; entryId++) {
            ByteBuf entry = Unpooled.buffer(1024);
            entry.writeLong(ledgerId);
            entry.writeLong(entryId);
            entry.writeBytes(("entry-" + entryId).getBytes());

            ByteBuf result = dbStorage.getEntry(ledgerId, entryId);
            Assert.assertEquals(entry, result);
            result.release();

            try {
                interleavedStorage.getEntry(ledgerId, entryId);
                Assert.fail("entry should not exist");
            } catch (NoLedgerException e) {
                // Ok
            }
        }
    }

    interleavedStorage.shutdown();
    dbStorage.shutdown();
    FileUtils.forceDelete(tmpDir);
}