Usage examples for io.netty.buffer.UnpooledByteBufAllocator.DEFAULT
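All of the examples on this page come from Apache BookKeeper, which passes UnpooledByteBufAllocator.DEFAULT wherever Netty's ByteBufAllocator abstraction is required. Before the project examples, here is a minimal, self-contained sketch of the field itself; the class name, buffer size, and payload below are illustrative and not taken from any of the examples.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.UnpooledByteBufAllocator;

import java.nio.charset.StandardCharsets;

public class UnpooledAllocatorExample {
    public static void main(String[] args) {
        // The shared, unpooled allocator instance provided by Netty.
        UnpooledByteBufAllocator alloc = UnpooledByteBufAllocator.DEFAULT;

        // Allocate a heap buffer with an initial capacity of 256 bytes.
        ByteBuf buf = alloc.heapBuffer(256);
        try {
            buf.writeBytes("hello".getBytes(StandardCharsets.UTF_8));
            System.out.println("readable bytes: " + buf.readableBytes());
        } finally {
            // Unpooled buffers are still reference-counted; release when done.
            buf.release();
        }
    }
}

Because DEFAULT is a shared instance, it can be passed freely between components without any lifecycle management of the allocator itself, which is why the BookKeeper code below simply hands it to storage, logger, and digest components.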
From source file: org.apache.bookkeeper.bookie.Bookie.java
License: Apache License
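This helper mounts a LedgerStorage instance offline (for the shell and other read-only tooling), passing UnpooledByteBufAllocator.DEFAULT as the allocator when initializing the storage.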
/**
 * Initialize a LedgerStorage instance without checkpointing, for use within the shell
 * and other read-only users. ledgerStorage must not have already been initialized.
 *
 * <p>The caller is responsible for disposing of the ledgerStorage object.
 *
 * @param conf Bookie config.
 * @param ledgerStorage Instance to initialize.
 * @return Passed ledgerStorage instance
 * @throws IOException
 */
public static LedgerStorage mountLedgerStorageOffline(ServerConfiguration conf, LedgerStorage ledgerStorage)
        throws IOException {
    StatsLogger statsLogger = NullStatsLogger.INSTANCE;
    DiskChecker diskChecker = new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold());

    LedgerDirsManager ledgerDirsManager = createLedgerDirsManager(conf, diskChecker,
            statsLogger.scope(LD_LEDGER_SCOPE));
    LedgerDirsManager indexDirsManager = createIndexDirsManager(conf, diskChecker,
            statsLogger.scope(LD_INDEX_SCOPE), ledgerDirsManager);

    if (null == ledgerStorage) {
        ledgerStorage = buildLedgerStorage(conf);
    }

    CheckpointSource checkpointSource = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            return Checkpoint.MAX;
        }

        @Override
        public void checkpointComplete(Checkpoint checkpoint, boolean compact) throws IOException {
        }
    };

    Checkpointer checkpointer = Checkpointer.NULL;

    ledgerStorage.initialize(conf, null, ledgerDirsManager, indexDirsManager, null, checkpointSource,
            checkpointer, statsLogger, UnpooledByteBufAllocator.DEFAULT);

    return ledgerStorage;
}
From source file: org.apache.bookkeeper.bookie.BufferedChannelTest.java
License: Apache License
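This test drives a BufferedChannel built on UnpooledByteBufAllocator.DEFAULT over a temporary file, then checks how many bytes remain unpersisted under different flush/force-write combinations.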
public void testBufferedChannel(int byteBufLength, int numOfWrites, int unpersistedBytesBound, boolean flush,
        boolean shouldForceWrite) throws Exception {
    File newLogFile = File.createTempFile("test", "log");
    newLogFile.deleteOnExit();
    FileChannel fileChannel = new RandomAccessFile(newLogFile, "rw").getChannel();

    BufferedChannel logChannel = new BufferedChannel(UnpooledByteBufAllocator.DEFAULT, fileChannel,
            INTERNAL_BUFFER_WRITE_CAPACITY, INTERNAL_BUFFER_READ_CAPACITY, unpersistedBytesBound);

    ByteBuf dataBuf = generateEntry(byteBufLength);
    dataBuf.markReaderIndex();
    dataBuf.markWriterIndex();

    for (int i = 0; i < numOfWrites; i++) {
        logChannel.write(dataBuf);
        dataBuf.resetReaderIndex();
        dataBuf.resetWriterIndex();
    }

    if (flush && shouldForceWrite) {
        logChannel.flushAndForceWrite(false);
    } else if (flush) {
        logChannel.flush();
    } else if (shouldForceWrite) {
        logChannel.forceWrite(false);
    }

    int expectedNumOfUnpersistedBytes = 0;

    if (flush && shouldForceWrite) {
        /*
         * If the flush call is made with shouldForceWrite,
         * then expectedNumOfUnpersistedBytes should be zero.
         */
        expectedNumOfUnpersistedBytes = 0;
    } else if (!flush && shouldForceWrite) {
        /*
         * If flush is not called, the internal write buffer is not flushed.
         * However, while adding entries to the BufferedChannel, if the
         * writeBuffer reaches its capacity it calls the flush method itself
         * and the data gets added to the file buffer. So even though we do
         * not call flush explicitly, it gets called implicitly whenever the
         * writeBuffer reaches its capacity.
         */
        expectedNumOfUnpersistedBytes = (byteBufLength * numOfWrites) % INTERNAL_BUFFER_WRITE_CAPACITY;
    } else {
        expectedNumOfUnpersistedBytes = (byteBufLength * numOfWrites) - unpersistedBytesBound;
    }

    Assert.assertEquals("Unpersisted bytes", expectedNumOfUnpersistedBytes, logChannel.getUnpersistedBytes());
    logChannel.close();
    fileChannel.close();
}
From source file: org.apache.bookkeeper.bookie.CompactionTest.java
License: Apache License
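Here the allocator is passed to InterleavedLedgerStorage.initialize; the test then forces garbage collection and asserts that both major and minor compaction ran.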
@Test
public void testForceGarbageCollection() throws Exception {
    ServerConfiguration conf = newServerConfiguration();
    conf.setGcWaitTime(60000);
    conf.setMinorCompactionInterval(120000);
    conf.setMajorCompactionInterval(240000);
    LedgerDirsManager dirManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    CheckpointSource cp = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            // Do nothing.
            return null;
        }

        @Override
        public void checkpointComplete(Checkpoint checkPoint, boolean compact) throws IOException {
            // Do nothing.
        }
    };
    for (File journalDir : conf.getJournalDirs()) {
        Bookie.checkDirectoryStructure(journalDir);
    }
    for (File dir : dirManager.getAllLedgerDirs()) {
        Bookie.checkDirectoryStructure(dir);
    }
    runFunctionWithLedgerManagerFactory(conf, lmf -> {
        try (LedgerManager lm = lmf.newLedgerManager()) {
            InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
            storage.initialize(conf, lm, dirManager, dirManager, null, cp, Checkpointer.NULL,
                    NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);
            storage.start();
            long startTime = System.currentTimeMillis();
            storage.gcThread.enableForceGC();
            storage.gcThread.triggerGC().get(); // major
            storage.gcThread.triggerGC().get(); // minor

            // Minor and major compaction times should be larger than when we started this test.
            assertTrue("Minor or major compaction did not trigger even on forcing.",
                    storage.gcThread.lastMajorCompactionTime > startTime
                            && storage.gcThread.lastMinorCompactionTime > startTime);
            storage.shutdown();
        } catch (Exception e) {
            throw new UncheckedExecutionException(e.getMessage(), e);
        }
        return null;
    });
}
From source file: org.apache.bookkeeper.bookie.CompactionTest.java
License: Apache License
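Besides initializing storage, this test passes UnpooledByteBufAllocator.DEFAULT to DigestManager.instantiate so that compacted entries can be re-read and their digests verified.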
@Test
public void testCompactionPersistence() throws Exception {
    /*
     * For this test scenario we are assuming that there will be only one
     * bookie in the cluster.
     */
    assertEquals("Numbers of Bookies in this cluster", 1, numBookies);
    /*
     * This test is for validating EntryLogCompactor, so make sure
     * TransactionalCompaction is not enabled.
     */
    assertFalse("Bookies must be using EntryLogCompactor", baseConf.getUseTransactionalCompaction());

    // prepare data
    LedgerHandle[] lhs = prepareData(3, true);

    for (LedgerHandle lh : lhs) {
        lh.close();
    }

    // disable minor compaction
    baseConf.setMinorCompactionThreshold(0.0f);
    baseConf.setGcWaitTime(60000);
    baseConf.setMinorCompactionInterval(120000);
    baseConf.setMajorCompactionInterval(240000);

    // restart bookies
    restartBookies(baseConf);

    long lastMinorCompactionTime = getGCThread().lastMinorCompactionTime;
    long lastMajorCompactionTime = getGCThread().lastMajorCompactionTime;
    assertTrue(getGCThread().enableMajorCompaction);
    assertFalse(getGCThread().enableMinorCompaction);

    // remove ledger1 and ledger3
    bkc.deleteLedger(lhs[0].getId());
    bkc.deleteLedger(lhs[2].getId());

    LOG.info("Finished deleting the ledgers contains most entries.");
    getGCThread().enableForceGC();
    getGCThread().triggerGC().get();

    // after garbage collection, minor compaction should not be executed
    assertTrue(getGCThread().lastMinorCompactionTime > lastMinorCompactionTime);
    assertTrue(getGCThread().lastMajorCompactionTime > lastMajorCompactionTime);

    // entry logs ([0,1,2].log) should be compacted
    for (File ledgerDirectory : tmpDirs) {
        assertFalse("Found entry log file ([0,1,2].log that should have not been compacted in ledgerDirectory: "
                + ledgerDirectory, TestUtils.hasLogFiles(ledgerDirectory, true, 0, 1, 2));
    }

    // Even though the entry log files are removed, we can still access entries for
    // ledger2, since those entries have been compacted to a new entry log.
    long ledgerId = lhs[1].getId();
    long lastAddConfirmed = lhs[1].getLastAddConfirmed();
    verifyLedger(ledgerId, 0, lastAddConfirmed);

    /*
     * There is only one bookie in the cluster, so we should be able to read
     * entries from this bookie.
     */
    ServerConfiguration bookieServerConfig = bs.get(0).getBookie().conf;
    ServerConfiguration newBookieConf = new ServerConfiguration(bookieServerConfig);
    /*
     * By reusing bookieServerConfig and setting metadataServiceUri to null,
     * we can create/start a new Bookie instance using the same data
     * (journal/ledger/index) of the existing BookieServer for our testing
     * purpose.
     */
    newBookieConf.setMetadataServiceUri(null);
    Bookie newbookie = new Bookie(newBookieConf);

    DigestManager digestManager = DigestManager.instantiate(ledgerId, passwdBytes,
            BookKeeper.DigestType.toProtoDigestType(digestType), UnpooledByteBufAllocator.DEFAULT,
            baseClientConf.getUseV2WireProtocol());

    for (long entryId = 0; entryId <= lastAddConfirmed; entryId++) {
        ByteBuf readEntryBufWithChecksum = newbookie.readEntry(ledgerId, entryId);
        ByteBuf readEntryBuf = digestManager.verifyDigestAndReturnData(entryId, readEntryBufWithChecksum);
        byte[] readEntryBytes = new byte[readEntryBuf.readableBytes()];
        readEntryBuf.readBytes(readEntryBytes);
        assertEquals(msg, new String(readEntryBytes));
    }
}
From source file: org.apache.bookkeeper.bookie.CompactionTest.java
License: Apache License
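This test re-initializes InterleavedLedgerStorage (always with UnpooledByteBufAllocator.DEFAULT) across simulated restarts to check that compaction never updates the index before the compacted entry log is persisted.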
/**
 * Test that compaction doesn't add to the index without having persisted the
 * entry log first. This is needed because compaction doesn't go through the journal.
 * {@see https://issues.apache.org/jira/browse/BOOKKEEPER-530}
 * {@see https://issues.apache.org/jira/browse/BOOKKEEPER-664}
 */
@Test
public void testCompactionSafety() throws Exception {
    tearDown(); // I don't want the test infrastructure
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    final Set<Long> ledgers = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
    LedgerManager manager = getLedgerManager(ledgers);

    File tmpDir = createTempDir("bkTest", ".dir");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });

    conf.setEntryLogSizeLimit(EntryLogger.LOGFILE_HEADER_SIZE + 3 * (4 + ENTRY_SIZE));
    conf.setGcWaitTime(100);
    conf.setMinorCompactionThreshold(0.7f);
    conf.setMajorCompactionThreshold(0.0f);
    conf.setMinorCompactionInterval(1);
    conf.setMajorCompactionInterval(10);
    conf.setPageLimit(1);

    CheckpointSource checkpointSource = new CheckpointSource() {
        AtomicInteger idGen = new AtomicInteger(0);

        class MyCheckpoint implements CheckpointSource.Checkpoint {
            int id = idGen.incrementAndGet();

            @Override
            public int compareTo(CheckpointSource.Checkpoint o) {
                if (o == CheckpointSource.Checkpoint.MAX) {
                    return -1;
                } else if (o == CheckpointSource.Checkpoint.MIN) {
                    return 1;
                }
                return id - ((MyCheckpoint) o).id;
            }
        }

        @Override
        public CheckpointSource.Checkpoint newCheckpoint() {
            return new MyCheckpoint();
        }

        public void checkpointComplete(CheckpointSource.Checkpoint checkpoint, boolean compact)
                throws IOException {
        }
    };

    final byte[] key = "foobar".getBytes();
    File log0 = new File(curDir, "0.log");
    LedgerDirsManager dirs = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));

    assertFalse("Log shouldnt exist", log0.exists());
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);
    ledgers.add(1L);
    ledgers.add(2L);
    ledgers.add(3L);
    storage.setMasterKey(1, key);
    storage.setMasterKey(2, key);
    storage.setMasterKey(3, key);
    storage.addEntry(genEntry(1, 1, ENTRY_SIZE));
    storage.addEntry(genEntry(2, 1, ENTRY_SIZE));
    storage.addEntry(genEntry(2, 2, ENTRY_SIZE));
    storage.addEntry(genEntry(3, 2, ENTRY_SIZE));
    storage.flush();
    storage.shutdown();

    assertTrue("Log should exist", log0.exists());
    ledgers.remove(2L);
    ledgers.remove(3L);

    storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);
    storage.start();
    for (int i = 0; i < 10; i++) {
        if (!log0.exists()) {
            break;
        }
        Thread.sleep(1000);
        storage.entryLogger.flush(); // simulate sync thread
    }
    assertFalse("Log shouldnt exist", log0.exists());

    ledgers.add(4L);
    storage.setMasterKey(4, key);
    storage.addEntry(genEntry(4, 1, ENTRY_SIZE)); // force ledger 1 page to flush

    storage.shutdown();

    storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);
    storage.getEntry(1, 1); // entry should exist
}
From source file: org.apache.bookkeeper.bookie.CompactionTest.java
License: Apache License
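A minimal storage setup using the default allocator, verifying that compaction completes silently when there are no entry logs to compact.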
/**
 * Test that compaction should execute silently when there are no entry logs
 * to compact. {@see https://issues.apache.org/jira/browse/BOOKKEEPER-700}
 */
@Test
public void testWhenNoLogsToCompact() throws Exception {
    tearDown(); // I don't want the test infrastructure
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    File tmpDir = createTempDir("bkTest", ".dir");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });

    LedgerDirsManager dirs = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    final Set<Long> ledgers = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());
    LedgerManager manager = getLedgerManager(ledgers);
    CheckpointSource checkpointSource = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            return null;
        }

        @Override
        public void checkpointComplete(Checkpoint checkpoint, boolean compact) throws IOException {
        }
    };
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    double threshold = 0.1;
    // shouldn't throw an exception
    storage.gcThread.doCompactEntryLogs(threshold);
}
From source file: org.apache.bookkeeper.bookie.CompactionTest.java
License: Apache License
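This test fills an InterleavedLedgerStorage (initialized with the default allocator) with entries across many ledgers, then checks the entry-log metadata extraction path with byte-based throttling enabled.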
/**
 * Test the optimized extractMetaFromEntryLogs method, which avoids excess memory usage.
 */
public void testExtractMetaFromEntryLogs() throws Exception {
    // Always run this test with throttling by bytes enabled.
    baseConf.setIsThrottleByBytes(true);
    // restart bookies
    restartBookies(baseConf);

    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    File tmpDir = createTempDir("bkTest", ".dir");
    File curDir = Bookie.getCurrentDirectory(tmpDir);
    Bookie.checkDirectoryStructure(curDir);
    conf.setLedgerDirNames(new String[] { tmpDir.toString() });

    LedgerDirsManager dirs = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    final Set<Long> ledgers = Collections.newSetFromMap(new ConcurrentHashMap<Long, Boolean>());

    LedgerManager manager = getLedgerManager(ledgers);
    CheckpointSource checkpointSource = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            return null;
        }

        @Override
        public void checkpointComplete(Checkpoint checkpoint, boolean compact) throws IOException {
        }
    };
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    for (long ledger = 0; ledger <= 10; ledger++) {
        ledgers.add(ledger);
        for (int entry = 1; entry <= 50; entry++) {
            try {
                storage.addEntry(genEntry(ledger, entry, ENTRY_SIZE));
            } catch (IOException e) {
                // ignore exception on failure to add entry.
            }
        }
    }

    storage.flush();
    storage.shutdown();

    storage = new InterleavedLedgerStorage();
    storage.initialize(conf, manager, dirs, dirs, null, checkpointSource, Checkpointer.NULL,
            NullStatsLogger.INSTANCE, UnpooledByteBufAllocator.DEFAULT);

    long startingEntriesCount = storage.gcThread.entryLogger.getLeastUnflushedLogId()
            - storage.gcThread.scannedLogId;
    LOG.info("The old Log Entry count is: " + startingEntriesCount);

    Map<Long, EntryLogMetadata> entryLogMetaData = new HashMap<>();
    long finalEntriesCount = storage.gcThread.entryLogger.getLeastUnflushedLogId()
            - storage.gcThread.scannedLogId;
    LOG.info("The latest Log Entry count is: " + finalEntriesCount);

    assertTrue("The GC did not clean up entries...", startingEntriesCount != finalEntriesCount);
    assertTrue("Entries Count is zero", finalEntriesCount == 0);
}
From source file: org.apache.bookkeeper.bookie.CompactionTest.java
License: Apache License
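Here the allocator is used when initializing storage under a TestStatsProvider, so the test can assert on compaction counters while major and minor GC are suspended and resumed.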
private void testSuspendGarbageCollection(ServerConfiguration conf, LedgerManager lm) throws Exception {
    LedgerDirsManager dirManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    CheckpointSource cp = new CheckpointSource() {
        @Override
        public Checkpoint newCheckpoint() {
            // Do nothing.
            return null;
        }

        @Override
        public void checkpointComplete(Checkpoint checkPoint, boolean compact) throws IOException {
            // Do nothing.
        }
    };
    for (File journalDir : conf.getJournalDirs()) {
        Bookie.checkDirectoryStructure(journalDir);
    }
    for (File dir : dirManager.getAllLedgerDirs()) {
        Bookie.checkDirectoryStructure(dir);
    }
    InterleavedLedgerStorage storage = new InterleavedLedgerStorage();
    TestStatsProvider stats = new TestStatsProvider();
    storage.initialize(conf, lm, dirManager, dirManager, null, cp, Checkpointer.NULL,
            stats.getStatsLogger("storage"), UnpooledByteBufAllocator.DEFAULT);
    storage.start();

    int majorCompactions = stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get().intValue();
    int minorCompactions = stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("Major compaction should have happened",
            stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() > majorCompactions);

    // test suspend major GC.
    storage.gcThread.suspendMajorGC();
    Thread.sleep(1000);
    long startTime = System.currentTimeMillis();
    majorCompactions = stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("major compaction triggered while suspended",
            storage.gcThread.lastMajorCompactionTime < startTime);
    assertTrue("major compaction triggered while suspended",
            stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() == majorCompactions);

    // minor compaction should still run while major GC is suspended.
    Thread.sleep(conf.getMinorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("Minor compaction should have happened",
            stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() > minorCompactions);

    // test suspend minor GC.
    storage.gcThread.suspendMinorGC();
    Thread.sleep(1000);
    startTime = System.currentTimeMillis();
    minorCompactions = stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get().intValue();
    Thread.sleep(conf.getMajorCompactionInterval() * 1000 + conf.getGcWaitTime());
    assertTrue("minor compaction triggered while suspended",
            storage.gcThread.lastMinorCompactionTime < startTime);
    assertTrue("minor compaction triggered while suspended",
            stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() == minorCompactions);

    // test resume
    storage.gcThread.resumeMinorGC();
    storage.gcThread.resumeMajorGC();

    Thread.sleep((conf.getMajorCompactionInterval() + conf.getMinorCompactionInterval()) * 1000
            + (conf.getGcWaitTime() * 2));
    assertTrue("Major compaction should have happened",
            stats.getCounter("storage.gc." + MAJOR_COMPACTION_COUNT).get() > majorCompactions);
    assertTrue("Minor compaction should have happened",
            stats.getCounter("storage.gc." + MINOR_COMPACTION_COUNT).get() > minorCompactions);
    assertTrue("gcThreadRuntime should be non-zero",
            stats.getOpStatsLogger("storage.gc." + THREAD_RUNTIME).getSuccessCount() > 0);
}
From source file: org.apache.bookkeeper.bookie.CreateNewLogTest.java
License: Apache License
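EntryLogger itself takes the allocator directly in its constructor; this test uses it to verify the entry-log-per-ledger cache metrics as ledgers are added and evicted.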
@Test
public void testEntryLogManagerMetrics() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    TestStatsProvider statsProvider = new TestStatsProvider();
    TestStatsLogger statsLogger = statsProvider.getStatsLogger(BookKeeperServerStats.ENTRYLOGGER_SCOPE);
    int maximumNumberOfActiveEntryLogs = 3;
    int entryLogPerLedgerCounterLimitsMultFactor = 2;

    // Creating a new configuration with a number of ledger directories.
    conf.setLedgerDirNames(ledgerDirs);
    // pre-allocation is enabled
    conf.setEntryLogFilePreAllocationEnabled(true);
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setMaximumNumberOfActiveEntryLogs(maximumNumberOfActiveEntryLogs);
    conf.setEntryLogPerLedgerCounterLimitsMultFactor(entryLogPerLedgerCounterLimitsMultFactor);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger entryLogger = new EntryLogger(conf, ledgerDirsManager, null, statsLogger,
            UnpooledByteBufAllocator.DEFAULT);
    EntryLogManagerForEntryLogPerLedger entrylogManager = (EntryLogManagerForEntryLogPerLedger) entryLogger
            .getEntryLogManager();
    // set the same-thread executor for the entryLoggerAllocator's allocatorExecutor
    setSameThreadExecutorForEntryLoggerAllocator(entryLogger.getEntryLoggerAllocator());

    Counter numOfWriteActiveLedgers = statsLogger.getCounter(BookKeeperServerStats.NUM_OF_WRITE_ACTIVE_LEDGERS);
    Counter numOfWriteLedgersRemovedCacheExpiry = statsLogger
            .getCounter(BookKeeperServerStats.NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY);
    Counter numOfWriteLedgersRemovedCacheMaxSize = statsLogger
            .getCounter(BookKeeperServerStats.NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE);
    Counter numLedgersHavingMultipleEntrylogs = statsLogger
            .getCounter(BookKeeperServerStats.NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS);
    TestOpStatsLogger entryLogsPerLedger = (TestOpStatsLogger) statsLogger
            .getOpStatsLogger(BookKeeperServerStats.ENTRYLOGS_PER_LEDGER);

    // initially all the counters should be 0
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", 0, numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY", 0,
            numOfWriteLedgersRemovedCacheExpiry.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 0,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 0,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());

    // lid-1 : 3 entrylogs, lid-2 : 2 entrylogs, lid-3 : 1 entrylog
    int numOfEntrylogsForLedger1 = 3;
    createNewLogs(entrylogManager, 1L, numOfEntrylogsForLedger1);
    int numOfEntrylogsForLedger2 = 2;
    createNewLogs(entrylogManager, 2L, numOfEntrylogsForLedger2);
    createNewLogs(entrylogManager, 3L, 1);

    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", 3, numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY", 0,
            numOfWriteLedgersRemovedCacheExpiry.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 0,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 2,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());

    /*
     * Since the entrylog for lid-4 is created and the entrylogmap cache size
     * is 3, lid-1 will be removed from the entrylogmap cache.
     */
    createNewLogs(entrylogManager, 4L, 1);
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", maximumNumberOfActiveEntryLogs,
            numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 1,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());

    /*
     * Entrylogs for lid-5, lid-6, lid-7 are created. Since
     * maximumNumberOfActiveeEntryLogs = 3 and
     * entryLogPerLedgerCounterLimitsMultFactor = 2, when the entrylog for
     * lid-7 is created, the count of lid-1 should be removed from the counter map.
     */
    createNewLogs(entrylogManager, 5L, 1);
    createNewLogs(entrylogManager, 6L, 1);
    createNewLogs(entrylogManager, 7L, 1);
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", maximumNumberOfActiveEntryLogs,
            numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 4,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 1, entryLogsPerLedger.getSuccessCount());
    Assert.assertTrue("ENTRYLOGS_PER_LEDGER average value",
            Double.compare(numOfEntrylogsForLedger1, entryLogsPerLedger.getSuccessAverage()) == 0);

    /*
     * The entrylog for the new lid-8 is created, so one more entry from the
     * counter map should be removed.
     */
    createNewLogs(entrylogManager, 8L, 4);
    Assert.assertEquals("NUM_OF_WRITE_ACTIVE_LEDGERS", maximumNumberOfActiveEntryLogs,
            numOfWriteActiveLedgers.get().intValue());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 5,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 3,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 2, entryLogsPerLedger.getSuccessCount());
    Assert.assertTrue("ENTRYLOGS_PER_LEDGER average value",
            Double.compare((numOfEntrylogsForLedger1 + numOfEntrylogsForLedger2) / 2.0,
                    entryLogsPerLedger.getSuccessAverage()) == 0);

    /*
     * lid-3 is still in the counter map. So when new entrylogs are created for
     * lid-3, no new entry should be removed from the counter map, and
     * entryLogsPerLedger.getSuccessCount() should keep its old value. Also,
     * since lid-3 is still in the counter map, these 4 new entrylogs should be
     * added to the previous value 1, and hence the EntryLogsPerLedger for
     * ledger 3L should be updated to 5.
     */
    createNewLogs(entrylogManager, 3L, 4);
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_MAXSIZE", 6,
            numOfWriteLedgersRemovedCacheMaxSize.get().intValue());
    Assert.assertEquals("NUM_LEDGERS_HAVING_MULTIPLE_ENTRYLOGS", 4,
            numLedgersHavingMultipleEntrylogs.get().intValue());
    Assert.assertEquals("Numofentrylogs for ledger: 3l", 5,
            entrylogManager.entryLogsPerLedgerCounter.getCounterMap().get(3L).intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 2, entryLogsPerLedger.getSuccessCount());
}
From source file: org.apache.bookkeeper.bookie.CreateNewLogTest.java
License: Apache License
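The same EntryLogger setup as above, this time verifying the cache-expiry side of the entry-log-per-ledger metrics.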
@Test
public void testEntryLogManagerMetricsFromExpiryAspect() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    TestStatsProvider statsProvider = new TestStatsProvider();
    TestStatsLogger statsLogger = statsProvider.getStatsLogger(BookKeeperServerStats.ENTRYLOGGER_SCOPE);

    int entrylogMapAccessExpiryTimeInSeconds = 1;
    int entryLogPerLedgerCounterLimitsMultFactor = 2;

    // Creating a new configuration with a number of ledger directories.
    conf.setLedgerDirNames(ledgerDirs);
    // pre-allocation is enabled
    conf.setEntryLogFilePreAllocationEnabled(true);
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setEntrylogMapAccessExpiryTimeInSeconds(entrylogMapAccessExpiryTimeInSeconds);
    conf.setEntryLogPerLedgerCounterLimitsMultFactor(entryLogPerLedgerCounterLimitsMultFactor);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger entryLogger = new EntryLogger(conf, ledgerDirsManager, null, statsLogger,
            UnpooledByteBufAllocator.DEFAULT);
    EntryLogManagerForEntryLogPerLedger entrylogManager = (EntryLogManagerForEntryLogPerLedger) entryLogger
            .getEntryLogManager();
    // set the same-thread executor for the entryLoggerAllocator's allocatorExecutor
    setSameThreadExecutorForEntryLoggerAllocator(entryLogger.getEntryLoggerAllocator());

    Counter numOfWriteLedgersRemovedCacheExpiry = statsLogger
            .getCounter(BookKeeperServerStats.NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY);
    TestOpStatsLogger entryLogsPerLedger = (TestOpStatsLogger) statsLogger
            .getOpStatsLogger(BookKeeperServerStats.ENTRYLOGS_PER_LEDGER);

    int numOfEntrylogsForLedger1 = 3;
    createNewLogs(entrylogManager, 1L, numOfEntrylogsForLedger1);
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY", 0,
            numOfWriteLedgersRemovedCacheExpiry.get().intValue());

    Thread.sleep(entrylogMapAccessExpiryTimeInSeconds * 1000 + 100);
    entrylogManager.doEntryLogMapCleanup();
    entrylogManager.entryLogsPerLedgerCounter.doCounterMapCleanup();
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY", 1,
            numOfWriteLedgersRemovedCacheExpiry.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 0, entryLogsPerLedger.getSuccessCount());

    Thread.sleep(entrylogMapAccessExpiryTimeInSeconds * 1000 + 100);
    entrylogManager.doEntryLogMapCleanup();
    entrylogManager.entryLogsPerLedgerCounter.doCounterMapCleanup();
    Assert.assertEquals("NUM_OF_WRITE_LEDGERS_REMOVED_CACHE_EXPIRY", 1,
            numOfWriteLedgersRemovedCacheExpiry.get().intValue());
    Assert.assertEquals("ENTRYLOGS_PER_LEDGER SuccessCount", 1, entryLogsPerLedger.getSuccessCount());
    Assert.assertTrue("ENTRYLOGS_PER_LEDGER average value",
            Double.compare(numOfEntrylogsForLedger1, entryLogsPerLedger.getSuccessAverage()) == 0);
}