List of usage examples for io.netty.buffer.ByteBuf readLong()
public abstract long readLong();
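readLong() reads the next 8 bytes at the current readerIndex as a big-endian long and advances the readerIndex by 8; it throws an IndexOutOfBoundsException if fewer than 8 readable bytes remain. Before the project examples below, here is a minimal, self-contained sketch of the call (not taken from any of the source files that follow; the class name and values are purely illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ReadLongExample {
    public static void main(String[] args) {
        // Illustrative values only.
        long ledgerId = 1L;
        long entryId = 100L;

        ByteBuf buf = Unpooled.buffer(16);
        buf.writeLong(ledgerId);
        buf.writeLong(entryId);

        // Each readLong() consumes 8 bytes starting at the current readerIndex.
        System.out.println("ledgerId = " + buf.readLong());
        System.out.println("entryId  = " + buf.readLong());
        System.out.println("readable bytes left = " + buf.readableBytes()); // 0

        buf.release(); // ByteBuf is reference counted; release it when done
    }
}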
From source file:org.apache.bookkeeper.bookie.BookieJournalTest.java
License:Apache License
/**
 * Test that if the bookie crashes in the middle of writing
 * the actual entry it can recover.
 * In this case the entry will be available, but it will be corrupt.
 * This is ok, as the client will disregard the entry after looking
 * at its checksum.
 */
@Test
public void testTruncatedInEntryJournal() throws Exception {
    File journalDir = createTempDir("bookie", "journal");
    Bookie.checkDirectoryStructure(Bookie.getCurrentDirectory(journalDir));

    File ledgerDir = createTempDir("bookie", "ledger");
    Bookie.checkDirectoryStructure(Bookie.getCurrentDirectory(ledgerDir));

    JournalChannel jc = writeV2Journal(Bookie.getCurrentDirectory(journalDir), 100);
    ByteBuffer zeros = ByteBuffer.allocate(2048);

    jc.fc.position(jc.getBufferedChannel().position() - 0x300);
    jc.fc.write(zeros);
    jc.fc.force(false);

    writeIndexFileForLedger(Bookie.getCurrentDirectory(ledgerDir), 1, "testPasswd".getBytes());

    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setJournalDirName(journalDir.getPath())
        .setLedgerDirNames(new String[] { ledgerDir.getPath() })
        .setMetadataServiceUri(null);

    Bookie b = new Bookie(conf);
    b.readJournal();

    b.readEntry(1, 99); // still able to read last entry, but it's junk

    ByteBuf buf = b.readEntry(1, 100);
    assertEquals("Ledger Id is wrong", buf.readLong(), 1);
    assertEquals("Entry Id is wrong", buf.readLong(), 100);
    assertEquals("Last confirmed is wrong", buf.readLong(), 99);
    assertEquals("Length is wrong", buf.readLong(), 100 * 1024);
    buf.readLong(); // skip checksum

    boolean allX = true;
    for (int i = 0; i < 1024; i++) {
        byte x = buf.readByte();
        allX = allX && x == (byte) 'X';
    }
    assertFalse("Some of buffer should have been zeroed", allX);

    try {
        b.readEntry(1, 101);
        fail("Shouldn't have found entry 101");
    } catch (Bookie.NoEntryException e) {
        // correct behaviour
    }
}
From source file:org.apache.bookkeeper.bookie.BookieShell.java
License:Apache License
/**
 * Format the message into a readable format.
 *
 * @param pos
 *          File offset of the message stored in entry log file
 * @param recBuff
 *          Entry Data
 * @param printMsg
 *          Whether printing the message body
 */
private void formatEntry(long pos, ByteBuf recBuff, boolean printMsg) {
    int entrySize = recBuff.readableBytes();
    long ledgerId = recBuff.readLong();
    long entryId = recBuff.readLong();

    System.out.println("--------- Lid=" + ledgerIdFormatter.formatLedgerId(ledgerId) + ", Eid=" + entryId
            + ", ByteOffset=" + pos + ", EntrySize=" + entrySize + " ---------");
    if (entryId == Bookie.METAENTRY_ID_LEDGER_KEY) {
        int masterKeyLen = recBuff.readInt();
        byte[] masterKey = new byte[masterKeyLen];
        recBuff.readBytes(masterKey);
        System.out.println("Type: META");
        System.out.println("MasterKey: " + bytes2Hex(masterKey));
        System.out.println();
        return;
    }
    if (entryId == Bookie.METAENTRY_ID_FENCE_KEY) {
        System.out.println("Type: META");
        System.out.println("Fenced");
        System.out.println();
        return;
    }
    // process a data entry
    long lastAddConfirmed = recBuff.readLong();
    System.out.println("Type: DATA");
    System.out.println("LastConfirmed: " + lastAddConfirmed);
    if (!printMsg) {
        System.out.println();
        return;
    }
    // skip digest checking
    recBuff.skipBytes(8);
    System.out.println("Data:");
    System.out.println();
    try {
        byte[] ret = new byte[recBuff.readableBytes()];
        recBuff.readBytes(ret);
        entryFormatter.formatEntry(ret);
    } catch (Exception e) {
        System.out.println("N/A. Corrupted.");
    }
    System.out.println();
}
From source file:org.apache.bookkeeper.bookie.CheckpointOnNewLedgersTest.java
License:Apache License
@Test
public void testCheckpoint() throws Exception {
    int entrySize = 1024;
    long l1 = 1L;
    long l2 = 2L;

    final CountDownLatch writeL1Latch = new CountDownLatch(1);
    Thread t1 = new Thread(() -> {
        ByteBuf entry = createByteBuf(l1, 0L, entrySize);
        try {
            bookie.addEntry(entry, false, (rc, ledgerId, entryId, addr, ctx) -> writeL1Latch.countDown(), null,
                    new byte[0]);
        } catch (Exception e) {
            log.info("Failed to write entry to l1", e);
        }
    }, "ledger-1-writer");
    t1.start();

    // wait until the ledger desc is opened
    getLedgerDescCalledLatch.await();

    LastLogMark logMark = bookie.journals.get(0).getLastLogMark().markLog();

    // keep write entries to l2 to trigger entry log rolling to checkpoint
    int numEntries = 10;
    final CountDownLatch writeL2Latch = new CountDownLatch(numEntries);
    for (int i = 0; i < numEntries; i++) {
        ByteBuf entry = createByteBuf(l2, i, entrySize);
        bookie.addEntry(entry, false, (rc, ledgerId, entryId, addr, ctx) -> writeL2Latch.countDown(), null,
                new byte[0]);
    }
    writeL2Latch.await();

    // wait until checkpoint to complete and journal marker is rolled.
    bookie.syncThread.getExecutor().submit(() -> {
    }).get();

    log.info("Wait until checkpoint is completed");

    // the journal mark is rolled.
    LastLogMark newLogMark = bookie.journals.get(0).getLastLogMark().markLog();
    assertTrue(newLogMark.getCurMark().compare(logMark.getCurMark()) > 0);

    // resume l1-writer to continue writing the entries
    getLedgerDescWaitLatch.countDown();

    // wait until the l1 entry is written
    writeL1Latch.await();
    t1.join();

    // construct a new bookie to simulate "bookie restart from crash"
    Bookie newBookie = new Bookie(conf);
    newBookie.start();

    for (int i = 0; i < numEntries; i++) {
        ByteBuf entry = newBookie.readEntry(l2, i);
        assertNotNull(entry);
        assertEquals(l2, entry.readLong());
        assertEquals((long) i, entry.readLong());
        entry.release();
    }

    ByteBuf entry = newBookie.readEntry(l1, 0L);
    assertNotNull(entry);
    assertEquals(l1, entry.readLong());
    assertEquals(0L, entry.readLong());
    entry.release();

    newBookie.shutdown();
}
From source file:org.apache.bookkeeper.bookie.EntryLogger.java
License:Apache License
/**
 * Read the header of an entry log.
 */
private Header getHeaderForLogId(long entryLogId) throws IOException {
    BufferedReadChannel bc = getChannelForLogId(entryLogId);

    // Allocate buffer to read (version, ledgersMapOffset, ledgerCount)
    ByteBuf headers = allocator.directBuffer(LOGFILE_HEADER_SIZE);
    try {
        bc.read(headers, 0);

        // Skip marker string "BKLO"
        headers.readInt();

        int headerVersion = headers.readInt();
        if (headerVersion < HEADER_V0 || headerVersion > HEADER_CURRENT_VERSION) {
            LOG.info("Unknown entry log header version for log {}: {}", entryLogId, headerVersion);
        }

        long ledgersMapOffset = headers.readLong();
        int ledgersCount = headers.readInt();
        return new Header(headerVersion, ledgersMapOffset, ledgersCount);
    } finally {
        headers.release();
    }
}
From source file:org.apache.bookkeeper.bookie.EntryLogger.java
License:Apache License
/**
 * Scan entry log.
 *
 * @param entryLogId Entry Log Id
 * @param scanner Entry Log Scanner
 * @throws IOException
 */
public void scanEntryLog(long entryLogId, EntryLogScanner scanner) throws IOException {
    // Buffer where to read the entrySize (4 bytes) and the ledgerId (8 bytes)
    ByteBuf headerBuffer = Unpooled.buffer(4 + 8);
    BufferedReadChannel bc;
    // Get the BufferedChannel for the current entry log file
    try {
        bc = getChannelForLogId(entryLogId);
    } catch (IOException e) {
        LOG.warn("Failed to get channel to scan entry log: " + entryLogId + ".log");
        throw e;
    }
    // Start the read position in the current entry log file to be after
    // the header where all of the ledger entries are.
    long pos = LOGFILE_HEADER_SIZE;

    // Start with a reasonably sized buffer size
    ByteBuf data = allocator.directBuffer(1024 * 1024);

    try {
        // Read through the entry log file and extract the ledger ID's.
        while (true) {
            // Check if we've finished reading the entry log file.
            if (pos >= bc.size()) {
                break;
            }
            if (readFromLogChannel(entryLogId, bc, headerBuffer, pos) != headerBuffer.capacity()) {
                LOG.warn("Short read for entry size from entrylog {}", entryLogId);
                return;
            }
            long offset = pos;
            pos += 4;
            int entrySize = headerBuffer.readInt();
            long ledgerId = headerBuffer.readLong();
            headerBuffer.clear();

            if (ledgerId == INVALID_LID || !scanner.accept(ledgerId)) {
                // skip this entry
                pos += entrySize;
                continue;
            }
            // read the entry
            data.clear();
            data.capacity(entrySize);
            int rc = readFromLogChannel(entryLogId, bc, data, pos);
            if (rc != entrySize) {
                LOG.warn("Short read for ledger entry from entryLog {}@{} ({} != {})", entryLogId, pos, rc,
                        entrySize);
                return;
            }
            // process the entry
            scanner.process(ledgerId, offset, data);

            // Advance position to the next entry
            pos += entrySize;
        }
    } finally {
        data.release();
    }
}
From source file:org.apache.bookkeeper.bookie.EntryLogger.java
License:Apache License
EntryLogMetadata extractEntryLogMetadataFromIndex(long entryLogId) throws IOException {
    Header header = getHeaderForLogId(entryLogId);

    if (header.version < HEADER_V1) {
        throw new IOException("Old log file header without ledgers map on entryLogId " + entryLogId);
    }

    if (header.ledgersMapOffset == 0L) {
        // The index was not stored in the log file (possibly because the bookie crashed before flushing it)
        throw new IOException("No ledgers map index found on entryLogId" + entryLogId);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Recovering ledgers maps for log {} at offset: {}", entryLogId, header.ledgersMapOffset);
    }

    BufferedReadChannel bc = getChannelForLogId(entryLogId);

    // There can be multiple entries containing the various components of the serialized ledgers map
    long offset = header.ledgersMapOffset;
    EntryLogMetadata meta = new EntryLogMetadata(entryLogId);

    final int maxMapSize = LEDGERS_MAP_HEADER_SIZE + LEDGERS_MAP_ENTRY_SIZE * LEDGERS_MAP_MAX_BATCH_SIZE;
    ByteBuf ledgersMap = allocator.directBuffer(maxMapSize);

    try {
        while (offset < bc.size()) {
            // Read ledgers map size
            sizeBuffer.get().clear();
            bc.read(sizeBuffer.get(), offset);

            int ledgersMapSize = sizeBuffer.get().readInt();

            // Read the index into a buffer
            ledgersMap.clear();
            bc.read(ledgersMap, offset + 4, ledgersMapSize);

            // Discard ledgerId and entryId
            long lid = ledgersMap.readLong();
            if (lid != INVALID_LID) {
                throw new IOException("Cannot deserialize ledgers map from ledger " + lid);
            }

            long entryId = ledgersMap.readLong();
            if (entryId != LEDGERS_MAP_ENTRY_ID) {
                throw new IOException("Cannot deserialize ledgers map from entryId " + entryId);
            }

            // Read the number of ledgers in the current entry batch
            int ledgersCount = ledgersMap.readInt();

            // Extract all (ledger,size) tuples from buffer
            for (int i = 0; i < ledgersCount; i++) {
                long ledgerId = ledgersMap.readLong();
                long size = ledgersMap.readLong();

                if (LOG.isDebugEnabled()) {
                    LOG.debug("Recovering ledgers maps for log {} -- Found ledger: {} with size: {}",
                            entryLogId, ledgerId, size);
                }
                meta.addLedgerSize(ledgerId, size);
            }
            if (ledgersMap.isReadable()) {
                throw new IOException("Invalid entry size when reading ledgers map");
            }

            // Move to next entry, if any
            offset += ledgersMapSize + 4;
        }
    } catch (IndexOutOfBoundsException e) {
        throw new IOException(e);
    } finally {
        ledgersMap.release();
    }

    if (meta.getLedgersMap().size() != header.ledgersCount) {
        throw new IOException(
                "Not all ledgers were found in ledgers map index. expected: " + header.ledgersCount
                        + " -- found: " + meta.getLedgersMap().size() + " -- entryLogId: " + entryLogId);
    }

    return meta;
}
From source file:org.apache.bookkeeper.bookie.EntryLogTest.java
License:Apache License
@Test
public void testMissingLogId() throws Exception {
    // create some entries
    int numLogs = 3;
    int numEntries = 10;
    long[][] positions = new long[2 * numLogs][];
    for (int i = 0; i < numLogs; i++) {
        positions[i] = new long[numEntries];

        EntryLogger logger = new EntryLogger(conf, dirsMgr);
        for (int j = 0; j < numEntries; j++) {
            positions[i][j] = logger.addEntry((long) i, generateEntry(i, j).nioBuffer());
        }
        logger.flush();
        logger.shutdown();
    }
    // delete last log id
    File lastLogId = new File(curDir, "lastId");
    lastLogId.delete();

    // write another entries
    for (int i = numLogs; i < 2 * numLogs; i++) {
        positions[i] = new long[numEntries];

        EntryLogger logger = new EntryLogger(conf, dirsMgr);
        for (int j = 0; j < numEntries; j++) {
            positions[i][j] = logger.addEntry((long) i, generateEntry(i, j).nioBuffer());
        }
        logger.flush();
        logger.shutdown();
    }

    EntryLogger newLogger = new EntryLogger(conf, dirsMgr);
    for (int i = 0; i < (2 * numLogs + 1); i++) {
        File logFile = new File(curDir, Long.toHexString(i) + ".log");
        assertTrue(logFile.exists());
    }
    for (int i = 0; i < 2 * numLogs; i++) {
        for (int j = 0; j < numEntries; j++) {
            String expectedValue = "ledger-" + i + "-" + j;
            ByteBuf value = newLogger.readEntry(i, j, positions[i][j]);
            long ledgerId = value.readLong();
            long entryId = value.readLong();
            byte[] data = new byte[value.readableBytes()];
            value.readBytes(data);
            value.release();
            assertEquals(i, ledgerId);
            assertEquals(j, entryId);
            assertEquals(expectedValue, new String(data));
        }
    }
}
From source file:org.apache.bookkeeper.bookie.EntryLogTest.java
License:Apache License
@Test
public void testFlushIntervalInBytes() throws Exception {
    long flushIntervalInBytes = 5000;
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setFlushIntervalInBytes(flushIntervalInBytes);
    conf.setLedgerDirNames(createAndGetLedgerDirs(2));
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger entryLogger = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerBase entryLogManagerBase = ((EntryLogManagerBase) entryLogger.getEntryLogManager());

    /*
     * when entryLogger is created Header of length EntryLogger.LOGFILE_HEADER_SIZE is created
     */
    long ledgerId = 0L;
    int firstEntrySize = 1000;
    long entry0Position = entryLogger.addEntry(0L, generateEntry(ledgerId, 0L, firstEntrySize));
    // entrylogger writes length of the entry (4 bytes) before writing entry
    long expectedUnpersistedBytes = EntryLogger.LOGFILE_HEADER_SIZE + firstEntrySize + 4;
    Assert.assertEquals("Unpersisted Bytes of entrylog", expectedUnpersistedBytes,
            entryLogManagerBase.getCurrentLogForLedger(ledgerId).getUnpersistedBytes());

    /*
     * 'flushIntervalInBytes' number of bytes are flushed so BufferedChannel should be forcewritten
     */
    int secondEntrySize = (int) (flushIntervalInBytes - expectedUnpersistedBytes);
    long entry1Position = entryLogger.addEntry(0L, generateEntry(ledgerId, 1L, secondEntrySize));
    Assert.assertEquals("Unpersisted Bytes of entrylog", 0,
            entryLogManagerBase.getCurrentLogForLedger(ledgerId).getUnpersistedBytes());

    /*
     * since entrylog/Bufferedchannel is persisted (forcewritten), we should be able to read the entrylog using
     * newEntryLogger
     */
    conf.setEntryLogPerLedgerEnabled(false);
    EntryLogger newEntryLogger = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManager newEntryLogManager = newEntryLogger.getEntryLogManager();
    Assert.assertEquals("EntryLogManager class type", EntryLogManagerForSingleEntryLog.class,
            newEntryLogManager.getClass());

    ByteBuf buf = newEntryLogger.readEntry(ledgerId, 0L, entry0Position);
    long readLedgerId = buf.readLong();
    long readEntryId = buf.readLong();
    Assert.assertEquals("LedgerId", ledgerId, readLedgerId);
    Assert.assertEquals("EntryId", 0L, readEntryId);

    buf = newEntryLogger.readEntry(ledgerId, 1L, entry1Position);
    readLedgerId = buf.readLong();
    readEntryId = buf.readLong();
    Assert.assertEquals("LedgerId", ledgerId, readLedgerId);
    Assert.assertEquals("EntryId", 1L, readEntryId);
}
From source file:org.apache.bookkeeper.bookie.EntryLogTest.java
License:Apache License
@Test
public void testLongLedgerIdsWithEntryLogPerLedger() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setEntryLogFilePreAllocationEnabled(true);
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setLedgerDirNames(createAndGetLedgerDirs(1));
    conf.setLedgerStorageClass(InterleavedLedgerStorage.class.getName());

    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger entryLogger = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerForEntryLogPerLedger entryLogManager = (EntryLogManagerForEntryLogPerLedger) entryLogger
            .getEntryLogManager();

    int numOfLedgers = 5;
    int numOfEntries = 4;
    long[][] pos = new long[numOfLedgers][numOfEntries];
    for (int i = 0; i < numOfLedgers; i++) {
        long ledgerId = Long.MAX_VALUE - i;
        entryLogManager.createNewLog(ledgerId);
        for (int entryId = 0; entryId < numOfEntries; entryId++) {
            pos[i][entryId] = entryLogger.addEntry(ledgerId, generateEntry(ledgerId, entryId).nioBuffer());
        }
    }

    /*
     * do checkpoint to make sure entrylog files are persisted
     */
    entryLogger.checkpoint();

    for (int i = 0; i < numOfLedgers; i++) {
        long ledgerId = Long.MAX_VALUE - i;
        for (int entryId = 0; entryId < numOfEntries; entryId++) {
            String expectedValue = generateDataString(ledgerId, entryId);
            ByteBuf buf = entryLogger.readEntry(ledgerId, entryId, pos[i][entryId]);
            long readLedgerId = buf.readLong();
            long readEntryId = buf.readLong();
            byte[] readData = new byte[buf.readableBytes()];
            buf.readBytes(readData);
            assertEquals("LedgerId ", ledgerId, readLedgerId);
            assertEquals("EntryId ", entryId, readEntryId);
            assertEquals("Entry Data ", expectedValue, new String(readData));
        }
    }
}
From source file:org.apache.bookkeeper.bookie.EntryLogTest.java
License:Apache License
@Test
public void testReadAddCallsOfMultipleEntryLogs() throws Exception {
    ServerConfiguration conf = TestBKConfiguration.newServerConfiguration();
    conf.setEntryLogPerLedgerEnabled(true);
    conf.setLedgerDirNames(createAndGetLedgerDirs(2));
    // pre allocation enabled
    conf.setEntryLogFilePreAllocationEnabled(true);
    LedgerDirsManager ledgerDirsManager = new LedgerDirsManager(conf, conf.getLedgerDirs(),
            new DiskChecker(conf.getDiskUsageThreshold(), conf.getDiskUsageWarnThreshold()));
    EntryLogger entryLogger = new EntryLogger(conf, ledgerDirsManager);
    EntryLogManagerBase entryLogManagerBase = ((EntryLogManagerBase) entryLogger.getEntryLogManager());

    int numOfActiveLedgers = 10;
    int numEntries = 10;
    long[][] positions = new long[numOfActiveLedgers][];
    for (int i = 0; i < numOfActiveLedgers; i++) {
        positions[i] = new long[numEntries];
    }

    /*
     * add entries to the ledgers
     */
    for (int j = 0; j < numEntries; j++) {
        for (int i = 0; i < numOfActiveLedgers; i++) {
            positions[i][j] = entryLogger.addEntry((long) i, generateEntry(i, j));
            long entryLogId = (positions[i][j] >> 32L);
            /*
             * Though EntryLogFilePreAllocation is enabled, since things are not done concurrently here,
             * entryLogIds will be sequential.
             */
            Assert.assertEquals("EntryLogId for ledger: " + i, i, entryLogId);
        }
    }

    /*
     * read the entries which are written
     */
    for (int j = 0; j < numEntries; j++) {
        for (int i = 0; i < numOfActiveLedgers; i++) {
            String expectedValue = "ledger-" + i + "-" + j;
            ByteBuf buf = entryLogger.readEntry(i, j, positions[i][j]);
            long ledgerId = buf.readLong();
            long entryId = buf.readLong();
            byte[] data = new byte[buf.readableBytes()];
            buf.readBytes(data);
            assertEquals("LedgerId ", i, ledgerId);
            assertEquals("EntryId ", j, entryId);
            assertEquals("Entry Data ", expectedValue, new String(data));
        }
    }

    for (long i = 0; i < numOfActiveLedgers; i++) {
        entryLogManagerBase.createNewLog(i);
    }

    entryLogManagerBase.flushRotatedLogs();

    // reading after flush of rotated logs
    for (int j = 0; j < numEntries; j++) {
        for (int i = 0; i < numOfActiveLedgers; i++) {
            String expectedValue = "ledger-" + i + "-" + j;
            ByteBuf buf = entryLogger.readEntry(i, j, positions[i][j]);
            long ledgerId = buf.readLong();
            long entryId = buf.readLong();
            byte[] data = new byte[buf.readableBytes()];
            buf.readBytes(data);
            assertEquals("LedgerId ", i, ledgerId);
            assertEquals("EntryId ", j, entryId);
            assertEquals("Entry Data ", expectedValue, new String(data));
        }
    }
}