List of usage examples for io.netty.buffer.ByteBuf.clear()
public abstract ByteBuf clear();
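clear() sets both readerIndex and writerIndex back to 0 (equivalent to setIndex(0, 0)); it neither erases the buffer's contents nor touches its reference count. This makes it the cheap way to reuse a scratch buffer for a fresh write, the pattern that recurs throughout the examples below. A minimal sketch of the semantics (class and variable names are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ByteBufClearExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeInt(42);      // writerIndex -> 4
        int v = buf.readInt(); // readerIndex -> 4
        buf.clear();           // readerIndex = writerIndex = 0; contents are NOT zeroed
        buf.writeLong(7L);     // the same memory is reused from the start
        buf.release();         // clear() does not release; reference counting is separate
    }
}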
From source file: org.apache.bookkeeper.bookie.EntryLogger.java
License: Apache License
EntryLogMetadata extractEntryLogMetadataFromIndex(long entryLogId) throws IOException {
    Header header = getHeaderForLogId(entryLogId);

    if (header.version < HEADER_V1) {
        throw new IOException("Old log file header without ledgers map on entryLogId " + entryLogId);
    }

    if (header.ledgersMapOffset == 0L) {
        // The index was not stored in the log file (possibly because the bookie crashed before flushing it)
        throw new IOException("No ledgers map index found on entryLogId " + entryLogId);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Recovering ledgers maps for log {} at offset: {}", entryLogId, header.ledgersMapOffset);
    }

    BufferedReadChannel bc = getChannelForLogId(entryLogId);

    // There can be multiple entries containing the various components of the serialized ledgers map
    long offset = header.ledgersMapOffset;
    EntryLogMetadata meta = new EntryLogMetadata(entryLogId);

    final int maxMapSize = LEDGERS_MAP_HEADER_SIZE + LEDGERS_MAP_ENTRY_SIZE * LEDGERS_MAP_MAX_BATCH_SIZE;
    ByteBuf ledgersMap = allocator.directBuffer(maxMapSize);

    try {
        while (offset < bc.size()) {
            // Read ledgers map size
            sizeBuffer.get().clear();
            bc.read(sizeBuffer.get(), offset);

            int ledgersMapSize = sizeBuffer.get().readInt();

            // Read the index into a buffer
            ledgersMap.clear();
            bc.read(ledgersMap, offset + 4, ledgersMapSize);

            // Discard ledgerId and entryId
            long lid = ledgersMap.readLong();
            if (lid != INVALID_LID) {
                throw new IOException("Cannot deserialize ledgers map from ledger " + lid);
            }

            long entryId = ledgersMap.readLong();
            if (entryId != LEDGERS_MAP_ENTRY_ID) {
                throw new IOException("Cannot deserialize ledgers map from entryId " + entryId);
            }

            // Read the number of ledgers in the current entry batch
            int ledgersCount = ledgersMap.readInt();

            // Extract all (ledger, size) tuples from buffer
            for (int i = 0; i < ledgersCount; i++) {
                long ledgerId = ledgersMap.readLong();
                long size = ledgersMap.readLong();

                if (LOG.isDebugEnabled()) {
                    LOG.debug("Recovering ledgers maps for log {} -- Found ledger: {} with size: {}",
                            entryLogId, ledgerId, size);
                }
                meta.addLedgerSize(ledgerId, size);
            }
            if (ledgersMap.isReadable()) {
                throw new IOException("Invalid entry size when reading ledgers map");
            }

            // Move to next entry, if any
            offset += ledgersMapSize + 4;
        }
    } catch (IndexOutOfBoundsException e) {
        throw new IOException(e);
    } finally {
        ledgersMap.release();
    }

    if (meta.getLedgersMap().size() != header.ledgersCount) {
        throw new IOException("Not all ledgers were found in ledgers map index. expected: " + header.ledgersCount
                + " -- found: " + meta.getLedgersMap().size() + " -- entryLogId: " + entryLogId);
    }

    return meta;
}
From source file: org.apache.bookkeeper.bookie.EntryLogManagerBase.java
License: Apache License
@Override
public long addEntry(long ledger, ByteBuf entry, boolean rollLog) throws IOException {
    int entrySize = entry.readableBytes() + 4; // Adding 4 bytes to prepend the size
    BufferedLogChannel logChannel = getCurrentLogForLedgerForAddEntry(ledger, entrySize, rollLog);

    ByteBuf sizeBuffer = sizeBufferForAdd.get();
    sizeBuffer.clear();
    sizeBuffer.writeInt(entry.readableBytes());
    logChannel.write(sizeBuffer);

    long pos = logChannel.position();
    logChannel.write(entry);
    logChannel.registerWrittenEntry(ledger, entrySize);

    return (logChannel.getLogId() << 32L) | pos;
}
From source file: org.apache.bookkeeper.bookie.Journal.java
License: Apache License
static void writePaddingBytes(JournalChannel jc, ByteBuf paddingBuffer, int journalAlignSize) throws IOException {
    int bytesToAlign = (int) (jc.bc.position() % journalAlignSize);
    if (0 != bytesToAlign) {
        int paddingBytes = journalAlignSize - bytesToAlign;
        if (paddingBytes < 8) {
            paddingBytes = journalAlignSize - (8 - paddingBytes);
        } else {
            paddingBytes -= 8;
        }
        paddingBuffer.clear();
        // padding mask
        paddingBuffer.writeInt(PADDING_MASK);
        // padding len
        paddingBuffer.writeInt(paddingBytes);
        // padding bytes
        paddingBuffer.writerIndex(paddingBuffer.writerIndex() + paddingBytes);

        jc.preAllocIfNeeded(paddingBuffer.readableBytes());
        // write padding bytes
        jc.bc.write(paddingBuffer);
    }
}
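To make the alignment arithmetic concrete (numbers chosen for illustration): with journalAlignSize = 512 and the channel at position 100, bytesToAlign = 100 and paddingBytes = 412 - 8 = 404, so the 8-byte header (padding mask plus length) followed by 404 padding bytes advances the position to exactly 512. When fewer than 8 bytes remain before the boundary, say position 508 so that the initial paddingBytes = 4, the header alone would overshoot, so the method pads through the next boundary instead: paddingBytes = 512 - (8 - 4) = 508, and 8 + 508 = 516 written bytes land the position on 1024, which is again aligned.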
From source file: org.apache.bookkeeper.bookie.Journal.java
License: Apache License
/**
 * A thread used for persisting journal entries to journal files.
 *
 * <p>Besides persisting journal entries, it also takes responsibility of
 * rolling journal files when a journal file reaches the journal file size
 * limitation.</p>
 *
 * <p>During journal rolling, it first closes the writing journal, generates
 * a new journal file using the current timestamp, and continues the persistence logic.
 * Those journals will be garbage collected in SyncThread.</p>
 *
 * @see org.apache.bookkeeper.bookie.SyncThread
 */
@Override
public void run() {
    LOG.info("Starting journal on {}", journalDirectory);

    if (conf.isBusyWaitEnabled()) {
        try {
            CpuAffinity.acquireCore();
        } catch (Exception e) {
            LOG.warn("Unable to acquire CPU core for Journal thread: {}", e.getMessage(), e);
        }
    }

    RecyclableArrayList<QueueEntry> toFlush = entryListRecycler.newInstance();
    int numEntriesToFlush = 0;
    ByteBuf lenBuff = Unpooled.buffer(4);
    ByteBuf paddingBuff = Unpooled.buffer(2 * conf.getJournalAlignmentSize());
    paddingBuff.writeZero(paddingBuff.capacity());

    BufferedChannel bc = null;
    JournalChannel logFile = null;
    forceWriteThread.start();
    Stopwatch journalCreationWatcher = Stopwatch.createUnstarted();
    Stopwatch journalFlushWatcher = Stopwatch.createUnstarted();
    long batchSize = 0;
    try {
        List<Long> journalIds = listJournalIds(journalDirectory, null);
        // Should not use MathUtils.now(), which uses System.nanoTime() and
        // could only be used to measure elapsed time.
        // http://docs.oracle.com/javase/1.5.0/docs/api/java/lang/System.html#nanoTime%28%29
        long logId = journalIds.isEmpty() ? System.currentTimeMillis() : journalIds.get(journalIds.size() - 1);
        long lastFlushPosition = 0;
        boolean groupWhenTimeout = false;

        long dequeueStartTime = 0L;

        QueueEntry qe = null;
        while (true) {
            // new journal file to write
            if (null == logFile) {
                logId = logId + 1;

                journalCreationWatcher.reset().start();
                logFile = new JournalChannel(journalDirectory, logId, journalPreAllocSize,
                        journalWriteBufferSize, journalAlignmentSize, removePagesFromCache,
                        journalFormatVersionToWrite, getBufferedChannelBuilder());

                journalStats.getJournalCreationStats().registerSuccessfulEvent(
                        journalCreationWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                bc = logFile.getBufferedChannel();

                lastFlushPosition = bc.position();
            }

            if (qe == null) {
                if (dequeueStartTime != 0) {
                    journalStats.getJournalProcessTimeStats().registerSuccessfulEvent(
                            MathUtils.elapsedNanos(dequeueStartTime), TimeUnit.NANOSECONDS);
                }

                if (numEntriesToFlush == 0) {
                    qe = queue.take();
                    dequeueStartTime = MathUtils.nowInNano();
                    journalStats.getJournalQueueStats().registerSuccessfulEvent(
                            MathUtils.elapsedNanos(qe.enqueueTime), TimeUnit.NANOSECONDS);
                } else {
                    long pollWaitTimeNanos = maxGroupWaitInNanos
                            - MathUtils.elapsedNanos(toFlush.get(0).enqueueTime);
                    if (flushWhenQueueEmpty || pollWaitTimeNanos < 0) {
                        pollWaitTimeNanos = 0;
                    }
                    qe = queue.poll(pollWaitTimeNanos, TimeUnit.NANOSECONDS);
                    dequeueStartTime = MathUtils.nowInNano();

                    if (qe != null) {
                        journalStats.getJournalQueueStats().registerSuccessfulEvent(
                                MathUtils.elapsedNanos(qe.enqueueTime), TimeUnit.NANOSECONDS);
                    }

                    boolean shouldFlush = false;
                    // We should issue a forceWrite if any of the three conditions below holds good
                    // 1. If the oldest pending entry has been pending for longer than the max wait time
                    if (maxGroupWaitInNanos > 0 && !groupWhenTimeout
                            && (MathUtils.elapsedNanos(toFlush.get(0).enqueueTime) > maxGroupWaitInNanos)) {
                        groupWhenTimeout = true;
                    } else if (maxGroupWaitInNanos > 0 && groupWhenTimeout && qe != null
                            && MathUtils.elapsedNanos(qe.enqueueTime) < maxGroupWaitInNanos) {
                        // when group timeout, it would be better to look forward, as there might be lots of
                        // entries already timeout due to a previous slow write
                        // (writing to filesystem which impacted by force write).
                        // Group those entries in the queue
                        // a) already timeout
                        // b) limit the number of entries to group
                        groupWhenTimeout = false;
                        shouldFlush = true;
                        journalStats.getFlushMaxWaitCounter().inc();
                    } else if (qe != null
                            && ((bufferedEntriesThreshold > 0 && toFlush.size() > bufferedEntriesThreshold)
                                    || (bc.position() > lastFlushPosition + bufferedWritesThreshold))) {
                        // 2. If we have buffered more than the buffWriteThreshold or bufferedEntriesThreshold
                        shouldFlush = true;
                        journalStats.getFlushMaxOutstandingBytesCounter().inc();
                    } else if (qe == null) {
                        // We should get here only if flushWhenQueueEmpty is true else we would wait
                        // for timeout that would put us past the maxWait threshold
                        // 3. If the queue is empty i.e. no benefit of grouping. This happens when we have one
                        // publish at a time - common case in tests.
                        shouldFlush = true;
                        journalStats.getFlushEmptyQueueCounter().inc();
                    }

                    // toFlush is non null and not empty so should be safe to access getFirst
                    if (shouldFlush) {
                        if (journalFormatVersionToWrite >= JournalChannel.V5) {
                            writePaddingBytes(logFile, paddingBuff, journalAlignmentSize);
                        }
                        journalFlushWatcher.reset().start();
                        bc.flush();

                        for (int i = 0; i < toFlush.size(); i++) {
                            QueueEntry entry = toFlush.get(i);
                            if (entry != null && (!syncData || entry.ackBeforeSync)) {
                                toFlush.set(i, null);
                                numEntriesToFlush--;
                                cbThreadPool.execute(entry);
                            }
                        }

                        lastFlushPosition = bc.position();
                        journalStats.getJournalFlushStats().registerSuccessfulEvent(
                                journalFlushWatcher.stop().elapsed(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                        // Trace the lifetime of entries through persistence
                        if (LOG.isDebugEnabled()) {
                            for (QueueEntry e : toFlush) {
                                if (e != null) {
                                    LOG.debug("Written and queuing for flush Ledger: {} Entry: {}",
                                            e.ledgerId, e.entryId);
                                }
                            }
                        }

                        journalStats.getForceWriteBatchEntriesStats().registerSuccessfulValue(numEntriesToFlush);
                        journalStats.getForceWriteBatchBytesStats().registerSuccessfulValue(batchSize);

                        boolean shouldRolloverJournal = (lastFlushPosition > maxJournalSize);
                        if (syncData) {
                            // Trigger data sync to disk in the "Force-Write" thread.
                            // Callback will be triggered after data is committed to disk
                            forceWriteRequests.put(createForceWriteRequest(logFile, logId, lastFlushPosition,
                                    toFlush, shouldRolloverJournal, false));
                            toFlush = entryListRecycler.newInstance();
                            numEntriesToFlush = 0;
                        } else {
                            // Data is already written on the file (though it might still be in the OS page-cache)
                            lastLogMark.setCurLogMark(logId, lastFlushPosition);
                            toFlush.clear();
                            numEntriesToFlush = 0;
                            if (shouldRolloverJournal) {
                                forceWriteRequests.put(createForceWriteRequest(logFile, logId, lastFlushPosition,
                                        EMPTY_ARRAY_LIST, shouldRolloverJournal, false));
                            }
                        }

                        batchSize = 0L;

                        // check whether journal file is over file limit
                        if (shouldRolloverJournal) {
                            // if the journal file is rolled over, the journal file will be closed after last
                            // entry is force written to disk.
                            logFile = null;
                            continue;
                        }
                    }
                }
            }

            if (!running) {
                LOG.info("Journal Manager is asked to shut down, quit.");
                break;
            }

            if (qe == null) {
                // no more queue entry
                continue;
            }

            if ((qe.entryId == Bookie.METAENTRY_ID_LEDGER_EXPLICITLAC)
                    && (journalFormatVersionToWrite < JournalChannel.V6)) {
                /*
                 * this means we are using new code which supports
                 * persisting explicitLac, but "journalFormatVersionToWrite"
                 * is set to some older value (< V6). In this case we
                 * shouldn't write this special entry
                 * (METAENTRY_ID_LEDGER_EXPLICITLAC) to Journal.
                 */
                qe.entry.release();
            } else if (qe.entryId != Bookie.METAENTRY_ID_FORCE_LEDGER) {
                int entrySize = qe.entry.readableBytes();
                journalStats.getJournalWriteBytes().add(entrySize);
                journalStats.getJournalQueueSize().dec();

                batchSize += (4 + entrySize);

                lenBuff.clear();
                lenBuff.writeInt(entrySize);

                // preAlloc based on size
                logFile.preAllocIfNeeded(4 + entrySize);

                bc.write(lenBuff);
                bc.write(qe.entry);
                qe.entry.release();
            }

            toFlush.add(qe);
            numEntriesToFlush++;
            qe = null;
        }
    } catch (IOException ioe) {
        LOG.error("I/O exception in Journal thread!", ioe);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        LOG.info("Journal exits when shutting down");
    } finally {
        // There could be packets queued for forceWrite on this logFile.
        // That is fine as this exception is going to anyway take down
        // the bookie. If we execute this as a part of graceful shutdown,
        // close will flush the file system cache making any previous
        // cached writes durable so this is fine as well.
        IOUtils.close(LOG, bc);
    }
    LOG.info("Journal exited loop!");
}
From source file: org.apache.giraph.utils.DynamicChannelBufferOutputStream.java
License: Apache License
/**
 * Constructor with the buffer to use.
 *
 * @param buffer Buffer to be written to (cleared before use)
 */
public DynamicChannelBufferOutputStream(ByteBuf buffer) {
    this.buffer = buffer;
    buffer.clear();
}
From source file: org.apache.pulsar.common.util.protobuf.ByteBufCodedInputStreamTest.java
License: Apache License
@Test
public void testWritingDouble() throws IOException {
    ByteBuf buf = Unpooled.buffer();
    buf.clear(); // a no-op here: a freshly allocated buffer already has both indexes at 0
    ByteBufCodedOutputStream outputStream = ByteBufCodedOutputStream.get(buf);

    outputStream.writeDouble(12, 23d);
    outputStream.writeDouble(15, 13.13d);
    outputStream.writeDouble(1, -0.003d);

    ByteBufCodedInputStream inputStream = ByteBufCodedInputStream.get(buf);
    assertEquals(WireFormat.getTagFieldNumber(inputStream.readTag()), 12);
    assertEquals(inputStream.readDouble(), 23d);
    assertEquals(WireFormat.getTagFieldNumber(inputStream.readTag()), 15);
    assertEquals(inputStream.readDouble(), 13.13d);
    assertEquals(WireFormat.getTagFieldNumber(inputStream.readTag()), 1);
    assertEquals(inputStream.readDouble(), -0.003d);
}
From source file: org.apache.reef.wake.remote.transport.netty.ChunkedReadWriteHandler.java
License: Apache License
/**
 * Converts the int size into a byte[].
 *
 * @return the bit representation of size
 */
private byte[] sizeAsByteArr(final int size) {
    final byte[] ret = new byte[INT_SIZE];
    // wrappedBuffer shares ret's backing array (no copy) and marks the whole region
    // readable (writerIndex == capacity), so clear() must reset writerIndex to 0
    // before writeInt can write the size directly into ret
    final ByteBuf intBuffer = Unpooled.wrappedBuffer(ret).order(Unpooled.LITTLE_ENDIAN);
    intBuffer.clear();
    intBuffer.writeInt(size);
    intBuffer.release();
    return ret;
}
From source file: org.apache.spark.network.sasl.aes.AesConfigMessage.java
License: Apache License
/**
 * Encode the config message.
 *
 * @return ByteBuffer which contains encoded config message.
 */
public ByteBuffer encodeMessage() {
    ByteBuffer buf = ByteBuffer.allocate(encodedLength());

    ByteBuf wrappedBuf = Unpooled.wrappedBuffer(buf);
    // wrappedBuffer treats the wrapped region as already readable (writerIndex == capacity),
    // so clear() is required before encode() can write from the beginning of buf
    wrappedBuf.clear();
    encode(wrappedBuf);

    return buf;
}
From source file: org.asynchttpclient.request.body.generator.ByteArrayBodyGeneratorTest.java
License: Open Source License
@Test
public void testSingleRead() throws IOException {
    final int srcArraySize = chunkSize - 1;
    final byte[] srcArray = new byte[srcArraySize];
    random.nextBytes(srcArray);

    final ByteArrayBodyGenerator babGen = new ByteArrayBodyGenerator(srcArray);
    final Body body = babGen.createBody();

    final ByteBuf chunkBuffer = Unpooled.buffer(chunkSize);

    try {
        // should take 1 read to get through the srcArray
        body.transferTo(chunkBuffer);
        assertEquals(chunkBuffer.readableBytes(), srcArraySize, "bytes read");
        chunkBuffer.clear();

        assertEquals(body.transferTo(chunkBuffer), BodyState.STOP, "body at EOF");
    } finally {
        chunkBuffer.release();
    }
}
From source file: org.asynchttpclient.request.body.generator.ByteArrayBodyGeneratorTest.java
License: Open Source License
@Test
public void testMultipleReads() throws IOException {
    final int srcArraySize = (3 * chunkSize) + 42;
    final byte[] srcArray = new byte[srcArraySize];
    random.nextBytes(srcArray);

    final ByteArrayBodyGenerator babGen = new ByteArrayBodyGenerator(srcArray);
    final Body body = babGen.createBody();

    final ByteBuf chunkBuffer = Unpooled.buffer(chunkSize);

    try {
        int reads = 0;
        int bytesRead = 0;
        while (body.transferTo(chunkBuffer) != BodyState.STOP) {
            reads += 1;
            bytesRead += chunkBuffer.readableBytes();
            chunkBuffer.clear();
        }
        assertEquals(reads, 4, "reads to drain generator");
        assertEquals(bytesRead, srcArraySize, "bytes read");
    } finally {
        chunkBuffer.release();
    }
}