Example usage for io.netty.buffer ByteBuf resetReaderIndex

List of usage examples for io.netty.buffer ByteBuf resetReaderIndex

Introduction

On this page you can find example usage for io.netty.buffer ByteBuf resetReaderIndex.

Prototype

public abstract ByteBuf resetReaderIndex();

Document

Repositions the current readerIndex to the marked readerIndex in this buffer.
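
Below is a minimal, self-contained sketch of the mark/reset pattern that the examples on this page rely on; the buffer contents and the peekLedgerId helper are illustrative only and do not come from any of the projects listed under Usage.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ResetReaderIndexExample {

    // Peek at a value without consuming it: mark, read, then reset.
    static long peekLedgerId(ByteBuf buf) {
        buf.markReaderIndex();          // remember the current readerIndex
        long ledgerId = buf.readLong(); // readerIndex advances by 8
        buf.resetReaderIndex();         // readerIndex returns to the marked position
        return ledgerId;
    }

    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeLong(42L);
        System.out.println(peekLedgerId(buf)); // prints 42
        System.out.println(buf.readLong());    // still prints 42; nothing was consumed
        buf.release();
    }
}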

Usage

From source file:org.apache.bookkeeper.bookie.Bookie.java

License:Apache License

public void setExplicitLac(ByteBuf entry, WriteCallback writeCallback, Object ctx, byte[] masterKey)
        throws IOException, InterruptedException, BookieException {
    try {
        long ledgerId = entry.getLong(entry.readerIndex());
        LedgerDescriptor handle = handles.getHandle(ledgerId, masterKey);
        synchronized (handle) {
            entry.markReaderIndex();
            handle.setExplicitLac(entry);
            entry.resetReaderIndex();
            ByteBuf explicitLACEntry = createExplicitLACEntry(ledgerId, entry);
            getJournal(ledgerId).logAddEntry(explicitLACEntry, false /* ackBeforeSync */, writeCallback, ctx);
        }
    } catch (NoWritableLedgerDirException e) {
        stateManager.transitionToReadOnlyMode();
        throw new IOException(e);
    }
}

From source file:org.apache.bookkeeper.bookie.BufferedChannelTest.java

License:Apache License

public void testBufferedChannel(int byteBufLength, int numOfWrites, int unpersistedBytesBound, boolean flush,
        boolean shouldForceWrite) throws Exception {
    File newLogFile = File.createTempFile("test", "log");
    newLogFile.deleteOnExit();
    FileChannel fileChannel = new RandomAccessFile(newLogFile, "rw").getChannel();

    BufferedChannel logChannel = new BufferedChannel(UnpooledByteBufAllocator.DEFAULT, fileChannel,
            INTERNAL_BUFFER_WRITE_CAPACITY, INTERNAL_BUFFER_READ_CAPACITY, unpersistedBytesBound);

    ByteBuf dataBuf = generateEntry(byteBufLength);
    dataBuf.markReaderIndex();
    dataBuf.markWriterIndex();

    for (int i = 0; i < numOfWrites; i++) {
        logChannel.write(dataBuf);
        dataBuf.resetReaderIndex();
        dataBuf.resetWriterIndex();
    }

    if (flush && shouldForceWrite) {
        logChannel.flushAndForceWrite(false);
    } else if (flush) {
        logChannel.flush();
    } else if (shouldForceWrite) {
        logChannel.forceWrite(false);
    }

    int expectedNumOfUnpersistedBytes = 0;

    if (flush && shouldForceWrite) {
        /*
         * if flush call is made with shouldForceWrite,
         * then expectedNumOfUnpersistedBytes should be zero.
         */
        expectedNumOfUnpersistedBytes = 0;
    } else if (!flush && shouldForceWrite) {
        /*
         * if flush is not called explicitly, the internal write buffer is not
         * flushed; but while adding entries to the BufferedChannel, once the
         * writeBuffer reaches its capacity, flush is called internally and the
         * data is added to the file buffer. So even though we do not call flush
         * explicitly, it gets called implicitly whenever the writeBuffer reaches
         * its capacity.
         */
        expectedNumOfUnpersistedBytes = (byteBufLength * numOfWrites) % INTERNAL_BUFFER_WRITE_CAPACITY;
    } else {
        expectedNumOfUnpersistedBytes = (byteBufLength * numOfWrites) - unpersistedBytesBound;
    }

    Assert.assertEquals("Unpersisted bytes", expectedNumOfUnpersistedBytes, logChannel.getUnpersistedBytes());
    logChannel.close();
    fileChannel.close();
}

From source file:org.apache.bookkeeper.bookie.IndexPersistenceMgrTest.java

License:Apache License

void validateFileInfo(IndexPersistenceMgr indexPersistenceMgr, long ledgerId, int headerVersion)
        throws IOException, GeneralSecurityException {
    BookKeeper.DigestType digestType = BookKeeper.DigestType.CRC32;
    boolean getUseV2WireProtocol = true;

    preCreateFileInfoForLedger(ledgerId, headerVersion);
    DigestManager digestManager = DigestManager.instantiate(ledgerId, masterKey,
            BookKeeper.DigestType.toProtoDigestType(digestType), UnpooledByteBufAllocator.DEFAULT,
            getUseV2WireProtocol);

    CachedFileInfo fileInfo = indexPersistenceMgr.getFileInfo(ledgerId, masterKey);
    fileInfo.readHeader();
    assertEquals("ExplicitLac should be null", null, fileInfo.getExplicitLac());
    assertEquals("Header Version should match with precreated fileinfos headerversion", headerVersion,
            fileInfo.headerVersion);
    assertTrue("Masterkey should match with precreated fileinfos masterkey",
            Arrays.equals(masterKey, fileInfo.masterKey));
    long explicitLac = 22;
    ByteBuf explicitLacByteBuf = digestManager.computeDigestAndPackageForSendingLac(explicitLac).getBuffer(0);
    explicitLacByteBuf.markReaderIndex();
    indexPersistenceMgr.setExplicitLac(ledgerId, explicitLacByteBuf);
    explicitLacByteBuf.resetReaderIndex();
    assertEquals("explicitLac ByteBuf contents should match", 0,
            ByteBufUtil.compare(explicitLacByteBuf, indexPersistenceMgr.getExplicitLac(ledgerId)));
    /*
     * release fileInfo until it is marked dead and closed, so that
     * its contents are persisted.
     */
    while (fileInfo.refCount.get() != FileInfoBackingCache.DEAD_REF) {
        fileInfo.release();
    }
    /*
     * reopen the fileinfo and readHeader, so that whatever was persisted
     * would be read.
     */
    fileInfo = indexPersistenceMgr.getFileInfo(ledgerId, masterKey);
    fileInfo.readHeader();
    assertEquals("Header Version should match with precreated fileinfos headerversion even after reopening",
            headerVersion, fileInfo.headerVersion);
    assertTrue("Masterkey should match with precreated fileinfos masterkey",
            Arrays.equals(masterKey, fileInfo.masterKey));
    if (headerVersion == FileInfo.V0) {
        assertEquals(
                "Since it is V0 Header, explicitLac will not be persisted and should be null after reopening",
                null, indexPersistenceMgr.getExplicitLac(ledgerId));
    } else {
        explicitLacByteBuf.resetReaderIndex();
        assertEquals(
                "Since it is V1 Header, explicitLac will be persisted and should not be null after reopening",
                0, ByteBufUtil.compare(explicitLacByteBuf, indexPersistenceMgr.getExplicitLac(ledgerId)));
    }
}

From source file:org.apache.bookkeeper.bookie.storage.ldb.SingleDirectoryDbLedgerStorage.java

License:Apache License

public ByteBuf getLastEntry(long ledgerId) throws IOException {
    long startTime = MathUtils.nowInNano();

    long stamp = writeCacheRotationLock.readLock();
    try {
        // First try to read from the write cache of recent entries
        ByteBuf entry = writeCache.getLastEntry(ledgerId);
        if (entry != null) {
            if (log.isDebugEnabled()) {
                long foundLedgerId = entry.readLong(); // ledgerId
                long entryId = entry.readLong();
                entry.resetReaderIndex();
                if (log.isDebugEnabled()) {
                    log.debug("Found last entry for ledger {} in write cache: {}@{}", ledgerId, foundLedgerId,
                            entryId);
                }
            }

            recordSuccessfulEvent(dbLedgerStorageStats.getReadCacheHitStats(), startTime);
            recordSuccessfulEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
            return entry;
        }

        // If there's a flush going on, the entry might be in the flush buffer
        entry = writeCacheBeingFlushed.getLastEntry(ledgerId);
        if (entry != null) {
            if (log.isDebugEnabled()) {
                entry.readLong(); // ledgerId
                long entryId = entry.readLong();
                entry.resetReaderIndex();
                if (log.isDebugEnabled()) {
                    log.debug("Found last entry for ledger {} in write cache being flushed: {}", ledgerId,
                            entryId);
                }
            }

            recordSuccessfulEvent(dbLedgerStorageStats.getReadCacheHitStats(), startTime);
            recordSuccessfulEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
            return entry;
        }
    } finally {
        writeCacheRotationLock.unlockRead(stamp);
    }

    // Search the last entry in storage
    long lastEntryId = entryLocationIndex.getLastEntryInLedger(ledgerId);
    if (log.isDebugEnabled()) {
        log.debug("Found last entry for ledger {} in db: {}", ledgerId, lastEntryId);
    }

    long entryLocation = entryLocationIndex.getLocation(ledgerId, lastEntryId);
    ByteBuf content = entryLogger.readEntry(ledgerId, lastEntryId, entryLocation);

    recordSuccessfulEvent(dbLedgerStorageStats.getReadCacheMissStats(), startTime);
    recordSuccessfulEvent(dbLedgerStorageStats.getReadEntryStats(), startTime);
    return content;
}

From source file:org.apache.bookkeeper.client.MockBookKeeperTestCase.java

License:Apache License

private byte[] extractEntryPayload(long ledgerId, long entryId, ByteBufList toSend)
        throws BKException.BKDigestMatchException {
    ByteBuf toSendCopy = Unpooled.copiedBuffer(toSend.toArray());
    toSendCopy.resetReaderIndex();
    DigestManager macManager = null;
    try {
        macManager = getDigestType(ledgerId);
    } catch (GeneralSecurityException gse) {
        LOG.error("Initialize macManager fail", gse);
    }
    ByteBuf content = macManager.verifyDigestAndReturnData(entryId, toSendCopy);
    byte[] entry = new byte[content.readableBytes()];
    content.readBytes(entry);
    content.resetReaderIndex();
    content.release();
    return entry;
}

From source file:org.apache.drill.exec.rpc.ProtobufLengthDecoder.java

License:Apache License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    if (!ctx.channel().isOpen()) {
        if (in.readableBytes() > 0) {
            logger.info("Channel is closed, discarding remaining {} byte(s) in buffer.", in.readableBytes());
        }
        in.skipBytes(in.readableBytes());
        return;
    }

    in.markReaderIndex();
    final byte[] buf = new byte[5];
    for (int i = 0; i < buf.length; i++) {
        if (!in.isReadable()) {
            in.resetReaderIndex();
            return;
        }

        buf[i] = in.readByte();
        if (buf[i] >= 0) {

            int length = CodedInputStream.newInstance(buf, 0, i + 1).readRawVarint32();

            if (length < 0) {
                throw new CorruptedFrameException("negative length: " + length);
            }
            if (length == 0) {
                throw new CorruptedFrameException("Received a message of length 0.");
            }

            if (in.readableBytes() < length) {
                in.resetReaderIndex();
                return;
            } else {
                // need to make buffer copy, otherwise netty will try to refill this buffer if we move the readerIndex forward...
                // TODO: Can we avoid this copy?
                ByteBuf outBuf;
                try {
                    outBuf = allocator.buffer(length);
                } catch (OutOfMemoryException e) {
                    logger.warn(
                            "Failure allocating buffer on incoming stream due to memory limits.  Current Allocation: {}.",
                            allocator.getAllocatedMemory());
                    in.resetReaderIndex();
                    outOfMemoryHandler.handle();
                    return;
                }
                outBuf.writeBytes(in, in.readerIndex(), length);

                in.skipBytes(length);

                if (RpcConstants.EXTRA_DEBUGGING) {
                    logger.debug(String.format(
                            "ReaderIndex is %d after length header of %d bytes and frame body of length %d bytes.",
                            in.readerIndex(), i + 1, length));
                }

                out.add(outBuf);
                return;
            }
        }
    }

    // Couldn't find the byte whose MSB is off.
    throw new CorruptedFrameException("length wider than 32-bit");

}

From source file:org.apache.drill.exec.rpc.SaslDecryptionHandler.java

License:Apache License

public void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws IOException {

    if (!ctx.channel().isOpen()) {
        logger.trace("Channel closed before decoding the message of {} bytes", msg.readableBytes());
        msg.skipBytes(msg.readableBytes());
        return;
    }

    try {
        if (logger.isTraceEnabled()) {
            logger.trace("Trying to decrypt the encrypted message of size: {} with maxWrappedSize",
                    msg.readableBytes());
        }

        // All the encrypted blocks are prefixed with their length in network byte order (or BigEndian format). Netty's
        // default Byte order of ByteBuf is Little Endian, so we cannot just do msg.getInt() as that will read the 4
        // octets in little endian format.
        //
        // We will read the length of one complete encrypted chunk and decode that.
        msg.getBytes(msg.readerIndex(), lengthOctets.array(), 0, RpcConstants.LENGTH_FIELD_LENGTH);
        final int wrappedMsgLength = lengthOctets.getInt(0);
        msg.skipBytes(RpcConstants.LENGTH_FIELD_LENGTH);

        // Since lengthBasedFrameDecoder will ensure we have enough bytes it's good to have this check here.
        assert (msg.readableBytes() == wrappedMsgLength);

        // Uncomment the code below if msg can contain both Direct and Heap ByteBufs. Currently Drill only supports
        // DirectByteBuf, so the condition below will always be false. If msg is always a HeapByteBuf, then also
        // remove the allocation of encodedMsg from the constructor.
        /*if (msg.hasArray()) {
          wrappedMsg = msg.array();
        } else {
        if (RpcConstants.EXTRA_DEBUGGING) {
          logger.debug("The input bytebuf is not backed by a byte array so allocating a new one");
        }*/

        // Check if the wrappedMsgLength doesn't exceed agreed upon maxWrappedSize. As per SASL RFC 2222/4422 we
        // should close the connection since it represents a security attack.
        if (wrappedMsgLength > maxWrappedSize) {
            throw new RpcException(String.format(
                    "Received encoded buffer size: %d is larger than negotiated "
                            + "maxWrappedSize: %d. Closing the connection as this is unexpected.",
                    wrappedMsgLength, maxWrappedSize));
        }

        final byte[] wrappedMsg = encodedMsg;
        // Copy the wrappedMsgLength of bytes into the byte array
        msg.getBytes(msg.readerIndex(), wrappedMsg, 0, wrappedMsgLength);
        //}

        // SASL library always copies the origMsg internally to a new byte array
        // and returns another new byte array after decrypting the message. The memory for this
        // will be garbage collected by the JVM since the SASL library releases its reference after
        // returning the byte array.
        final byte[] decodedMsg = saslCodec.unwrap(wrappedMsg, 0, wrappedMsgLength);

        if (logger.isTraceEnabled()) {
            logger.trace("Successfully decrypted incoming message. Length after decryption: {}",
                    decodedMsg.length);
        }

        // Update the msg reader index since we have decrypted this chunk
        msg.skipBytes(wrappedMsgLength);

        // Allocate a new Bytebuf to copy the decrypted chunk.
        final ByteBuf decodedMsgBuf = ctx.alloc().buffer(decodedMsg.length);
        decodedMsgBuf.writeBytes(decodedMsg);

        // Add the decrypted chunk to output buffer for next handler to take care of it.
        out.add(decodedMsgBuf);

    } catch (OutOfMemoryException e) {
        logger.warn("Failure allocating buffer on incoming stream due to memory limits.");
        msg.resetReaderIndex();
        outOfMemoryHandler.handle();
    } catch (IOException e) {
        logger.error("Something went wrong while unwrapping the message: {} with MaxEncodeSize: {} and "
                + "error: {}", msg, maxWrappedSize, e.getMessage());
        throw e;
    }
}

From source file:org.apache.drill.exec.rpc.SaslEncryptionHandler.java

License:Apache License

public void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws IOException {

    if (!ctx.channel().isOpen()) {
        logger.debug("In " + RpcConstants.SASL_ENCRYPTION_HANDLER + " and channel is not open. "
                + "So releasing msg memory before encryption.");
        msg.release();
        return;
    }

    try {
        // If encryption is enabled then this handler will always get ByteBuf of type Composite ByteBuf
        assert (msg instanceof CompositeByteBuf);

        final CompositeByteBuf cbb = (CompositeByteBuf) msg;
        final int numComponents = cbb.numComponents();

        // Get all the components inside the Composite ByteBuf for encryption
        for (int currentIndex = 0; currentIndex < numComponents; ++currentIndex) {
            final ByteBuf component = cbb.component(currentIndex);

            // Each component ByteBuf size should not be greater than wrapSizeLimit since ChunkCreationHandler
            // will break the RPC message into chunks of wrapSizeLimit.
            if (component.readableBytes() > wrapSizeLimit) {
                throw new RpcException(
                        String.format("Component Chunk size: %d is greater than the wrapSizeLimit: %d",
                                component.readableBytes(), wrapSizeLimit));
            }

            // Uncomment the code below if msg can contain both Direct and Heap ByteBufs. Currently Drill only supports
            // DirectByteBuf, so the condition below will always be false. If msg is always a HeapByteBuf, then also
            // remove the allocation of origMsgBuffer from the constructor.
            /*if (component.hasArray()) {
              origMsg = component.array();
            } else {
                    
            if (RpcConstants.EXTRA_DEBUGGING) {
              logger.trace("The input bytebuf is not backed by a byte array so allocating a new one");
            }*/
            final byte[] origMsg = origMsgBuffer;
            component.getBytes(component.readerIndex(), origMsg, 0, component.readableBytes());
            //}

            if (logger.isTraceEnabled()) {
                logger.trace("Trying to encrypt chunk of size:{} with wrapSizeLimit:{} and chunkMode: {}",
                        component.readableBytes(), wrapSizeLimit);
            }

            // Length to encrypt will be component length not origMsg length since that can be greater.
            final byte[] wrappedMsg = saslCodec.wrap(origMsg, 0, component.readableBytes());

            if (logger.isTraceEnabled()) {
                logger.trace("Successfully encrypted message, original size: {} Final Size: {}",
                        component.readableBytes(), wrappedMsg.length);
            }

            // Allocate the buffer (direct ByteBuf) for copying the encrypted byte array plus 4 octets for the length of
            // the encrypted message. This is preferred since, later on, if the passed buffer is not in direct memory it
            // will be copied by the channel into a temporary direct buffer which is cached per thread. The size of that
            // temporary direct buffer will be the size of the largest message sent.
            final ByteBuf encryptedBuf = ctx.alloc()
                    .buffer(wrappedMsg.length + RpcConstants.LENGTH_FIELD_LENGTH);

            // Based on SASL RFC 2222/4422 the first 4 octets should be the length of the encrypted buffer in network
            // byte order. The SASL framework provided by the JDK doesn't do that by default and leaves it up to the
            // application, whereas the Cyrus SASL implementation of sasl_encode does take care of this.
            lengthOctets.putInt(wrappedMsg.length);
            encryptedBuf.writeBytes(lengthOctets.array());

            // reset the position for re-use in next round
            lengthOctets.rewind();

            // Write the encrypted bytes inside the buffer
            encryptedBuf.writeBytes(wrappedMsg);

            // Update the msg and component reader index
            msg.skipBytes(component.readableBytes());
            component.skipBytes(component.readableBytes());

            // Add the encrypted buffer into the output to send it on wire.
            out.add(encryptedBuf);
        }
    } catch (OutOfMemoryException e) {
        logger.warn("Failure allocating buffer on incoming stream due to memory limits.");
        msg.resetReaderIndex();
        outOfMemoryHandler.handle();
    } catch (IOException e) {
        logger.error(
                "Something went wrong while wrapping the message: {} with MaxRawWrapSize: {}, ChunkMode: {} "
                        + "and error: {}",
                msg, wrapSizeLimit, e.getMessage());
        throw e;
    }
}

From source file:org.apache.drill.exec.rpc.ZeroCopyProtobufLengthDecoder.java

License:Apache License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {

    if (!ctx.channel().isOpen()) {
        if (in.readableBytes() > 0)
            logger.info("Channel is closed, discarding remaining {} byte(s) in buffer.", in.readableBytes());
        in.skipBytes(in.readableBytes());
        return;
    }

    in.markReaderIndex();
    final byte[] buf = new byte[5];
    for (int i = 0; i < buf.length; i++) {
        if (!in.isReadable()) {
            in.resetReaderIndex();
            return;
        }

        buf[i] = in.readByte();
        if (buf[i] >= 0) {

            int length = CodedInputStream.newInstance(buf, 0, i + 1).readRawVarint32();

            if (length < 0) {
                throw new CorruptedFrameException("negative length: " + length);
            }
            if (length == 0) {
                throw new CorruptedFrameException("Received a message of length 0.");
            }

            if (in.readableBytes() < length) {
                in.resetReaderIndex();
                return;
            } else {
                // need to make buffer copy, otherwise netty will try to refill this buffer if we move the readerIndex forward...
                // TODO: Can we avoid this copy?
                ByteBuf outBuf = in.copy(in.readerIndex(), length);
                in.skipBytes(length);

                if (RpcConstants.EXTRA_DEBUGGING)
                    logger.debug(String.format(
                            "ReaderIndex is %d after length header of %d bytes and frame body of length %d bytes.",
                            in.readerIndex(), i + 1, length));
                out.add(outBuf);
                return;
            }
        }
    }

    // Couldn't find the byte whose MSB is off.
    throw new CorruptedFrameException("length wider than 32-bit");

}

From source file:org.apache.hive.spark.client.rpc.KryoMessageCodec.java

License:Apache License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    if (in.readableBytes() < 4) {
        return;
    }

    in.markReaderIndex();
    int msgSize = in.readInt();
    checkSize(msgSize);

    if (in.readableBytes() < msgSize) {
        // Incomplete message in buffer.
        in.resetReaderIndex();
        return;
    }

    try {
        ByteBuffer nioBuffer = maybeDecrypt(in.nioBuffer(in.readerIndex(), msgSize));
        Input kryoIn = new Input(new ByteBufferInputStream(nioBuffer));

        Object msg = kryos.get().readClassAndObject(kryoIn);
        LOG.debug("Decoded message of type {} ({} bytes)", msg != null ? msg.getClass().getName() : msg,
                msgSize);
        out.add(msg);
    } finally {
        in.skipBytes(msgSize);
    }
}