Example usage for io.netty.buffer ByteBuf getBytes

List of usage examples for io.netty.buffer ByteBuf getBytes

Introduction

On this page you can find example usage for io.netty.buffer ByteBuf getBytes.

Prototype

public abstract ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length);

Document

Transfers this buffer's data to the specified destination starting at the specified absolute index. This method does not modify the readerIndex or writerIndex of this buffer.
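
A minimal standalone sketch (not taken from the projects listed below) showing the typical pattern: copy the readable bytes into an array with the absolute-index getBytes, leaving the reader index untouched.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class GetBytesSketch {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.copiedBuffer(new byte[] { 1, 2, 3, 4 });

        // getBytes() works on an absolute index, so readerIndex is not advanced.
        byte[] dst = new byte[buf.readableBytes()];
        buf.getBytes(buf.readerIndex(), dst, 0, dst.length);

        System.out.println("copied " + dst.length + " bytes, readerIndex=" + buf.readerIndex());
        buf.release();
    }
}

Unlike readBytes, getBytes does not move the reader index, which is why several of the handlers below can peek at data (for example a length prefix) before deciding whether to consume it.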

Usage

From source file:org.apache.bookkeeper.proto.checksum.DirectMemoryCRC32Digest.java

License:Apache License

@Override
public void update(ByteBuf buf) {
    int index = buf.readerIndex();
    int length = buf.readableBytes();

    try {
        if (buf.hasMemoryAddress()) {
            // Calculate CRC directly from the direct memory pointer
            crcValue = (int) updateByteBuffer.invoke(null, crcValue, buf.memoryAddress(), index, length);
        } else if (buf.hasArray()) {
            // Use the internal method to update from array based
            crcValue = (int) updateBytes.invoke(null, crcValue, buf.array(), buf.arrayOffset() + index, length);
        } else {
            // Fallback to data copy if buffer is not contiguous
            byte[] b = new byte[length];
            buf.getBytes(index, b, 0, length);
            crcValue = (int) updateBytes.invoke(null, crcValue, b, 0, b.length);
        }
    } catch (IllegalAccessException | InvocationTargetException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.bookkeeper.util.AvailabilityOfEntriesOfLedger.java

License:Apache License

public AvailabilityOfEntriesOfLedger(ByteBuf byteBuf) {
    byte[] header = new byte[HEADER_SIZE];
    byte[] serializedSequenceGroupByteArray = new byte[SequenceGroup.SEQUENCEGROUP_BYTES];
    int readerIndex = byteBuf.readerIndex();
    byteBuf.getBytes(readerIndex, header, 0, HEADER_SIZE);

    ByteBuffer headerByteBuf = ByteBuffer.wrap(header);
    int headerVersion = headerByteBuf.getInt();
    if (headerVersion > CURRENT_HEADER_VERSION) {
        throw new IllegalArgumentException("Unsupported Header Version: " + headerVersion);
    }
    int numOfSequenceGroups = headerByteBuf.getInt();
    SequenceGroup newSequenceGroup;
    for (int i = 0; i < numOfSequenceGroups; i++) {
        Arrays.fill(serializedSequenceGroupByteArray, (byte) 0);
        byteBuf.getBytes(readerIndex + HEADER_SIZE + (i * SequenceGroup.SEQUENCEGROUP_BYTES),
                serializedSequenceGroupByteArray, 0, SequenceGroup.SEQUENCEGROUP_BYTES);
        newSequenceGroup = new SequenceGroup(serializedSequenceGroupByteArray);
        sortedSequenceGroups.put(newSequenceGroup.getFirstSequenceStart(), newSequenceGroup);
    }
    setAvailabilityOfEntriesOfLedgerClosed();
}

From source file:org.apache.bookkeeper.util.ByteBufList.java

License:Apache License

/**
 * Write bytes from the current {@link ByteBufList} into a byte array.
 *
 * <p>This won't modify the reader index of the internal buffers.
 *
 * @param dst
 *            the destination byte array
 * @return the number of copied bytes
 */
public int getBytes(byte[] dst) {
    int copied = 0;
    for (int idx = 0; idx < buffers.size() && copied < dst.length; idx++) {
        ByteBuf b = buffers.get(idx);
        int len = Math.min(b.readableBytes(), dst.length - copied);
        b.getBytes(b.readerIndex(), dst, copied, len);

        copied += len;
    }

    return copied;
}

From source file:org.apache.drill.exec.rpc.SaslDecryptionHandler.java

License:Apache License

public void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws IOException {

    if (!ctx.channel().isOpen()) {
        logger.trace("Channel closed before decoding the message of {} bytes", msg.readableBytes());
        msg.skipBytes(msg.readableBytes());
        return;
    }

    try {
        if (logger.isTraceEnabled()) {
            logger.trace("Trying to decrypt the encrypted message of size: {} with maxWrappedSize",
                    msg.readableBytes());
        }

        // Each encrypted block is prefixed with its length in network byte order (BigEndian format). Netty's
        // default Byte order of ByteBuf is Little Endian, so we cannot just do msg.getInt() as that will read the 4
        // octets in little endian format.
        //
        // We will read the length of one complete encrypted chunk and decode that.
        msg.getBytes(msg.readerIndex(), lengthOctets.array(), 0, RpcConstants.LENGTH_FIELD_LENGTH);
        final int wrappedMsgLength = lengthOctets.getInt(0);
        msg.skipBytes(RpcConstants.LENGTH_FIELD_LENGTH);

        // Since lengthBasedFrameDecoder will ensure we have enough bytes it's good to have this check here.
        assert (msg.readableBytes() == wrappedMsgLength);

        // Uncomment the below code if msg can contain both of Direct and Heap ByteBuf. Currently Drill only supports
        // DirectByteBuf so the below condition will always be false. If the msg are always HeapByteBuf then in
        // addition also remove the allocation of encodedMsg from constructor.
        /*if (msg.hasArray()) {
          wrappedMsg = msg.array();
        } else {
        if (RpcConstants.EXTRA_DEBUGGING) {
          logger.debug("The input bytebuf is not backed by a byte array so allocating a new one");
        }*/

        // Check if the wrappedMsgLength doesn't exceed agreed upon maxWrappedSize. As per SASL RFC 2222/4422 we
        // should close the connection since it represents a security attack.
        if (wrappedMsgLength > maxWrappedSize) {
            throw new RpcException(String.format(
                    "Received encoded buffer size: %d is larger than negotiated "
                            + "maxWrappedSize: %d. Closing the connection as this is unexpected.",
                    wrappedMsgLength, maxWrappedSize));
        }

        final byte[] wrappedMsg = encodedMsg;
        // Copy the wrappedMsgLength of bytes into the byte array
        msg.getBytes(msg.readerIndex(), wrappedMsg, 0, wrappedMsgLength);
        //}

        // The SASL library always copies the origMsg internally to a new byte array
        // and returns another new byte array after decrypting the message. The memory for this
        // will be garbage collected by the JVM since the SASL library releases its reference after
        // returning the byte array.
        final byte[] decodedMsg = saslCodec.unwrap(wrappedMsg, 0, wrappedMsgLength);

        if (logger.isTraceEnabled()) {
            logger.trace("Successfully decrypted incoming message. Length after decryption: {}",
                    decodedMsg.length);
        }

        // Update the msg reader index since we have decrypted this chunk
        msg.skipBytes(wrappedMsgLength);

        // Allocate a new Bytebuf to copy the decrypted chunk.
        final ByteBuf decodedMsgBuf = ctx.alloc().buffer(decodedMsg.length);
        decodedMsgBuf.writeBytes(decodedMsg);

        // Add the decrypted chunk to output buffer for next handler to take care of it.
        out.add(decodedMsgBuf);

    } catch (OutOfMemoryException e) {
        logger.warn("Failure allocating buffer on incoming stream due to memory limits.");
        msg.resetReaderIndex();
        outOfMemoryHandler.handle();
    } catch (IOException e) {
        logger.error("Something went wrong while unwrapping the message: {} with MaxEncodeSize: {} and "
                + "error: {}", msg, maxWrappedSize, e.getMessage());
        throw e;
    }
}
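
The length-prefix handling above boils down to a few calls. Here is an illustrative helper (not part of the Drill sources) that peeks at a 4-octet, network-byte-order length with getBytes and then consumes it with skipBytes.

import java.nio.ByteBuffer;

import io.netty.buffer.ByteBuf;

final class LengthPrefixSketch {
    /** Reads a 4-octet, big-endian length prefix and consumes it from the buffer. */
    static int readLengthPrefix(ByteBuf msg) {
        byte[] lengthOctets = new byte[4];
        // Peek with an absolute-index getBytes(); the reader index is unchanged here.
        msg.getBytes(msg.readerIndex(), lengthOctets, 0, 4);
        // ByteBuffer.getInt() reads big-endian (network byte order) by default.
        int frameLength = ByteBuffer.wrap(lengthOctets).getInt();
        // Now explicitly consume the four octets that were peeked at.
        msg.skipBytes(4);
        return frameLength;
    }
}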

From source file:org.apache.drill.exec.rpc.SaslEncryptionHandler.java

License:Apache License

public void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws IOException {

    if (!ctx.channel().isOpen()) {
        logger.debug("In " + RpcConstants.SASL_ENCRYPTION_HANDLER + " and channel is not open. "
                + "So releasing msg memory before encryption.");
        msg.release();
        return;
    }

    try {
        // If encryption is enabled then this handler will always get ByteBuf of type Composite ByteBuf
        assert (msg instanceof CompositeByteBuf);

        final CompositeByteBuf cbb = (CompositeByteBuf) msg;
        final int numComponents = cbb.numComponents();

        // Get all the components inside the Composite ByteBuf for encryption
        for (int currentIndex = 0; currentIndex < numComponents; ++currentIndex) {
            final ByteBuf component = cbb.component(currentIndex);

            // Each component ByteBuf size should not be greater than wrapSizeLimit since ChunkCreationHandler
            // will break the RPC message into chunks of wrapSizeLimit.
            if (component.readableBytes() > wrapSizeLimit) {
                throw new RpcException(
                        String.format("Component Chunk size: %d is greater than the wrapSizeLimit: %d",
                                component.readableBytes(), wrapSizeLimit));
            }

            // Uncomment the below code if msg can contain both of Direct and Heap ByteBuf. Currently Drill only supports
            // DirectByteBuf so the below condition will always be false. If the msg are always HeapByteBuf then in
            // addition also remove the allocation of origMsgBuffer from constructor.
            /*if (component.hasArray()) {
              origMsg = component.array();
            } else {
                    
            if (RpcConstants.EXTRA_DEBUGGING) {
              logger.trace("The input bytebuf is not backed by a byte array so allocating a new one");
            }*/
            final byte[] origMsg = origMsgBuffer;
            component.getBytes(component.readerIndex(), origMsg, 0, component.readableBytes());
            //}

            if (logger.isTraceEnabled()) {
                logger.trace("Trying to encrypt chunk of size:{} with wrapSizeLimit:{} and chunkMode: {}",
                        component.readableBytes(), wrapSizeLimit);
            }

            // Length to encrypt will be component length not origMsg length since that can be greater.
            final byte[] wrappedMsg = saslCodec.wrap(origMsg, 0, component.readableBytes());

            if (logger.isTraceEnabled()) {
                logger.trace("Successfully encrypted message, original size: {} Final Size: {}",
                        component.readableBytes(), wrappedMsg.length);
            }

            // Allocate the buffer (directByteBuff) for copying the encrypted byte array and 4 octets for length of the
            // encrypted message. This is preferred since later on if the passed buffer is not in direct memory then it
            // will be copied by the channel into a temporary direct memory which will be cached to the thread. The size
            // of that temporary direct memory will be size of largest message send.
            final ByteBuf encryptedBuf = ctx.alloc()
                    .buffer(wrappedMsg.length + RpcConstants.LENGTH_FIELD_LENGTH);

            // Based on SASL RFC 2222/4422 we should have starting 4 octet as the length of the encrypted buffer in network
            // byte order. The SASL framework provided by the JDK doesn't do that by default and leaves it up to the application, whereas the
            // Cyrus SASL implementation of sasl_encode does take care of this.
            lengthOctets.putInt(wrappedMsg.length);
            encryptedBuf.writeBytes(lengthOctets.array());

            // reset the position for re-use in next round
            lengthOctets.rewind();

            // Write the encrypted bytes inside the buffer
            encryptedBuf.writeBytes(wrappedMsg);

            // Update the msg and component reader index
            msg.skipBytes(component.readableBytes());
            component.skipBytes(component.readableBytes());

            // Add the encrypted buffer into the output to send it on wire.
            out.add(encryptedBuf);
        }
    } catch (OutOfMemoryException e) {
        logger.warn("Failure allocating buffer on incoming stream due to memory limits.");
        msg.resetReaderIndex();
        outOfMemoryHandler.handle();
    } catch (IOException e) {
        logger.error(
                "Something went wrong while wrapping the message: {} with MaxRawWrapSize: {}, ChunkMode: {} "
                        + "and error: {}",
                msg, wrapSizeLimit, e.getMessage());
        throw e;
    }
}

From source file:org.asynchttpclient.providers.netty.util.ByteBufUtil.java

License:Apache License

public static byte[] byteBufs2Bytes(List<ByteBuf> bufs) {

    if (bufs.isEmpty()) {
        return EMPTY_BYTE_ARRAY;

    } else if (bufs.size() == 1) {
        return byteBuf2Bytes(bufs.get(0));

    } else {
        int totalSize = 0;
        for (ByteBuf buf : bufs) {
            totalSize += buf.readableBytes();
        }

        byte[] bytes = new byte[totalSize];
        int offset = 0;
        for (ByteBuf buf : bufs) {
            int readable = buf.readableBytes();
            buf.getBytes(buf.readerIndex(), bytes, offset, readable);
            offset += readable;
        }
        return bytes;
    }
}

From source file:org.fiware.kiara.netty.Buffers.java

License:Open Source License

public static ByteBuffer toByteBuffer(ByteBuf msg) {
    final byte[] array;
    final int offset;
    final int length = msg.readableBytes();
    if (msg.hasArray()) {
        array = msg.array();
        offset = msg.arrayOffset() + msg.readerIndex();
    } else {
        array = new byte[length];
        msg.getBytes(msg.readerIndex(), array, 0, length);
        offset = 0;
    }

    return ByteBuffer.wrap(array, offset, length);
}

From source file:org.opendaylight.netconf.nettyutil.handler.NetconfXMLToMessageDecoder.java

License:Open Source License

@Override
public void decode(final ChannelHandlerContext ctx, final ByteBuf in, final List<Object> out)
        throws IOException, SAXException {
    if (in.isReadable()) {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Received to decode: {}", ByteBufUtil.hexDump(in));
        }

        /* According to the XML 1.0 specifications, when there is an XML declaration
         * at the beginning of an XML document, it is invalid to have
         * white spaces before that declaration (reminder: an XML declaration looks like:
         * <?xml version="1.0" encoding="UTF-8"?>). In contrast, when there is no XML declaration,
         * it is valid to have white spaces at the beginning of the document.
         *
         * When they send a NETCONF message, several NETCONF servers start with a new line (either
         * LF or CRLF), presumably to improve readability in interactive sessions with a human being.
         * Some NETCONF servers send an XML declaration, some others do not.
         *
         * If a server starts a NETCONF message with white spaces and follows with an XML
         * declaration, XmlUtil.readXmlToDocument() will fail because this is invalid XML.
         * But in the spirit of the "NETCONF over SSH" RFC 4742 and to improve interoperability, we want
         * to accept those messages.
         *
         * To do this, the following code strips the leading bytes before the start of the XML messages.
         */

        // Skip all leading whitespaces by moving the reader index to the first non whitespace character
        while (in.isReadable()) {
            if (!isWhitespace(in.readByte())) {
                // return reader index to the first non whitespace character
                in.readerIndex(in.readerIndex() - 1);
                break;
            }
        }

        // Warn about leading whitespaces
        if (in.readerIndex() != 0 && LOG.isWarnEnabled()) {
            final byte[] strippedBytes = new byte[in.readerIndex()];
            in.getBytes(0, strippedBytes, 0, in.readerIndex());
            LOG.warn("XML message with unwanted leading bytes detected. Discarded the {} leading byte(s): '{}'",
                    in.readerIndex(), ByteBufUtil.hexDump(Unpooled.wrappedBuffer(strippedBytes)));
        }
    }
    if (in.isReadable()) {
        out.add(new NetconfMessage(XmlUtil.readXmlToDocument(new ByteBufInputStream(in))));
    } else {
        LOG.debug("No more content in incoming buffer.");
    }
}

From source file:org.rzo.netty.mcast.MulticastEndpoint.java

License:Apache License

public boolean checkMessage(ByteBuf e) {
    byte[] eId = new byte[id.length];
    e.getBytes(0, eId, 0, eId.length);
    return (!Arrays.equals(id, eId));
}

From source file:org.spout.api.protocol.builder.GenericMessage.java

License:Open Source License

@Override
@SuppressWarnings("unchecked")
public T decode(ByteBuf b) throws IOException {
    CompoundMessageField root = Spout.getPlatform() == Platform.CLIENT ? getToClientFieldRoot()
            : getToServerFieldRoot();
    int start = b.readerIndex();
    int fieldCount = root.getSubFieldCount();
    int[] indexArray = new int[fieldCount];
    int length = root.skip(b, indexArray);
    this.buffer = Unpooled.buffer(length);
    b.getBytes(start, this.buffer, 0, length);
    return (T) this;
}