Example usage for io.netty.buffer ByteBuf readableBytes

List of usage examples for io.netty.buffer ByteBuf readableBytes

Introduction

On this page you can find example usage for io.netty.buffer ByteBuf readableBytes.

Prototype

public abstract int readableBytes();

Document

Returns the number of readable bytes, which is equal to (this.writerIndex - this.readerIndex).
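
As a quick, self-contained illustration of that relationship (a minimal sketch, not taken from any of the projects below), the following snippet writes a few bytes into an Unpooled buffer and checks readableBytes() against the reader and writer indices:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ReadableBytesDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeBytes(new byte[] { 1, 2, 3, 4 });

        // readableBytes() == writerIndex - readerIndex == 4 - 0
        System.out.println(buf.readableBytes()); // 4

        // Reading one byte advances readerIndex, leaving 3 readable bytes.
        buf.readByte();
        System.out.println(buf.readableBytes()); // 3

        // Always release reference-counted buffers when done.
        buf.release();
    }
}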

Usage

From source file:com.couchbase.client.core.message.kv.subdoc.simple.AbstractSubdocRequest.java

License:Apache License

protected ByteBuf createContent(ByteBuf pathByteBuf, ByteBuf... restOfContent) {
    if (restOfContent == null || restOfContent.length == 0) {
        return pathByteBuf;
    } else {
        CompositeByteBuf composite = Unpooled.compositeBuffer(1 + restOfContent.length);
        // addComponent does not advance the writer index, so move it forward manually
        // by the readable bytes of each component to make the data visible to readers.
        composite.addComponent(pathByteBuf);
        composite.writerIndex(composite.writerIndex() + pathByteBuf.readableBytes());

        for (ByteBuf component : restOfContent) {
            composite.addComponent(component);
            composite.writerIndex(composite.writerIndex() + component.readableBytes());
        }

        return composite;
    }
}

From source file:com.couchbase.client.io.QueryEncoder.java

License:Open Source License

@Override
protected void encode(ChannelHandlerContext ctx, QueryEvent ev, List<Object> out) throws Exception {
    ByteBuf queryBuf = Unpooled.copiedBuffer(ev.getQuery(), CharsetUtil.UTF_8);
    HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/query", queryBuf);

    request.headers().set("Content-Length", queryBuf.readableBytes());
    request.headers().set("Content-Type", "text/plain");

    out.add(request);
    queue.add(ev);
}

From source file:com.datastax.driver.core.CBUtil.java

License:Apache License

public static byte[] readRawBytes(ByteBuf cb) {
    if (cb.hasArray() && cb.readableBytes() == cb.array().length) {
        // Move the readerIndex just so we consistently consume the input
        cb.readerIndex(cb.writerIndex());
        return cb.array();
    }

    // Otherwise, just read the bytes in a new array
    byte[] bytes = new byte[cb.readableBytes()];
    cb.readBytes(bytes);
    return bytes;
}

From source file:com.datastax.driver.core.Frame.java

License:Apache License

private static Frame create(ByteBuf fullFrame) {
    assert fullFrame.readableBytes() >= 1 : String.format("Frame too short (%d bytes)",
            fullFrame.readableBytes());

    int versionBytes = fullFrame.readByte();
    // The top bit of the version byte is the "direction" of the frame (request or response),
    // so mask it off before resolving the protocol version.
    ProtocolVersion version = ProtocolVersion.fromInt(versionBytes & 0x7F);
    int hdrLen = Header.lengthFor(version);
    assert fullFrame.readableBytes() >= (hdrLen - 1) : String.format("Frame too short (%d bytes)",
            fullFrame.readableBytes());

    int flags = fullFrame.readByte();
    int streamId = readStreamId(fullFrame, version);
    int opcode = fullFrame.readByte();
    int length = fullFrame.readInt();
    assert length == fullFrame.readableBytes();

    Header header = new Header(version, flags, streamId, opcode);
    return new Frame(header, fullFrame);
}

From source file:com.datastax.driver.core.FrameCompressor.java

License:Apache License

protected static ByteBuffer inputNioBuffer(ByteBuf buf) {
    // Using internalNioBuffer(...) as we only hold the reference in this method and so can
    // reduce Object allocations.
    int index = buf.readerIndex();
    int len = buf.readableBytes();
    return buf.nioBufferCount() == 1 ? buf.internalNioBuffer(index, len) : buf.nioBuffer(index, len);
}

From source file:com.datastax.driver.core.LZ4Compressor.java

License:Apache License

private ByteBuf compressDirect(ByteBuf input) throws IOException {
    int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes());
    // If the input is direct, allocate a direct output buffer as well; this lets us use
    // LZ4Compressor.compress and avoid memory copies.
    ByteBuf output = input.alloc().directBuffer(INTEGER_BYTES + maxCompressedLength);
    try {
        ByteBuffer in = inputNioBuffer(input);
        // Increase reader index.
        input.readerIndex(input.writerIndex());

        output.writeInt(in.remaining());

        ByteBuffer out = outputNioBuffer(output);
        int written = compressor.compress(in, in.position(), in.remaining(), out, out.position(),
                out.remaining());
        // Advance the writer index so the written bytes become readable.
        output.writerIndex(output.writerIndex() + written);
    } catch (Exception e) {
        // Release the output buffer so it does not leak, then rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}

From source file:com.datastax.driver.core.LZ4Compressor.java

License:Apache License

private ByteBuf compressHeap(ByteBuf input) throws IOException {
    int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes());

    // Not a direct buffer so use byte arrays...
    int inOffset = input.arrayOffset() + input.readerIndex();
    byte[] in = input.array();
    int len = input.readableBytes();
    // Increase reader index.
    input.readerIndex(input.writerIndex());

    // Allocate a heap buffer from the ByteBufAllocator; with a PooledByteBufAllocator this
    // avoids the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(INTEGER_BYTES + maxCompressedLength);
    try {
        output.writeInt(len);
        // calculate the correct offset.
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int written = compressor.compress(in, inOffset, len, out, offset);

        // Advance the writer index so the written bytes become readable.
        output.writerIndex(output.writerIndex() + written);
    } catch (Exception e) {
        // Release the output buffer so it does not leak, then rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}

From source file:com.datastax.driver.core.LZ4Compressor.java

License:Apache License

private ByteBuf decompressDirect(ByteBuf input) throws IOException {
    // If the input is direct, allocate a direct output buffer as well; this lets us use
    // the ByteBuffer-based decompressor.decompress and avoid memory copies.
    int readable = input.readableBytes();
    int uncompressedLength = input.readInt();
    ByteBuffer in = inputNioBuffer(input);
    // Increase reader index.
    input.readerIndex(input.writerIndex());
    ByteBuf output = input.alloc().directBuffer(uncompressedLength);
    try {
        ByteBuffer out = outputNioBuffer(output);
        int read = decompressor.decompress(in, in.position(), out, out.position(), out.remaining());
        if (read != readable - INTEGER_BYTES)
            throw new IOException("Compressed lengths mismatch");

        // Advance the writer index so the written bytes become readable.
        output.writerIndex(output.writerIndex() + uncompressedLength);
    } catch (Exception e) {
        // Release the output buffer so it does not leak, then rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}

From source file:com.datastax.driver.core.LZ4Compressor.java

License:Apache License

private ByteBuf decompressHeap(ByteBuf input) throws IOException {
    // Not a direct buffer so use byte arrays...
    byte[] in = input.array();
    int len = input.readableBytes();
    int uncompressedLength = input.readInt();
    int inOffset = input.arrayOffset() + input.readerIndex();
    // Increase reader index.
    input.readerIndex(input.writerIndex());

    // Allocate a heap buffer from the ByteBufAllocator; with a PooledByteBufAllocator this
    // avoids the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(uncompressedLength);
    try {
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int read = decompressor.decompress(in, inOffset, out, offset, uncompressedLength);
        if (read != len - INTEGER_BYTES)
            throw new IOException("Compressed lengths mismatch");

        // Advance the writer index so the written bytes become readable.
        output.writerIndex(output.writerIndex() + uncompressedLength);
    } catch (Exception e) {
        // Release the output buffer so it does not leak, then rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}

From source file:com.datastax.driver.core.SnappyCompressor.java

License:Apache License

private ByteBuf compressDirect(ByteBuf input) throws IOException {
    int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes());
    // If the input is direct, allocate a direct output buffer as well; this lets us use
    // Snappy.compress(ByteBuffer, ByteBuffer) and avoid memory copies.
    ByteBuf output = input.alloc().directBuffer(maxCompressedLength);
    try {
        ByteBuffer in = inputNioBuffer(input);
        // Increase reader index.
        input.readerIndex(input.writerIndex());

        ByteBuffer out = outputNioBuffer(output);
        int written = Snappy.compress(in, out);
        // Advance the writer index so the written bytes become readable.
        output.writerIndex(output.writerIndex() + written);
    } catch (IOException e) {
        // Release the output buffer so it does not leak, then rethrow.
        output.release();
        throw e;
    }
    return output;
}