Example usage for io.netty.buffer ByteBuf arrayOffset

List of usage examples for io.netty.buffer ByteBuf arrayOffset

Introduction

On this page you can find example usages of io.netty.buffer ByteBuf arrayOffset.

Prototype

public abstract int arrayOffset();

Document

Returns the offset of the first byte within the backing byte array of this buffer.
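
All of the examples below guard arrayOffset() in the same way, because a ByteBuf only has an accessible backing array when hasArray() returns true. A minimal sketch of that pattern (the helper name readableArray is illustrative, not taken from any of the sources):

private static byte[] readableArray(ByteBuf buf) {
    if (buf.hasArray()) {
        // Heap buffer: the readable region starts at arrayOffset() + readerIndex()
        // within the backing array.
        int offset = buf.arrayOffset() + buf.readerIndex();
        return Arrays.copyOfRange(buf.array(), offset, offset + buf.readableBytes());
    }
    // Direct buffer: no backing array, so copy the readable bytes out instead.
    byte[] copy = new byte[buf.readableBytes()];
    buf.getBytes(buf.readerIndex(), copy, 0, copy.length);
    return copy;
}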

Usage

From source file: at.yawk.accordion.compression.SnappyCompressor.java

License: Mozilla Public License

private static byte[] toArray(ByteBuf raw) {
    return Arrays.copyOfRange(raw.array(), raw.arrayOffset() + raw.readerIndex(),
            raw.arrayOffset() + raw.readerIndex() + raw.readableBytes());
}
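
Note that raw.array() throws an UnsupportedOperationException when the buffer is direct, so this helper is safe only when raw.hasArray() is true; the guarded sketch above handles both cases.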

From source file: com.beeswax.hexbid.parser.BidProtobufParser.java

License: Apache License

/**
 * Parses a serialized protocol buffer ByteBuf into a protobuf message,
 * following the implementation of {@link ProtobufDecoder}.
 *
 * @param bytebuf        buffer containing the serialized message
 * @param messageBuilder builder used to merge the message bytes
 * @return protocol buffer message
 * @throws InvalidProtocolBufferException if the bytes cannot be parsed
 */
public static <T extends Message.Builder> Message parseProtoBytebuf(ByteBuf bytebuf, T messageBuilder)
        throws InvalidProtocolBufferException {
    final byte[] array;
    final int offset;
    final int length = bytebuf.readableBytes();
    if (bytebuf.hasArray()) {
        array = bytebuf.array();
        offset = bytebuf.arrayOffset() + bytebuf.readerIndex();
    } else {
        array = new byte[length];
        bytebuf.getBytes(bytebuf.readerIndex(), array, 0, length);
        offset = 0;
    }
    return messageBuilder.mergeFrom(array, offset, length).buildPartial();
}
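
A hypothetical call site (Bid is an illustrative generated message type, not taken from this source):

Bid bid = (Bid) BidProtobufParser.parseProtoBytebuf(payload, Bid.newBuilder());

Because the method ends with buildPartial() rather than build(), a message with missing required fields is returned as-is instead of failing with an UninitializedMessageException.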

From source file: com.chat.common.netty.handler.decode.ProtobufDecoder.java

License: Apache License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
    final byte[] array;
    final int offset;
    final int length = msg.readableBytes();
    if (msg.hasArray()) {
        array = msg.array();
        offset = msg.arrayOffset() + msg.readerIndex();
    } else {
        array = new byte[length];
        msg.getBytes(msg.readerIndex(), array, 0, length);
        offset = 0;
    }

    if (extensionRegistry == null) {
        if (HAS_PARSER) {
            out.add(prototype.getParserForType().parseFrom(array, offset, length));
        } else {
            out.add(prototype.newBuilderForType().mergeFrom(array, offset, length).build());
        }
    } else {
        if (HAS_PARSER) {
            out.add(prototype.getParserForType().parseFrom(array, offset, length, extensionRegistry));
        } else {
            out.add(prototype.newBuilderForType().mergeFrom(array, offset, length, extensionRegistry).build());
        }
    }
}
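
This decoder mirrors Netty's own io.netty.handler.codec.protobuf.ProtobufDecoder, whose HAS_PARSER flag records whether the installed protobuf runtime supports the faster Parser API. A sketch of how such a decoder is typically wired into a pipeline (MyMessage is a hypothetical generated type):

// Split the stream into length-prefixed frames, then decode each frame.
pipeline.addLast(new ProtobufVarint32FrameDecoder());
pipeline.addLast(new ProtobufDecoder(MyMessage.getDefaultInstance()));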

From source file: com.datastax.driver.core.LZ4Compressor.java

License: Apache License

private ByteBuf compressHeap(ByteBuf input) throws IOException {
    int maxCompressedLength = compressor.maxCompressedLength(input.readableBytes());

    // Not a direct buffer so use byte arrays...
    int inOffset = input.arrayOffset() + input.readerIndex();
    byte[] in = input.array();
    int len = input.readableBytes();
    // Increase reader index.
    input.readerIndex(input.writerIndex());

    // Allocate a heap buffer from the ByteBufAllocator; with a PooledByteBufAllocator
    // this avoids the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(INTEGER_BYTES + maxCompressedLength);
    try {
        output.writeInt(len);
        // Calculate the correct offset into the backing array.
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int written = compressor.compress(in, inOffset, len, out, offset);

        // Advance the writer index to reflect the bytes written.
        output.writerIndex(output.writerIndex() + written);
    } catch (Exception e) {
        // Release the output buffer so we do not leak, then rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}

From source file: com.datastax.driver.core.LZ4Compressor.java

License: Apache License

private ByteBuf decompressHeap(ByteBuf input) throws IOException {
    // Not a direct buffer so use byte arrays...
    byte[] in = input.array();
    int len = input.readableBytes();
    int uncompressedLength = input.readInt();
    int inOffset = input.arrayOffset() + input.readerIndex();
    // Increase reader index.
    input.readerIndex(input.writerIndex());

    // Allocate a heap buffer from the ByteBufAllocator; with a PooledByteBufAllocator
    // this avoids the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(uncompressedLength);
    try {
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int read = decompressor.decompress(in, inOffset, out, offset, uncompressedLength);
        if (read != len - INTEGER_BYTES)
            throw new IOException("Compressed lengths mismatch");

        // Advance the writer index to reflect the bytes written.
        output.writerIndex(output.writerIndex() + uncompressedLength);
    } catch (Exception e) {
        // Release the output buffer so we do not leak, then rethrow.
        output.release();
        throw new IOException(e);
    }
    return output;
}
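
The two methods form a pair: compressHeap() prefixes the compressed block with the uncompressed length via writeInt(len), and decompressHeap() reads it back with readInt() to size the output buffer, since the LZ4 block format itself does not record the uncompressed length.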

From source file: com.datastax.driver.core.SnappyCompressor.java

License: Apache License

private ByteBuf compressHeap(ByteBuf input) throws IOException {
    int maxCompressedLength = Snappy.maxCompressedLength(input.readableBytes());
    int inOffset = input.arrayOffset() + input.readerIndex();
    byte[] in = input.array();
    int len = input.readableBytes();
    // Increase reader index.
    input.readerIndex(input.writerIndex());

    // Allocate a heap buffer from the ByteBufAllocator; with a PooledByteBufAllocator
    // this avoids the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(maxCompressedLength);
    try {
        // Calculate the correct offset.
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int written = Snappy.compress(in, inOffset, len, out, offset);

        // Increase the writerIndex with the written bytes.
        output.writerIndex(output.writerIndex() + written);
    } catch (IOException e) {
        // Release the output buffer so we do not leak, then rethrow.
        output.release();
        throw e;
    }
    return output;
}

From source file: com.datastax.driver.core.SnappyCompressor.java

License: Apache License

private ByteBuf decompressHeap(ByteBuf input) throws IOException {
    // Not a direct buffer so use byte arrays...
    int inOffset = input.arrayOffset() + input.readerIndex();
    byte[] in = input.array();
    int len = input.readableBytes();
    // Increase reader index.
    input.readerIndex(input.writerIndex());

    if (!Snappy.isValidCompressedBuffer(in, inOffset, len))
        throw new DriverInternalError("Provided frame does not appear to be Snappy compressed");

    // Allocate a heap buffer from the ByteBufAllocator; with a PooledByteBufAllocator
    // this avoids the overhead of allocating a new byte[].
    ByteBuf output = input.alloc().heapBuffer(Snappy.uncompressedLength(in, inOffset, len));
    try {
        // Calculate the correct offset.
        int offset = output.arrayOffset() + output.writerIndex();
        byte[] out = output.array();
        int written = Snappy.uncompress(in, inOffset, len, out, offset);

        // Increase the writerIndex with the written bytes.
        output.writerIndex(output.writerIndex() + written);
    } catch (IOException e) {
        // Release the output buffer so we do not leak, then rethrow.
        output.release();
        throw e;
    }
    return output;
}
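
Unlike the LZ4 pair above, no hand-written length prefix is needed: the Snappy format embeds the uncompressed size, which Snappy.uncompressedLength(in, inOffset, len) recovers when sizing the output buffer.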

From source file: com.linecorp.armeria.server.thrift.TByteBufTransport.java

License: Apache License

@Override
public int getBufferPosition() {
    final ByteBuf buf = this.buf;
    if (!buf.hasArray()) {
        return 0;
    } else {
        return buf.arrayOffset() + buf.readerIndex();
    }
}

From source file: com.minetats.mw.NamedPipe.java

License: Apache License

@Override
public ByteBuf readChunk(ByteBufAllocator bba) throws Exception {
    chunks++;
    ByteBuf buf = bba.heapBuffer(chunkSize);
    boolean release = true;
    try {
        int read;
        // Fill the chunk, writing straight into the backing array at the
        // current writer index (offset by arrayOffset() for pooled buffers).
        while (buf.writerIndex() < chunkSize
                && (read = file.read(buf.array(), buf.arrayOffset() + buf.writerIndex(),
                        chunkSize - buf.writerIndex())) > 0) {
            buf.writerIndex(buf.writerIndex() + read);
        }
        // Two trailing newlines mark the end of input.
        int index = buf.writerIndex() - 1;
        if (index > 0 && buf.getByte(index) == '\n' && buf.getByte(index - 1) == '\n') {
            endOfInput = true;
            System.out.println("endOfInput=" + endOfInput + ", read " + chunks + " chunks");
        }
        release = false;
        return buf;
    } finally {
        if (release) {
            buf.release();
        }
    }
}

From source file: com.necla.simba.server.gateway.server.frontend.FrontendFrameDecoder.java

License: Apache License

private ByteBuf decompress(ChannelHandlerContext ctx, ByteBuf frame) throws Exception {
    int readableBytes = frame.readableBytes();
    if (frame.hasArray()) {
        // Include arrayOffset(): pooled heap buffers may start partway into a shared array.
        inflater.setInput(frame.array(), frame.arrayOffset() + frame.readerIndex(), readableBytes);
    } else {
        byte[] array = new byte[readableBytes];
        frame.getBytes(frame.readerIndex(), array, 0, readableBytes);
        inflater.setInput(array);
    }
    int totalLength = 0;
    List<ByteBuf> all = new LinkedList<ByteBuf>();
    int multiplier = 2;
    alldone: while (true) {

        int maxOutputLength = inflater.getRemaining() * multiplier;
        // The multiplier grows on every pass, so each retry allocates a larger
        // buffer: the more heavily compressed the data turns out to be, the
        // larger the output buffers become.
        multiplier += 1;
        ByteBuf decompressed = ctx.alloc().heapBuffer(maxOutputLength);
        while (!inflater.needsInput()) {
            byte[] outArray = decompressed.array();
            int outIndex = decompressed.arrayOffset() + decompressed.writerIndex();
            int length = outArray.length - outIndex;
            if (length == 0)
                break;
            try {
                int outputLength = inflater.inflate(outArray, outIndex, length);
                totalLength += outputLength;

                if (outputLength > 0)
                    decompressed.writerIndex(decompressed.writerIndex() + outputLength);
            } catch (DataFormatException e) {
                throw new Exception("Could not inflate: " + e.getMessage());
            }
            if (inflater.finished()) {
                all.add(decompressed);
                break alldone;
            }

        }
        all.add(decompressed);
    }
    inflater.reset();
    if (all.size() == 1)
        return all.get(0);
    else {
        ByteBuf allData = ctx.alloc().heapBuffer(totalLength);
        for (ByteBuf b : all) {
            //LOG.debug("capacity=" + allData.capacity());
            allData.writeBytes(b);
            b.release();
        }
        return allData;
    }

}
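
The arrayOffset() term in outIndex is essential here: ctx.alloc() may be a PooledByteBufAllocator, whose heap buffers are slices of a larger shared array, so a buffer's data does not generally begin at index 0 of array().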