Example usage for io.netty.buffer ByteBuf hasArray

List of usage examples for io.netty.buffer ByteBuf hasArray

Introduction

On this page you can find usage examples for io.netty.buffer ByteBuf hasArray.

Prototype

public abstract boolean hasArray();

Document

Returns true if and only if this buffer has a backing byte array.
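Before the project examples, here is a minimal self-contained sketch (not taken from any of the sources below) of the usual hasArray() pattern: use the backing array directly when one exists, otherwise copy the readable bytes with getBytes, which leaves the readerIndex untouched.

import io.netty.buffer.ByteBuf;

public final class HasArrayExample {
    /** Returns a copy of the readable bytes without moving the readerIndex. */
    public static byte[] readableBytesOf(ByteBuf buf) {
        int length = buf.readableBytes();
        if (buf.hasArray()) {
            // Heap buffer: the readable region starts at arrayOffset() + readerIndex().
            byte[] out = new byte[length];
            System.arraycopy(buf.array(), buf.arrayOffset() + buf.readerIndex(), out, 0, length);
            return out;
        }
        // Direct (or otherwise array-less) buffer: getBytes copies without touching the indices.
        byte[] out = new byte[length];
        buf.getBytes(buf.readerIndex(), out);
        return out;
    }
}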

Usage

From source file:com.tesora.dve.db.mysql.common.MysqlAPIUtils.java

License:Open Source License

/**
 * Returns a byte[] that matches the readable content of the provided ByteBuf.  If possible, this method will
 * return a direct reference to the backing array, rather than copy into a new array.  If the readable content
 * is smaller than the backing array, or there is no backing array because the buffer is direct, the contents
 * are copied into a new byte[].  The readerIndex and writerIndex of the provided ByteBuf are not modified by this call.
 * @param cb source of bytes
 * @return direct reference to the backing byte[] data, or a copy if needed
 */
public static byte[] unwrapOrCopyReadableBytes(ByteBuf cb) {
    if (cb.hasArray() && cb.readableBytes() == cb.array().length) {
        //already a heap array, and readable length is entire array
        return cb.array();
    } else {
        byte[] copy = new byte[cb.readableBytes()];
        cb.slice().readBytes(copy);
        return copy;
    }
}
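A hypothetical usage sketch for the method above (buffer names and values are made up for illustration, assuming io.netty.buffer.Unpooled and PooledByteBufAllocator are imported):

    ByteBuf wrapped = Unpooled.wrappedBuffer(new byte[] { 1, 2, 3 });
    byte[] sameArray = MysqlAPIUtils.unwrapOrCopyReadableBytes(wrapped); // backing array returned directly

    ByteBuf direct = PooledByteBufAllocator.DEFAULT.directBuffer(16);
    direct.writeBytes(new byte[] { 1, 2, 3 });
    byte[] copied = MysqlAPIUtils.unwrapOrCopyReadableBytes(direct);     // no backing array, so a copy is made
    direct.release();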

From source file:com.yahoo.pulsar.common.compression.CompressionCodecZLib.java

License:Apache License

@Override
public ByteBuf encode(ByteBuf source) {
    byte[] array;
    int length = source.readableBytes();

    int sizeEstimate = (int) Math.ceil(source.readableBytes() * 1.001) + 14;
    ByteBuf compressed = PooledByteBufAllocator.DEFAULT.heapBuffer(sizeEstimate);

    int offset = 0;
    if (source.hasArray()) {
        array = source.array();
        offset = source.arrayOffset() + source.readerIndex();
    } else {
        // If it's a direct buffer, we need to copy it
        array = new byte[length];
        source.getBytes(source.readerIndex(), array);
    }

    synchronized (deflater) {
        deflater.setInput(array, offset, length);
        while (!deflater.needsInput()) {
            deflate(compressed);
        }

        deflater.reset();
    }

    return compressed;
}

From source file:com.yahoo.pulsar.common.compression.CompressionCodecZLib.java

License:Apache License

@Override
public ByteBuf decode(ByteBuf encoded, int uncompressedLength) throws IOException {
    ByteBuf uncompressed = PooledByteBufAllocator.DEFAULT.heapBuffer(uncompressedLength, uncompressedLength);

    int len = encoded.readableBytes();

    byte[] array;
    int offset;
    if (encoded.hasArray()) {
        array = encoded.array();
        offset = encoded.arrayOffset() + encoded.readerIndex();
    } else {
        array = new byte[len];
        encoded.getBytes(encoded.readerIndex(), array);
        offset = 0;
    }

    int resultLength;
    synchronized (inflater) {
        inflater.setInput(array, offset, len);
        try {
            resultLength = inflater.inflate(uncompressed.array(), uncompressed.arrayOffset(),
                    uncompressedLength);
        } catch (DataFormatException e) {
            throw new IOException(e);
        }
        inflater.reset();
    }

    checkArgument(resultLength == uncompressedLength);

    uncompressed.writerIndex(uncompressedLength);
    return uncompressed;
}

From source file:com.yahoo.pulsar.common.util.XXHashChecksum.java

License:Apache License

public static long computeChecksum(ByteBuf payload) {
    if (payload.hasArray()) {
        return checksum.hash(payload.array(), payload.arrayOffset() + payload.readerIndex(),
                payload.readableBytes(), 0L);
    } else {
        ByteBuffer payloadNio = payload.nioBuffer(payload.readerIndex(), payload.readableBytes());
        return checksum.hash(payloadNio, 0, payload.readableBytes(), 0L);
    }
}

From source file:de.dfki.kiara.http.HttpHandler.java

License:Open Source License

@Override
protected void channelRead0(final ChannelHandlerContext ctx, Object msg) throws Exception {
    logger.debug("Handler: {} / Channel: {}", this, ctx.channel());
    if (mode == Mode.SERVER) {
        if (msg instanceof FullHttpRequest) {
            final FullHttpRequest request = (FullHttpRequest) msg;

            HttpRequestMessage transportMessage = new HttpRequestMessage(this, request);
            transportMessage.setPayload(request.content().nioBuffer());

            if (logger.isDebugEnabled()) {
                logger.debug("RECEIVED REQUEST WITH CONTENT {}",
                        Util.bufferToString(transportMessage.getPayload()));
            }

            synchronized (listeners) {
                if (!listeners.isEmpty()) {
                    for (TransportMessageListener listener : listeners) {
                        listener.onMessage(transportMessage);
                    }
                }
            }

            boolean keepAlive = HttpHeaders.isKeepAlive(request);
        }
    } else {
        // CLIENT
        if (msg instanceof HttpResponse) {
            HttpResponse response = (HttpResponse) msg;
            headers = response.headers();
            //if (!response.headers().isEmpty()) {
            //    contentType = response.headers().get("Content-Type");
            //}
        }
        if (msg instanceof HttpContent) {
            HttpContent content = (HttpContent) msg;
            ByteBuf buf = content.content();
            if (buf.isReadable()) {
                if (buf.hasArray()) {
                    bout.write(buf.array(), buf.readerIndex(), buf.readableBytes());
                } else {
                    byte[] bytes = new byte[buf.readableBytes()];
                    buf.getBytes(buf.readerIndex(), bytes);
                    bout.write(bytes);
                }
            }
            if (content instanceof LastHttpContent) {
                //ctx.close();
                bout.flush();
                HttpResponseMessage response = new HttpResponseMessage(this, headers);
                response.setPayload(ByteBuffer.wrap(bout.toByteArray(), 0, bout.size()));
                onResponse(response);
                bout.reset();
            }
        }
    }
}

From source file:de.dfki.kiara.netty.ByteBufferDecoder.java

License:Open Source License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
    final byte[] array;
    final int offset;
    final int length = msg.readableBytes();
    if (msg.hasArray()) {
        array = msg.array();
        offset = msg.arrayOffset() + msg.readerIndex();
    } else {
        array = new byte[length];
        msg.getBytes(msg.readerIndex(), array, 0, length);
        offset = 0;
    }

    out.add(ByteBuffer.wrap(array, offset, length));
}

From source file:de.saxsys.synchronizefx.netty.base.CommandToBinaryByteBuf.java

License:Open Source License

@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf msg, final List<Object> out)
        throws Exception {
    byte[] data;
    if (msg.hasArray()) {
        data = msg.array();
    } else {
        data = new byte[msg.readableBytes()];
        msg.readBytes(data);
    }

    out.add(serializer.deserialize(data));
}

From source file:divconq.ctp.stream.UngzipStream.java

License:Open Source License

protected void inflate(ByteBuf in) {
    switch (this.gzipState) {
    case HEADER_START:
        if (in.readableBytes() < 10)
            return;

        // read magic numbers
        int magic0 = in.readByte();
        int magic1 = in.readByte();

        if (magic0 != 31) {
            OperationContext.get().getTaskRun().kill("Input is not in the GZIP format");
            return;
        }

        this.crc.update(magic0);
        this.crc.update(magic1);

        int method = in.readUnsignedByte();

        if (method != Deflater.DEFLATED) {
            OperationContext.get().getTaskRun()
                    .kill("Unsupported compression method " + method + " in the GZIP header");
            return;
        }

        this.crc.update(method);

        this.flags = in.readUnsignedByte();
        this.crc.update(this.flags);

        if ((this.flags & FRESERVED) != 0) {
            OperationContext.get().getTaskRun().kill("Reserved flags are set in the GZIP header");
            return;
        }

        // mtime (int)
        this.crc.update(in.readByte());
        this.crc.update(in.readByte());
        this.crc.update(in.readByte());
        this.crc.update(in.readByte());

        this.crc.update(in.readUnsignedByte()); // extra flags
        this.crc.update(in.readUnsignedByte()); // operating system

        this.gzipState = GzipState.FLG_READ;
    case FLG_READ:
        if ((this.flags & FEXTRA) != 0) {
            if (in.readableBytes() < 2)
                return;

            int xlen1 = in.readUnsignedByte();
            int xlen2 = in.readUnsignedByte();

            this.crc.update(xlen1);
            this.crc.update(xlen2);

            this.xlen |= xlen1 << 8 | xlen2;
        }

        this.gzipState = GzipState.XLEN_READ;
    case XLEN_READ:
        if (this.xlen != -1) {
            if (in.readableBytes() < xlen)
                return;

            byte[] xtra = new byte[xlen];
            in.readBytes(xtra);
            this.crc.update(xtra);
        }

        this.gzipState = GzipState.SKIP_FNAME;
    case SKIP_FNAME:
        if ((this.flags & FNAME) != 0) {
            boolean gotend = false;

            while (in.isReadable()) {
                int b = in.readUnsignedByte();
                this.crc.update(b);

                if (b == 0x00) {
                    gotend = true;
                    break;
                }
            }

            if (!gotend)
                return;
        }

        this.gzipState = GzipState.SKIP_COMMENT;
    case SKIP_COMMENT:
        if ((this.flags & FCOMMENT) != 0) {
            boolean gotend = false;

            while (in.isReadable()) {
                int b = in.readUnsignedByte();
                this.crc.update(b);

                if (b == 0x00) {
                    gotend = true;
                    break;
                }
            }

            if (!gotend)
                return;
        }

        this.gzipState = GzipState.PROCESS_FHCRC;
    case PROCESS_FHCRC:
        if ((this.flags & FHCRC) != 0) {
            if (in.readableBytes() < 4)
                return;

            long crcValue = 0;

            for (int i = 0; i < 4; ++i)
                crcValue |= (long) in.readUnsignedByte() << i * 8;

            long readCrc = crc.getValue();

            if (crcValue != readCrc) {
                OperationContext.get().getTaskRun()
                        .kill("CRC value missmatch. Expected: " + crcValue + ", Got: " + readCrc);
                return;
            }
        }

        this.crc.reset();

        this.gzipState = GzipState.PRROCESS_CONTENT;
    case PRROCESS_CONTENT:
        int readableBytes = in.readableBytes();

        if (readableBytes < 1)
            return;

        if (in.hasArray()) {
            this.inflater.setInput(in.array(), in.arrayOffset() + in.readerIndex(), readableBytes);
        } else {
            byte[] array = new byte[readableBytes];
            in.getBytes(in.readerIndex(), array);
            this.inflater.setInput(array);
        }

        int maxOutputLength = this.inflater.getRemaining() << 1;
        ByteBuf decompressed = Hub.instance.getBufferAllocator().heapBuffer(maxOutputLength);

        boolean readFooter = false;
        byte[] outArray = decompressed.array();

        try {
            while (!this.inflater.needsInput()) {
                int writerIndex = decompressed.writerIndex();
                int outIndex = decompressed.arrayOffset() + writerIndex;
                int length = decompressed.writableBytes();

                if (length == 0) {
                    // completely filled the buffer; allocate a new one and start to fill it
                    this.outlist.add(decompressed);
                    decompressed = Hub.instance.getBufferAllocator().heapBuffer(maxOutputLength);
                    outArray = decompressed.array();
                    continue;
                }

                int outputLength = this.inflater.inflate(outArray, outIndex, length);

                if (outputLength > 0) {
                    decompressed.writerIndex(writerIndex + outputLength);

                    this.crc.update(outArray, outIndex, outputLength);
                } else {
                    if (this.inflater.needsDictionary()) {
                        if (this.dictionary == null) {
                            OperationContext.get().getTaskRun().kill(
                                    "decompression failure, unable to set dictionary as non was specified");
                            return;
                        }

                        this.inflater.setDictionary(this.dictionary);
                    }
                }

                if (this.inflater.finished()) {
                    readFooter = true;
                    break;
                }
            }

            in.skipBytes(readableBytes - this.inflater.getRemaining());
        } catch (DataFormatException x) {
            OperationContext.get().getTaskRun().kill("decompression failure: " + x);
            return;
        } finally {
            if (decompressed.isReadable()) {
                this.outlist.add(decompressed);
            } else {
                decompressed.release();
            }
        }

        if (!readFooter)
            break;

        this.gzipState = GzipState.PROCESS_FOOTER;
    case PROCESS_FOOTER:
        if (in.readableBytes() < 8)
            return;

        long crcValue = 0;

        for (int i = 0; i < 4; ++i)
            crcValue |= (long) in.readUnsignedByte() << i * 8;

        long readCrc = this.crc.getValue();

        if (crcValue != readCrc) {
            OperationContext.get().getTaskRun()
                    .kill("CRC value missmatch. Expected: " + crcValue + ", Got: " + readCrc);
            return;
        }

        // read ISIZE and verify
        int dataLength = 0;

        for (int i = 0; i < 4; ++i)
            dataLength |= in.readUnsignedByte() << i * 8;

        int readLength = this.inflater.getTotalOut();

        if (dataLength != readLength) {
            OperationContext.get().getTaskRun()
                    .kill("Number of bytes mismatch. Expected: " + dataLength + ", Got: " + readLength);
            return;
        }

        this.gzipState = GzipState.DONE;
    case DONE:
        break;
    }
}

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public synchronized void writeCompressed(final Connection_ connection, final ByteBuf buffer,
        final Object message) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took + magic byte
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to be
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {

        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, the small input (that would result in a larger
    // output), will be negated by the increase in size by the encryption

    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.

    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input.  compression output is now buffer input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array (this is NOT THE BUFFER!). This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // have to copy over the orig data, because we used the temp buffer. Also have to account for the length of the uncompressed size
    buffer.writeBytes(inputArray, inputOffset, compressedLength + lengthLength);
}

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public Object readCompressed(final Connection_ connection, final ByteBuf buffer, int length)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    // get the decompressed length (at the beginning of the array)
    final int uncompressedLength = OptimizeUtilsByteBuf.readInt(buffer, true);
    final int lengthLength = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-5 bytes for the decompressed size

    // have to adjust for uncompressed length
    length = length - lengthLength;

    ///////// decompress data -- as it's ALWAYS compressed

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to be
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {

        // we can use it...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    ///////// decompress data -- as it's ALWAYS compressed

    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);

    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}