Example usage for io.netty.buffer ByteBuf capacity

Introduction

On this page you can find usage examples for io.netty.buffer ByteBuf.capacity().

Prototype

public abstract int capacity();

Document

Returns the number of bytes (octets) this buffer can contain.
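
As a minimal, self-contained sketch (not taken from any of the projects below), the following shows how capacity() relates to the buffer's indices: capacity is the total allocated space, while readableBytes() tracks what has actually been written.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class CapacityDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);        // initial capacity of 16 bytes
        System.out.println(buf.capacity());       // 16
        System.out.println(buf.readableBytes());  // 0 - nothing written yet

        buf.writeBytes(new byte[] { 1, 2, 3, 4 });
        System.out.println(buf.capacity());       // still 16
        System.out.println(buf.readableBytes());  // 4

        buf.release();                            // ByteBufs are reference counted
    }
}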

Usage

From source file: de.ocarthon.core.network.HttpClient.java

License: Apache License

public synchronized String postRequest(String query, List<Map.Entry<String, String>> postParameters,
        String filePostName, String fileName, ByteBuf fileData, String mime) {
    if (bootstrap == null) {
        setupBootstrap();
    }

    if (channel == null || forceReconnect) {
        ChannelFuture cf = bootstrap.connect(host, port);
        forceReconnect = false;
        cf.awaitUninterruptibly();
        channel = cf.channel();

        channel.pipeline().addLast("handler", new SimpleChannelInboundHandler<HttpObject>() {
            @Override
            protected void messageReceived(ChannelHandlerContext ctx, HttpObject msg) throws Exception {
                if (msg instanceof HttpResponse) {
                    HttpResponse response = ((HttpResponse) msg);
                    String connection = (String) response.headers().get(HttpHeaderNames.CONNECTION);
                    if (connection != null && connection.equalsIgnoreCase(HttpHeaderValues.CLOSE.toString()))
                        forceReconnect = true;
                }

                if (msg instanceof HttpContent) {
                    HttpContent chunk = (HttpContent) msg;
                    String message = chunk.content().toString(CharsetUtil.UTF_8);

                    if (!message.isEmpty()) {
                        result[0] = message;

                        synchronized (result) {
                            result.notify();
                        }
                    }
                }
            }
        });
    }
    boolean isFileAttached = fileData != null && fileData.isReadable();
    HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST,
            scheme + "://" + host + ":" + port + "/" + query);
    HttpPostRequestEncoder bodyReqEncoder;
    try {
        bodyReqEncoder = new HttpPostRequestEncoder(httpDataFactory, request, isFileAttached);

        for (Map.Entry<String, String> entry : postParameters) {
            bodyReqEncoder.addBodyAttribute(entry.getKey(), entry.getValue());
        }

        if (isFileAttached) {
            if (mime == null)
                mime = "application/octet-stream";

            MixedFileUpload mfu = new MixedFileUpload(filePostName, fileName, mime, "binary", null,
                    fileData.capacity(), DefaultHttpDataFactory.MINSIZE);
            mfu.addContent(fileData, true);
            bodyReqEncoder.addBodyHttpData(mfu);
        }

        HttpHeaders headers = request.headers();
        headers.set(HttpHeaderNames.HOST, host);
        headers.set(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE);
        headers.set(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
        headers.set(HttpHeaderNames.USER_AGENT, "OcarthonCore HttpClient");
        request = bodyReqEncoder.finalizeRequest();
    } catch (Exception e) {
        // the post request encoder rejects empty or null keys/values
        throw new IllegalArgumentException("key or value is empty or null", e);
    }

    channel.write(request);

    if (bodyReqEncoder.isChunked()) {
        channel.write(bodyReqEncoder);
    }
    channel.flush();

    synchronized (result) {
        try {
            result.wait();
        } catch (InterruptedException e) {
            return null;
        }
    }

    return result[0];
}
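
A caveat on this example: capacity() returns the total space in the buffer, not the amount of data it currently holds, so passing fileData.capacity() as the upload size is only correct when the buffer is sized exactly to its contents. A hedged variant that declares the actual payload length instead:

// illustrative: declare the readable payload size rather than the total capacity
MixedFileUpload mfu = new MixedFileUpload(filePostName, fileName, mime, "binary", null,
        fileData.readableBytes(), DefaultHttpDataFactory.MINSIZE);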

From source file: divconq.api.internal.UploadPutHandler.java

License: Open Source License

public void start(final HyperSession parent, ScatteringByteChannel src, String chanid,
        Map<String, Cookie> cookies, long size, long offset, final OperationCallback callback) {
    this.src = src;
    this.cookies = cookies;
    this.callback = callback;

    this.dest = this.allocateChannel(parent, callback);

    if (this.callback.hasErrors()) {
        callback.complete();
        return;
    }

    // send a request to get things going      
    HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT,
            "/upload/" + chanid + "/Final");

    req.headers().set(Names.HOST, parent.getInfo().getHost());
    req.headers().set(Names.USER_AGENT, "DivConq HyperAPI Client 1.0");
    req.headers().set(Names.CONNECTION, HttpHeaders.Values.CLOSE);
    req.headers().set(Names.COOKIE, ClientCookieEncoder.encode(this.cookies.values()));
    req.headers().set(HttpHeaders.Names.CONTENT_LENGTH, size - offset);

    // send request headers - must flush here in case CL = 0
    this.dest.writeAndFlush(req);

    // now start sending the file
    long sent = offset;

    if (size > 0)
        callback.getContext().setAmountCompleted((int) (sent * 100 / size));

    ByteBuf bb = null;

    try {
        bb = Hub.instance.getBufferAllocator().directBuffer(64 * 1024); // TODO review if direct is best here

        long toskip = offset;

        if (src instanceof SeekableByteChannel) {
            ((SeekableByteChannel) src).position(toskip);
        } else {
            while (toskip > 0) {
                int skip = (int) Math.min(bb.capacity(), toskip);
                toskip -= bb.writeBytes(src, skip);
                bb.clear();
            }
        }

        // now start writing the upload
        int amt = bb.writeBytes(src, bb.capacity());

        while (amt != -1) {
            bb.retain(); // this ups ref cnt to 2 - we plan to reuse the buffer

            this.dest.writeAndFlush(bb).sync();

            sent += amt;

            if (size > 0)
                callback.getContext().setAmountCompleted((int) (sent * 100 / size));

            // by the time we get here, that buffer has been used up and we can use it for the next buffer
            if (bb.refCnt() != 1)
                throw new IOException("Buffer reference count is not correct");

            // stop writing if canceled
            if (!this.dest.isOpen()) {
                this.finish(); // might already be finished but to be sure (this is helpful when api.abortStream is called)
                break;
            }

            bb.clear();

            amt = bb.writeBytes(src, bb.capacity());
        }

        // we are now done with it
        bb.release();
    } catch (Exception x) {
        try {
            if (bb != null)
                bb.release();
        } catch (Exception x2) {
        }

        callback.error(1, "Local read error: " + x);
        this.finish();
    }
}
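
In this loop bb.capacity() doubles as the chunk size: writeBytes(src, bb.capacity()) reads at most one full buffer per pass and returns -1 at end of stream. Distilled to its core (with alloc, dest and src standing in for the allocator, outbound channel and source channel, and assuming, as the comments above do, that each outbound write releases exactly one reference), the reuse pattern is:

ByteBuf bb = alloc.directBuffer(64 * 1024);   // capacity() fixes the chunk size
int amt = bb.writeBytes(src, bb.capacity());  // read up to one chunk, -1 at EOF
while (amt != -1) {
    bb.retain();                              // hold a reference across the write
    dest.writeAndFlush(bb).sync();            // the outbound write releases one
    bb.clear();                               // reset indices, reuse the memory
    amt = bb.writeBytes(src, bb.capacity());
}
bb.release();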

From source file: divconq.api.LocalSession.java

License: Open Source License

@Override
public void sendStream(ScatteringByteChannel in, long size, long offset, final String channelid,
        final OperationCallback callback) {
    final DataStreamChannel chan = this.session.getChannel(channelid);

    if (chan == null) {
        callback.error(1, "Missing channel");
        callback.complete();
        return;
    }

    chan.setDriver(new IStreamDriver() {
        @Override
        public void cancel() {
            callback.error(1, "Transfer canceled");
            chan.complete();
            callback.complete();
        }

        @Override
        public void message(StreamMessage msg) {
            if (msg.isFinal()) {
                System.out.println("Final on channel: " + channelid);
                chan.complete();
                callback.complete();
            }
        }

        @Override
        public void nextChunk() {
            // won't chunk so won't happen here
        }
    });

    long sent = offset;
    int seq = 0;

    if (size > 0) {
        callback.getContext().setAmountCompleted((int) (sent * 100 / size));
        chan.getContext().setAmountCompleted((int) (sent * 100 / size)); // keep the channel active so it does not timeout
    }

    try {
        ByteBuf bb = Hub.instance.getBufferAllocator().directBuffer(64 * 1024);

        long toskip = offset;

        if (in instanceof SeekableByteChannel) {
            ((SeekableByteChannel) in).position(toskip);
        } else {
            while (toskip > 0) {
                int skip = (int) Math.min(bb.capacity(), toskip);
                toskip -= bb.writeBytes(in, skip);
                bb.clear();
            }
        }

        chan.touch();

        // now start writing the upload
        int amt = bb.writeBytes(in, bb.capacity());

        while (amt != -1) {
            bb.retain(); // this ups ref cnt to 2 - we plan to reuse the buffer

            StreamMessage b = new StreamMessage("Block", bb);
            b.setField("Sequence", seq);

            OperationResult sr = chan.send(b);

            if (sr.hasErrors()) {
                chan.close();
                break;
            }

            seq++;
            sent += amt;

            if (size > 0) {
                callback.getContext().setAmountCompleted((int) (sent * 100 / size));
                chan.getContext().setAmountCompleted((int) (sent * 100 / size)); // keep the channel active so it does not timeout
            }

            callback.touch();
            chan.touch();

            // by the time we get here, that buffer has been used up and we can use it for the next buffer
            if (bb.refCnt() != 1)
                throw new IOException("Buffer reference count is not correct");

            // stop writing if canceled
            if (chan.isClosed())
                break;

            bb.clear();

            amt = bb.writeBytes(in, bb.capacity());
        }

        // we are now done with it
        bb.release();

        // final only if not canceled
        if (!chan.isClosed())
            chan.send(MessageUtil.streamFinal());
    } catch (IOException x) {
        callback.error(1, "Local read error: " + x);

        chan.send(MessageUtil.streamError(1, "Source read error: " + x));
        chan.close();

        callback.complete();
    } finally {
        try {
            in.close();
        } catch (IOException x) {
        }
    }
}

From source file: divconq.ctp.stream.FileSourceStream.java

License: Open Source License

public void readLocalFile() {
    FileSystemFile fs = (FileSystemFile) this.current;

    if (this.in == null) {
        this.insize = fs.getSize();

        // As a source we are responsible for progress tracking
        OperationContext.get().setAmountCompleted(0);

        try {
            this.in = FileChannel.open(fs.localPath(), StandardOpenOption.READ);
        } catch (IOException x) {
            OperationContext.get().getTaskRun().kill("Unable to read source file " + x);
            return;
        }
    }

    while (true) {
        // TODO sizing?
        ByteBuf data = Hub.instance.getBufferAllocator().heapBuffer(32768);

        ByteBuffer buffer = ByteBuffer.wrap(data.array(), data.arrayOffset(), data.capacity());

        int pos = -1;

        try {
            pos = (int) this.in.read(buffer);
        } catch (IOException x1) {
            OperationContext.get().getTaskRun().kill("Problem reading source file: " + x1);
            data.release();
            return;
        }

        FileDescriptor fref = FileDescriptor.fromFileStore(this.current);
        fref.setPath(this.current.path().subpath(this.source.path()));

        System.out.println("writing: " + fref.getPath() + " from: " + this.inprog);

        if (pos == -1) {
            try {
                this.in.close();
            } catch (IOException x) {
                OperationContext.get().getTaskRun().kill("Problem closing source file: " + x);
                data.release();
                return;
            }

            OperationContext.get().setAmountCompleted(100);

            fref.setEof(true);

            this.current = null;
            this.in = null;
            this.insize = 0;
            this.inprog = 0;
        } else {
            this.inprog += pos;

            data.writerIndex(pos);
            OperationContext.get().setAmountCompleted((int) (this.inprog * 100 / this.insize));
        }

        if (this.downstream.handle(fref, data) != ReturnOption.CONTINUE)
            break;

        if (this.current == null) {
            // we need the next file
            OperationContext.get().getTaskRun().resume();

            // wait on the implied request
            break;
        }
    }
}
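
The ByteBuffer.wrap(data.array(), data.arrayOffset(), data.capacity()) bridge above only works for heap buffers whose backing array is accessible (hasArray() returns true), and it forces the manual data.writerIndex(pos) bookkeeping afterwards. A hedged alternative, valid for heap and direct buffers alike, is to let the ByteBuf read from the channel itself:

// illustrative sketch: ByteBuf.writeBytes(channel, length) fills the buffer directly
// and advances writerIndex, so neither wrap() nor writerIndex(pos) is needed
ByteBuf data = Hub.instance.getBufferAllocator().heapBuffer(32768);
int pos = data.writeBytes(this.in, data.writableBytes()); // returns -1 at end of file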

From source file: divconq.http.multipart.HttpPostRequestEncoder.java

License: Apache License

/**
 * From the current context (currentBuffer and currentData), returns the next HttpChunk (if possible) trying to get
 * sizeleft bytes more into the currentBuffer. This is the Multipart version.
 *
 * @param sizeleft
 *            the number of bytes to try to get from currentData
 * @return the next HttpChunk or null if not enough bytes were found
 * @throws ErrorDataEncoderException
 *             if the encoding is in error
 */
private HttpContent encodeNextChunkMultipart(int sizeleft) throws ErrorDataEncoderException {
    if (currentData == null) {
        return null;
    }
    ByteBuf buffer;
    if (currentData instanceof InternalAttribute) {
        buffer = ((InternalAttribute) currentData).toByteBuf();
        currentData = null;
    } else {
        if (currentData instanceof Attribute) {
            try {
                buffer = ((Attribute) currentData).getChunk(sizeleft);
            } catch (IOException e) {
                throw new ErrorDataEncoderException(e);
            }
        } else {
            try {
                buffer = ((HttpData) currentData).getChunk(sizeleft);
            } catch (IOException e) {
                throw new ErrorDataEncoderException(e);
            }
        }
        if (buffer.capacity() == 0) {
            // end for current InterfaceHttpData, need more data
            currentData = null;
            return null;
        }
    }
    if (currentBuffer == null) {
        currentBuffer = buffer;
    } else {
        currentBuffer = wrappedBuffer(currentBuffer, buffer);
    }
    if (currentBuffer.readableBytes() < HttpPostBodyUtil.chunkSize) {
        currentData = null;
        return null;
    }
    buffer = fillByteBuf();
    return new DefaultHttpContent(buffer);
}
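
Note the sentinel at work here: getChunk(sizeleft) hands back an empty buffer once the underlying data is exhausted, so buffer.capacity() == 0 serves as the end-of-data check (the UrlEncoded variant below relies on the same test). Since a fresh chunk has readerIndex 0, checking readableBytes() == 0 would express the same condition in terms of content rather than allocated space.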

From source file: divconq.http.multipart.HttpPostRequestEncoder.java

License: Apache License

/**
 * From the current context (currentBuffer and currentData), returns the next HttpChunk (if possible) trying to get
 * sizeleft bytes more into the currentBuffer. This is the UrlEncoded version.
 *
 * @param sizeleft
 *            the number of bytes to try to get from currentData
 * @return the next HttpChunk or null if not enough bytes were found
 * @throws ErrorDataEncoderException
 *             if the encoding is in error
 */
private HttpContent encodeNextChunkUrlEncoded(int sizeleft) throws ErrorDataEncoderException {
    if (currentData == null) {
        return null;
    }
    int size = sizeleft;
    ByteBuf buffer;

    // Set name=
    if (isKey) {
        String key = currentData.getName();
        buffer = wrappedBuffer(key.getBytes());
        isKey = false;
        if (currentBuffer == null) {
            currentBuffer = wrappedBuffer(buffer, wrappedBuffer("=".getBytes()));
            // continue
            size -= buffer.readableBytes() + 1;
        } else {
            currentBuffer = wrappedBuffer(currentBuffer, buffer, wrappedBuffer("=".getBytes()));
            // continue
            size -= buffer.readableBytes() + 1;
        }
        if (currentBuffer.readableBytes() >= HttpPostBodyUtil.chunkSize) {
            buffer = fillByteBuf();
            return new DefaultHttpContent(buffer);
        }
    }

    // Put value into buffer
    try {
        buffer = ((HttpData) currentData).getChunk(size);
    } catch (IOException e) {
        throw new ErrorDataEncoderException(e);
    }

    // Figure out delimiter
    ByteBuf delimiter = null;
    if (buffer.readableBytes() < size) {
        isKey = true;
        delimiter = iterator.hasNext() ? wrappedBuffer("&".getBytes()) : null;
    }

    // End for current InterfaceHttpData, need potentially more data
    if (buffer.capacity() == 0) {
        currentData = null;
        if (currentBuffer == null) {
            currentBuffer = delimiter;
        } else {
            if (delimiter != null) {
                currentBuffer = wrappedBuffer(currentBuffer, delimiter);
            }
        }
        if (currentBuffer.readableBytes() >= HttpPostBodyUtil.chunkSize) {
            buffer = fillByteBuf();
            return new DefaultHttpContent(buffer);
        }
        return null;
    }

    // Put it all together: name=value&
    if (currentBuffer == null) {
        if (delimiter != null) {
            currentBuffer = wrappedBuffer(buffer, delimiter);
        } else {
            currentBuffer = buffer;
        }
    } else {
        if (delimiter != null) {
            currentBuffer = wrappedBuffer(currentBuffer, buffer, delimiter);
        } else {
            currentBuffer = wrappedBuffer(currentBuffer, buffer);
        }
    }

    // end for current InterfaceHttpData, need more data
    if (currentBuffer.readableBytes() < HttpPostBodyUtil.chunkSize) {
        currentData = null;
        isKey = true;
        return null;
    }

    buffer = fillByteBuf();
    return new DefaultHttpContent(buffer);
}

From source file: dorkbox.network.connection.KryoExtra.java

License: Apache License

/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public synchronized void writeCompressed(final Connection_ connection, final ByteBuf buffer,
        final Object message) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took + magic byte
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {

        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- compressing a small input may enlarge it slightly,
    // but that overhead is negated by the fixed size increase AES-GCM encryption adds anyway

    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.

    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input.  compression output is now buffer input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array (this is NOT THE BUFFER!). This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // have to copy over the orig data, because we used the temp buffer. Also have to account for the length of the uncompressed size
    buffer.writeBytes(inputArray, inputOffset, compressedLength + lengthLength);
}
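
The guard above decides whether the backing array can be borrowed without copying: hasArray() alone is not enough, because a pooled or sliced buffer may expose only part of a larger shared array. Comparing array().length against capacity() (together with the array()[0] == getByte(0) spot check, a stand-in for arrayOffset() == 0) confirms the array maps one-to-one onto the buffer. A hedged, stand-alone version of that decision might look like:

// illustrative helper (not part of KryoExtra): borrow the backing array when the
// buffer exposes it in full, otherwise fall back to a single copy
static byte[] toFlatArray(ByteBuf buf, int length) {
    if (buf.hasArray() && buf.arrayOffset() == 0 && buf.array().length == buf.capacity()) {
        return buf.array(); // zero-copy; the first length bytes are the buffer's data
    }
    byte[] copy = new byte[length];
    buf.getBytes(buf.readerIndex(), copy, 0, length);
    return copy;
}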

From source file: dorkbox.network.connection.KryoExtra.java

License: Apache License

/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public Object readCompressed(final Connection_ connection, final ByteBuf buffer, int length)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    // get the decompressed length (at the beginning of the array)
    final int uncompressedLength = OptimizeUtilsByteBuf.readInt(buffer, true);
    final int lengthLength = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-5 bytes for the decompressed size

    // have to adjust for uncompressed length
    length = length - lengthLength;

    ///////// decompress data -- as it's ALWAYS compressed

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {

        // we can use it...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    ///////// decompress data -- as it's ALWAYS compressed

    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);

    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}

From source file: dorkbox.network.connection.KryoExtra.java

License: Apache License

public synchronized void writeCrypto(final Connection_ connection, final ByteBuf buffer, final Object message)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {

        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- compressing a small input may enlarge it slightly,
    // but that overhead is negated by the fixed size increase AES-GCM encryption adds anyway

    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.

    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input.  compression output is now encryption input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array. This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // correct length for encryption
    length = compressedLength + lengthLength; // +1 to +5 for the uncompressed size bytes

    /////// encrypting data.
    final long nextGcmSequence = connection.getNextGcmSequence();

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(nextGcmSequence, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(true, cryptoParameters);

    byte[] cryptoOutput;

    // lazy initialize the crypto output buffer
    int cryptoSize = length + 16; // from:  aes.getOutputSize(length);

    // 'output' is the temp byte array
    if (cryptoSize > cryptoOutputLength) {
        cryptoOutputLength = cryptoSize;
        cryptoOutput = new byte[cryptoSize];
        this.cryptoOutput = cryptoOutput;
    } else {
        cryptoOutput = this.cryptoOutput;
    }

    int encryptedLength = aes.processBytes(inputArray, inputOffset, length, cryptoOutput, 0);

    try {
        // authentication tag for GCM
        encryptedLength += aes.doFinal(cryptoOutput, encryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES encrypt the data", e);
    }

    // write out our GCM counter
    OptimizeUtilsByteBuf.writeLong(buffer, nextGcmSequence, true);

    // have to copy over the orig data, because we used the temp buffer
    buffer.writeBytes(cryptoOutput, 0, encryptedLength);
}
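
One sizing detail worth calling out: cryptoSize = length + 16 reserves room for the 16-byte (128-bit) authentication tag that GCM appends, which is what aes.getOutputSize(length) reports for this cipher; readCrypto() below subtracts the same 16 bytes to recover the plaintext length.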

From source file: dorkbox.network.connection.KryoExtra.java

License: Apache License

public Object readCrypto(final Connection_ connection, final ByteBuf buffer, int length) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    final long gcmIVCounter = OptimizeUtilsByteBuf.readLong(buffer, true);
    int lengthLength = OptimizeUtilsByteArray.longLength(gcmIVCounter, true);

    // have to adjust for the gcmIVCounter
    length = length - lengthLength;

    /////////// decrypting data

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {

        // we can use it...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(gcmIVCounter, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(false, cryptoParameters);

    int cryptoSize = length - 16; // from:  aes.getOutputSize(length);

    // lazy initialize the decrypt output buffer
    byte[] decryptOutputArray;
    if (cryptoSize > decryptOutputLength) {
        decryptOutputLength = cryptoSize;
        decryptOutputArray = new byte[cryptoSize];
        this.decryptOutput = decryptOutputArray;

        decryptBuf = Unpooled.wrappedBuffer(decryptOutputArray);
    } else {
        decryptOutputArray = this.decryptOutput;
    }

    int decryptedLength = aes.processBytes(inputArray, inputOffset, length, decryptOutputArray, 0);

    try {
        // authentication tag for GCM
        decryptedLength += aes.doFinal(decryptOutputArray, decryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES decrypt the data", e);
    }

    ///////// decompress data -- as it's ALWAYS compressed

    // get the decompressed length (at the beginning of the array)
    inputArray = decryptOutputArray;
    final int uncompressedLength = OptimizeUtilsByteArray.readInt(inputArray, true);
    inputOffset = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-5 bytes for the decompressed size

    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);

    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}