Example usage for io.netty.buffer ByteBuf getBytes

List of usage examples for io.netty.buffer ByteBuf getBytes

Introduction

On this page you can find example usage for io.netty.buffer ByteBuf getBytes.

Prototype

public abstract int getBytes(int index, FileChannel out, long position, int length) throws IOException;

Document

Transfers this buffer's data starting at the specified absolute index to the specified channel starting at the given file position.
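
As a quick illustration of this prototype, here is a minimal sketch that copies a buffer's readable bytes to a FileChannel at an absolute file position. The class name and output file are made up for the example; note that the absolute-index getBytes() does not advance readerIndex.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public final class GetBytesToFileChannel {
    public static void main(String[] args) throws IOException {
        ByteBuf buf = Unpooled.copiedBuffer("hello, netty", StandardCharsets.UTF_8);
        try (FileChannel channel = FileChannel.open(Paths.get("example.bin"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            // transfer all readable bytes to file position 0; the absolute-index
            // getBytes() does NOT move readerIndex
            int written = buf.getBytes(buf.readerIndex(), channel, 0L, buf.readableBytes());
            System.out.println("wrote " + written + " bytes, readerIndex=" + buf.readerIndex());
        } finally {
            buf.release();
        }
    }
}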

Usage

From source file:de.dfki.kiara.netty.ByteBufferDecoder.java

License:Open Source License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {
    final byte[] array;
    final int offset;
    final int length = msg.readableBytes();
    if (msg.hasArray()) {
        array = msg.array();
        offset = msg.arrayOffset() + msg.readerIndex();
    } else {
        array = new byte[length];
        msg.getBytes(msg.readerIndex(), array, 0, length);
        offset = 0;
    }

    out.add(ByteBuffer.wrap(array, offset, length));
}
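
The decoder above distills a common idiom: use the backing array directly when it is safe, otherwise copy with getBytes(). A minimal standalone sketch of that idiom (ByteBufViews and readableArray are hypothetical names):

import io.netty.buffer.ByteBuf;

public final class ByteBufViews {
    /** Returns the readable bytes of buf as a byte[] without moving readerIndex. */
    public static byte[] readableArray(ByteBuf buf) {
        int length = buf.readableBytes();
        // heap buffers may expose their backing array directly (zero copy)...
        if (buf.hasArray() && buf.arrayOffset() + buf.readerIndex() == 0
                && buf.array().length == length) {
            return buf.array();
        }
        // ...direct or pooled buffers require a copy via the absolute-index getBytes()
        byte[] array = new byte[length];
        buf.getBytes(buf.readerIndex(), array, 0, length);
        return array;
    }
}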

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public synchronized void writeCompressed(final Connection_ connection, final ByteBuf buffer,
        final Object message) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took + magic byte
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {

        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, any growth from compressing a small
    // input is negated by the size increase from encryption anyway

    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.

    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input.  compression output is now buffer input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array (this is NOT THE BUFFER!). This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // have to copy over the orig data, because we used the temp buffer. Also have to account for the length of the uncompressed size
    buffer.writeBytes(inputArray, inputOffset, compressedLength + lengthLength);
}
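
For context, here is a hedged sketch of the same compress-then-prefix framing using the lz4-java API that this code appears to build on. For simplicity it writes a fixed 4-byte big-endian length instead of the variable-length int used above; Lz4Frame is a hypothetical name.

import net.jpountz.lz4.LZ4Compressor;
import net.jpountz.lz4.LZ4Factory;

public final class Lz4Frame {
    private static final LZ4Compressor COMPRESSOR = LZ4Factory.fastestInstance().fastCompressor();

    /** Compresses data and prefixes it with the ORIGINAL (uncompressed) length. */
    public static byte[] compressWithLengthPrefix(byte[] data) {
        int maxCompressed = COMPRESSOR.maxCompressedLength(data.length);
        byte[] out = new byte[4 + maxCompressed];
        // write the uncompressed length up front so the FAST decompressor can be used later
        out[0] = (byte) (data.length >>> 24);
        out[1] = (byte) (data.length >>> 16);
        out[2] = (byte) (data.length >>> 8);
        out[3] = (byte) data.length;
        int compressedLength = COMPRESSOR.compress(data, 0, data.length, out, 4, maxCompressed);
        byte[] framed = new byte[4 + compressedLength];
        System.arraycopy(out, 0, framed, 0, framed.length);
        return framed;
    }
}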

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public Object readCompressed(final Connection_ connection, final ByteBuf buffer, int length)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    // get the decompressed length (at the beginning of the array)
    final int uncompressedLength = OptimizeUtilsByteBuf.readInt(buffer, true);
    final int lengthLength = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-5 bytes for the decompressed size

    // have to adjust for uncompressed length
    length = length - lengthLength;

    ///////// decompress data -- as it's ALWAYS compressed

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {

        // we can use it...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    ///////// decompress data -- as it's ALWAYS compressed

    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);

    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}
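
The read path mirrors that framing: recover the original length first, then hand it to the FAST decompressor, which needs the uncompressed size rather than the compressed one. A matching hedged sketch using lz4-java (Lz4Unframe is a hypothetical name, paired with the Lz4Frame sketch above):

import net.jpountz.lz4.LZ4Factory;
import net.jpountz.lz4.LZ4FastDecompressor;

public final class Lz4Unframe {
    private static final LZ4FastDecompressor DECOMPRESSOR = LZ4Factory.fastestInstance().fastDecompressor();

    /** Reverses Lz4Frame.compressWithLengthPrefix: reads the original length, then decompresses. */
    public static byte[] decompress(byte[] framed) {
        int uncompressedLength = ((framed[0] & 0xFF) << 24) | ((framed[1] & 0xFF) << 16)
                | ((framed[2] & 0xFF) << 8) | (framed[3] & 0xFF);
        byte[] restored = new byte[uncompressedLength];
        // the FAST decompressor takes the ORIGINAL length, not the compressed length
        DECOMPRESSOR.decompress(framed, 4, restored, 0, uncompressedLength);
        return restored;
    }
}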

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

public synchronized void writeCrypto(final Connection_ connection, final ByteBuf buffer, final Object message)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {

        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, any growth from compressing a small
    // input is negated by the size increase from encryption anyway

    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.

    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input.  compression output is now encryption input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array. This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // correct length for encryption
    length = compressedLength + lengthLength; // +1 to +4 for the uncompressed size bytes

    /////// encrypting data.
    final long nextGcmSequence = connection.getNextGcmSequence();

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(nextGcmSequence, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(true, cryptoParameters);

    byte[] cryptoOutput;

    // lazy initialize the crypto output buffer
    int cryptoSize = length + 16; // from:  aes.getOutputSize(length);

    // 'output' is the temp byte array
    if (cryptoSize > cryptoOutputLength) {
        cryptoOutputLength = cryptoSize;
        cryptoOutput = new byte[cryptoSize];
        this.cryptoOutput = cryptoOutput;
    } else {
        cryptoOutput = this.cryptoOutput;
    }

    int encryptedLength = aes.processBytes(inputArray, inputOffset, length, cryptoOutput, 0);

    try {
        // authentication tag for GCM
        encryptedLength += aes.doFinal(cryptoOutput, encryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES encrypt the data", e);
    }

    // write out our GCM counter
    OptimizeUtilsByteBuf.writeLong(buffer, nextGcmSequence, true);

    // have to copy over the orig data, because we used the temp buffer
    buffer.writeBytes(cryptoOutput, 0, encryptedLength);
}
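
The encryption step boils down to a standard Bouncy Castle GCMBlockCipher pass: processBytes() writes the ciphertext and doFinal() appends the 16-byte authentication tag. A minimal round-trip sketch under assumed parameters (a 16-byte AES key and 12-byte IV; GcmRoundTrip is a hypothetical name):

import org.bouncycastle.crypto.engines.AESEngine;
import org.bouncycastle.crypto.modes.GCMBlockCipher;
import org.bouncycastle.crypto.params.KeyParameter;
import org.bouncycastle.crypto.params.ParametersWithIV;

public final class GcmRoundTrip {
    public static byte[] encrypt(byte[] key, byte[] iv, byte[] plain) throws Exception {
        GCMBlockCipher aes = new GCMBlockCipher(new AESEngine());
        aes.init(true, new ParametersWithIV(new KeyParameter(key), iv));
        byte[] out = new byte[aes.getOutputSize(plain.length)]; // ciphertext + 16-byte GCM tag
        int n = aes.processBytes(plain, 0, plain.length, out, 0);
        aes.doFinal(out, n); // appends the authentication tag
        return out;
    }

    public static byte[] decrypt(byte[] key, byte[] iv, byte[] cipher) throws Exception {
        GCMBlockCipher aes = new GCMBlockCipher(new AESEngine());
        aes.init(false, new ParametersWithIV(new KeyParameter(key), iv));
        byte[] out = new byte[aes.getOutputSize(cipher.length)];
        int n = aes.processBytes(cipher, 0, cipher.length, out, 0);
        aes.doFinal(out, n); // verifies the tag; throws InvalidCipherTextException on tampering
        return out;
    }
}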

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

public Object readCrypto(final Connection_ connection, final ByteBuf buffer, int length) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    final long gcmIVCounter = OptimizeUtilsByteBuf.readLong(buffer, true);
    int lengthLength = OptimizeUtilsByteArray.longLength(gcmIVCounter, true);

    // have to adjust for the gcmIVCounter
    length = length - lengthLength;

    /////////// decrypting data

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been
    // resized or sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {

        // we can use it...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(gcmIVCounter, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(false, cryptoParameters);

    int cryptoSize = length - 16; // from:  aes.getOutputSize(length);

    // lazy initialize the decrypt output buffer
    byte[] decryptOutputArray;
    if (cryptoSize > decryptOutputLength) {
        decryptOutputLength = cryptoSize;
        decryptOutputArray = new byte[cryptoSize];
        this.decryptOutput = decryptOutputArray;

        decryptBuf = Unpooled.wrappedBuffer(decryptOutputArray);
    } else {
        decryptOutputArray = this.decryptOutput;
    }

    int decryptedLength = aes.processBytes(inputArray, inputOffset, length, decryptOutputArray, 0);

    try {
        // authentication tag for GCM
        decryptedLength += aes.doFinal(decryptOutputArray, decryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES decrypt the data", e);
    }

    ///////// decompress data -- as it's ALWAYS compressed

    // get the decompressed length (at the beginning of the array)
    inputArray = decryptOutputArray;
    final int uncompressedLength = OptimizeUtilsByteArray.readInt(inputArray, true);
    inputOffset = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-4 bytes for the decompressed size

    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);

    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}

From source file:dorkbox.network.serialization.Serialization.java

License:Apache License

/**
 * Called when initialization is complete. This is to prevent (and recognize) out-of-order class/serializer registration. If an ID
 * is already in use by a different type, a {@link KryoException} is thrown.
 */
@Override
public synchronized void finishInit(final Logger wireReadLogger, final Logger wireWriteLogger) {
    this.wireReadLogger = wireReadLogger;
    this.wireWriteLogger = wireWriteLogger;

    initialized = true;

    // initialize the kryo pool with at least 1 kryo instance. This ALSO makes sure that all of our class registration is done
    // correctly and (if not) we are notified on the initial thread (instead of on the network update thread)
    KryoExtra kryo = kryoPool.take();
    try {

        // now MERGE all of the registrations (since we can have registrations overwrite newer/specific registrations)

        ArrayList<ClassRegistration> mergedRegistrations = new ArrayList<ClassRegistration>();

        for (ClassRegistration registration : classesToRegister) {
            int id = registration.id;

            // if we ALREADY contain this registration (based ONLY on ID), then overwrite the existing one and REMOVE the current one
            boolean found = false;
            for (int index = 0; index < mergedRegistrations.size(); index++) {
                final ClassRegistration existingRegistration = mergedRegistrations.get(index);
                if (existingRegistration.id == id) {
                    mergedRegistrations.set(index, registration);
                    found = true;
                    break;
                }
            }

            if (!found) {
                mergedRegistrations.add(registration);
            }
        }

        // now all of the registrations are IN ORDER and MERGED
        this.mergedRegistrations = mergedRegistrations.toArray(new ClassRegistration[0]);

        Object[][] registrationDetails = new Object[mergedRegistrations.size()][3];

        for (int i = 0; i < mergedRegistrations.size(); i++) {
            final ClassRegistration registration = mergedRegistrations.get(i);
            registration.log(logger);

            // now save all of the registration IDs for quick verification/access
            registrationDetails[i] = new Object[] { registration.id, registration.clazz.getName(),
                    registration.serializer.getClass().getName() };

            // now we have to manage caching methods (only as necessary)
            if (registration.clazz.isInterface()) {
                // can be a normal class or an RMI class...
                Class<?> implClass = null;

                if (registration instanceof ClassSerializerRmi) {
                    implClass = ((ClassSerializerRmi) registration).implClass;
                }

                CachedMethod[] cachedMethods = RmiUtils.getCachedMethods(Serialization.logger, kryo, useAsm,
                        registration.clazz, implClass, registration.id);
                methodCache.put(registration.id, cachedMethods);
            }
        }

        // save this as a byte array (so registration is faster)
        ByteBuf buffer = Unpooled.buffer(480);

        kryo.setRegistrationRequired(false);
        try {
            kryo.writeCompressed(buffer, registrationDetails);
        } catch (Exception e) {
            logger.error("Unable to write compressed data for registration details");
        }

        int length = buffer.readableBytes();

        savedRegistrationDetails = new byte[length];
        buffer.getBytes(0, savedRegistrationDetails, 0, length);

        buffer.release();
    } finally {
        if (registrationRequired) {
            kryo.setRegistrationRequired(true);
        }

        kryoPool.put(kryo);
    }
}
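
Note the final copy above uses getBytes(0, ...) to snapshot the buffer into savedRegistrationDetails without disturbing its indices. A minimal sketch of that snapshot idiom:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public final class SnapshotExample {
    public static void main(String[] args) {
        ByteBuf buffer = Unpooled.buffer(480);
        buffer.writeLong(0xCAFEBABEL);

        // snapshot the readable region with the absolute-index getBytes();
        // neither readerIndex nor writerIndex moves, unlike readBytes()
        int length = buffer.readableBytes();
        byte[] saved = new byte[length];
        buffer.getBytes(0, saved, 0, length);

        System.out.println("saved " + saved.length + " bytes, still readable: " + buffer.readableBytes());
        buffer.release();
    }
}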

From source file:net.NettyEngine4.ServerHandler.java

License:Apache License

/**
 * Calls {@link io.netty.channel.ChannelHandlerContext#fireChannelRead(Object)} to forward
 * to the next {@link io.netty.channel.ChannelInboundHandler} in the {@link io.netty.channel.ChannelPipeline}.
 */
@Override
public void channelRead(ChannelHandlerContext ctx, Object message) throws Exception {
    ByteBuf msg = (ByteBuf) message;
    /**
     * If the channel goes inactive, the cleanup callback {@link io.netty.handler.codec.ByteToMessageDecoder#channelInactive(io.netty.channel.ChannelHandlerContext)}
     * may still invoke channelRead for buffers left in the recyclable ArrayList, and every queued task (ByteBuf data
     * stored in the blocking queue) is executed regardless of the channel's status. So when the channel is inactive we
     * release the buffer here, to keep leaked buffers from exhausting memory and crashing the application:
     */
    if (!ctx.channel().isActive()) { // inactive status: release the buffer
        msg.release();
        msg = null;
    } else {
        try {
            /** read the message ID */
            int ID = msg.readInt();
            /** read the protobuf payload as byte[] */
            int length = msg.readableBytes();
            if (length != 0) {
                byte[] array = new byte[length];
                msg.getBytes(msg.readerIndex(), array, 0, length);
                msg.release(); //release byte buffer
                msg = null; //collection by GC directly
                MessageLite messageLite = IController.MESSAGE_LITE[ID].getParserForType().parseFrom(array, 0,
                        length, ExtensionRegistryLite.getEmptyRegistry());
                array = null; //collection by GC directly
                logger.debug("? &( ^___^ )&  -->" + messageLite.toString() + "ID:" + ID);
                IController.GAME_CONTROLLERS[ID].DispatchMessageLit(messageLite, player);
            } else {
                msg.release();
                msg = null;
                /** no payload: dispatch null */
                IController.GAME_CONTROLLERS[ID].DispatchMessageLit(null, player);
            }
        } catch (UninitializedMessageException e) {
            e.printStackTrace();
        }
    }
}
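
The handler expects frames of the shape [int messageId][payload bytes]. A hedged sketch of just that extraction step, separated from the protobuf dispatch (FramePayload and extractPayload are hypothetical names):

import io.netty.buffer.ByteBuf;

public final class FramePayload {
    /** Reads a frame shaped as [int messageId][payload bytes], releasing the buffer afterwards. */
    public static byte[] extractPayload(ByteBuf msg) {
        try {
            int id = msg.readInt(); // advances readerIndex past the 4-byte id
            int length = msg.readableBytes();
            byte[] payload = new byte[length];
            // absolute-index copy of everything after the id; readerIndex stays put
            msg.getBytes(msg.readerIndex(), payload, 0, length);
            System.out.println("frame id=" + id + ", payload=" + length + " bytes");
            return payload;
        } finally {
            msg.release(); // always release, as the handler above does on both paths
        }
    }
}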

From source file:net.tomp2p.storage.AlternativeCompositeByteBuf.java

License:Apache License

@Override
public AlternativeCompositeByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) {
    checkDstIndex(index, length, dstIndex, dst.capacity());
    if (length == 0) {
        return this;
    }

    int i = findIndex(index);
    while (length > 0) {
        Component c = components.get(i);
        ByteBuf s = c.buf;
        int adjustment = c.offset;
        int localLength = Math.min(length, s.readableBytes() - (index - adjustment));
        s.getBytes(index - adjustment, dst, dstIndex, localLength);
        index += localLength;
        dstIndex += localLength;
        length -= localLength;
        i++;
    }
    return this;
}

From source file:net.tomp2p.storage.AlternativeCompositeByteBuf.java

License:Apache License

@Override
public AlternativeCompositeByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) {
    checkDstIndex(index, length, dstIndex, dst.length);
    if (length == 0) {
        return this;
    }

    int i = findIndex(index);
    while (length > 0) {
        Component c = components.get(i);
        ByteBuf s = c.buf;
        int adjustment = c.offset;
        int localLength = Math.min(length, s.readableBytes() - (index - adjustment));
        s.getBytes(index - adjustment, dst, dstIndex, localLength);
        index += localLength;
        dstIndex += localLength;
        length -= localLength;
        i++;
    }
    return this;
}

From source file:net.tomp2p.storage.AlternativeCompositeByteBuf.java

License:Apache License

private void copyTo(int index, int length, int componentId, ByteBuf dst) {
    int dstIndex = 0;
    int i = componentId;

    while (length > 0) {
        Component c = components.get(i);
        ByteBuf s = c.buf;
        int adjustment = c.offset;
        int localLength = Math.min(length, s.readableBytes() - (index - adjustment));
        s.getBytes(index - adjustment, dst, dstIndex, localLength);
        index += localLength;
        dstIndex += localLength;
        length -= localLength;
        i++;
    }

    dst.writerIndex(dst.capacity());
}
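
A composite buffer built from several arrays exercises exactly this component walk: a getBytes() call whose range spans component boundaries must be stitched together piece by piece. A small sketch using Netty's own Unpooled.wrappedBuffer():

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

import java.util.Arrays;

public final class CompositeGetBytes {
    public static void main(String[] args) {
        // wrappedBuffer() over several arrays yields a composite buffer whose
        // getBytes() must walk components, as in the implementation above
        ByteBuf composite = Unpooled.wrappedBuffer(
                new byte[] { 1, 2, 3 }, new byte[] { 4, 5 }, new byte[] { 6 });

        byte[] dst = new byte[4];
        composite.getBytes(1, dst, 0, dst.length); // spans the first two components
        System.out.println(Arrays.toString(dst)); // [2, 3, 4, 5]

        composite.release();
    }
}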