Example usage for io.netty.buffer ByteBuf getByte

List of usage examples for io.netty.buffer ByteBuf getByte

Introduction

In this page you can find the example usage for io.netty.buffer ByteBuf getByte.

Prototype

public abstract byte getByte(int index);

Source Link

Document

Gets a byte at the specified absolute index in this buffer.

Usage

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

/**
 * Reads an LZ4-compressed object from the buffer.
 * <p>
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 * <p>
 * Wire format consumed: [1-5 byte varint uncompressedLength][LZ4 compressed payload].
 *
 * @param connection the connection this data arrived on; required by RMI and some serializers to
 *                   determine which connection wrote (or has info about) this object
 * @param buffer     the source buffer; its readerIndex is advanced past the consumed bytes
 * @param length     total number of bytes belonging to this message (varint prefix + compressed payload)
 * @return the deserialized object
 * @throws IOException if the object cannot be read
 */
public Object readCompressed(final Connection_ connection, final ByteBuf buffer, int length)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    // get the decompressed length (a varint at the beginning of the data)
    final int uncompressedLength = OptimizeUtilsByteBuf.readInt(buffer, true);
    final int lengthLength = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-5 bytes for the decompressed size

    // the varint prefix has been consumed; only the compressed payload remains
    length = length - lengthLength;

    ///////// decompress data -- as it's ALWAYS compressed

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to use
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible
    // because the buffer might be a slice of another buffer or a pooled buffer.
    // The array()[0] == getByte(0) comparison is a heuristic for "no slice offset";
    // NOTE(review): it can give a false positive when the bytes merely happen to match — confirm intended.
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {

        // we can use the backing array directly...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        // NOTE(review): this assumes the message starts at the beginning of the backing array;
        // if readerIndex was non-zero before the varint read, this offset would be wrong — verify callers.
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it: copy into (and lazily grow) the reusable scratch array.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    ///////// decompress data -- as it's ALWAYS compressed

    // lazily (re)allocate the decompression target and the wrapping ByteBuf kryo reads from
    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);

    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

/**
 * Serializes, LZ4-compresses, then AES-GCM-encrypts an object and writes it to the buffer.
 * <p>
 * Wire format produced: [varint GCM sequence][AES-GCM( varint uncompressedLength + LZ4 payload ) + 16-byte tag].
 *
 * @param connection the connection being written to; supplies RMI support, the next GCM sequence number,
 *                   and the (thread-local) crypto parameters
 * @param buffer     the destination buffer
 * @param message    the object to serialize
 * @throws IOException if AES encryption fails
 */
public synchronized void writeCrypto(final Connection_ connection, final ByteBuf buffer, final Object message)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to use
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible
    // because the buffer might be a slice of another buffer or a pooled buffer.
    // The array()[0] == getByte(0) comparison is a heuristic for "no slice offset";
    // NOTE(review): it can give a false positive when the bytes merely happen to match — confirm intended.
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {

        // we can use the backing array directly...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it: copy into (and lazily grow) the reusable scratch array.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, the small input (that would result in a larger
    // output), will be negated by the increase in size by the encryption

    byte[] compressOutput = this.compressOutput;

    // length is never negative, so 4 is OK (5 means it's negative)
    // NOTE(review): a positive varint can still take 5 bytes for lengths >= 2^28; presumably
    // serialized messages never get that large — confirm.
    int maxLengthLengthOffset = 4;
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.

    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input.  compression output is now encryption input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array. This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // correct length for encryption
    length = compressedLength + lengthLength; // +1 to +4 for the uncompressed size bytes

    /////// encrypting data.
    final long nextGcmSequence = connection.getNextGcmSequence();

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(nextGcmSequence, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(true, cryptoParameters);

    byte[] cryptoOutput;

    // lazy initialize the crypto output buffer; +16 accounts for the GCM authentication tag
    int cryptoSize = length + 16; // from:  aes.getOutputSize(length);

    // 'output' is the temp byte array
    if (cryptoSize > cryptoOutputLength) {
        cryptoOutputLength = cryptoSize;
        cryptoOutput = new byte[cryptoSize];
        this.cryptoOutput = cryptoOutput;
    } else {
        cryptoOutput = this.cryptoOutput;
    }

    int encryptedLength = aes.processBytes(inputArray, inputOffset, length, cryptoOutput, 0);

    try {
        // authentication tag for GCM
        encryptedLength += aes.doFinal(cryptoOutput, encryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES encrypt the data", e);
    }

    // write out our GCM counter
    OptimizeUtilsByteBuf.writeLong(buffer, nextGcmSequence, true);

    // have to copy over the orig data, because we used the temp buffer
    buffer.writeBytes(cryptoOutput, 0, encryptedLength);
}

From source file:dorkbox.network.connection.KryoExtra.java

License:Apache License

/**
 * AES-GCM-decrypts, then LZ4-decompresses an object previously written by {@code writeCrypto}.
 * <p>
 * Wire format consumed: [varint GCM sequence][AES-GCM( varint uncompressedLength + LZ4 payload ) + 16-byte tag].
 *
 * @param connection the connection this data arrived on; supplies RMI support and the (thread-local)
 *                   crypto parameters
 * @param buffer     the source buffer; its readerIndex is advanced past the consumed bytes
 * @param length     total number of bytes belonging to this message (varint GCM counter + encrypted payload)
 * @return the deserialized object
 * @throws IOException if AES decryption fails
 */
public Object readCrypto(final Connection_ connection, final ByteBuf buffer, int length) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    // the GCM counter is a varint at the front of the message; it seeds the IV for decryption
    final long gcmIVCounter = OptimizeUtilsByteBuf.readLong(buffer, true);
    int lengthLength = OptimizeUtilsByteArray.longLength(gcmIVCounter, true);

    // have to adjust for the gcmIVCounter
    length = length - lengthLength;

    /////////// decrypting data

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to use
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see:   https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf

    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible
    // because the buffer might be a slice of another buffer or a pooled buffer.
    // The array()[0] == getByte(0) comparison is a heuristic for "no slice offset";
    // NOTE(review): it can give a false positive when the bytes merely happen to match — confirm intended.
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {

        // we can use the backing array directly...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        // NOTE(review): this assumes the message starts at the beginning of the backing array;
        // if readerIndex was non-zero before the varint read, this offset would be wrong — verify callers.
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it: copy into (and lazily grow) the reusable scratch array.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(gcmIVCounter, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(false, cryptoParameters);

    // -16 removes the GCM authentication tag from the plaintext size estimate
    int cryptoSize = length - 16; // from:  aes.getOutputSize(length);

    // lazy initialize the decrypt output buffer
    byte[] decryptOutputArray;
    if (cryptoSize > decryptOutputLength) {
        decryptOutputLength = cryptoSize;
        decryptOutputArray = new byte[cryptoSize];
        this.decryptOutput = decryptOutputArray;

        decryptBuf = Unpooled.wrappedBuffer(decryptOutputArray);
    } else {
        decryptOutputArray = this.decryptOutput;
    }

    int decryptedLength = aes.processBytes(inputArray, inputOffset, length, decryptOutputArray, 0);

    try {
        // authentication tag for GCM
        decryptedLength += aes.doFinal(decryptOutputArray, decryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES decrypt the data", e);
    }

    ///////// decompress data -- as it's ALWAYS compressed

    // get the decompressed length (at the beginning of the array)
    inputArray = decryptOutputArray;
    final int uncompressedLength = OptimizeUtilsByteArray.readInt(inputArray, true);
    inputOffset = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // 1-5 byte varint for the decompressed size

    // lazily (re)allocate the decompression target and the wrapping ByteBuf kryo reads from
    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);

    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}

From source file:dorkbox.network.pipeline.ByteBufInput.java

License:Apache License

/**
 * Reads an ASCII-encoded string whose final byte has its high bit set as an end marker.
 * The caller has already consumed the first byte; this method backs up to include it.
 */
private String readAscii() {
    ByteBuf buffer = byteBuf;

    // The first character was already read by the caller, so back up one byte.
    int start = buffer.readerIndex() - 1;

    // Scan forward until we hit the byte whose high bit marks the last character.
    int current;
    do {
        current = buffer.readByte();
    } while ((current & 0x80) == 0);

    int lastIndex = buffer.readerIndex() - 1;
    int length = buffer.readerIndex() - start;

    // Temporarily clear the end-of-ascii marker so the copied bytes are plain 7-bit ASCII.
    buffer.setByte(lastIndex, buffer.getByte(lastIndex) & 0x7F);

    byte[] chars = new byte[length];
    buffer.getBytes(start, chars);

    @SuppressWarnings("deprecation")
    String result = new String(chars, 0, 0, length);

    // Restore the marker bit so the buffer contents are unchanged.
    buffer.setByte(lastIndex, buffer.getByte(lastIndex) | 0x80);
    return result;
}

From source file:dorkbox.network.pipeline.ByteBufOutput.java

License:Apache License

/** Writes the length and string, or null. Short strings are checked and if ASCII they are written more efficiently, else they
 * are written as UTF8. If a string is known to be ASCII, {@link #writeAscii(String)} may be used. The string can be read using
 * {@link ByteBufInput#readString()} or {@link ByteBufInput#readStringBuilder()}.
 * @param value May be null. */
@Override
public void writeString(String value) throws KryoException {
    if (value == null) {
        writeByte(0x80); // 0 means null, bit 8 means UTF8.
        return;
    }
    int charCount = value.length();
    if (charCount == 0) {
        writeByte(1 | 0x80); // 1 means empty string, bit 8 means UTF8.
        return;
    }

    // Snoop up to 64 chars: strings of length 2..63 that are pure 7-bit ASCII
    // take the more compact ASCII encoding (no explicit length prefix).
    boolean ascii = charCount > 1 && charCount < 64;
    if (ascii) {
        for (int i = 0; i < charCount; i++) {
            if (value.charAt(i) > 127) {
                ascii = false;
                break;
            }
        }
    }

    ByteBuf buffer = byteBuf;
    if (buffer.writableBytes() < charCount) {
        buffer.capacity(buffer.capacity() + charCount + 1);
    }

    if (!ascii) {
        writeUtf8Length(charCount + 1);
    }

    // Fast path: emit one byte per char while characters fit in 7 bits.
    int charIndex = 0;
    while (charIndex < charCount) {
        int c = value.charAt(charIndex);
        if (c > 127) {
            break; // non-ASCII slipped past the snoop window; finish with the slow path
        }
        buffer.writeByte((byte) c);
        charIndex++;
    }

    if (charIndex < charCount) {
        // Remaining characters need multi-byte UTF8 encoding.
        writeString_slow(value, charCount, charIndex);
    } else if (ascii) {
        // Set bit 8 on the final byte to mark the end of the ASCII string.
        int last = buffer.writerIndex() - 1;
        buffer.setByte(last, buffer.getByte(last) | 0x80);
    }
}

From source file:dorkbox.network.pipeline.ByteBufOutput.java

License:Apache License

/** Writes a string that is known to contain only ASCII characters. Non-ASCII strings passed to this method will be corrupted.
 * Each byte is a 7 bit character with the remaining byte denoting if another character is available. This is slightly more
 * efficient than {@link #writeString(String)}. The string can be read using {@link ByteBufInput#readString()} or
 * {@link ByteBufInput#readStringBuilder()}.
 * @param value May be null. */
@Override
public void writeAscii(String value) throws KryoException {
    if (value == null) {
        writeByte(0x80); // 0 means null, bit 8 means UTF8.
        return;
    }
    int charCount = value.length();
    if (charCount == 0) {
        writeByte(1 | 0x80); // 1 means empty string, bit 8 means UTF8.
        return;
    }

    ByteBuf buffer = byteBuf;
    if (buffer.writableBytes() < charCount) {
        buffer.capacity(buffer.capacity() + charCount + 1);
    }

    // One byte per character; anything above 127 would be silently corrupted here.
    for (int charIndex = 0; charIndex < charCount; charIndex++) {
        buffer.writeByte((byte) value.charAt(charIndex));
    }

    // Set bit 8 on the final byte to mark the end of the ASCII string.
    int last = buffer.writerIndex() - 1;
    buffer.setByte(last, buffer.getByte(last) | 0x80);
}

From source file:dorkbox.network.pipeline.discovery.BroadcastServer.java

License:Apache License

/**
 * Handles a one-byte host-discovery broadcast by replying with this server's enabled ports.
 *
 * @return true if the broadcast was responded to, false if it was not a broadcast (and there was no response)
 */
public boolean isDiscoveryRequest(final Channel channel, ByteBuf byteBuf, final InetSocketAddress localAddress,
        InetSocketAddress remoteAddress) {
    // A discovery broadcast is exactly one byte long and carries the magic broadcast ID.
    if (byteBuf.readableBytes() != 1 || byteBuf.getByte(0) != MagicBytes.broadcastID) {
        return false;
    }

    byteBuf.readByte(); // consume the byte now that it has been verified as a broadcast byte

    // absolutely MUST send packet > 0 across, otherwise netty will think it failed to write to the socket,
    // and keep trying. (this bug was fixed by netty, however we are keeping this code)
    ByteBuf response = channel.alloc().ioBuffer(bufferSize);
    response.writeByte(MagicBytes.broadcastResponseID);

    // Advertise which protocols (TCP/UDP, or both) are enabled so the client knows which ports follow.
    int enabledFlag = 0;
    if (tcpPort > 0) {
        enabledFlag |= MagicBytes.HAS_TCP;
    }
    if (udpPort > 0) {
        enabledFlag |= MagicBytes.HAS_UDP;
    }
    response.writeByte(enabledFlag);

    // TCP port is always written first, then UDP — each only if enabled.
    if (tcpPort > 0) {
        response.writeShort(tcpPort);
    }
    if (udpPort > 0) {
        response.writeShort(udpPort);
    }

    channel.writeAndFlush(new DatagramPacket(response, remoteAddress, localAddress));

    logger.info("Responded to host discovery from [{}]", EndPoint.getHostDetails(remoteAddress));

    byteBuf.release();
    return true;
}

From source file:dorkbox.network.pipeline.discovery.BroadcastServer.java

License:Apache License

/**
 * Parses a host-discovery response and stashes the advertised TCP/UDP ports on the channel.
 *
 * @return true if this is a broadcast response, false if it was not a broadcast response
 */
public static boolean isDiscoveryResponse(ByteBuf byteBuf, final InetAddress remoteAddress,
        final Channel channel) {
    // A discovery response fits within the max packet size and starts with the response magic byte.
    if (byteBuf.readableBytes() > MagicBytes.maxPacketSize || byteBuf.getByte(0) != MagicBytes.broadcastResponseID) {
        return false;
    }

    byteBuf.readByte(); // consume the byte now that it has been verified as a broadcast byte

    // Flag byte tells us which protocols follow: TCP, UDP, or both.
    int typeID = byteBuf.readByte();

    int tcpPort = 0;
    int udpPort = 0;

    // TCP port is always written first, then UDP — each only if enabled.
    if ((typeID & MagicBytes.HAS_TCP) == MagicBytes.HAS_TCP) {
        tcpPort = byteBuf.readUnsignedShort();
    }
    if ((typeID & MagicBytes.HAS_UDP) == MagicBytes.HAS_UDP) {
        udpPort = byteBuf.readUnsignedShort();
    }

    channel.attr(ClientDiscoverHostHandler.STATE)
            .set(new BroadcastResponse(remoteAddress, tcpPort, udpPort));

    byteBuf.release();
    return true;
}

From source file:herddb.proto.PduCodec.java

License:Apache License

/**
 * Decodes a PDU from the buffer without consuming it.
 * Wire layout: [version:1][flags:1][type:1][messageId:8][payload...].
 *
 * @throws IOException if the version byte is not {@code VERSION_3}
 */
public static Pdu decodePdu(ByteBuf in) throws IOException {
    final byte version = in.getByte(0);
    if (version != VERSION_3) {
        throw new IOException("Cannot decode version " + version);
    }
    final byte flags = in.getByte(1);
    final byte type = in.getByte(2);
    final long messageId = in.getLong(3);
    return Pdu.newPdu(in, type, flags, messageId);
}

From source file:io.airlift.drift.transport.netty.codec.HeaderTransport.java

License:Apache License

/**
 * Attempts to decode the frame metadata (protocol, sequence id, out-of-order support) from a
 * header-transport frame without consuming the caller's input.
 * <p>
 * Works on a retained duplicate so the original reader index is untouched; the duplicate is
 * always released in the finally block.
 *
 * @param input the raw frame bytes, positioned at the start of the frame header
 * @return the decoded frame info, or empty if not enough bytes are available yet
 */
public static Optional<FrameInfo> tryDecodeFrameInfo(ByteBuf input) {
    ByteBuf buffer = input.retainedDuplicate();
    try {
        // not enough data yet for even the fixed-size header
        if (buffer.readableBytes() < FRAME_HEADER_SIZE) {
            return Optional.empty();
        }
        // skip magic
        buffer.readShort();
        short flags = buffer.readShort();
        // NOTE(review): this compares the masked value to literal 1, which is only equivalent to the
        // usual (flags & mask) != 0 test if FLAG_SUPPORT_OUT_OF_ORDER_MASK == 1 — confirm the mask's value.
        boolean outOfOrderResponse = (flags & FLAG_SUPPORT_OUT_OF_ORDER_MASK) == 1;
        int headerSequenceId = buffer.readInt();
        // header size is transmitted in units of 4-byte words
        int headerSize = buffer.readShort() << 2;

        // the variable-size header block hasn't fully arrived yet
        if (buffer.readableBytes() < headerSize) {
            return Optional.empty();
        }

        // peek (don't consume) the protocol id — the first byte of the header block
        byte protocolId = buffer.getByte(buffer.readerIndex());
        Protocol protocol = Protocol.getProtocolByHeaderTransportId(protocolId);

        buffer.skipBytes(headerSize);
        SimpleFrameInfoDecoder simpleFrameInfoDecoder = new SimpleFrameInfoDecoder(HEADER, protocol,
                outOfOrderResponse);
        Optional<FrameInfo> frameInfo = simpleFrameInfoDecoder.tryDecodeFrameInfo(buffer);
        if (frameInfo.isPresent()) {
            // sanity-check: the sequence id in the frame header must match the one in the message body
            int messageSequenceId = frameInfo.get().getSequenceId();
            checkArgument(headerSequenceId == messageSequenceId,
                    "Sequence ids don't match. headerSequenceId: %s. messageSequenceId: %s", headerSequenceId,
                    messageSequenceId);
        }
        return frameInfo;
    } finally {
        buffer.release();
    }
}