List of usage examples for io.netty.buffer.ByteBuf.hasArray()
public abstract boolean hasArray();
From source file:dorkbox.network.connection.KryoExtra.java
License:Apache License
/**
 * Serializes {@code message} with Kryo, LZ4-compresses it, AES-GCM encrypts it, and appends
 * the GCM sequence counter plus ciphertext to {@code buffer}.
 *
 * <p>Synchronized: this instance's scratch arrays ({@code inputArray}, {@code compressOutput},
 * {@code cryptoOutput}) and the Kryo {@code writer} are reused across calls.
 *
 * @param connection source connection; supplies RMI support, the GCM sequence and IV parameters
 * @param buffer     destination buffer for [varint GCM counter][encrypted payload]
 * @param message    object to serialize
 * @throws IOException if AES-GCM encryption fails
 */
public synchronized void writeCrypto(final Connection_ connection, final ByteBuf buffer, final Object message) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);
    writeClassAndObject(writer, message);

    // save off how much data the object took
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are
    // resized and begin to use slices, so a "double copy" of bytes happens in the else-branch below.
    // see: https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf
    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (buf.hasArray() returns true), using it directly is not
    // always possible: the buffer might be a slice of another buffer or a pooled buffer. The extra
    // getByte(0)/capacity checks below are a heuristic for "the array is the whole, unshifted buffer".
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {
        // we can use the backing array directly (zero copy)
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it: copy into the (lazily grown) reusable scratch array
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }
        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, the small input
    // (that would result in a larger output) will be negated by the increase in size by the encryption
    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput, maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.
    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input. compression output is now encryption input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array. This is so we can
    // use the FAST decompress version (it needs the original size up front).
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // correct length for encryption
    length = compressedLength + lengthLength; // +1 to +4 for the uncompressed size bytes

    /////// encrypting data.
    final long nextGcmSequence = connection.getNextGcmSequence();

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the
    // same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(nextGcmSequence, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(true, cryptoParameters);

    byte[] cryptoOutput;

    // lazy initialize the crypto output buffer
    int cryptoSize = length + 16; // from: aes.getOutputSize(length); 16 = GCM tag size

    // 'output' is the temp byte array
    if (cryptoSize > cryptoOutputLength) {
        cryptoOutputLength = cryptoSize;
        cryptoOutput = new byte[cryptoSize];
        this.cryptoOutput = cryptoOutput;
    } else {
        cryptoOutput = this.cryptoOutput;
    }

    int encryptedLength = aes.processBytes(inputArray, inputOffset, length, cryptoOutput, 0);

    try {
        // authentication tag for GCM
        encryptedLength += aes.doFinal(cryptoOutput, encryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES encrypt the data", e);
    }

    // write out our GCM counter
    OptimizeUtilsByteBuf.writeLong(buffer, nextGcmSequence, true);

    // have to copy over the orig data, because we used the temp buffer
    buffer.writeBytes(cryptoOutput, 0, encryptedLength);
}
From source file:dorkbox.network.connection.KryoExtra.java
License:Apache License
/**
 * Reverses {@code writeCrypto}: reads the GCM sequence counter from {@code buffer}, AES-GCM
 * decrypts {@code length} bytes, LZ4-decompresses the result, and deserializes it with Kryo.
 *
 * @param connection source connection; supplies RMI support and the IV parameters
 * @param buffer     buffer positioned at [varint GCM counter][encrypted payload]
 * @param length     total number of bytes of this message in {@code buffer} (counter included)
 * @return the deserialized object
 * @throws IOException if AES-GCM decryption/authentication fails
 */
public Object readCrypto(final Connection_ connection, final ByteBuf buffer, int length) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    final long gcmIVCounter = OptimizeUtilsByteBuf.readLong(buffer, true);
    int lengthLength = OptimizeUtilsByteArray.longLength(gcmIVCounter, true);

    // have to adjust for the gcmIVCounter
    length = length - lengthLength;

    /////////// decrypting data
    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are
    // resized and begin to use slices, so a "double copy" of bytes happens in the else-branch below.
    // see: https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf
    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (buf.hasArray() returns true), using it directly is not
    // always possible: the buffer might be a slice of another buffer or a pooled buffer.
    //noinspection Duplicates
    if (inputBuf.hasArray() && inputBuf.array()[0] == inputBuf.getByte(0)
            && inputBuf.array().length == inputBuf.capacity()) {
        // we can use the backing array directly (zero copy)
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        // NOTE(review): this skips the varint counter via lengthLength but does NOT add
        // readerIndex(); it appears to assume the counter started at index 0 — confirm against callers.
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it: copy into the (lazily grown) reusable scratch array
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }
        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT
    // set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the
    // same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(gcmIVCounter, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(false, cryptoParameters);

    int cryptoSize = length - 16; // from: aes.getOutputSize(length); 16 = GCM tag size

    // lazy initialize the decrypt output buffer
    byte[] decryptOutputArray;
    if (cryptoSize > decryptOutputLength) {
        decryptOutputLength = cryptoSize;
        decryptOutputArray = new byte[cryptoSize];
        this.decryptOutput = decryptOutputArray;

        decryptBuf = Unpooled.wrappedBuffer(decryptOutputArray);
    } else {
        decryptOutputArray = this.decryptOutput;
    }

    int decryptedLength = aes.processBytes(inputArray, inputOffset, length, decryptOutputArray, 0);

    try {
        // authentication tag for GCM
        decryptedLength += aes.doFinal(decryptOutputArray, decryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES decrypt the data", e);
    }

    ///////// decompress data -- as it's ALWAYS compressed

    // get the decompressed length (at the beginning of the array)
    inputArray = decryptOutputArray;
    final int uncompressedLength = OptimizeUtilsByteArray.readInt(inputArray, true);
    inputOffset = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-4 bytes for the decompressed size

    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);
    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}
From source file:io.atomix.cluster.messaging.impl.MessageDecoder.java
License:Apache License
/**
 * Decodes {@code length} bytes from the buffer's current reader index into a String using
 * {@code charset}, advancing the reader index past those bytes.
 *
 * <p>Direct buffers are decoded via {@link ByteBuf#toString}, heap buffers via their backing
 * array (zero copy), and anything else through an intermediate byte array.
 */
static String readString(ByteBuf buffer, int length, Charset charset) {
    if (buffer.isDirect()) {
        String decoded = buffer.toString(buffer.readerIndex(), length, charset);
        buffer.skipBytes(length);
        return decoded;
    }
    if (buffer.hasArray()) {
        int offset = buffer.arrayOffset() + buffer.readerIndex();
        String decoded = new String(buffer.array(), offset, length, charset);
        buffer.skipBytes(length);
        return decoded;
    }
    // no accessible backing array: fall back to a copy
    byte[] copy = new byte[length];
    buffer.readBytes(copy);
    return new String(copy, charset);
}
From source file:io.datty.aerospike.support.AerospikeValueUtil.java
License:Apache License
/**
 * Converts a (possibly null) Netty buffer into an Aerospike {@code Value}.
 *
 * <p>null becomes {@code NullValue}; a heap buffer whose readable region spans the whole backing
 * array becomes a zero-copy {@code BytesValue}; a partial heap region becomes a
 * {@code ByteSegmentValue}; direct/other buffers are copied.
 */
public static Value toValue(ByteBuf bufferOrNull) {
    if (bufferOrNull == null) {
        return new NullValue();
    }
    if (!bufferOrNull.hasArray()) {
        // no accessible backing array: copy the readable bytes out
        byte[] copy = new byte[bufferOrNull.readableBytes()];
        bufferOrNull.getBytes(bufferOrNull.readerIndex(), copy);
        return new BytesValue(copy);
    }
    int start = bufferOrNull.readerIndex();
    int length = bufferOrNull.readableBytes();
    if (start == 0 && length == bufferOrNull.capacity()) {
        // readable region is exactly the backing array: wrap it without copying
        return new BytesValue(bufferOrNull.array());
    }
    int baseOffset = bufferOrNull.arrayOffset() + start;
    // NOTE(review): the third argument passed here is an END offset (baseOffset + length);
    // confirm ByteSegmentValue's constructor expects (bytes, offset, endOffset) and not
    // (bytes, offset, length) — if the latter, this over-reads for nonzero offsets.
    return new ByteSegmentValue(bufferOrNull.array(), baseOffset, baseOffset + length);
}
From source file:io.datty.msgpack.core.AbstractMessageReader.java
License:Apache License
/**
 * Decodes {@code length} bytes at the buffer's current reader index into a String using
 * {@code charset}, advancing the reader index past them.
 *
 * @throws MessageParseException if fewer than {@code length} bytes are readable
 */
public String readString(ByteBuf buffer, int length, Charset charset) {
    int readable = buffer.readableBytes();
    if (length > readable) {
        throw new MessageParseException(
                "insufficient buffer length: " + readable + ", required length: " + length);
    }
    if (!buffer.hasArray()) {
        // no accessible backing array: decode from a copy
        byte[] copy = new byte[length];
        buffer.readBytes(copy);
        return new String(copy, charset);
    }
    // heap buffer: decode straight from the backing array, then advance manually
    int start = buffer.readerIndex();
    String decoded = new String(buffer.array(), buffer.arrayOffset() + start, length, charset);
    buffer.readerIndex(start + length);
    return decoded;
}
From source file:io.gatling.netty.util.ahc.ByteBufUtils.java
License:Apache License
/**
 * Returns the readable bytes of {@code buf} as a byte array, without moving the reader index.
 *
 * <p>Zero-copy when the buffer is a heap buffer whose backing array is exactly the readable
 * region (no offset, reader at 0, no spare capacity); otherwise the bytes are copied.
 */
public static byte[] byteBuf2Bytes(ByteBuf buf) {
    int readable = buf.readableBytes();
    int readerIndex = buf.readerIndex();
    if (buf.hasArray()) {
        byte[] backing = buf.array();
        // only hand out the backing array when it matches the readable region exactly
        boolean exactMatch = buf.arrayOffset() == 0 && readerIndex == 0 && backing.length == readable;
        if (exactMatch) {
            return backing;
        }
    }
    byte[] copy = new byte[readable];
    buf.getBytes(readerIndex, copy);
    return copy;
}
From source file:io.gatling.netty.util.ahc.Utf8ByteBufCharsetDecoder.java
License:Apache License
/**
 * Scans the given buffers and accumulates {@code totalSize} / {@code totalNioBuffers} for the
 * array-backed decode path; sets {@code withoutArray} and stops early if any buffer lacks a
 * backing array (in which case the accumulated totals are partial).
 */
private void inspectByteBufs(ByteBuf[] bufs) {
    for (ByteBuf component : bufs) {
        if (!component.hasArray()) {
            withoutArray = true; // a single non-heap buffer forces the no-array path
            break;
        }
        totalSize += component.readableBytes();
        totalNioBuffers += component.nioBufferCount();
    }
}
From source file:io.nebo.thrift.TNiftyTransport.java
License:Apache License
/**
 * Creates a Thrift transport over a Netty channel, reading from {@code in} and writing to a
 * freshly allocated output buffer.
 *
 * <p>When {@code in} is heap-backed, reads go directly through the backing array
 * ({@code buffer}/{@code bufferPosition}/{@code bufferEnd}) and the ByteBuf's readerIndex is
 * advanced to the end up front; otherwise the array fields are disabled (null / -1 sentinels)
 * and reads presumably go through the ByteBuf itself.
 */
public TNiftyTransport(Channel channel, ByteBuf in, ThriftTransportType thriftTransportType) {
    this.channel = channel;
    this.in = in;
    this.out = Unpooled.buffer(DEFAULT_OUTPUT_BUFFER_SIZE);
    this.thriftTransportType = thriftTransportType;
    this.initialReaderIndex = in.readerIndex();

    if (!in.hasArray()) {
        // no accessible backing array: disable the direct-array read path
        buffer = null;
        bufferPosition = 0;
        initialBufferPosition = bufferEnd = -1;
    } else {
        // heap buffer: read straight from the backing array
        buffer = in.array();
        initialBufferPosition = bufferPosition = in.arrayOffset() + in.readerIndex();
        bufferEnd = bufferPosition + in.readableBytes();
        // Without this, reading from a !in.hasArray() buffer will advance the readerIndex
        // of the buffer, while reading from a in.hasArray() buffer will not advance the
        // readerIndex, and this has led to subtle bugs. This should help to identify
        // those problems by making things more consistent.
        in.readerIndex(in.readerIndex() + in.readableBytes());
    }
}
From source file:io.servicecomb.foundation.vertx.TestVertxUtils.java
License:Apache License
/**
 * Verifies that getBytesFast copies the contents of a direct (non-array-backed) buffer:
 * direct buffers report hasArray() == false, so the copy path must be taken.
 */
@Test
public void testgetBytesFastByteBufCopy() {
    ByteBuf direct = Unpooled.directBuffer();
    direct.writeByte(1);

    // a direct buffer has no accessible backing array
    Assert.assertFalse(direct.hasArray());

    byte[] copied = VertxUtils.getBytesFast(direct);
    Assert.assertEquals(1, copied[0]);

    direct.release();
}
From source file:io.servicecomb.foundation.vertx.VertxUtils.java
License:Apache License
public static byte[] getBytesFast(ByteBuf byteBuf) { if (byteBuf.hasArray()) { return byteBuf.array(); }//w ww . java2s .co m byte[] arr = new byte[byteBuf.writerIndex()]; byteBuf.getBytes(0, arr); return arr; }