List of usage examples for io.netty.buffer.ByteBuf.array()
public abstract byte[] array();
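Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed source files; the class and method names are illustrative) of the usual guard around array(): the call is only valid for heap-backed buffers, so direct, composite, and some pooled buffers have to be copied out with getBytes() instead.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public final class ByteBufArrayExample {
    /**
     * Copies the readable bytes of a ByteBuf into a byte[].
     * Uses the backing array directly when one is available, and falls
     * back to getBytes(...) for buffers without an accessible array.
     */
    static byte[] readableBytes(ByteBuf buf) {
        byte[] out = new byte[buf.readableBytes()];
        if (buf.hasArray()) {
            // Heap buffer: read out of the backing array, honoring arrayOffset() and readerIndex().
            System.arraycopy(buf.array(), buf.arrayOffset() + buf.readerIndex(), out, 0, out.length);
        } else {
            // Direct/composite buffer: array() would throw UnsupportedOperationException, so copy instead.
            buf.getBytes(buf.readerIndex(), out);
        }
        return out;
    }

    public static void main(String[] args) {
        ByteBuf heap = Unpooled.wrappedBuffer(new byte[] { 1, 2, 3 });
        ByteBuf direct = Unpooled.directBuffer().writeBytes(new byte[] { 4, 5, 6 });
        System.out.println(java.util.Arrays.toString(readableBytes(heap)));   // [1, 2, 3]
        System.out.println(java.util.Arrays.toString(readableBytes(direct))); // [4, 5, 6]
    }
}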
From source file:dorkbox.network.connection.KryoExtra.java
License:Apache License
/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public Object readCompressed(final Connection_ connection, final ByteBuf buffer, int length) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    // get the decompressed length (at the beginning of the array)
    final int uncompressedLength = OptimizeUtilsByteBuf.readInt(buffer, true);
    final int lengthLength = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-5 bytes for the decompressed size

    // have to adjust for uncompressed length
    length = length - lengthLength;

    ///////// decompress data -- as it's ALWAYS compressed

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to use
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see: https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf
    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (inputBuf.hasArray() &&
        inputBuf.array()[0] == inputBuf.getByte(0) &&
        inputBuf.array().length == inputBuf.capacity()) {

        // we can use it...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    ///////// decompress data -- as it's ALWAYS compressed
    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);
    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}
From source file:dorkbox.network.connection.KryoExtra.java
License:Apache License
public synchronized void writeCrypto(final Connection_ connection, final ByteBuf buffer, final Object message) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to use
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see: https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf
    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() &&
        objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0) &&
        objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {

        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, the small input (that would result in a larger
    // output) will be negated by the increase in size from the encryption
    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazy initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset max 4 bytes to leave room for length of tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput, maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.
    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input. compression output is now encryption input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array. This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // correct length for encryption
    length = compressedLength + lengthLength; // +1 to +4 for the uncompressed size bytes

    /////// encrypting data.
    final long nextGcmSequence = connection.getNextGcmSequence();

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(nextGcmSequence, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(true, cryptoParameters);

    byte[] cryptoOutput;

    // lazy initialize the crypto output buffer
    int cryptoSize = length + 16; // from: aes.getOutputSize(length);

    // 'output' is the temp byte array
    if (cryptoSize > cryptoOutputLength) {
        cryptoOutputLength = cryptoSize;
        cryptoOutput = new byte[cryptoSize];
        this.cryptoOutput = cryptoOutput;
    } else {
        cryptoOutput = this.cryptoOutput;
    }

    int encryptedLength = aes.processBytes(inputArray, inputOffset, length, cryptoOutput, 0);

    try {
        // authentication tag for GCM
        encryptedLength += aes.doFinal(cryptoOutput, encryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES encrypt the data", e);
    }

    // write out our GCM counter
    OptimizeUtilsByteBuf.writeLong(buffer, nextGcmSequence, true);

    // have to copy over the orig data, because we used the temp buffer
    buffer.writeBytes(cryptoOutput, 0, encryptedLength);
}
From source file:dorkbox.network.connection.KryoExtra.java
License:Apache License
public Object readCrypto(final Connection_ connection, final ByteBuf buffer, int length) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ////////////////
    // Note: we CANNOT write BACK to the buffer as "temp" storage, since there could be additional data on it!
    ////////////////

    ByteBuf inputBuf = buffer;

    final long gcmIVCounter = OptimizeUtilsByteBuf.readLong(buffer, true);
    int lengthLength = OptimizeUtilsByteArray.longLength(gcmIVCounter, true);

    // have to adjust for the gcmIVCounter
    length = length - lengthLength;

    /////////// decrypting data

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing arrays out of a Heap Buffer once they are resized and begin to use
    // sliced. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see: https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf
    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible because
    // the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (inputBuf.hasArray() &&
        inputBuf.array()[0] == inputBuf.getByte(0) &&
        inputBuf.array().length == inputBuf.capacity()) {

        // we can use it...
        inputArray = inputBuf.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = inputBuf.arrayOffset() + lengthLength;
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        inputBuf.getBytes(inputBuf.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    // have to make sure to set the position of the buffer, since our conversion to array DOES NOT set the new reader index.
    buffer.readerIndex(buffer.readerIndex() + length);

    // this is a threadlocal, so that we don't clobber other threads that are performing crypto on the same connection at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(gcmIVCounter, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(false, cryptoParameters);

    int cryptoSize = length - 16; // from: aes.getOutputSize(length);

    // lazy initialize the decrypt output buffer
    byte[] decryptOutputArray;
    if (cryptoSize > decryptOutputLength) {
        decryptOutputLength = cryptoSize;
        decryptOutputArray = new byte[cryptoSize];
        this.decryptOutput = decryptOutputArray;

        decryptBuf = Unpooled.wrappedBuffer(decryptOutputArray);
    } else {
        decryptOutputArray = this.decryptOutput;
    }

    int decryptedLength = aes.processBytes(inputArray, inputOffset, length, decryptOutputArray, 0);

    try {
        // authentication tag for GCM
        decryptedLength += aes.doFinal(decryptOutputArray, decryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES decrypt the data", e);
    }

    ///////// decompress data -- as it's ALWAYS compressed

    // get the decompressed length (at the beginning of the array)
    inputArray = decryptOutputArray;
    final int uncompressedLength = OptimizeUtilsByteArray.readInt(inputArray, true);
    inputOffset = OptimizeUtilsByteArray.intLength(uncompressedLength, true); // because 1-4 bytes for the decompressed size

    byte[] decompressOutputArray = this.decompressOutput;
    if (uncompressedLength > decompressOutputLength) {
        decompressOutputLength = uncompressedLength;
        decompressOutputArray = new byte[uncompressedLength];
        this.decompressOutput = decompressOutputArray;

        decompressBuf = Unpooled.wrappedBuffer(decompressOutputArray); // so we can read via kryo
    }
    inputBuf = decompressBuf;

    // LZ4 decompress, requires the size of the ORIGINAL length (because we use the FAST decompressor)
    decompressor.decompress(inputArray, inputOffset, decompressOutputArray, 0, uncompressedLength);
    inputBuf.setIndex(0, uncompressedLength);

    // read the object from the buffer.
    reader.setBuffer(inputBuf);

    return readClassAndObject(reader); // this properly sets the readerIndex, but only if it's the correct buffer
}
From source file:eu.matejkormuth.pexel.network.NettyMessageDecoder.java
License:Open Source License
@Override
protected void decode(final ChannelHandlerContext paramChannelHandlerContext, final ByteBuf message,
        final List<Object> out) throws Exception {
    out.add(new NettyMessage(message.array()));
}
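Note that this decoder calls array() without checking hasArray(): on a direct or pooled buffer the call throws UnsupportedOperationException, and even on a heap buffer it returns the entire backing array rather than just the readable bytes. A hedged alternative sketch (assuming the same NettyMessage constructor) that copies only the readable bytes instead:

@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf message, final List<Object> out) {
    // Copy the readable region instead of assuming a heap backing array.
    byte[] bytes = new byte[message.readableBytes()];
    message.getBytes(message.readerIndex(), bytes);
    out.add(new NettyMessage(bytes));
}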
From source file:eu.netide.backend.NetIDEBackendController.java
License:Open Source License
@Override
public void onOpenFlowCoreMessage(Long datapathId, ByteBuf msg, int moduleId, int transactionId) {
    Dpid dpid = new Dpid(datapathId);
    DeviceId deviceId = DeviceId.deviceId(Dpid.uri(dpid));
    /*if (netIDEDeviceProvider.getSwitchOFFactory(dpid) == null) {
        //The switch does not exist, check if needed
        return;
    }*/
    //Check module ID, multibackend case
    if (moduleId != moduleHandler.getModuleId(BackendLayer.MODULE_NAME)
            && moduleHandler.getModuleNameFromID(moduleId) != null) {
        ChannelBuffer buffer = ChannelBuffers.copiedBuffer(msg.array());
        OFMessageReader<OFMessage> reader = OFFactories.getGenericReader();
        try {
            OFMessage message = reader.readFrom(buffer);
            log.debug("Msg from NetIDE core: TransactionId {}, ModuleId {}, OpenFlow Message {}",
                    transactionId, moduleId, message);
            switch (message.getType()) {
            case PORT_STATUS:
                netIDEDeviceProvider.portChanged(dpid, (OFPortStatus) message);
                break;
            case PACKET_IN:
                Integer xid = xids.getIfPresent(transactionId);
                if (xid != null) {
                    //TODO: Send only fence?
                    //sendFenceMessage(transactionId, moduleId);
                } else {
                    xids.put(transactionId, transactionId);
                    lastXid.set(transactionId);
                    netIDEPacketProvider.createPacketContext(dpid, (OFPacketIn) message, transactionId, moduleId);
                }
                break;
            case FLOW_REMOVED:
                OFFlowRemoved flowRemoved = (OFFlowRemoved) message;
                netIDEFlowRuleProvider.notifyFlowRemoved(deviceId, flowRemoved);
                break;
            case FEATURES_REPLY:
                //Send to DeviceProvider
                netIDEDeviceProvider.registerNewSwitch(dpid, (OFFeaturesReply) message);
                break;
            case STATS_REPLY:
                OFStatsReply reply = (OFStatsReply) message;
                switch (reply.getStatsType()) {
                case PORT_DESC:
                    //Handling OF 13 port desc
                    netIDEDeviceProvider.registerSwitchPorts(dpid, (OFPortDescStatsReply) reply);
                    break;
                case FLOW:
                    netIDEFlowRuleProvider.notifyStatistics(deviceId, (OFFlowStatsReply) message);
                    break;
                default:
                    break;
                }
                break;
            default:
                break;
            }
        } catch (Exception e) {
            log.error("Error in decoding OFMessage from the CORE {}", e);
        }
    }
}
From source file:eu.netide.shim.NetIDEShimController.java
License:Open Source License
@Override
public void onOpenFlowCoreMessage(Long datapathId, ByteBuf msg, int moduleId) {
    //TODO: Handle messages that requires replies (e.g. barrier)
    Dpid dpid = new Dpid(datapathId);
    log.debug("Dpid {}", dpid);
    OpenFlowSwitch sw = controller.getSwitch(dpid);
    ChannelBuffer buffer = ChannelBuffers.copiedBuffer(msg.array());
    OFMessageReader<OFMessage> reader = OFFactories.getGenericReader();
    try {
        OFMessage message = reader.readFrom(buffer);
        switch (message.getType()) {
        case FLOW_MOD:
            OFFlowMod flowMod = (OFFlowMod) message;
            if (flowMod.getPriority() > ONOS_DEFAULT_PRIORITY) {
                sw.sendMsg(message);
            } else {
                OFFlowMod.Builder flowModBuilder = flowMod.createBuilder();
                flowModBuilder.setPriority(ONOS_DEFAULT_PRIORITY + 1);
                sw.sendMsg(flowModBuilder.build());
            }
            break;
        case STATS_REQUEST:
            OFStatsRequest reply = (OFStatsRequest) message;
            switch (reply.getStatsType()) {
            case DESC:
                OFDescStatsRequest ofDescStatsRequest = (OFDescStatsRequest) reply;
                OFDescStatsReply.Builder ofDescReply = sw.factory().buildDescStatsReply();
                ofDescReply.setXid(ofDescStatsRequest.getXid());
                sendOpenFlowMessageToCore(ofDescReply.build(), ofDescReply.getXid(), sw.getId(), moduleId);
                break;
            default:
                //Save the xid
                xids.put(message.getXid(), moduleId);
                sw.sendMsg(message);
            }
            break;
        case BARRIER_REQUEST:
            xids.put(message.getXid(), moduleId);
            sw.sendMsg(message);
            break;
        case ECHO_REQUEST:
            OFEchoRequest echoRequest = (OFEchoRequest) message;
            ChannelBuffer buf = ChannelBuffers.dynamicBuffer();
            echoRequest.writeTo(buf);
            byte[] payload = buf.array();
            OFEchoReply.Builder echoReply = sw.factory().buildEchoReply();
            echoReply.setXid(echoRequest.getXid());
            echoReply.setData(payload);
            sendOpenFlowMessageToCore(echoReply.build(), echoReply.getXid(), sw.getId(), moduleId);
            break;
        case FEATURES_REQUEST:
            OFFeaturesReply featuresReply = getFeatureReply(sw);
            sendOpenFlowMessageToCore(featuresReply, featuresReply.getXid(), sw.getId(), moduleId);
            //Create OFPortDescStatsReply for OF_13
            if (sw.factory().getVersion() == OFVersion.OF_13) {
                OFPortDescStatsReply.Builder statsReplyBuilder = sw.factory().buildPortDescStatsReply();
                statsReplyBuilder.setEntries(sw.getPorts()).setXid(0);
                OFPortDescStatsReply ofPortStatsReply = statsReplyBuilder.build();
                sendOpenFlowMessageToCore(ofPortStatsReply, ofPortStatsReply.getXid(), sw.getId(), moduleId);
            }
            break;
        case PACKET_OUT:
            sw.sendMsg(message);
            break;
        case GET_CONFIG_REQUEST:
            OFGetConfigRequest setConfig = (OFGetConfigRequest) message;
            OFGetConfigReply.Builder configReply = sw.factory().buildGetConfigReply();
            configReply.setXid(setConfig.getXid());
            Set<OFConfigFlags> flags = Sets.newHashSet(OFConfigFlags.FRAG_NORMAL);
            configReply.setFlags(flags);
            configReply.setMissSendLen(0);
            sendOpenFlowMessageToCore(configReply.build(), configReply.getXid(), sw.getId(), moduleId);
            break;
        case SET_CONFIG:
            OFSetConfig ofSetConfig = (OFSetConfig) message;
            OFGetConfigReply.Builder ofGetConfigReply = sw.factory().buildGetConfigReply();
            ofGetConfigReply.setXid(ofSetConfig.getXid());
            Set<OFConfigFlags> flagsSet = Sets.newHashSet(OFConfigFlags.FRAG_NORMAL);
            ofGetConfigReply.setFlags(flagsSet);
            ofGetConfigReply.setMissSendLen(0);
            //sendOpenFlowMessageToCore(ofGetConfigReply.build(), ofGetConfigReply.getXid(), sw.getId(), moduleId);
            break;
        default:
            //sw.sendMsg(message);
            log.error("Unhandled OF message {}", message);
            break;
        }
    } catch (Exception e) {
        log.error("Error in decoding OFMessage from the CORE {}", e);
    }
}
From source file:growthcraft.core.common.tileentity.GrcBaseTile.java
License:Open Source License
@Override
public Packet getDescriptionPacket() {
    final NBTTagCompound data = new NBTTagCompound();
    final ByteBuf stream = Unpooled.buffer();
    try {
        writeToStream(stream);
        if (stream.readableBytes() == 0) {
            return null;
        }
    } catch (Throwable t) {
        System.err.println(t);
    }
    // P, for payload
    data.setByteArray("P", stream.array());
    return new S35PacketUpdateTileEntity(xCoord, yCoord, zCoord, 127, data);
}
From source file:igwmod.network.NetworkHandler.java
License:Open Source License
private static List<IMessage> getSplitMessages(IMessage message) {
    List<IMessage> messages = new ArrayList<IMessage>();
    ByteBuf buf = Unpooled.buffer();
    message.toBytes(buf);
    byte[] bytes = buf.array();
    if (bytes.length < MAX_SIZE) {
        messages.add(message);
    } else {
        messages.add(new MessageMultiHeader(bytes.length));
        int offset = 0;
        while (offset < bytes.length) {
            messages.add(new MessageMultiPart(
                    Arrays.copyOfRange(bytes, offset, Math.min(offset + MAX_SIZE, bytes.length))));
            offset += MAX_SIZE;
        }
    }
    return messages;
}
From source file:impl.underdark.transport.bluetooth.BtLink.java
License:Open Source License
private void inputLoop() {
    // Input I/O thread.
    sendHelloFrame();

    int bufferSize = 4096;
    ByteBuf inputData = Unpooled.buffer(bufferSize);
    inputData.order(ByteOrder.BIG_ENDIAN);

    try {
        int len;
        while (true) {
            inputData.ensureWritable(bufferSize, true);

            len = inputStream.read(inputData.array(), inputData.writerIndex(), bufferSize);
            if (len <= 0)
                break;

            inputData.writerIndex(inputData.writerIndex() + len);

            if (!formFrames(inputData))
                break;

            inputData.discardReadBytes();
            inputData.capacity(inputData.writerIndex() + bufferSize);
        } // while
    } catch (InterruptedIOException ex) {
        Logger.warn("bt input timeout: {}", ex);
        try {
            inputStream.close();
        } catch (IOException ioex) {
        }
        notifyDisconnect();
        return;
    } catch (Exception ex) {
        Logger.warn("bt input read failed.", ex);
        try {
            inputStream.close();
        } catch (IOException ioex) {
        }
        notifyDisconnect();
        return;
    }

    Logger.debug("bt input read end.");
    notifyDisconnect();
}
From source file:impl.underdark.transport.bluetooth.discovery.ble.ManufacturerData.java
License:Open Source License
public byte[] build() {
    ByteBuf data = Unpooled.wrappedBuffer(new byte[27]);
    data.clear();

    data.writeByte(1); // Version
    data.writeInt(appId);
    data.writeBytes(address);

    int channelsMask = 0;
    for (int channel : channels) {
        if (channel < 0 || channel > BtUtils.channelNumberMax)
            continue;

        // http://www.vipan.com/htdocs/bitwisehelp.html
        int channelBit = ipow(2, channel);
        channelsMask |= channelBit;
    }
    data.writeInt(channelsMask);

    return data.array();
}