Usage examples for io.netty.buffer.ByteBuf.clear()
public abstract ByteBuf clear();
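clear() resets the buffer's readerIndex and writerIndex to 0. It does not erase the buffer's contents, so it is cheap and is the usual way to recycle a buffer between writes, as the examples below show. A minimal, self-contained sketch (class and variable names are illustrative, not from the examples below):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ClearExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);

        buf.writeInt(42);                         // writerIndex -> 4
        System.out.println(buf.readInt());        // readerIndex -> 4, prints 42

        buf.clear();                              // both indexes -> 0; bytes are NOT erased
        System.out.println(buf.readableBytes());  // 0: buffer is ready for reuse

        buf.release();                            // balance the initial reference count
    }
}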
From source file:com.vethrfolnir.game.network.mu.crypt.MuDecoder.java
License:Open Source License
private static int DecodeBlock(ByteBuf buff, ByteBuf outBuff, int offset, int size) { // returns the decrypted size
    int index = 0;

    if ((size % 11) != 0) {
        log.warn("Cannot decrypt packet, it's already decrypted!: Size " + size + " = " + ((size % 11)));
        log.warn(PrintData.printData(buff.nioBuffer()));
        return -1;
    }

    ByteBuf encrypted = alloc.heapBuffer(11, 11).order(ByteOrder.LITTLE_ENDIAN);
    short[] uByteArray = new short[encrypted.capacity()];
    ByteBuf decrypted = alloc.heapBuffer(8, 8).order(ByteOrder.LITTLE_ENDIAN);
    ByteBuf converter = alloc.heapBuffer(4).order(ByteOrder.LITTLE_ENDIAN);

    for (int i = 0; i < size; i += 11) {
        buff.readBytes(encrypted);

        int Result = BlockDecode(decrypted, getAsUByteArray(encrypted, uByteArray), converter,
                MuKeyFactory.getClientToServerPacketDecKeys());

        if (Result != -1) {
            outBuff.writerIndex((offset - 1) + index);
            outBuff.writeBytes(decrypted);

            // reset the scratch buffers so they can be reused for the next 11-byte block
            decrypted.clear();
            encrypted.clear();
            converter.clear();

            index += Result;
        }
    }

    return index;
}
From source file:com.vethrfolnir.game.network.mu.crypt.MuDecoder.java
License:Open Source License
/**
 * @param decrypted the output buffer for the decoded block
 * @param InBuf the encrypted input, as unsigned bytes
 * @param converter a scratch buffer, cleared and reused between steps
 * @param Keys the decryption keys
 * @return the number of decrypted bytes
 */
private static int BlockDecode(ByteBuf decrypted, short[] InBuf, ByteBuf converter, long[] Keys) {
    long[] Ring = new long[4];
    short[] Shift = new short[4];

    ShiftBytes(Shift, 0x00, InBuf, 0x00, 0x10);
    ShiftBytes(Shift, 0x16, InBuf, 0x10, 0x02);
    writeByteArray(converter, Shift);
    flushArray(Shift, 0, 4);

    ShiftBytes(Shift, 0x00, InBuf, 0x12, 0x10);
    ShiftBytes(Shift, 0x16, InBuf, 0x22, 0x02);
    writeByteArray(converter, Shift);
    flushArray(Shift, 0, 4);

    ShiftBytes(Shift, 0x00, InBuf, 0x24, 0x10);
    ShiftBytes(Shift, 0x16, InBuf, 0x34, 0x02);
    writeByteArray(converter, Shift);
    flushArray(Shift, 0, 4);

    ShiftBytes(Shift, 0x00, InBuf, 0x36, 0x10);
    ShiftBytes(Shift, 0x16, InBuf, 0x46, 0x02);
    writeByteArray(converter, Shift);
    flushArray(Shift, 0, 4);

    for (int i = 0; i < Ring.length; i++) {
        Ring[i] = converter.readInt();
    }
    converter.clear();

    Ring[2] = Ring[2] ^ Keys[10] ^ (Ring[3] & 0xFFFF);
    Ring[1] = Ring[1] ^ Keys[9] ^ (Ring[2] & 0xFFFF);
    Ring[0] = Ring[0] ^ Keys[8] ^ (Ring[1] & 0xFFFF);

    int[] CryptBuf = new int[4]; // Had ushort cast here.
    CryptBuf[0] = (int) (Keys[8] ^ ((Ring[0] * Keys[4]) % Keys[0]));
    CryptBuf[1] = (int) (Keys[9] ^ ((Ring[1] * Keys[5]) % Keys[1]) ^ (Ring[0] & 0xFFFF));
    CryptBuf[2] = (int) (Keys[10] ^ ((Ring[2] * Keys[6]) % Keys[2]) ^ (Ring[1] & 0xFFFF));
    CryptBuf[3] = (int) (Keys[11] ^ ((Ring[3] * Keys[7]) % Keys[3]) ^ (Ring[2] & 0xFFFF));

    short[] Finale = new short[2];
    ShiftBytes(Finale, 0x00, InBuf, 0x48, 0x10);
    Finale[0] ^= Finale[1];
    Finale[0] ^= 0x3D;

    converter.clear();
    for (int i = 0; i < CryptBuf.length; i++) {
        converter.writeShort(CryptBuf[i]);
    }

    decrypted.writeBytes(converter, Finale[0]);
    converter.clear();

    // verify the checksum over the decrypted bytes
    short Check = 0xF8;
    for (int i = 0; i < Finale[0]; ++i)
        Check = (short) (Check ^ decrypted.getUnsignedByte(i));

    if (Finale[1] == Check)
        return Finale[0];

    // note: the length is returned whether or not the checksum matched
    return Finale[0];
}
From source file:com.vethrfolnir.game.network.mu.MuChannelHandler.java
License:Open Source License
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    ByteBuf buff = (msg instanceof ByteBuffer) ? ctx.alloc().buffer().writeBytes((ByteBuffer) msg)
            : (ByteBuf) msg;

    buff.readerIndex(2);
    int opcode = buff.readUnsignedByte();

    switch (opcode) {
    // double opcode
    case 0xf1:
    case 0xf3:
    case 0x0e:
    case 0x03:
        buff.readerIndex(buff.readerIndex() - 1);
        opcode = buff.readUnsignedShort(); // ex. 0xF1_03
        break;
    default:
        break;
    }

    if (opcode == 0xe00) { // Time packet?
        buff.clear();
        buff.release();
        return;
    }

    ReadPacket packet = clientpackets.get(opcode);

    if (packet != null) {
        MuClient client = ctx.channel().attr(MuClient.ClientKey).get();
        packet.read(client, buff);
    } else {
        log.warn("Unknown packet[opcode = 0x" + PrintData.fillHex(opcode, 2) + "]. Dump: ");
        log.warn(PrintData.printData(buff.nioBuffer(0, buff.writerIndex())));
    }

    if (buff.refCnt() > 0) {
        buff.release();
    }
}
From source file:divconq.api.internal.UploadPutHandler.java
License:Open Source License
public void start(final HyperSession parent, ScatteringByteChannel src, String chanid,
        Map<String, Cookie> cookies, long size, long offset, final OperationCallback callback) {
    this.src = src;
    this.cookies = cookies;
    this.callback = callback;

    this.dest = this.allocateChannel(parent, callback);

    if (this.callback.hasErrors()) {
        callback.complete();
        return;
    }

    // send a request to get things going
    HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT,
            "/upload/" + chanid + "/Final");

    req.headers().set(Names.HOST, parent.getInfo().getHost());
    req.headers().set(Names.USER_AGENT, "DivConq HyperAPI Client 1.0");
    req.headers().set(Names.CONNECTION, HttpHeaders.Values.CLOSE);
    req.headers().set(Names.COOKIE, ClientCookieEncoder.encode(this.cookies.values()));
    req.headers().set(HttpHeaders.Names.CONTENT_LENGTH, size - offset);

    // send request headers - must flush here in case CL = 0
    this.dest.writeAndFlush(req);

    // now start sending the file
    long sent = offset;
    callback.getContext().setAmountCompleted((int) (sent * 100 / size));

    ByteBuf bb = null;

    try {
        bb = Hub.instance.getBufferAllocator().directBuffer(64 * 1024); // TODO review if direct is best here

        long toskip = offset;

        if (src instanceof SeekableByteChannel) {
            ((SeekableByteChannel) src).position(toskip);
        } else {
            while (toskip > 0) {
                int skip = (int) Math.min(bb.capacity(), toskip);
                toskip -= bb.writeBytes(src, skip);
                bb.clear();
            }
        }

        // now start writing the upload
        int amt = bb.writeBytes(src, bb.capacity());

        while (amt != -1) {
            bb.retain(); // this ups ref cnt to 2 - we plan to reuse the buffer

            this.dest.writeAndFlush(bb).sync();

            sent += amt;

            if (size > 0)
                callback.getContext().setAmountCompleted((int) (sent * 100 / size));

            // by the time we get here, that buffer has been used up and we can use it for the next block
            if (bb.refCnt() != 1)
                throw new IOException("Buffer reference count is not correct");

            // stop writing if canceled
            if (!this.dest.isOpen()) {
                this.finish(); // might already be finished, but make sure (this is helpful when api.abortStream is called)
                break;
            }

            bb.clear();
            amt = bb.writeBytes(src, bb.capacity());
        }

        // we are now done with it
        bb.release();
    } catch (Exception x) {
        try {
            if (bb != null)
                bb.release();
        } catch (Exception x2) {
        }

        callback.error(1, "Local read error: " + x);
        this.finish();
    }
}
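The loop above shows a common clear() idiom: retain() bumps the reference count before writeAndFlush(...) releases it, so after a synchronous flush the buffer is back at refCnt 1 and can be clear()ed and refilled instead of allocating a new buffer per block. A stripped-down sketch of just that pattern, assuming a connected Channel named ch and a ScatteringByteChannel named src (both hypothetical; writeBytes throws IOException):

ByteBuf bb = ch.alloc().directBuffer(64 * 1024);
int amt = bb.writeBytes(src, bb.capacity());

while (amt != -1) {
    bb.retain();                  // refCnt 2: writeAndFlush will release one
    ch.writeAndFlush(bb).sync();  // after sync, refCnt is back to 1

    bb.clear();                   // reset indexes so the same buffer can be refilled
    amt = bb.writeBytes(src, bb.capacity());
}

bb.release();                     // done with the buffer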
From source file:divconq.api.LocalSession.java
License:Open Source License
@Override
public void sendStream(ScatteringByteChannel in, long size, long offset, final String channelid,
        final OperationCallback callback) {
    final DataStreamChannel chan = this.session.getChannel(channelid);

    if (chan == null) {
        callback.error(1, "Missing channel");
        callback.complete();
        return;
    }

    chan.setDriver(new IStreamDriver() {
        @Override
        public void cancel() {
            callback.error(1, "Transfer canceled");
            chan.complete();
            callback.complete();
        }

        @Override
        public void message(StreamMessage msg) {
            if (msg.isFinal()) {
                System.out.println("Final on channel: " + channelid);
                chan.complete();
                callback.complete();
            }
        }

        @Override
        public void nextChunk() {
            // won't chunk so won't happen here
        }
    });

    long sent = offset;
    int seq = 0;

    if (size > 0) {
        callback.getContext().setAmountCompleted((int) (sent * 100 / size));
        chan.getContext().setAmountCompleted((int) (sent * 100 / size)); // keep the channel active so it does not timeout
    }

    try {
        ByteBuf bb = Hub.instance.getBufferAllocator().directBuffer(64 * 1024);

        long toskip = offset;

        if (in instanceof SeekableByteChannel) {
            ((SeekableByteChannel) in).position(toskip);
        } else {
            while (toskip > 0) {
                int skip = (int) Math.min(bb.capacity(), toskip);
                toskip -= bb.writeBytes(in, skip);
                bb.clear();
            }
        }

        chan.touch();

        // now start writing the upload
        int amt = bb.writeBytes(in, bb.capacity());

        while (amt != -1) {
            bb.retain(); // this ups ref cnt to 2 - we plan to reuse the buffer

            StreamMessage b = new StreamMessage("Block", bb);
            b.setField("Sequence", seq);

            OperationResult sr = chan.send(b);

            if (sr.hasErrors()) {
                chan.close();
                break;
            }

            seq++;
            sent += amt;

            if (size > 0) {
                callback.getContext().setAmountCompleted((int) (sent * 100 / size));
                chan.getContext().setAmountCompleted((int) (sent * 100 / size)); // keep the channel active so it does not timeout
            }

            callback.touch();
            chan.touch();

            // by the time we get here, that buffer has been used up and we can use it for the next block
            if (bb.refCnt() != 1)
                throw new IOException("Buffer reference count is not correct");

            // stop writing if canceled
            if (chan.isClosed())
                break;

            bb.clear();
            amt = bb.writeBytes(in, bb.capacity());
        }

        // we are now done with it
        bb.release();

        // final only if not canceled
        if (!chan.isClosed())
            chan.send(MessageUtil.streamFinal());
    } catch (IOException x) {
        callback.error(1, "Local read error: " + x);
        chan.send(MessageUtil.streamError(1, "Source read error: " + x));
        chan.close();
        callback.complete();
    } finally {
        try {
            in.close();
        } catch (IOException x) {
        }
    }
}
From source file:divconq.pgp.EncryptedFileStream.java
License:Open Source License
public void writeData(byte[] bytes, int offset, int len) {
    // the first time this is called we need to write headers - those headers
    // call back into this method, so set the flag immediately
    if (!this.writeFirst) {
        this.writeFirst = true;
        this.writeFirstLiteral(len);
    }

    int remaining = len;
    int avail = this.packetsize - this.packetpos;

    // packetbuf may hold data that has not yet been processed, so if we are doing any
    // writes we need to drain the packet buffer first
    ByteBuf pbb = this.packetbuf;

    if (pbb != null) {
        int bbremaining = pbb.readableBytes();

        // only write if there is space available in the current packet or if we have a
        // total amount of data larger than the max packet size
        while ((bbremaining > 0) && ((avail > 0) || (bbremaining + remaining) >= MAX_PACKET_SIZE)) {
            // out of current packet space? create more packets
            if (avail == 0) {
                this.packetsize = MAX_PACKET_SIZE;
                this.packetpos = 0;
                this.writeDataInternal((byte) MAX_PARTIAL_LEN); // partial packet length
                avail = this.packetsize;
            }

            // figure out how much we can write to the current packet, write it, update indexes
            int alen = Math.min(avail, bbremaining);

            this.writeDataInternal(pbb.array(), pbb.arrayOffset() + pbb.readerIndex(), alen);
            pbb.skipBytes(alen);
            bbremaining = pbb.readableBytes();

            this.packetpos += alen;
            avail = this.packetsize - this.packetpos;

            // our formula always assumes that packetbuf starts at zero offset; any time we
            // write out part of packetbuf we either need to write it all and clear it, or
            // start a new buffer with the data at offset 0
            if (bbremaining == 0) {
                pbb.clear();
            } else {
                ByteBuf npb = Hub.instance.getBufferAllocator().heapBuffer(MAX_PACKET_SIZE);
                npb.writeBytes(pbb, bbremaining);
                this.packetbuf = npb;
                pbb.release();
                pbb = npb;
            }
        }
    }

    // only write if there is space available in the current packet or if we have a
    // total amount of data larger than the max packet size
    while ((remaining > 0) && ((avail > 0) || (remaining >= MAX_PACKET_SIZE))) {
        // out of current packet space? create more packets
        if (avail == 0) {
            this.packetsize = MAX_PACKET_SIZE;
            this.packetpos = 0;
            this.writeDataInternal((byte) MAX_PARTIAL_LEN); // partial packet length
            avail = this.packetsize;
        }

        // figure out how much we can write to the current packet, write it, update indexes
        int alen = Math.min(avail, remaining);

        this.writeDataInternal(bytes, offset, alen);

        remaining -= alen;
        offset += alen;

        this.packetpos += alen;
        avail = this.packetsize - this.packetpos;
    }

    // buffer the remainder to build a larger packet later
    if (remaining > 0) {
        if (this.packetbuf == null)
            this.packetbuf = Hub.instance.getBufferAllocator().heapBuffer(MAX_PACKET_SIZE);

        // add to the new buffer or the existing one; either way it is less than max here
        this.packetbuf.writeBytes(bytes, offset, remaining);
    }
}
From source file:dorkbox.network.connection.KryoExtra.java
License:Apache License
/**
 * This is NOT ENCRYPTED (and is only done on the loopback connection!)
 */
public synchronized void writeCompressed(final Connection_ connection, final ByteBuf buffer,
        final Object message) throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took + magic byte
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been resized
    // and begins to use slices. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see: https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf
    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible
    // because the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {
        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, the small input (that would
    // result in a larger output) will be negated by the increase in size caused by the encryption
    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazily initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset is max 4 bytes, to leave room for the length of the tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.
    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input. compression output is now buffer input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array (this is NOT THE BUFFER!).
    // This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // have to copy over the orig data, because we used the temp buffer. Also have to account for the length of
    // the uncompressed size
    buffer.writeBytes(inputArray, inputOffset, compressedLength + lengthLength);
}
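The hasArray() guard in this example is a general pattern for getting a byte[] out of a ByteBuf for JNI-style APIs: use the backing array directly when the buffer fully owns it, otherwise copy the bytes out. A reduced sketch of that check (buf stands for any ByteBuf; the variable names are illustrative):

byte[] array;
int offset;
int length = buf.readableBytes();

if (buf.hasArray() && buf.arrayOffset() == 0 && buf.array().length == buf.capacity()) {
    // the buffer fully owns its backing array: no copy needed
    array = buf.array();
    offset = buf.arrayOffset() + buf.readerIndex();
} else {
    // slice, pooled, or direct buffer: copy the readable bytes out
    array = new byte[length];
    buf.getBytes(buf.readerIndex(), array, 0, length);
    offset = 0;
}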
From source file:dorkbox.network.connection.KryoExtra.java
License:Apache License
public synchronized void writeCrypto(final Connection_ connection, final ByteBuf buffer, final Object message)
        throws IOException {
    // required by RMI and some serializers to determine which connection wrote (or has info about) this object
    this.rmiSupport = connection.rmiSupport();

    ByteBuf objectOutputBuffer = this.tempBuffer;
    objectOutputBuffer.clear(); // always have to reset everything

    // write the object to a TEMP buffer! this will be compressed
    writer.setBuffer(objectOutputBuffer);

    writeClassAndObject(writer, message);

    // save off how much data the object took
    int length = objectOutputBuffer.writerIndex();

    // NOTE: compression and encryption MUST work with byte[] because they use JNI!
    // Realistically, it is impossible to get the backing array out of a heap buffer once it has been resized
    // and begins to use slices. It's lame that there is a "double copy" of bytes here, but I don't know how to avoid it...
    // see: https://stackoverflow.com/questions/19296386/netty-java-getting-data-from-bytebuf
    byte[] inputArray;
    int inputOffset;

    // Even if a ByteBuf has a backing array (i.e. buf.hasArray() returns true), using it isn't always possible
    // because the buffer might be a slice of another buffer or a pooled buffer:
    //noinspection Duplicates
    if (objectOutputBuffer.hasArray() && objectOutputBuffer.array()[0] == objectOutputBuffer.getByte(0)
            && objectOutputBuffer.array().length == objectOutputBuffer.capacity()) {
        // we can use it...
        inputArray = objectOutputBuffer.array();
        inputArrayLength = -1; // this is so we don't REUSE this array accidentally!
        inputOffset = objectOutputBuffer.arrayOffset();
    } else {
        // we can NOT use it.
        if (length > inputArrayLength) {
            inputArrayLength = length;
            inputArray = new byte[length];
            this.inputArray = inputArray;
        } else {
            inputArray = this.inputArray;
        }

        objectOutputBuffer.getBytes(objectOutputBuffer.readerIndex(), inputArray, 0, length);
        inputOffset = 0;
    }

    ////////// compressing data
    // we ALWAYS compress our data stream -- because of how AES-GCM pads data out, the small input (that would
    // result in a larger output) will be negated by the increase in size caused by the encryption
    byte[] compressOutput = this.compressOutput;

    int maxLengthLengthOffset = 4; // length is never negative, so 4 is OK (5 means it's negative)
    int maxCompressedLength = compressor.maxCompressedLength(length);

    // add 4 so there is room to write the compressed size to the buffer
    int maxCompressedLengthWithOffset = maxCompressedLength + maxLengthLengthOffset;

    // lazily initialize the compression output buffer
    if (maxCompressedLengthWithOffset > compressOutputLength) {
        compressOutputLength = maxCompressedLengthWithOffset;
        compressOutput = new byte[maxCompressedLengthWithOffset];
        this.compressOutput = compressOutput;
    }

    // LZ4 compress. output offset is max 4 bytes, to leave room for the length of the tempOutput data
    int compressedLength = compressor.compress(inputArray, inputOffset, length, compressOutput,
            maxLengthLengthOffset, maxCompressedLength);

    // bytes can now be written to, because our compressed data is stored in a temp array.
    final int lengthLength = OptimizeUtilsByteArray.intLength(length, true);

    // correct input. compression output is now encryption input
    inputArray = compressOutput;
    inputOffset = maxLengthLengthOffset - lengthLength;

    // now write the ORIGINAL (uncompressed) length to the front of the byte array.
    // This is so we can use the FAST decompress version
    OptimizeUtilsByteArray.writeInt(inputArray, length, true, inputOffset);

    // correct length for encryption
    length = compressedLength + lengthLength; // +1 to +4 for the uncompressed size bytes

    /////// encrypting data.
    final long nextGcmSequence = connection.getNextGcmSequence();

    // this is a threadlocal, so we don't clobber other threads that are performing crypto on the same connection
    // at the same time
    final ParametersWithIV cryptoParameters = connection.getCryptoParameters();
    BigEndian.Long_.toBytes(nextGcmSequence, cryptoParameters.getIV(), 4); // put our counter into the IV

    final GCMBlockCipher aes = this.aesEngine;
    aes.reset();
    aes.init(true, cryptoParameters);

    byte[] cryptoOutput;

    // lazily initialize the crypto output buffer
    int cryptoSize = length + 16; // from: aes.getOutputSize(length);

    // 'output' is the temp byte array
    if (cryptoSize > cryptoOutputLength) {
        cryptoOutputLength = cryptoSize;
        cryptoOutput = new byte[cryptoSize];
        this.cryptoOutput = cryptoOutput;
    } else {
        cryptoOutput = this.cryptoOutput;
    }

    int encryptedLength = aes.processBytes(inputArray, inputOffset, length, cryptoOutput, 0);

    try {
        // authentication tag for GCM
        encryptedLength += aes.doFinal(cryptoOutput, encryptedLength);
    } catch (Exception e) {
        throw new IOException("Unable to AES encrypt the data", e);
    }

    // write out our GCM counter
    OptimizeUtilsByteBuf.writeLong(buffer, nextGcmSequence, true);

    // have to copy over the orig data, because we used the temp buffer
    buffer.writeBytes(cryptoOutput, 0, encryptedLength);
}
From source file:dpfmanager.shell.modules.server.core.HttpServerHandler.java
License:Open Source License
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    // Will use the first five bytes to detect a protocol.
    if (in.readableBytes() < 5) {
        in.clear();
        ctx.close();
        return;
    }

    final int magic1 = in.getUnsignedByte(in.readerIndex());
    final int magic2 = in.getUnsignedByte(in.readerIndex() + 1);

    if (isPost(magic1, magic2) || isOptions(magic1, magic2)) {
        // POST
        ChannelPipeline pipeline = ctx.pipeline();
        pipeline.addLast(new HttpRequestDecoder());
        pipeline.addLast(new HttpResponseEncoder());
        pipeline.addLast(new HttpPostHandler(context));
        pipeline.remove(this);
    } else if (isGet(magic1, magic2)) {
        // GET
        ChannelPipeline pipeline = ctx.pipeline();
        pipeline.addLast(new HttpServerCodec());
        pipeline.addLast(new HttpObjectAggregator(65536));
        pipeline.addLast(new ChunkedWriteHandler());
        pipeline.addLast(new HttpGetHandler(context));
        pipeline.remove(this);
    } else {
        // unknown protocol: discard everything and close the connection
        in.clear();
        ctx.close();
    }
}
From source file:eu.jangos.realm.network.decoder.RealmPacketDecoder.java
License:Apache License
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    logger.debug("Packet received: " + in.readableBytes());

    // We should at least get the header.
    if (in.readableBytes() < HEADER_LENGTH) {
        logger.debug("Packet received but less than header size.");
        return;
    }

    ByteBuf msg = in.order(ByteOrder.LITTLE_ENDIAN);

    // We should decrypt the header only once per packet.
    if (opcode == 0) {
        byte[] header = new byte[HEADER_LENGTH];
        int readBytes = (ctx.channel().attr(AUTH).get() == AuthStep.STEP_AUTHED ? HEADER_LENGTH : 4);

        for (int i = 0; i < readBytes; i++) {
            header[i] = msg.readByte();
        }

        header = ctx.channel().attr(CRYPT).get().decrypt(header);

        size = (short) ((header[0] << 8 | header[1]) & 0xFF);
        opcode = (short) ((header[3] << 8 | header[2] & 0xFF));

        logger.debug("Opcode received: " + opcode + ", with size: " + size + " (readable bytes: "
                + in.readableBytes() + ")");
    }

    if ((in.readableBytes() + 4) < size) {
        logger.debug("Packet size is higher than the available bytes. (" + in.readableBytes() + ", " + size + ")");
        return;
    }

    final Opcodes code = Opcodes.convert(opcode);

    if (code == null) {
        return;
    }

    AbstractRealmClientPacket packet = null;

    switch (code) {
    case CMSG_PING:
        packet = new CMSG_PING(code, size);
        break;
    case CMSG_AUTH_SESSION:
        packet = new CMSG_AUTH_SESSION(code, (short) 0);
        break;
    case CMSG_CHAR_ENUM:
        packet = new CMSG_CHAR_ENUM(code, size);
        break;
    case CMSG_CHAR_CREATE:
        packet = new CMSG_CHAR_CREATE(code, size);
        break;
    case CMSG_CHAR_DELETE:
        packet = new CMSG_CHAR_DELETE(code, size);
        break;
    case CMSG_PLAYER_LOGIN:
        packet = new CMSG_PLAYER_LOGIN(code, size);
        break;
    default:
        logger.debug("Context: " + ctx.name() + " Packet received, opcode not supported: " + code);
        msg.clear();
        ctx.close();
        break;
    }

    if (packet != null) {
        try {
            logger.debug("Context: " + ctx.name() + " Packet received, opcode: " + code);
            logger.debug("Packet content: \n"
                    + StringUtils.toPacketString(ByteBufUtil.hexDump(msg).toUpperCase(), size, code));
            packet.decode(msg);
            opcode = 0;
            size = 0;
        } catch (Exception e) {
            return;
        }

        out.add(packet);
        msg.clear();
    }
}