List of usage examples for io.netty.buffer ByteBuf readableBytes
public abstract int readableBytes();
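Before the project-specific examples below, here is a minimal, self-contained sketch (not taken from any of the listed sources; the class name is illustrative) showing what readableBytes() reports: the number of bytes between readerIndex and writerIndex, i.e. how much data is still available to read from the buffer.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class ReadableBytesExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeBytes(new byte[] { 1, 2, 3, 4 });
        // readableBytes() = writerIndex - readerIndex
        System.out.println(buf.readableBytes()); // 4

        buf.readShort();                          // consume 2 bytes
        System.out.println(buf.readableBytes()); // 2

        buf.release();                            // release the reference-counted buffer
    }
}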
From source file:com.hop.hhxx.example.http.websocketx.server.WebSocketIndexPageHandler.java
License:Apache License
@Override
protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest req) throws Exception {
    // Handle a bad request.
    if (!req.decoderResult().isSuccess()) {
        sendHttpResponse(ctx, req, new DefaultFullHttpResponse(HTTP_1_1, BAD_REQUEST));
        return;
    }

    // Allow only GET methods.
    if (req.method() != GET) {
        sendHttpResponse(ctx, req, new DefaultFullHttpResponse(HTTP_1_1, FORBIDDEN));
        return;
    }

    // Send the index page
    if ("/".equals(req.uri()) || "/index.html".equals(req.uri())) {
        String webSocketLocation = getWebSocketLocation(ctx.pipeline(), req, websocketPath);
        ByteBuf content = io.netty.example.http.websocketx.server.WebSocketServerIndexPage
                .getContent(webSocketLocation);
        FullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, OK, content);

        res.headers().set(HttpHeaderNames.CONTENT_TYPE, "text/html; charset=UTF-8");
        HttpUtil.setContentLength(res, content.readableBytes());

        sendHttpResponse(ctx, req, res);
    } else {
        sendHttpResponse(ctx, req, new DefaultFullHttpResponse(HTTP_1_1, NOT_FOUND));
    }
}
From source file:com.hop.hhxx.example.http2.helloworld.server.HelloWorldHttp2Handler.java
License:Apache License
@Override
public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding,
        boolean endOfStream) {
    int processed = data.readableBytes() + padding;
    if (endOfStream) {
        sendResponse(ctx, streamId, data.retain());
    }
    return processed;
}
From source file:com.hzmsc.scada.Jmtis.server.PortUnificationServerHandler.java
License:Apache License
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    System.out.println(in.getUnsignedByte(in.readerIndex()) + "..........................");
    // Will use the first five bytes to detect a protocol.
    if (in.readableBytes() < 5) {
        return;
    }

    if (isSsl(in)) {
        enableSsl(ctx);
    } else {
        final int magic1 = in.getUnsignedByte(in.readerIndex());
        final int magic2 = in.getUnsignedByte(in.readerIndex() + 1);
        if (isGzip(magic1, magic2)) {
            enableGzip(ctx);
        } else if (isHttp(magic1, magic2)) {
            switchToHttp(ctx);
        } else if (isJMAC(magic1, magic2)) {
            switchToJMAC(ctx);
        } else {
            // Unknown protocol; discard everything and close the connection.
            in.clear();
            ctx.close();
        }
    }
}
From source file:com.ibasco.agql.core.utils.ByteBufUtils.java
License:Open Source License
public static String readString(ByteBuf buffer, Charset encoding, boolean readNonNullTerminated,
        String defaultString) {
    int length = buffer.bytesBefore((byte) 0);
    if (length < 0) {
        if (readNonNullTerminated && buffer.readableBytes() > 0)
            length = buffer.readableBytes();
        else
            return null;
    }
    String data = buffer.readCharSequence(length, encoding).toString();
    //Discard the null terminator (if available)
    if (buffer.readableBytes() > 2 && buffer.getByte(buffer.readerIndex()) == 0)
        buffer.readByte();
    return data;
}
From source file:com.ibasco.agql.core.utils.ByteBufUtils.java
License:Open Source License
public static String readStringToEnd(ByteBuf buffer, Charset encoding) {
    int length = buffer.readableBytes();
    if (length > 0)
        return buffer.readCharSequence(length, encoding).toString();
    return null;
}
From source file:com.ibasco.agql.protocols.valve.source.query.handlers.SourceQueryPacketAssembler.java
License:Open Source License
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    log.trace("SourcePacketHandler.channelRead() : START");
    try {
        //Make sure we are only receiving an instance of DatagramPacket
        if (!(msg instanceof DatagramPacket)) {
            return;
        }

        final DatagramPacket packet = (DatagramPacket) msg;
        final ByteBuf data = ((DatagramPacket) msg).content();

        //Verify size
        if (data.readableBytes() <= 5) {
            log.debug("Not a valid datagram for processing. Size getTotalRequests needs to be at least more than or equal to 5 bytes. Discarding. (Readable Bytes: {})",
                    data.readableBytes());
            return;
        }

        //Try to read protocol header, determine if its a single packet or a split-packet
        int protocolHeader = data.readIntLE();

        //If the packet arrived is single type, we can already forward it to the next handler
        if (protocolHeader == 0xFFFFFFFF) {
            //Pass the message to the succeeding handlers
            ctx.fireChannelRead(packet.retain());
            return;
        }
        //If the packet is a split type...we need to process each succeeding read until we have a complete packet
        else if (protocolHeader == 0xFFFFFFFE) {
            final ByteBuf reassembledPacket = processSplitPackets(data, ctx.channel().alloc(), packet.sender());
            //Check if we already have a reassembled packet
            if (reassembledPacket != null) {
                ctx.fireChannelRead(packet.replace(reassembledPacket));
                return;
            }
        }
        //Packet is not being handled by any of our processors, discard
        else {
            log.debug("Not a valid protocol header. Discarding. (Header Received: Dec = {}, Hex = {})",
                    protocolHeader, Integer.toHexString(protocolHeader));
            return;
        }
    } catch (Exception e) {
        log.error(String.format("Error while processing packet for %s", ((DatagramPacket) msg).sender()), e);
        throw e;
    } finally {
        //Release the message
        ReferenceCountUtil.release(msg);
    }
    log.trace("SourcePacketHandler.channelRead() : END");
}
From source file:com.ibasco.agql.protocols.valve.source.query.handlers.SourceQueryPacketAssembler.java
License:Open Source License
/**
 * Process split-packet data
 *
 * @param data
 *         The {@link ByteBuf} containing the split-packet data
 * @param allocator
 *         The {@link ByteBufAllocator} used to create/allocate pooled buffers
 *
 * @return Returns a non-null {@link ByteBuf} if the split-packets have been assembled. Null if the split-packets are not yet complete.
 *
 * @throws Exception
 */
private ByteBuf processSplitPackets(ByteBuf data, ByteBufAllocator allocator, InetSocketAddress senderAddress)
        throws Exception {
    int packetCount, packetNumber, requestId, splitSize, packetChecksum = 0;
    boolean isCompressed;

    //Start processing
    requestId = data.readIntLE();
    //The response is compressed if the most significant bit is set
    isCompressed = ((requestId & 0x80000000) != 0);
    //The total number of packets in the response.
    packetCount = data.readByte();
    //The number of the packet. Starts at 0.
    packetNumber = data.readByte();

    //Create our key for this request (request id + sender ip)
    final SplitPacketKey key = new SplitPacketKey(requestId, senderAddress);

    log.debug("Processing split packet {}", key);
    log.debug("Split Packet Received = (AbstractRequest {}, Packet Number {}, Packet Count {}, Is Compressed: {})",
            requestId, packetNumber, packetCount, isCompressed);

    //Try to retrieve the split packet container for this request (if existing)
    //If request is not yet on the map, create and retrieve
    SplitPacketContainer splitPackets = this.requestMap.computeIfAbsent(key,
            k -> new SplitPacketContainer(packetCount));

    //As per protocol specs, the size is only present in the first packet of the response and only if the response is being compressed.
    //split size = Maximum size of packet before packet switching occurs. The default value is 1248 bytes (0x04E0)
    if (isCompressed) {
        splitSize = data.readIntLE();
        packetChecksum = data.readIntLE();
    } else {
        splitSize = data.readShortLE();
    }

    //TODO: Handle compressed split packets
    int bufferSize = Math.min(splitSize, data.readableBytes());
    byte[] splitPacket = new byte[bufferSize];
    data.readBytes(splitPacket); //transfer the split data into this buffer

    //Add the split packet to the container
    splitPackets.addPacket(packetNumber, splitPacket);

    //Have we received all packets for this request?
    if (splitPackets.isComplete()) {
        log.debug("Split Packets have all been successfully received from AbstractRequest {}. Re-assembling packets.",
                requestId);
        //Retrieve total split packets received based on their length
        int packetSize = splitPackets.getPacketSize();
        //Allocate a new buffer to store the re-assembled packets
        final ByteBuf packetBuffer = allocator.buffer(packetSize);

        boolean done = false;
        try {
            //Start re-assembling split-packets from the container
            done = reassembleSplitPackets(splitPackets, packetBuffer, isCompressed, splitSize, packetChecksum);
        } catch (Exception e) {
            //If an error occurs during re-assembly, make sure we release the allocated buffer
            packetBuffer.release();
            throw e;
        } finally {
            if (done)
                requestMap.remove(key);
        }
        return packetBuffer;
    }

    //Return null, indicating that we still don't have a complete packet
    return null;
}
From source file:com.ibasco.agql.protocols.valve.source.query.handlers.SourceQueryPacketAssembler.java
License:Open Source License
/**
 * Re-assembles the packets from the container.
 *
 * @param splitPackets The {@link SplitPacketContainer} to be re-assembled
 *
 * @return Returns true if the re-assembly process has completed successfully
 */
private boolean reassembleSplitPackets(SplitPacketContainer splitPackets, ByteBuf packetBuffer,
        boolean isCompressed, int decompressedSize, int packetChecksum) {
    log.trace("reassembleSplitPackets : START");

    if (packetBuffer == null)
        throw new IllegalArgumentException("Packet Buffer is not initialized");

    splitPackets.forEachEntry(packetEntry -> {
        log.debug("--> Packet #{} : {}", packetEntry.getKey(), packetEntry.getValue());
        //Throw exception if compression is set. Not yet supported.
        if (isCompressed)
            throw new IllegalStateException("Compression is not yet supported at this time sorry");
        //TODO: Is this still needed?
        /*if (isCompressed) {
            //From Steam Condenser (thanks Koraktor)
            try {
                ByteArrayInputStream stream = new ByteArrayInputStream(packetData);
                stream.read();
                stream.read();
                BZip2CompressorInputStream bzip2 = new BZip2CompressorInputStream(stream);
                byte[] uncompressedPacketData = new byte[uncompressedSize];
                bzip2.read(uncompressedPacketData, 0, uncompressedSize);
                CRC32 crc32 = new CRC32();
                crc32.update(uncompressedPacketData);
                int crc32checksum = (int) crc32.getValue();
                if (crc32checksum != packetChecksum) {
                    throw new PacketFormatException("CRC32 checksum mismatch of uncompressed packet data.");
                }
                packetData = uncompressedPacketData;
            } catch (IOException e) {
                throw new SteamCondenserException(e.getMessage(), e);
            }
        }*/
        packetBuffer.writeBytes(packetEntry.getValue());
    });

    log.trace("reassembleSplitPackets : END");
    return packetBuffer.readableBytes() > 0;
}
From source file:com.ibasco.agql.protocols.valve.source.query.handlers.SourceRconPacketDecoder.java
License:Open Source License
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
    final String separator = "=================================================================================================";

    //TODO: Move all code logic below to SourceRconPacketBuilder

    log.debug(separator);
    log.debug(" ({}) DECODING INCOMING DATA : Bytes Received = {} {}", index.incrementAndGet(),
            in.readableBytes(), index.get() > 1 ? "[Continuation]" : "");
    log.debug(separator);

    String desc = StringUtils.rightPad("Minimum allowable size?", PAD_SIZE);
    //Verify we have the minimum allowable size
    if (in.readableBytes() < 14) {
        log.debug(" [ ] {} = NO (Actual Readable Bytes: {})", desc, in.readableBytes());
        return;
    }
    log.debug(" [x] {} = YES (Actual Readable Bytes: {})", desc, in.readableBytes());

    //Reset if this happens to be not a valid source rcon packet
    in.markReaderIndex();

    //Read and Verify size
    desc = StringUtils.rightPad("Bytes received at least => than the \"declared\" size?", PAD_SIZE);
    int size = in.readIntLE();
    int readableBytes = in.readableBytes();
    if (readableBytes < size) {
        log.debug(" [ ] {} = NO (Declared Size: {}, Actual Bytes Read: {})", desc, readableBytes, size);
        in.resetReaderIndex();
        return;
    }
    log.debug(" [x] {} = YES (Declared Size: {}, Actual Bytes Read: {})", desc, readableBytes, size);

    //Read and verify request id
    desc = StringUtils.rightPad("Request Id within the valid range?", PAD_SIZE);
    int id = in.readIntLE();
    if (!(id == -1 || id == SourceRconUtil.RCON_TERMINATOR_RID || SourceRconUtil.isValidRequestId(id))) {
        log.debug(" [ ] {} = NO (Actual: {})", desc, id);
        in.resetReaderIndex();
        return;
    }
    log.debug(" [x] {} = YES (Actual: {})", desc, id);

    //Read and verify request type
    desc = StringUtils.rightPad("Valid response type?", PAD_SIZE);
    int type = in.readIntLE();
    if (get(type) == null) {
        log.debug(" [ ] {} = NO (Actual: {})", desc, type);
        in.resetReaderIndex();
        return;
    }
    log.debug(" [x] {} = YES (Actual: {} = {})", desc, type, SourceRconResponseType.get(type));

    //Read and verify body
    desc = StringUtils.rightPad("Contains Body?", PAD_SIZE);
    int bodyLength = in.bytesBefore((byte) 0);
    String body = StringUtils.EMPTY;
    if (bodyLength <= 0)
        log.debug(" [ ] {} = NO", desc);
    else {
        body = in.readCharSequence(bodyLength, StandardCharsets.UTF_8).toString();
        log.debug(" [x] {} = YES (Length: {}, Body: {})", desc, bodyLength,
                StringUtils.replaceAll(StringUtils.truncate(body, 30), "\n", "\\\\n"));
    }

    //Peek at the last two bytes and verify that they are null-bytes
    byte bodyTerminator = in.getByte(in.readerIndex());
    byte packetTerminator = in.getByte(in.readerIndex() + 1);

    desc = StringUtils.rightPad("Contains TWO null-terminating bytes at the end?", PAD_SIZE);
    //Make sure the last two bytes are NULL bytes (request id: 999 is reserved for split packet responses)
    if ((bodyTerminator != 0 || packetTerminator != 0) && (id == SourceRconUtil.RCON_TERMINATOR_RID)) {
        log.debug("Skipping {} bytes", in.readableBytes());
        in.skipBytes(in.readableBytes());
        return;
    } else if (bodyTerminator != 0 || packetTerminator != 0) {
        log.debug(" [ ] {} = NO (Actual: Body Terminator = {}, Packet Terminator = {})", desc,
                bodyTerminator, packetTerminator);
        in.resetReaderIndex();
        return;
    } else {
        log.debug(" [x] {} = YES (Actual: Body Terminator = {}, Packet Terminator = {})", desc,
                bodyTerminator, packetTerminator);
        //All is good, skip the last two bytes
        if (in.readableBytes() >= 2)
            in.skipBytes(2);
    }

    //At this point, we can now construct a packet
    log.debug(" [x] Status: PASS (Size = {}, Id = {}, Type = {}, Remaining Bytes = {}, Body Size = {})",
            size, id, type, in.readableBytes(), bodyLength);
    log.debug(separator);

    //Reset the index
    index.set(0);

    //Construct the response packet and send to the next handlers
    SourceRconResponsePacket responsePacket;

    //Did we receive a terminator packet?
    if (this.terminatingPacketsEnabled && id == SourceRconUtil.RCON_TERMINATOR_RID && StringUtils.isBlank(body)) {
        responsePacket = new SourceRconTermResponsePacket();
    } else {
        responsePacket = SourceRconPacketBuilder.getResponsePacket(type);
    }

    if (responsePacket != null) {
        responsePacket.setSize(size);
        responsePacket.setId(id);
        responsePacket.setType(type);
        responsePacket.setBody(body);
        log.debug("Decode Complete. Passing response for request id : '{}' to the next handler. Remaining bytes ({})",
                id, in.readableBytes());
        out.add(responsePacket);
    }
}
From source file:com.ibasco.agql.protocols.valve.source.query.handlers.SourceRconRequestEncoder.java
License:Open Source License
@Override
protected void encode(ChannelHandlerContext ctx, SourceRconRequest msg, List<Object> out) throws Exception {
    ByteBuf rconRequestPacket = builder.deconstructAsBuffer((SourceRconPacket) msg.getMessage());

    if (log.isDebugEnabled()) {
        log.debug("Encoding Rcon Request: \n{}", ByteBufUtil.prettyHexDump(rconRequestPacket));
    }

    out.add(rconRequestPacket);

    //Send rcon-terminator except if it is an authentication request packet
    if (this.sendTerminatorPackets && !(msg instanceof SourceRconAuthRequest)) {
        ByteBuf terminatorPacket = builder.deconstructAsBuffer(new SourceRconTermRequestPacket());
        log.debug("Sending RCON Terminator ({} bytes): \n{}", terminatorPacket.readableBytes(),
                ByteBufUtil.prettyHexDump(terminatorPacket));
        out.add(terminatorPacket);
    }
}