List of usage examples for io.netty.buffer.ByteBuf skipBytes
public abstract ByteBuf skipBytes(int length);
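skipBytes(length) advances the buffer's readerIndex by length bytes without copying anything, and throws IndexOutOfBoundsException if fewer than length bytes are readable. A minimal, self-contained sketch (the length-prefix layout is made up purely for illustration):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public class SkipBytesExample {
    public static void main(String[] args) {
        // Illustrative layout: a 4-byte length prefix followed by a UTF-8 payload.
        ByteBuf buf = Unpooled.buffer();
        buf.writeInt(5);
        buf.writeBytes("hello".getBytes(CharsetUtil.UTF_8));

        buf.skipBytes(4); // discard the length prefix: readerIndex moves from 0 to 4
        System.out.println(buf.toString(CharsetUtil.UTF_8)); // prints "hello"

        buf.release(); // ByteBuf is reference-counted
    }
}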
From source file:net.tomp2p.connection.DSASignatureFactory.java
License:Apache License
@Override
public PublicKey decodePublicKey(ByteBuf buf) {
    if (buf.readableBytes() < 2) {
        return null;
    }
    int len = buf.getUnsignedShort(buf.readerIndex());
    if (buf.readableBytes() - 2 < len) {
        return null;
    }
    buf.skipBytes(2);
    if (len <= 0) {
        return PeerBuilder.EMPTY_PUBLIC_KEY;
    }
    byte[] me = new byte[len];
    buf.readBytes(me);
    return decodePublicKey(me);
}
From source file:net.tomp2p.storage.Data.java
License:Apache License
/**
 * Reads the header. Does not modify the buffer positions if the header could
 * not be fully read.
 *
 * Header format:
 * <pre>
 * 1 byte       - header
 * 1 or 4 bytes - length
 * 4 or 0 bytes - ttl (hasTTL)
 * 1 or 0 bytes - number of basedon keys (hasBasedOn)
 * n x 20 bytes - basedon keys (hasBasedOn, number of basedon keys)
 * 2 or 0 bytes - length of public key (hasPublicKey)
 * n bytes      - public key (hasPublicKey, length of public key)
 * </pre>
 *
 * @param buf The buffer to read from
 * @return The data object, may be partially filled
 */
public static Data decodeHeader(final ByteBuf buf, final SignatureFactory signatureFactory) {
    // 2 is the smallest packet size. We could start once we have 1 byte to decode the
    // header, but we always need a second byte, so we wait for at least 2 bytes.
    if (buf.readableBytes() < Utils.BYTE_BYTE_SIZE + Utils.BYTE_BYTE_SIZE) {
        return null;
    }
    final int header = buf.getUnsignedByte(buf.readerIndex());
    final Data.Type type = Data.type(header);

    // Data length
    final int length;
    final int indexLength = Utils.BYTE_BYTE_SIZE;
    final int indexTTL;
    switch (type) {
    case SMALL:
        length = buf.getUnsignedByte(buf.readerIndex() + indexLength);
        indexTTL = indexLength + Utils.BYTE_BYTE_SIZE;
        break;
    case LARGE:
        indexTTL = indexLength + Utils.INTEGER_BYTE_SIZE;
        if (buf.readableBytes() < indexTTL) {
            return null;
        }
        length = buf.getInt(buf.readerIndex() + indexLength);
        break;
    default:
        throw new IllegalArgumentException("unknown type");
    }

    // TTL
    final int ttl;
    final int indexBasedOnNr;
    if (hasTTL(header)) {
        indexBasedOnNr = indexTTL + Utils.INTEGER_BYTE_SIZE;
        if (buf.readableBytes() < indexBasedOnNr) {
            return null;
        }
        ttl = buf.getInt(buf.readerIndex() + indexTTL);
    } else {
        ttl = -1;
        indexBasedOnNr = indexTTL;
    }

    // Number of based-on keys, then the keys themselves
    final int numBasedOn;
    final int indexPublicKeySize;
    final int indexBasedOn;
    final Set<Number160> basedOn = new HashSet<Number160>();
    if (hasBasedOn(header)) {
        // get # of based-on keys
        indexBasedOn = indexBasedOnNr + Utils.BYTE_BYTE_SIZE;
        if (buf.readableBytes() < indexBasedOn) {
            return null;
        }
        numBasedOn = buf.getUnsignedByte(buf.readerIndex() + indexBasedOnNr) + 1;
        indexPublicKeySize = indexBasedOn + (numBasedOn * Number160.BYTE_ARRAY_SIZE);
        if (buf.readableBytes() < indexPublicKeySize) {
            return null;
        }
        // get based-on keys
        int index = buf.readerIndex() + indexBasedOnNr + Utils.BYTE_BYTE_SIZE;
        final byte[] me = new byte[Number160.BYTE_ARRAY_SIZE];
        for (int i = 0; i < numBasedOn; i++) {
            buf.getBytes(index, me);
            index += Number160.BYTE_ARRAY_SIZE;
            basedOn.add(new Number160(me));
        }
    } else {
        // no based-on keys
        indexPublicKeySize = indexBasedOnNr;
        numBasedOn = 0;
    }

    // public key and its size
    final int publicKeySize;
    final int indexPublicKey;
    final int indexEnd;
    final PublicKey publicKey;
    if (hasPublicKey(header)) {
        indexPublicKey = indexPublicKeySize + Utils.SHORT_BYTE_SIZE;
        if (buf.readableBytes() < indexPublicKey) {
            return null;
        }
        publicKeySize = buf.getUnsignedShort(buf.readerIndex() + indexPublicKeySize);
        indexEnd = indexPublicKey + publicKeySize;
        if (buf.readableBytes() < indexEnd) {
            return null;
        }
        // get public key
        buf.skipBytes(indexPublicKeySize);
        publicKey = signatureFactory.decodePublicKey(buf);
    } else {
        publicKeySize = 0;
        indexPublicKey = indexPublicKeySize;
        buf.skipBytes(indexPublicKey);
        publicKey = null;
    }

    // now we have read the header and the length
    final Data data = new Data(header, length);
    data.ttlSeconds = ttl;
    data.basedOnSet = basedOn;
    data.publicKey = publicKey;
    return data;
}
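The decodeHeader example above peeks at the header with absolute getters (getUnsignedByte, getInt, getBytes) and only calls skipBytes once the complete header is known to be readable, so a partial packet leaves the readerIndex untouched. A minimal sketch of that peek-then-commit pattern, with a hypothetical fixed layout and helper name that are not part of TomP2P:

static Integer tryReadPayloadLength(ByteBuf buf) {
    // Hypothetical layout: a 1-byte type followed by a 4-byte payload length.
    final int headerSize = 1 + 4;
    if (buf.readableBytes() < headerSize) {
        return null; // incomplete header: nothing consumed, the caller waits for more bytes
    }
    int type = buf.getUnsignedByte(buf.readerIndex());     // absolute peek, readerIndex unchanged
    int payloadLength = buf.getInt(buf.readerIndex() + 1); // absolute peek at offset 1
    if (type == 0) {
        return null; // reject without consuming anything (illustrative check)
    }
    buf.skipBytes(headerSize); // commit: advance past the whole header in one step
    return payloadLength;
}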
From source file:net.tomp2p.synchronization.SyncUtils.java
License:Apache License
public static List<Instruction> decodeInstructions(ByteBuf buf) {
    List<Instruction> result = new ArrayList<Instruction>();
    while (buf.isReadable()) {
        final int header = buf.readInt();
        if ((header & 0x80000000) != 0) {
            // first bit set: we have a reference
            final int reference = header & 0x7FFFFFFF;
            result.add(new Instruction(reference));
        } else {
            // otherwise the header is the length
            final int length = header;
            final int remaining = Math.min(length, buf.readableBytes());
            DataBuffer literal = new DataBuffer(buf.slice(buf.readerIndex(), remaining));
            buf.skipBytes(remaining);
            result.add(new Instruction(literal));
        }
    }
    return result;
}
From source file:netty.syslog.DecoderUtil.java
License:Open Source License
static String readStringToSpace(ByteBuf buffer, boolean checkNull) {
    if (checkNull && peek(buffer) == '-') {
        buffer.readByte();
        return null;
    }
    int length = -1;
    for (int i = buffer.readerIndex(); i < buffer.capacity(); i++) {
        if (buffer.getByte(i) == ' ') {
            length = i - buffer.readerIndex();
            break;
        }
    }
    if (length < 0) {
        length = buffer.readableBytes();
    }
    final String s = buffer.toString(buffer.readerIndex(), length, CharsetUtil.UTF_8);
    buffer.skipBytes(length);
    return s;
}
From source file:netty.syslog.MessageDecoder.java
License:Open Source License
@Override
protected void decode(ChannelHandlerContext context, ByteBuf buffer, List<Object> objects) throws Exception {
    if (buffer.readableBytes() < 1) {
        return;
    }
    final Message.MessageBuilder messageBuilder = Message.MessageBuilder.create();

    // Decode PRI
    expect(buffer, '<');
    final int pri = readDigit(buffer);
    if (pri < 0 || pri > 191) {
        throw new DecoderException("Invalid PRIVAL " + pri);
    }
    final int facility = pri / 8;
    final int severity = pri % 8;
    messageBuilder.facility(Message.Facility.values()[facility]);
    messageBuilder.severity(Message.Severity.values()[severity]);
    expect(buffer, '>');

    // Decode VERSION
    if (buffer.readByte() != '1') {
        throw new DecoderException("Expected a version 1 syslog message");
    }
    expect(buffer, ' ');

    // Decode TIMESTAMP
    final ZonedDateTime timestamp;
    final String timeStampString = readStringToSpace(buffer, true);
    if (timeStampString == null) {
        timestamp = null;
    } else {
        timestamp = ZonedDateTime.parse(timeStampString);
    }
    messageBuilder.timestamp(timestamp);
    expect(buffer, ' ');

    // Decode HOSTNAME
    messageBuilder.hostname(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // Decode APP-NAME
    messageBuilder.applicationName(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // Decode PROC-ID
    messageBuilder.processId(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // Decode MSGID
    messageBuilder.messageId(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // TODO Decode structured data
    expect(buffer, '-');
    expect(buffer, ' ');

    final int length = buffer.readableBytes();
    messageBuilder.content(buffer.slice(buffer.readerIndex(), length).retain());
    buffer.skipBytes(length);

    objects.add(messageBuilder.build());
}
From source file:nettyClient4.clientDecoder.java
License:Apache License
/**
 * Returns the decoded frame from the ByteBuf.
 *
 * @param ctx
 * @param in
 * @return
 * @throws Exception
 */
private ByteBuf decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception {
    if (discardingTooLongFrame) {
        long bytesToDiscard = this.bytesToDiscard;
        int localBytesToDiscard = (int) Math.min(bytesToDiscard, in.readableBytes());
        in.skipBytes(localBytesToDiscard);
        bytesToDiscard -= localBytesToDiscard;
        this.bytesToDiscard = bytesToDiscard;
        failIfNecessary(ctx, false);
        return null;
    }

    if (in.readableBytes() < lengthFieldEndOffset) {
        return null;
    }

    int actualLengthFieldOffset = in.readerIndex() + lengthFieldOffset;
    // long frameLength = (in.order(byteOrder)).getUnsignedByte(actualLengthFieldOffset);
    long frameLength = (in.order(byteOrder)).getUnsignedShort(actualLengthFieldOffset);

    if (frameLength < 0) {
        in.skipBytes(lengthFieldEndOffset);
        throw new CorruptedFrameException("negative pre-adjustment length field: " + frameLength);
    }

    frameLength += lengthAdjustment + lengthFieldEndOffset;
    if (frameLength < lengthFieldEndOffset) {
        in.skipBytes(lengthFieldEndOffset);
        throw new CorruptedFrameException("Adjusted frame length (" + frameLength + ") is less "
                + "than lengthFieldEndOffset: " + lengthFieldEndOffset);
    }

    if (frameLength > maxFrameLength) {
        // Enter the discard mode and discard everything received so far.
        discardingTooLongFrame = true;
        tooLongFrameLength = frameLength;
        bytesToDiscard = frameLength - in.readableBytes();
        in.skipBytes(in.readableBytes());
        failIfNecessary(ctx, true);
        return null;
    }

    // never overflows because it's less than maxFrameLength
    int frameLengthInt = (int) frameLength;
    if (in.readableBytes() < frameLengthInt) {
        return null;
    }

    if (initialBytesToStrip > frameLengthInt) {
        in.skipBytes(frameLengthInt);
        throw new CorruptedFrameException("Adjusted frame length (" + frameLength + ") is less "
                + "than initialBytesToStrip: " + initialBytesToStrip);
    }
    in.skipBytes(initialBytesToStrip);

    // extract frame
    int readerIndex = in.readerIndex();
    int actualFrameLength = frameLengthInt - initialBytesToStrip;
    ByteBuf frame = extractFrame(in, readerIndex, actualFrameLength, ctx);
    in.readerIndex(readerIndex + actualFrameLength);

    return frame;
}
From source file:nl.thijsalders.spigotproxy.haproxy.HAProxyMessageDecoder.java
License:Apache License
/**
 * Create a frame out of the {@link ByteBuf} and return it.
 * Based on code from {@link LineBasedFrameDecoder#decode(ChannelHandlerContext, ByteBuf)}.
 *
 * @param ctx    the {@link ChannelHandlerContext} which this {@link HAProxyMessageDecoder} belongs to
 * @param buffer the {@link ByteBuf} from which to read data
 * @return frame the {@link ByteBuf} which represents the frame, or {@code null} if no frame could
 *               be created
 */
private ByteBuf decodeStruct(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
    final int eoh = findEndOfHeader(buffer);
    if (!discarding) {
        if (eoh >= 0) {
            final int length = eoh - buffer.readerIndex();
            if (length > v2MaxHeaderSize) {
                buffer.readerIndex(eoh);
                failOverLimit(ctx, length);
                return null;
            }
            return buffer.readSlice(length);
        } else {
            final int length = buffer.readableBytes();
            if (length > v2MaxHeaderSize) {
                discardedBytes = length;
                buffer.skipBytes(length);
                discarding = true;
                failOverLimit(ctx, "over " + discardedBytes);
            }
            return null;
        }
    } else {
        if (eoh >= 0) {
            buffer.readerIndex(eoh);
            discardedBytes = 0;
            discarding = false;
        } else {
            discardedBytes = buffer.readableBytes();
            buffer.skipBytes(discardedBytes);
        }
        return null;
    }
}
From source file:nl.thijsalders.spigotproxy.haproxy.HAProxyMessageDecoder.java
License:Apache License
/**
 * Create a frame out of the {@link ByteBuf} and return it.
 * Based on code from {@link LineBasedFrameDecoder#decode(ChannelHandlerContext, ByteBuf)}.
 *
 * @param ctx    the {@link ChannelHandlerContext} which this {@link HAProxyMessageDecoder} belongs to
 * @param buffer the {@link ByteBuf} from which to read data
 * @return frame the {@link ByteBuf} which represents the frame, or {@code null} if no frame could
 *               be created
 */
private ByteBuf decodeLine(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
    final int eol = findEndOfLine(buffer);
    if (!discarding) {
        if (eol >= 0) {
            final int length = eol - buffer.readerIndex();
            if (length > V1_MAX_LENGTH) {
                buffer.readerIndex(eol + DELIMITER_LENGTH);
                failOverLimit(ctx, length);
                return null;
            }
            ByteBuf frame = buffer.readSlice(length);
            buffer.skipBytes(DELIMITER_LENGTH);
            return frame;
        } else {
            final int length = buffer.readableBytes();
            if (length > V1_MAX_LENGTH) {
                discardedBytes = length;
                buffer.skipBytes(length);
                discarding = true;
                failOverLimit(ctx, "over " + discardedBytes);
            }
            return null;
        }
    } else {
        if (eol >= 0) {
            final int delimLength = buffer.getByte(eol) == '\r' ? 2 : 1;
            buffer.readerIndex(eol + delimLength);
            discardedBytes = 0;
            discarding = false;
        } else {
            discardedBytes = buffer.readableBytes();
            buffer.skipBytes(discardedBytes);
        }
        return null;
    }
}
From source file:openbns.commons.net.codec.sts.HttpObjectDecoder.java
License:Apache License
@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
    switch (state()) {
    case SKIP_CONTROL_CHARS: {
        try {
            skipControlCharacters(buffer);
            checkpoint(State.READ_INITIAL);
        } finally {
            checkpoint();
        }
    }
    case READ_INITIAL:
        try {
            String[] initialLine = splitInitialLine(readLine(buffer, maxInitialLineLength));
            if (initialLine.length < 3) {
                // Invalid initial line - ignore.
                checkpoint(State.SKIP_CONTROL_CHARS);
                return;
            }
            message = createMessage(initialLine);
            checkpoint(State.READ_HEADER);
        } catch (Exception e) {
            out.add(invalidMessage(e));
            return;
        }
    case READ_HEADER:
        try {
            State nextState = readHeaders(buffer);
            checkpoint(nextState);
            if (nextState == State.READ_CHUNK_SIZE) {
                if (!chunkedSupported) {
                    throw new IllegalArgumentException("Chunked messages not supported");
                }
                // Chunked encoding - generate StsMessage first. HttpChunks will follow.
                out.add(message);
                return;
            }
            if (nextState == State.SKIP_CONTROL_CHARS) {
                // No content is expected.
                out.add(message);
                out.add(LastStsContent.EMPTY_LAST_CONTENT);
                reset();
                return;
            }
            long contentLength = contentLength();
            if (contentLength == 0 || contentLength == -1 && isDecodingRequest()) {
                out.add(message);
                out.add(LastStsContent.EMPTY_LAST_CONTENT);
                reset();
                return;
            }

            assert nextState == State.READ_FIXED_LENGTH_CONTENT || nextState == State.READ_VARIABLE_LENGTH_CONTENT;

            out.add(message);

            if (nextState == State.READ_FIXED_LENGTH_CONTENT) {
                // chunkSize will be decreased as the READ_FIXED_LENGTH_CONTENT state reads data chunk by chunk.
                chunkSize = contentLength;
            }

            // We return here; this forces decode to be called again, where we will decode the content.
            return;
        } catch (Exception e) {
            out.add(invalidMessage(e));
            return;
        }
    case READ_VARIABLE_LENGTH_CONTENT: {
        // Keep reading data as a chunk until the end of connection is reached.
        int toRead = Math.min(actualReadableBytes(), maxChunkSize);
        if (toRead > 0) {
            ByteBuf content = readBytes(ctx.alloc(), buffer, toRead);
            if (buffer.isReadable()) {
                out.add(new DefaultStsContent(content));
            } else {
                // End of connection.
                out.add(new DefaultLastStsContent(content, validateHeaders));
                reset();
            }
        } else if (!buffer.isReadable()) {
            // End of connection.
            out.add(LastStsContent.EMPTY_LAST_CONTENT);
            reset();
        }
        return;
    }
    case READ_FIXED_LENGTH_CONTENT: {
        int readLimit = actualReadableBytes();

        // Check if the buffer is readable first, as we use the readable byte count
        // to create the HttpChunk. Otherwise we may end up creating an HttpChunk
        // instance that contains an empty buffer and so is handled as if it were
        // the last HttpChunk.
        //
        // See https://github.com/commons/commons/issues/433
        if (readLimit == 0) {
            return;
        }

        int toRead = Math.min(readLimit, maxChunkSize);
        if (toRead > chunkSize) {
            toRead = (int) chunkSize;
        }
        ByteBuf content = readBytes(ctx.alloc(), buffer, toRead);
        chunkSize -= toRead;

        if (chunkSize == 0) {
            // Read all content.
            out.add(new DefaultLastStsContent(content, validateHeaders));
            reset();
        } else {
            out.add(new DefaultStsContent(content));
        }
        return;
    }
    /*
     * Everything else after this point takes care of reading chunked content:
     * basically, read chunk size, read chunk, read and ignore the CRLF, and repeat until 0.
     */
    case READ_CHUNK_SIZE:
        try {
            AppendableCharSequence line = readLine(buffer, maxInitialLineLength);
            int chunkSize = getChunkSize(line.toString());
            this.chunkSize = chunkSize;
            if (chunkSize == 0) {
                checkpoint(State.READ_CHUNK_FOOTER);
                return;
            } else {
                checkpoint(State.READ_CHUNKED_CONTENT);
            }
        } catch (Exception e) {
            out.add(invalidChunk(e));
            return;
        }
    case READ_CHUNKED_CONTENT: {
        assert chunkSize <= Integer.MAX_VALUE;
        int toRead = Math.min((int) chunkSize, maxChunkSize);
        StsContent chunk = new DefaultStsContent(readBytes(ctx.alloc(), buffer, toRead));
        chunkSize -= toRead;

        out.add(chunk);

        if (chunkSize == 0) {
            // Read all content.
            checkpoint(State.READ_CHUNK_DELIMITER);
        } else {
            return;
        }
    }
    case READ_CHUNK_DELIMITER: {
        for (;;) {
            byte next = buffer.readByte();
            if (next == StsConstants.CR) {
                if (buffer.readByte() == StsConstants.LF) {
                    checkpoint(State.READ_CHUNK_SIZE);
                    return;
                }
            } else if (next == StsConstants.LF) {
                checkpoint(State.READ_CHUNK_SIZE);
                return;
            } else {
                checkpoint();
            }
        }
    }
    case READ_CHUNK_FOOTER:
        try {
            LastStsContent trailer = readTrailingHeaders(buffer);
            out.add(trailer);
            reset();
            return;
        } catch (Exception e) {
            out.add(invalidChunk(e));
            return;
        }
    case BAD_MESSAGE: {
        // Keep discarding until disconnection.
        buffer.skipBytes(actualReadableBytes());
        break;
    }
    case UPGRADED: {
        // Do not touch anything read - another handler will replace this codec with the
        // upgraded protocol codec to take the traffic over.
        break;
    }
    }
}
From source file:org.apache.activemq.artemis.utils.AbstractByteBufPool.java
License:Apache License
/**
 * Returns a pooled entry if possible, a new one otherwise.
 * <p>
 * The {@code byteBuf}'s {@link ByteBuf#readerIndex()} is incremented by {@code length} after it.
 */
public final T getOrCreate(final ByteBuf byteBuf) {
    final int length = byteBuf.readInt();
    if (!canPool(byteBuf, length)) {
        return create(byteBuf, length);
    } else {
        if (!byteBuf.isReadable(length)) {
            throw new IndexOutOfBoundsException();
        }
        final int bytesOffset = byteBuf.readerIndex();
        final int hashCode = hashCode(byteBuf, bytesOffset, length);
        // fast % operation with power-of-2 entries.length
        final int firstIndex = hashCode & mask;
        final T firstEntry = entries[firstIndex];
        if (isEqual(firstEntry, byteBuf, bytesOffset, length)) {
            byteBuf.skipBytes(length);
            return firstEntry;
        }
        final int secondIndex = (hashCode >> shift) & mask;
        final T secondEntry = entries[secondIndex];
        if (isEqual(secondEntry, byteBuf, bytesOffset, length)) {
            byteBuf.skipBytes(length);
            return secondEntry;
        }
        final T internedEntry = create(byteBuf, length);
        final int entryIndex = firstEntry == null ? firstIndex : secondIndex;
        entries[entryIndex] = internedEntry;
        return internedEntry;
    }
}