Example usage for io.netty.buffer ByteBuf slice

List of usage examples for io.netty.buffer ByteBuf slice

Introduction

On this page you can find example usages of io.netty.buffer ByteBuf slice.

Prototype

public abstract ByteBuf slice(int index, int length);

Document

Returns a slice of this buffer's sub-region.
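
Before the usage examples, a minimal standalone sketch (assuming Netty 4.x and an Unpooled heap buffer; SliceDemo is an illustrative name) of what slice(index, length) returns: a view that shares memory with the parent buffer, has its own reader/writer indices, and does not retain the parent.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class SliceDemo {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.copiedBuffer(new byte[] { 1, 2, 3, 4, 5, 6 });

        // View over indices 2..4 of the parent; no bytes are copied.
        ByteBuf slice = buf.slice(2, 3);
        System.out.println(slice.readableBytes()); // 3
        System.out.println(slice.getByte(0)); // 3

        // The slice shares memory: writes through it are visible in the parent.
        slice.setByte(0, (byte) 42);
        System.out.println(buf.getByte(2)); // 42

        // slice() does not call retain(); release the parent when done with both.
        buf.release();
    }
}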

Usage

From source file:com.nanxiaoqiang.test.netty.protocol.demo1.codec.MarshallingDecoder.java

License:Apache License

protected Object decode(ByteBuf in) throws Exception {
    int objectSize = in.readInt(); // length prefix: size of the marshalled object in bytes
    ByteBuf buf = in.slice(in.readerIndex(), objectSize); // zero-copy view over exactly objectSize bytes
    ByteInput input = new ChannelBufferByteInput(buf);
    try {
        unmarshaller.start(input);
        Object obj = unmarshaller.readObject(); // unmarshal the object from the sliced view
        unmarshaller.finish();
        in.readerIndex(in.readerIndex() + objectSize); // advance the parent buffer; reads on the slice did not move its readerIndex
        return obj;
    } finally {
        unmarshaller.close();
    }
}

From source file:com.necla.simba.server.gateway.server.frontend.FrontendFrameDecoder.java

License:Apache License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {

    if (in.readableBytes() < 4) {
        // length not received yet, return without producing
        // an output
        return;
    }

    // get calls on ByteBuf don't change the stream
    // so we leave 'ByteBuf in' unchanged after reading
    // the length
    int readerIndex = in.readerIndex();
    int length = in.getInt(readerIndex);
    int realLength = length & ~(1 << 30);
    boolean isCompressed = (length >> 30) > 0;
    LOG.debug("got message len=" + realLength + " isCompressed=" + isCompressed + " readablebytes="
            + in.readableBytes());

    if (in.readableBytes() < realLength + 4)
        return;

    if (!isCompressed) {
        ByteBuf frame = extractMessage(ctx, in, 4 + readerIndex, realLength);
        out.add(frame);
    } else {

        ByteBuf frame = in.slice(4 + readerIndex, realLength);
        LOG.debug("going into decompress");
        ByteBuf ret = decompress(ctx, frame);
        LOG.debug("ret readablebytes=" + ret.readableBytes());

        out.add(ret);

    }

    in.readerIndex(readerIndex + 4 + realLength);

    Stats.received(readerIndex + 4 + realLength);

    return;

}
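
The comments in this decoder rely on the distinction between absolute get and relative read methods; a minimal sketch of why getInt leaves the buffer intact for a later retry:

ByteBuf in = Unpooled.buffer();
in.writeInt(1024);
int peeked = in.getInt(in.readerIndex()); // absolute get: readerIndex unchanged
int consumed = in.readInt();              // relative read: readerIndex advances by 4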

From source file:com.netflix.iep.http.NetflixJsonObjectDecoder.java

License:Apache License

/**
 * Override this method if you want to filter the json objects/arrays that get passed through the pipeline.
 */
@SuppressWarnings("UnusedParameters")
protected ByteBuf extractObject(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
    if (length == 0)
        return null;
    ByteBuf buf = buffer.slice(index, length).retain();

    if (LOGGER.isTraceEnabled()) {
        byte[] bytes = new byte[buf.readableBytes()];
        buf.getBytes(buf.readerIndex(), bytes, 0, buf.readableBytes());
        LOGGER.trace("extracted [" + buf.readerIndex() + ":" + buf.readableBytes() + "]:" + new String(bytes));
    }

    return buf;
}
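
Since this variant returns buffer.slice(index, length).retain(), the extracted frame carries its own reference count on top of the parent's. A sketch of the caller-side contract (process() is a hypothetical downstream step, not part of this decoder):

ByteBuf frame = extractObject(ctx, buffer, index, length);
if (frame != null) {
    try {
        process(frame); // hypothetical handling
    } finally {
        frame.release(); // balances the retain() inside extractObject
    }
}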

From source file:com.quavo.osrs.network.handler.listener.UpdateListener.java

License:Open Source License

@Override
public void handleMessage(ChannelHandlerContext ctx, UpdateRequest msg) {
    int type = msg.getType();
    int id = msg.getId();
    ByteBuf container = null;

    try {
        if (type == 0xff && id == 0xff) {
            container = Unpooled.wrappedBuffer(CacheManager.getChecksumBuffer());
        } else {
            container = Unpooled.wrappedBuffer(CacheManager.getCache().getStore().read(type, id));
            if (type != 0xff) {
                container = container.slice(0, container.readableBytes() - 2);
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }

    ctx.write(new UpdateResponse(type, id, msg.isPriority(), container));
}
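
Note that this example and the next both call slice(0, readableBytes() - 2) on non-0xff containers, dropping the last two bytes (presumably a trailing version field) without copying the cached data.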

From source file:com.rs3e.network.session.impl.UpdateSession.java

License:Open Source License

/**
 * Processes the file queue.
 */
public void processFileQueue() {
    FileRequest request;

    synchronized (fileQueue) {
        request = fileQueue.pop();
        if (fileQueue.isEmpty()) {
            idle = true;
        } else {
            service.addPendingSession(this);
            idle = false;
        }
    }

    if (request != null) {
        int type = request.getType();
        int file = request.getFile();

        RS3Cache cache = mainContext.getCache();
        ByteBuf buf;

        try {
            if (type == 255 && file == 255)
                buf = Unpooled.wrappedBuffer(mainContext.getChecksumTable());
            else {
                buf = Unpooled.wrappedBuffer(cache.getStore().read(type, file));
                if (type != 255)
                    buf = buf.slice(0, buf.readableBytes() - 2);
            }
            channel.write(new FileResponse(request.isPriority(), type, file, buf));
        } catch (IOException ex) {
            logger.log(Level.WARNING, "Failed to service file request " + type + ", " + file + ".", ex);
        }
    }
}

From source file:com.shelf.messagepack.MessagePackFrameDecoder.java

License:Apache License

protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
    // Create a sliced buffer covering the full frame first: if enough data has
    // not arrived yet, the slice fails and ReplayingDecoder replays this decode
    // operation once more bytes are available.
    //
    // Don't allocate a new buffer with ctx.alloc().buffer() before the full frame
    // has arrived; an allocation made on a replayed pass is never released and leaks.
    // If the slice is created successfully, the full frame is available.
    ByteBuf slice = buffer.slice(index, length);
    ByteBuf frame = ctx.alloc().buffer(length);
    frame.writeBytes(slice, 0, length);
    return frame;
}
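
If the frame is guaranteed to be fully consumed before decode() returns, the copy into a freshly allocated buffer can be avoided by returning a retained slice instead, exactly as the javadoc of the next example describes:

protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
    return buffer.slice(index, length).retain();
}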

From source file:com.streamsets.pipeline.lib.parser.net.DelimitedLengthFieldBasedFrameDecoder.java

License:Apache License

public DelimitedLengthFieldBasedFrameDecoder(int maxFrameLength, int lengthAdjustment, boolean failFast,
        ByteBuf delimiter, Charset lengthFieldCharset, boolean trimLengthString) {
    if (delimiter == null) {
        throw new NullPointerException("delimiter");
    }
    if (!delimiter.isReadable()) {
        throw new IllegalArgumentException("empty delimiter");
    }

    this.delimiter = delimiter.slice(delimiter.readerIndex(), delimiter.readableBytes());
    this.lengthFieldCharset = lengthFieldCharset;

    this.maxFrameLength = maxFrameLength;
    this.lengthAdjustment = lengthAdjustment;
    this.failFast = failFast;
    this.trimLengthString = trimLengthString;
}
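
The two-argument call here pins the stored view to the delimiter's current readable region, decoupling it from whatever the caller later does with its own reader index. The no-argument overload is documented as equivalent shorthand:

this.delimiter = delimiter.slice(); // same as delimiter.slice(delimiter.readerIndex(), delimiter.readableBytes())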

From source file:com.streamsets.pipeline.lib.parser.net.DelimitedLengthFieldBasedFrameDecoder.java

License:Apache License

/**
 * Extract the sub-region of the specified buffer.
 * <p>
 * If you are sure that the frame and its content are not accessed after
 * the current {@link #decode(ChannelHandlerContext, ByteBuf)}
 * call returns, you can even avoid memory copy by returning the sliced
 * sub-region (i.e. <tt>return buffer.slice(index, length)</tt>).
 * It's often useful when you convert the extracted frame into an object.
 * Refer to the source code of {@link ObjectDecoder} to see how this method
 * is overridden to avoid memory copy.
 */
protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
    return buffer.slice(index, length).retain();
}

From source file:com.streamsets.pipeline.lib.parser.netflow.NetflowParser.java

License:Apache License

private List<Record> parseV5(int version, int packetLength, ByteBuf buf, String readerId,
        InetSocketAddress sender) throws OnRecordErrorException {
    int count = buf.getUnsignedShort(2); // 2-3
    if (count <= 0) {
        throw new OnRecordErrorException(Errors.NETFLOW_01, Utils.format("Count is invalid: {}", count));
    } else if (packetLength < V5_HEADER_SIZE + count * V5_FLOW_SIZE) {
        String msg = Utils.format("Readable bytes {} too small for count {} (max {})", packetLength, count,
                (V5_HEADER_SIZE + count * V5_FLOW_SIZE));
        throw new OnRecordErrorException(Errors.NETFLOW_01, msg);
    }
    List<Record> result = new ArrayList<>(count);
    long uptime = buf.getUnsignedInt(4); // 4-7
    long seconds = buf.getUnsignedInt(8); // 8-11
    long millis = buf.getUnsignedInt(12) / 1000000; // 12-15: residual nanoseconds, converted to milliseconds
    long timestamp = (seconds * 1000L) + millis; // java timestamp, which is milliseconds
    UUID packetId = UUIDs.startOfJavaTimestamp(timestamp);
    long flowSequence = buf.getUnsignedInt(16); // 16-19
    short engineType = buf.getUnsignedByte(20); // 20
    short engineId = buf.getUnsignedByte(21); // 21
    // the first 2 bits are the sampling mode, the remaining 14 the interval
    int sampling = buf.getUnsignedShort(22); // 22-23
    int samplingInterval = sampling & 0x3FFF;
    int samplingMode = sampling >> 14;
    Map<String, Field> headers = new HashMap<>();
    headers.put(VERSION, Field.create(version));
    headers.put(PACKETID, Field.create(packetId.toString()));
    headers.put(SENDER, Field.create((sender == null) ? "unknown" : sender.getAddress().toString()));
    headers.put(LENGTH, Field.create(packetLength));
    headers.put(UPTIME, Field.create(uptime));
    headers.put(TIMESTAMP, Field.create(timestamp));
    headers.put(FLOWSEQ, Field.create(flowSequence));
    headers.put(ENGINEID, Field.create(engineId));
    headers.put(ENGINETYPE, Field.create(engineType));
    headers.put(SAMPLINGINT, Field.create(samplingInterval));
    headers.put(SAMPLINGMODE, Field.create(samplingMode));
    headers.put(READERID, Field.create(readerId));
    for (int i = 0; i < count; i++) {
        ByteBuf flowBuf = buf.slice(V5_HEADER_SIZE + (i * V5_FLOW_SIZE), V5_FLOW_SIZE);
        Map<String, Field> fields = new HashMap<>();
        fields.putAll(headers);
        long pkts = flowBuf.getUnsignedInt(16);
        long bytes = flowBuf.getUnsignedInt(20);
        fields.put(ID, Field.create(UUIDs.timeBased().toString()));
        int srcaddr = (int) flowBuf.getUnsignedInt(0);
        int dstaddr = (int) flowBuf.getUnsignedInt(4);
        int nexthop = (int) flowBuf.getUnsignedInt(8);
        fields.put(SRCADDR, Field.create(srcaddr));
        fields.put(DSTADDR, Field.create(dstaddr));
        fields.put(NEXTHOP, Field.create(nexthop));
        fields.put(SRCADDR_S, Field.create(ipToString(srcaddr)));
        fields.put(DSTADDR_S, Field.create(ipToString(dstaddr)));
        fields.put(NEXTHOP_S, Field.create(ipToString(nexthop)));
        fields.put(SRCPORT, Field.create(flowBuf.getUnsignedShort(32)));
        fields.put(DSTPORT, Field.create(flowBuf.getUnsignedShort(34)));
        fields.put(SRCAS, Field.create(flowBuf.getUnsignedShort(40)));
        fields.put(DSTAS, Field.create(flowBuf.getUnsignedShort(42)));
        fields.put(PACKETS, Field.create(pkts));
        fields.put(DOCTECTS, Field.create(bytes));
        fields.put(PROTO, Field.create(flowBuf.getUnsignedByte(38)));
        fields.put(TOS, Field.create(flowBuf.getUnsignedByte(39)));
        fields.put(TCPFLAGS, Field.create(flowBuf.getUnsignedByte(37)));
        long first = flowBuf.getUnsignedInt(24);
        if (first > 0) {
            fields.put(FIRST, Field.create(timestamp - uptime - first));
        } else {
            fields.put(FIRST, Field.create(0L));
        }
        long last = flowBuf.getUnsignedInt(28);
        if (last > 0) {
            fields.put(LAST, Field.create(timestamp - uptime - last));
        } else {
            fields.put(LAST, Field.create(0L));
        }
        fields.put(SNMPINPUT, Field.create(flowBuf.getUnsignedShort(12)));
        fields.put(SNMPOUTPUT, Field.create(flowBuf.getUnsignedShort(14)));
        fields.put(SRCMASK, Field.create(flowBuf.getUnsignedByte(44)));
        fields.put(DSTMASK, Field.create(flowBuf.getUnsignedByte(45)));
        Record record = context.createRecord(readerId + "::" + recordId++);
        record.set(Field.create(fields));
        result.add(record);

    }
    return result;
}

From source file:com.tesora.dve.db.mysql.libmy.MyBinaryResultRow.java

License:Open Source License

@Override
public void unmarshallMessage(ByteBuf cb) throws PEException {
    int expectedFieldCount = fieldConverters.size();
    int expectedBitmapLength = MyNullBitmap.computeSize(expectedFieldCount, MyNullBitmap.BitmapType.RESULT_ROW);
    cb = cb.order(ByteOrder.LITTLE_ENDIAN);
    cb.skipBytes(1);//skip the bin row marker.

    byte[] nullBitmap = new byte[expectedBitmapLength];
    cb.readBytes(nullBitmap);
    MyNullBitmap resultBitmap = new MyNullBitmap(nullBitmap, expectedFieldCount,
            MyNullBitmap.BitmapType.RESULT_ROW);

    ByteBuf values = cb;

    for (int i = 0; i < expectedFieldCount; i++) {
        ByteBuf existing = fieldSlices.get(i);
        ByteBuf nextSlice = null;
        int startIndex = values.readerIndex();
        if (!resultBitmap.getBit(i + 1)) {
            fieldConverters.get(i).readObject(values);//TODO: we throw out the unmarshalled value, we could cache it.
            int endingOffset = values.readerIndex();
            nextSlice = values.slice(startIndex, endingOffset - startIndex);
        }

        if (existing != null)
            existing.release();
        fieldSlices.set(i, nextSlice);
    }
    if (cb.readableBytes() > 0) {
        log.warn("Decoded binary row had {} leftover bytes, re-encoding may fail.", cb.readableBytes());
        cb.skipBytes(cb.readableBytes());//consume rest of buffer.
    }
}
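
The slicing idiom in the loop above is worth isolating: diff the readerIndex around any consuming call to capture exactly the bytes it read as a zero-copy view that can later be re-sent verbatim. A generic sketch (consume() stands in for any code that advances the reader index):

int start = buf.readerIndex();
consume(buf); // any operation that advances readerIndex
ByteBuf consumedBytes = buf.slice(start, buf.readerIndex() - start);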