Example usage for io.netty.buffer ByteBuf slice

List of usage examples for io.netty.buffer ByteBuf slice

Introduction

On this page you can find example usages of io.netty.buffer ByteBuf slice.

Prototype

public abstract ByteBuf slice(int index, int length);

Document

Returns a slice of this buffer's sub-region.
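
The javadoc above is terse, so here is a minimal, self-contained sketch of what slice(int index, int length) actually does (assuming Netty 4.1.x; the class name SliceExample is only illustrative). The slice shares the parent's memory rather than copying it, carries its own reader and writer indices, and neither advances the parent's readerIndex nor retains an extra reference:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class SliceExample {
    public static void main(String[] args) {
        ByteBuf buf = Unpooled.buffer(16);
        buf.writeBytes(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 });

        // Slice 4 bytes starting at absolute index 2. No memory is copied
        // and the parent's readerIndex is left untouched.
        ByteBuf slice = buf.slice(2, 4);
        System.out.println(slice.readableBytes()); // 4
        System.out.println(buf.readerIndex());     // still 0

        // The slice is a view: writing through it mutates the parent.
        slice.setByte(0, 42);
        System.out.println(buf.getByte(2));        // 42

        // slice() does not call retain(); the slice shares the parent's
        // reference count. Use retainedSlice(...) (or slice(...).retain())
        // if the slice must outlive the parent's release.
        ByteBuf retained = buf.retainedSlice(2, 4);
        retained.release();
        buf.release();
    }
}

These properties explain the pattern that recurs in the examples below: slice(readerIndex(), length) followed by skipBytes(length) on the parent, with retain() or retainedSlice(...) wherever the slice has to outlive the parent's release.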

Usage

From source file:org.apache.distributedlog.EnvelopedRecordSetReader.java

License:Apache License

EnvelopedRecordSetReader(long logSegmentSeqNo, long entryId, long transactionId, long startSlotId,
        int startPositionWithinLogSegment, long startSequenceId, ByteBuf src) throws IOException {
    this.logSegmentSeqNo = logSegmentSeqNo;
    this.entryId = entryId;
    this.transactionId = transactionId;
    this.slotId = startSlotId;
    this.position = startPositionWithinLogSegment;
    this.startSequenceId = startSequenceId;

    // read data
    int metadata = src.readInt();
    int version = metadata & METADATA_VERSION_MASK;
    if (version != VERSION) {
        throw new IOException(String.format("Version mismatch while reading. Received: %d," + " Required: %d",
                version, VERSION));
    }
    int codecCode = metadata & METADATA_COMPRESSION_MASK;
    this.numRecords = src.readInt();
    int decompressedDataLen = src.readInt();
    int compressedDataLen = src.readInt();
    ByteBuf compressedBuf = src.slice(src.readerIndex(), compressedDataLen);
    try {
        if (Type.NONE.code() == codecCode && decompressedDataLen != compressedDataLen) {
            throw new IOException(
                    "Inconsistent data length found for a non-compressed record set : decompressed = "
                            + decompressedDataLen + ", actual = " + compressedDataLen);
        }
        CompressionCodec codec = CompressionUtils.getCompressionCodec(Type.of(codecCode));
        this.reader = codec.decompress(compressedBuf, decompressedDataLen);
    } finally {
        compressedBuf.release();
    }
    if (numRecords == 0) {
        this.reader.release();
    }
}

From source file:org.apache.distributedlog.LogRecord.java

License:Apache License

protected void readPayload(ByteBuf in, boolean copyData) throws IOException {
    int length = in.readInt();
    if (length < 0) {
        throw new EOFException("Log Record is corrupt: Negative length " + length);
    }
    if (copyData) {
        setPayloadBuf(in.slice(in.readerIndex(), length), true);
    } else {
        setPayloadBuf(in.retainedSlice(in.readerIndex(), length), false);
    }
    in.skipBytes(length);
}
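
Note the design choice in the two branches: a plain slice(...) shares the parent buffer's reference count, while retainedSlice(...) increments it. The copyData branch can afford the unretained slice, presumably because setPayloadBuf(..., true) copies the bytes immediately, whereas the zero-copy branch takes a retained slice so the payload remains valid after the caller releases in.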

From source file:org.apache.drill.exec.rpc.ChunkCreationHandler.java

License:Apache License

@Override
protected void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws Exception {

    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug("ChunkCreationHandler called with msg {} of size {} with chunkSize {}", msg,
                msg.readableBytes(), chunkSize);
    }

    if (!ctx.channel().isOpen()) {
        logger.debug("Channel closed, skipping encode inside {}.", RpcConstants.CHUNK_CREATION_HANDLER);
        msg.release();
        return;
    }

    // Calculate the number of chunks based on configured chunk size and input msg size
    int numChunks = (int) Math.ceil((double) msg.readableBytes() / chunkSize);

    // Initialize a composite buffer to hold numChunks chunks.
    final CompositeByteBuf cbb = ctx.alloc().compositeBuffer(numChunks);

    int cbbWriteIndex = 0;
    int currentChunkLen = min(msg.readableBytes(), chunkSize);

    // Create slices of chunkSize from the input msg and add them to the composite buffer.
    while (numChunks > 0) {
        final ByteBuf chunkBuf = msg.slice(msg.readerIndex(), currentChunkLen);
        chunkBuf.retain();
        cbb.addComponent(chunkBuf);
        cbbWriteIndex += currentChunkLen;
        msg.skipBytes(currentChunkLen);
        --numChunks;
        currentChunkLen = min(msg.readableBytes(), chunkSize);
    }

    // Update the writerIndex of composite byte buffer. Netty doesn't do it automatically.
    cbb.writerIndex(cbbWriteIndex);

    // Add the final composite bytebuf into output buffer.
    out.add(cbb);
}
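
Each chunk is taken with slice(...) and then explicitly retain()-ed so that the chunk added to the composite buffer keeps the parent alive. On Netty 4.1 and later the two calls can be collapsed into msg.retainedSlice(msg.readerIndex(), currentChunkLen), and addComponent(true, chunkBuf) would advance the composite's writerIndex automatically, making the manual cbbWriteIndex bookkeeping unnecessary.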

From source file:org.apache.drill.exec.rpc.RpcDecoder.java

License:Apache License

@Override
protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List<Object> out) throws Exception {
    if (!ctx.channel().isOpen()) {
        return;
    }

    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug("Inbound rpc message received.");
    }

    // now, we know the entire message is in the buffer and the buffer is constrained to this message. Additionally,
    // this process should avoid reading beyond the end of this buffer so we inform the ByteBufInputStream to throw an
    // exception if we go beyond readable bytes (as opposed to blocking).
    final ByteBufInputStream is = new ByteBufInputStream(buffer, buffer.readableBytes());

    // read the rpc header, saved in delimited format.
    checkTag(is, RpcEncoder.HEADER_TAG);
    final RpcHeader header = RpcHeader.parseDelimitedFrom(is);

    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug(" post header read index {}", buffer.readerIndex());
    }

    // read the protobuf body into a buffer.
    checkTag(is, RpcEncoder.PROTOBUF_BODY_TAG);
    final int pBodyLength = readRawVarint32(is);
    final ByteBuf pBody = buffer.slice(buffer.readerIndex(), pBodyLength);
    buffer.skipBytes(pBodyLength);
    pBody.retain();
    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug("Read protobuf body of length {} into buffer {}.", pBodyLength, pBody);
    }

    if (RpcConstants.EXTRA_DEBUGGING) {
        logger.debug("post protobufbody read index {}", buffer.readerIndex());
    }

    ByteBuf dBody = null;
    int dBodyLength = 0;

    // read the data body.
    if (buffer.readableBytes() > 0) {

        if (RpcConstants.EXTRA_DEBUGGING) {
            logger.debug("Reading raw body, buffer has {} bytes available, is available {}.",
                    buffer.readableBytes(), is.available());
        }
        checkTag(is, RpcEncoder.RAW_BODY_TAG);
        dBodyLength = readRawVarint32(is);
        if (buffer.readableBytes() != dBodyLength) {
            throw new CorruptedFrameException(String.format(
                    "Expected to receive a raw body of %d bytes but received a buffer with %d bytes.",
                    dBodyLength, buffer.readableBytes()));
        }
        dBody = buffer.slice();
        dBody.retain();
        if (RpcConstants.EXTRA_DEBUGGING) {
            logger.debug("Read raw body of {}", dBody);
        }

    } else {
        if (RpcConstants.EXTRA_DEBUGGING) {
            logger.debug("No need to read raw body, no readable bytes left.");
        }
    }

    // return the rpc message.
    InboundRpcMessage m = new InboundRpcMessage(header.getMode(), header.getRpcType(),
            header.getCoordinationId(), pBody, dBody);

    // move the reader index forward so the next rpc call won't try to work with it.
    buffer.skipBytes(dBodyLength);
    messageCounter.incrementAndGet();
    if (RpcConstants.SOME_DEBUGGING) {
        logger.debug("Inbound Rpc Message Decoded {}.", m);
    }
    out.add(m);

}
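
This decoder uses both forms of the method: buffer.slice(buffer.readerIndex(), pBodyLength) takes an explicit sub-range, while the zero-argument buffer.slice() used for dBody is shorthand for slicing the whole remaining readable region, equivalent to slice(readerIndex(), readableBytes()). In both cases the slice is retained, and the parent's readerIndex is advanced separately with skipBytes(...), since slicing by itself does not consume any bytes.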

From source file:org.apache.helix.ipc.netty.NettyHelixIPCCallbackHandler.java

License:Apache License

@Override
protected void channelRead0(ChannelHandlerContext ctx, ByteBuf byteBuf) throws Exception {
    try {
        // Message length
        int messageLength = byteBuf.readInt();

        // Message version
        @SuppressWarnings("unused")
        int messageVersion = byteBuf.readInt();

        // Message type
        int messageType = byteBuf.readInt();

        // Message ID
        UUID messageId = new UUID(byteBuf.readLong(), byteBuf.readLong());

        // Cluster
        int clusterSize = byteBuf.readInt();
        checkLength("clusterSize", clusterSize, messageLength);
        String clusterName = toNonEmptyString(clusterSize, byteBuf);

        // Resource
        int resourceSize = byteBuf.readInt();
        checkLength("resourceSize", resourceSize, messageLength);
        String resourceName = toNonEmptyString(resourceSize, byteBuf);

        // Partition
        int partitionSize = byteBuf.readInt();
        checkLength("partitionSize", partitionSize, messageLength);
        String partitionName = toNonEmptyString(partitionSize, byteBuf);

        // State
        int stateSize = byteBuf.readInt();
        checkLength("stateSize", stateSize, messageLength);
        String state = toNonEmptyString(stateSize, byteBuf);

        // Source instance
        int srcInstanceSize = byteBuf.readInt();
        checkLength("srcInstanceSize", srcInstanceSize, messageLength);
        String srcInstance = toNonEmptyString(srcInstanceSize, byteBuf);

        // Destination instance
        int dstInstanceSize = byteBuf.readInt();
        checkLength("dstInstanceSize", dstInstanceSize, messageLength);
        String dstInstance = toNonEmptyString(dstInstanceSize, byteBuf);

        // Message
        int messageSize = byteBuf.readInt();
        ByteBuf message = byteBuf.slice(byteBuf.readerIndex(), messageSize);

        // Error check
        if (dstInstance == null) {
            throw new IllegalStateException(
                    "Received message addressed to null destination from " + srcInstance);
        } else if (!dstInstance.equals(instanceName)) {
            throw new IllegalStateException(
                    instanceName + " received message addressed to " + dstInstance + " from " + srcInstance);
        } else if (callbacks.get(messageType) == null) {
            throw new IllegalStateException("No callback registered for message type " + messageType);
        }

        // Build scope
        HelixMessageScope scope = new HelixMessageScope.Builder().cluster(clusterName).resource(resourceName)
                .partition(partitionName).state(state).sourceInstance(srcInstance).build();

        // Get callback
        HelixIPCCallback callback = callbacks.get(messageType);
        if (callback == null) {
            throw new IllegalStateException("No callback registered for message type " + messageType);
        }

        // Handle callback
        callback.onMessage(scope, messageId, message);

        // Stats
        statRxMsg.mark();
        statRxBytes.mark(messageLength);
    } finally {
        byteBuf.release();
    }

}
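
One subtlety: the message slice shares byteBuf's memory and reference count, and the finally block releases byteBuf as soon as channelRead0 returns. A callback that needs the payload beyond the synchronous onMessage(...) call must therefore copy or retain it first.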

From source file:org.apache.pulsar.client.impl.RawMessageImpl.java

License:Apache License

public static RawMessage deserializeFrom(ByteBuf buffer) {
    try {
        int idSize = buffer.readInt();

        int writerIndex = buffer.writerIndex();
        buffer.writerIndex(buffer.readerIndex() + idSize);
        ByteBufCodedInputStream stream = ByteBufCodedInputStream.get(buffer);
        MessageIdData.Builder builder = MessageIdData.newBuilder();
        MessageIdData id = builder.mergeFrom(stream, null).build();
        buffer.writerIndex(writerIndex);
        builder.recycle();

        int payloadAndMetadataSize = buffer.readInt();
        ByteBuf metadataAndPayload = buffer.slice(buffer.readerIndex(), payloadAndMetadataSize);

        return new RawMessageImpl(id, metadataAndPayload);
    } catch (IOException e) {
        // This is in-memory deserialization, should not fail
        log.error("IO exception deserializing ByteBuf (this shouldn't happen as operation is in-memory)", e);
        throw new RuntimeException(e);
    }
}

From source file:org.apache.pulsar.common.api.PulsarDecoderTest.java

License:Apache License

@Test
public void testChannelRead() throws Exception {
    long consumerId = 1234L;
    ByteBuf changeBuf = Commands.newActiveConsumerChange(consumerId, true);
    ByteBuf cmdBuf = changeBuf.slice(4, changeBuf.writerIndex() - 4);

    doNothing().when(decoder).handleActiveConsumerChange(any(CommandActiveConsumerChange.class));
    decoder.channelRead(mock(ChannelHandlerContext.class), cmdBuf);

    verify(decoder, times(1)).handleActiveConsumerChange(any(CommandActiveConsumerChange.class));
}
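
The slice trims the first 4 bytes of the serialized command, apparently the total-size field of Pulsar's binary framing, which the length-field frame decoder in a real pipeline would already have stripped before channelRead sees the buffer.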

From source file:org.dcache.xrootd.protocol.messages.AuthenticationRequest.java

License:Open Source License

/**
 * Deserialize the buckets sent by the client and put them into an EnumMap
 * keyed by their header information. As there are list-type buffers,
 * this method can be called recursively. In current xrootd, this is
 * limited to a maximum of one level of recursion (a main buffer containing
 * a list of further buffers).
 *
 * @param buffer The buffer containing the received buckets
 * @return Map from bucket-type to deserialized buckets
 * @throws IOException Failure of deserialization
 */
public static Map<BucketType, XrootdBucket> deserializeBuckets(ByteBuf buffer) throws IOException {

    int bucketCode = buffer.readInt();
    BucketType bucketType = BucketType.get(bucketCode);

    LOGGER.debug("Deserializing a bucket with code {}", bucketCode);

    Map<BucketType, XrootdBucket> buckets = new EnumMap<>(BucketType.class);

    while (bucketType != BucketType.kXRS_none) {
        int bucketLength = buffer.readInt();

        XrootdBucket bucket = XrootdBucket.deserialize(bucketType,
                buffer.slice(buffer.readerIndex(), bucketLength));
        buckets.put(bucketType, bucket);

        /* proceed to the next bucket */
        buffer.readerIndex(buffer.readerIndex() + bucketLength);

        bucketCode = buffer.readInt();
        bucketType = BucketType.get(bucketCode);
    }

    return buckets;
}
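
As in the earlier examples, the slice handed to XrootdBucket.deserialize does not consume bytes from buffer, so the loop advances past each bucket manually; the readerIndex(readerIndex() + bucketLength) call is equivalent to skipBytes(bucketLength).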

From source file:org.elasticsearch.transport.netty4.ESLoggingHandler.java

License:Apache License

private String format(final ChannelHandlerContext ctx, final String eventName, final ByteBuf arg)
        throws IOException {
    final int readableBytes = arg.readableBytes();
    if (readableBytes == 0) {
        return super.format(ctx, eventName, arg);
    } else if (readableBytes >= 2) {
        final StringBuilder sb = new StringBuilder();
        sb.append(ctx.channel().toString());
        final int offset = arg.readerIndex();
        // this might be an ES message, check the header
        if (arg.getByte(offset) == (byte) 'E' && arg.getByte(offset + 1) == (byte) 'S') {
            if (readableBytes == TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE) {
                final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET);
                if (length == TcpTransport.PING_DATA_SIZE) {
                    sb.append(" [ping]").append(' ').append(eventName).append(": ").append(readableBytes)
                            .append('B');
                    return sb.toString();
                }
            } else if (readableBytes >= TcpHeader.HEADER_SIZE) {
                // we are going to try to decode this as an ES message
                final int length = arg.getInt(offset + MESSAGE_LENGTH_OFFSET);
                final long requestId = arg.getLong(offset + REQUEST_ID_OFFSET);
                final byte status = arg.getByte(offset + STATUS_OFFSET);
                final boolean isRequest = TransportStatus.isRequest(status);
                final String type = isRequest ? "request" : "response";
                final String version = Version.fromId(arg.getInt(offset + VERSION_ID_OFFSET)).toString();
                sb.append(" [length: ").append(length);
                sb.append(", request id: ").append(requestId);
                sb.append(", type: ").append(type);
                sb.append(", version: ").append(version);
                if (isRequest) {
                    // it looks like an ES request, try to decode the action
                    final int remaining = readableBytes - ACTION_OFFSET;
                    final ByteBuf slice = arg.slice(offset + ACTION_OFFSET, remaining);
                    // the stream might be compressed
                    try (StreamInput in = in(status, slice, remaining)) {
                        // the first bytes in the message are the context headers
                        try (ThreadContext context = new ThreadContext(Settings.EMPTY)) {
                            context.readHeaders(in);
                        }
                        // now we can decode the action name
                        sb.append(", action: ").append(in.readString());
                    }
                }
                sb.append(']');
                sb.append(' ').append(eventName).append(": ").append(readableBytes).append('B');
                return sb.toString();
            }
        }
    }
    // we could not decode this as an ES message, use the default formatting
    return super.format(ctx, eventName, arg);
}

From source file:org.graylog.plugins.netflow.flows.NetFlowV5Packet.java

License:Apache License

public static NetFlowV5Packet parse(InetSocketAddress sender, ByteBuf buf) throws FlowException {
    final int version = (int) getUnsignedInteger(buf, 0, 2);
    if (version != 5) {
        throw new InvalidFlowVersionException(version);
    }

    final int count = (int) getUnsignedInteger(buf, 2, 2);
    if (count <= 0 || buf.readableBytes() < HEADER_SIZE + count * FLOW_SIZE) {
        throw new CorruptFlowPacketException();
    }

    final long uptime = getUnsignedInteger(buf, 4, 4);
    final DateTime timestamp = new DateTime(getUnsignedInteger(buf, 8, 4) * 1000, DateTimeZone.UTC);
    final UUID id = UUIDs.startOf(timestamp.getMillis());
    final long flowSequence = getUnsignedInteger(buf, 16, 4);
    final int engineType = (int) getUnsignedInteger(buf, 20, 1);
    final int engineId = (int) getUnsignedInteger(buf, 21, 1);
    // the first 2 bits are the sampling mode, the remaining 14 the interval
    final int sampling = (int) getUnsignedInteger(buf, 22, 2);
    final int samplingInterval = sampling & 0x3FFF;
    final int samplingMode = sampling >> 14;

    final List<NetFlow> flows = Lists.newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        final NetFlow flowV5 = NetFlowV5.parse(sender, buf.slice(HEADER_SIZE + (i * FLOW_SIZE), FLOW_SIZE), id,
                uptime, timestamp, samplingInterval, samplingInterval > 0);
        flows.add(flowV5);
    }

    return new NetFlowV5Packet(id, sender, buf.readableBytes(), uptime, timestamp, flows, flowSequence,
            engineType, engineId, samplingInterval, samplingMode);
}