Example usage for io.netty.buffer ByteBuf slice

List of usage examples for io.netty.buffer ByteBuf slice

Introduction

On this page you can find example usage for io.netty.buffer ByteBuf slice.

Prototype

public abstract ByteBuf slice(int index, int length);

Document

Returns a slice of this buffer's sub-region.
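
Before the usage examples, here is a minimal sketch (assuming Netty 4.1 and an unpooled heap buffer; class and variable names are illustrative) of what slice(int index, int length) gives you: a zero-copy view that shares the underlying memory with the parent buffer, keeps its own reader and writer indexes, and does not increase the reference count, so it must be retained if it needs to outlive the parent.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.CharsetUtil;

public class ByteBufSliceExample {
    public static void main(String[] args) {
        ByteBuf buffer = Unpooled.copiedBuffer("HEADERpayload", CharsetUtil.UTF_8);

        // Zero-copy view of the first 6 bytes; it shares memory with the parent buffer
        ByteBuf header = buffer.slice(0, 6);
        System.out.println(header.toString(CharsetUtil.UTF_8));  // HEADER

        // The slice keeps its own indexes, but writes through either view are visible in both
        header.setByte(0, 'h');
        System.out.println((char) buffer.getByte(0));             // h

        // slice() does not call retain(); use retainedSlice() (or retain the slice) if the
        // view must outlive the parent buffer's release
        ByteBuf payload = buffer.retainedSlice(6, buffer.readableBytes() - 6);
        buffer.release();
        System.out.println(payload.toString(CharsetUtil.UTF_8)); // payload
        payload.release();
    }
}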

Usage

From source file:io.scalecube.socketio.pipeline.FlashPolicyHandler.java

License:Apache License

@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    if (msg instanceof ByteBuf) {
        ByteBuf message = (ByteBuf) msg;
        if (message.readableBytes() >= policyRequestBuffer.readableBytes()) {
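            // Zero-copy view of the leading bytes, compared against the expected policy request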
            ByteBuf data = message.slice(0, policyRequestBuffer.readableBytes());
            if (data.equals(policyRequestBuffer)) {
                // Remove SSL handler from pipeline otherwise on channel close SSL handler
                // will fail all pending writes instead of flushing them and as a result
                // client won't get flash policy file.
                if (ctx.pipeline().get(SocketIOChannelInitializer.SSL_HANDLER) != null) {
                    ctx.pipeline().remove(SocketIOChannelInitializer.SSL_HANDLER);
                }

                // Send flash policy file and close connection
                ByteBuf response = PipelineUtils.copiedBuffer(ctx.alloc(), policyResponse);
                ChannelFuture f = ctx.writeAndFlush(response);
                f.addListener(ChannelFutureListener.CLOSE);
                if (log.isDebugEnabled())
                    log.debug("Sent flash policy file to channel: {}", ctx.channel());
                message.release();
                return;
            }
        }
        ctx.pipeline().remove(this);
    }
    ctx.fireChannelRead(msg);
}

From source file:io.scalecube.socketio.serialization.PacketDecoder.java

License:Apache License

public static Packet decodePacket(final ByteBuf payload) throws IOException {
    int payloadSize = payload.readableBytes();

    // Decode packet type
    int typeDelimiterIndex = payload.forEachByte(packetDelimiterFinder);
    if (typeDelimiterIndex == -1) {
        return Packet.NULL_INSTANCE;
    }
    ByteBuf typeBytes = payload.slice(0, typeDelimiterIndex);
    String typeString = typeBytes.toString(CharsetUtil.UTF_8);
    int typeId = Integer.valueOf(typeString);
    PacketType type = PacketType.valueOf(typeId);

    // Skip message id
    int messageIdDelimiterIndex = payload.forEachByte(typeDelimiterIndex + 1,
            payloadSize - typeDelimiterIndex - 1, packetDelimiterFinder);
    if (messageIdDelimiterIndex == -1) {
        return Packet.NULL_INSTANCE;
    }

    // Skip endpoint
    int endpointDelimiterIndex = payload.forEachByte(messageIdDelimiterIndex + 1,
            payloadSize - messageIdDelimiterIndex - 1, packetDelimiterFinder);

    // Create instance of packet
    Packet packet = new Packet(type);

    // Decode data
    boolean messagingType = type == PacketType.MESSAGE || type == PacketType.JSON;
    if (endpointDelimiterIndex != -1 && messagingType) {
        int dataLength = payloadSize - endpointDelimiterIndex - 1;
        if (dataLength > 0) {
            ByteBuf data = payload.copy(endpointDelimiterIndex + 1, dataLength);
            packet.setData(data);
        }
    }

    return packet;
}

From source file:io.scalecube.socketio.serialization.PacketFramer.java

License:Apache License

private static Packet decodeNextPacket(final ByteBuf buffer) throws IOException {
    Packet packet;
    if (isDelimiter(buffer, buffer.readerIndex())) {
        CharSequence packetCharsCountString = decodePacketLength(buffer);
        final Integer packetCharsCount = Integer.valueOf(packetCharsCountString.toString());
        final int packetStartIndex = buffer.readerIndex() + DELIMITER_BYTES_SIZE
                + packetCharsCountString.length() + DELIMITER_BYTES_SIZE;
        final int packetBytesCount = getUtf8ByteCountByCharCount(buffer, packetStartIndex, packetCharsCount);

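        // View the packet bytes without copying; the reader index is advanced past them below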
        ByteBuf frame = buffer.slice(packetStartIndex, packetBytesCount);

        packet = PacketDecoder.decodePacket(frame);
        buffer.readerIndex(packetStartIndex + packetBytesCount);
    } else {
        packet = PacketDecoder.decodePacket(buffer);
        buffer.readerIndex(buffer.readableBytes());
    }
    return packet;
}

From source file:net.hasor.rsf.protocol.rsf.RsfDecoder.java

License:Apache License

protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) {
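    // Return the frame as a zero-copy slice of the buffer rather than copying it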
    return buffer.slice(index, length);
}

From source file:net.ieldor.network.session.impl.UpdateSession.java

License:Open Source License

/**
 * Processes the file queue.
 */
public void processFileQueue() {
    FileRequest request;

    synchronized (fileQueue) {
        request = fileQueue.pop();
        if (fileQueue.isEmpty()) {
            idle = true;
        } else {
            service.addPendingSession(this);
            idle = false;
        }
    }

    if (request != null) {
        int type = request.getType();
        int file = request.getFile();

        Cache cache = mainContext.getCache();
        ByteBuf buf;

        try {
            if (type == 255 && file == 255)
                buf = Unpooled.wrappedBuffer(mainContext.getChecksumTable());
            else {
                buf = Unpooled.wrappedBuffer(cache.getStore().read(type, file));
                if (type != 255)
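                    // Trim the trailing two bytes of the stored file before sending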
                    buf = buf.slice(0, buf.readableBytes() - 2);
            }
            channel.write(new FileResponse(request.isPriority(), type, file, buf));
        } catch (IOException ex) {
            logger.log(Level.WARNING, "Failed to service file request " + type + ", " + file + ".", ex);
        }
    }
}

From source file:net.tomp2p.synchronization.SyncUtils.java

License:Apache License

public static List<Instruction> decodeInstructions(ByteBuf buf) {
    List<Instruction> result = new ArrayList<Instruction>();
    while (buf.isReadable()) {
        final int header = buf.readInt();
        if ((header & 0x80000000) != 0) {
            //first bit set, we have a reference
            final int reference = header & 0x7FFFFFFF;
            result.add(new Instruction(reference));
        } else {
            //otherwise the header is the length
            final int length = header;
            final int remaining = Math.min(length, buf.readableBytes());
            DataBuffer literal = new DataBuffer(buf.slice(buf.readerIndex(), remaining));
            buf.skipBytes(remaining);
            result.add(new Instruction(literal));
        }
    }
    return result;
}

From source file:netty.syslog.MessageDecoder.java

License:Open Source License

@Override
protected void decode(ChannelHandlerContext context, ByteBuf buffer, List<Object> objects) throws Exception {
    if (buffer.readableBytes() < 1) {
        return;
    }
    final Message.MessageBuilder messageBuilder = Message.MessageBuilder.create();

    // Decode PRI
    expect(buffer, '<');
    final int pri = readDigit(buffer);
    if (pri < 0 || pri > 191) {
        throw new DecoderException("Invalid PRIVAL " + pri);
    }
    final int facility = pri / 8;
    final int severity = pri % 8;

    messageBuilder.facility(Message.Facility.values()[facility]);
    messageBuilder.severity(Message.Severity.values()[severity]);

    expect(buffer, '>');

    // Decode VERSION
    if (buffer.readByte() != '1') {
        throw new DecoderException("Expected a version 1 syslog message");
    }
    expect(buffer, ' ');

    // Decode TIMESTAMP
    final ZonedDateTime timestamp;
    final String timeStampString = readStringToSpace(buffer, true);
    if (timeStampString == null) {
        timestamp = null;
    } else {
        timestamp = ZonedDateTime.parse(timeStampString);
    }
    messageBuilder.timestamp(timestamp);
    expect(buffer, ' ');

    // Decode HOSTNAME
    messageBuilder.hostname(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // Decode APP-NAME
    messageBuilder.applicationName(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // Decode PROC-ID
    messageBuilder.processId(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // Decode MSGID
    messageBuilder.messageId(readStringToSpace(buffer, true));
    expect(buffer, ' ');

    // TODO Decode structured data
    expect(buffer, '-');
    expect(buffer, ' ');

    final int length = buffer.readableBytes();
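    // slice() shares the parent's reference count without retaining it, so retain the content
    // before the framework releases the input buffer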
    messageBuilder.content(buffer.slice(buffer.readerIndex(), length).retain());
    buffer.skipBytes(length);

    objects.add(messageBuilder.build());
}

From source file:org.apache.bookkeeper.proto.checksum.DigestManager.java

License:Apache License

private void verifyDigest(long entryId, ByteBuf dataReceived, boolean skipEntryIdCheck)
        throws BKDigestMatchException {

    if ((METADATA_LENGTH + macCodeLength) > dataReceived.readableBytes()) {
        logger.error(
                "Data received is smaller than the minimum for this digest type. "
                        + " Either the packet is corrupt, or the wrong digest is configured. "
                        + " Digest type: {}, Packet Length: {}",
                this.getClass().getName(), dataReceived.readableBytes());
        throw new BKDigestMatchException();
    }
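    // Digest the metadata header, then the payload that follows the metadata and received MAC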
    update(dataReceived.slice(0, METADATA_LENGTH));

    int offset = METADATA_LENGTH + macCodeLength;
    update(dataReceived.slice(offset, dataReceived.readableBytes() - offset));

    ByteBuf digest = allocator.buffer(macCodeLength);
    populateValueAndReset(digest);

    try {
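        // The received MAC sits between the metadata and the payload; compare it with the computed digest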
        if (digest.compareTo(dataReceived.slice(METADATA_LENGTH, macCodeLength)) != 0) {
            logger.error("Mac mismatch for ledger-id: " + ledgerId + ", entry-id: " + entryId);
            throw new BKDigestMatchException();
        }
    } finally {
        digest.release();
    }

    long actualLedgerId = dataReceived.readLong();
    long actualEntryId = dataReceived.readLong();

    if (actualLedgerId != ledgerId) {
        logger.error("Ledger-id mismatch in authenticated message, expected: " + ledgerId + " , actual: "
                + actualLedgerId);
        throw new BKDigestMatchException();
    }

    if (!skipEntryIdCheck && actualEntryId != entryId) {
        logger.error("Entry-id mismatch in authenticated message, expected: " + entryId + " , actual: "
                + actualEntryId);
        throw new BKDigestMatchException();
    }

}

From source file:org.apache.bookkeeper.proto.checksum.DigestManager.java

License:Apache License

public long verifyDigestAndReturnLac(ByteBuf dataReceived) throws BKDigestMatchException {
    if ((LAC_METADATA_LENGTH + macCodeLength) > dataReceived.readableBytes()) {
        logger.error(
                "Data received is smaller than the minimum for this digest type."
                        + " Either the packet is corrupt, or the wrong digest is configured. "
                        + " Digest type: {}, Packet Length: {}",
                this.getClass().getName(), dataReceived.readableBytes());
        throw new BKDigestMatchException();
    }

    update(dataReceived.slice(0, LAC_METADATA_LENGTH));

    ByteBuf digest = allocator.buffer(macCodeLength);
    try {
        populateValueAndReset(digest);

        if (digest.compareTo(dataReceived.slice(LAC_METADATA_LENGTH, macCodeLength)) != 0) {
            logger.error("Mac mismatch for ledger-id LAC: " + ledgerId);
            throw new BKDigestMatchException();
        }
    } finally {
        digest.release();
    }

    long actualLedgerId = dataReceived.readLong();
    long lac = dataReceived.readLong();
    if (actualLedgerId != ledgerId) {
        logger.error("Ledger-id mismatch in authenticated message, expected: " + ledgerId + " , actual: "
                + actualLedgerId);
        throw new BKDigestMatchException();
    }
    return lac;
}

From source file:org.apache.distributedlog.EnvelopedEntry.java

License:Apache License

/**
 * Returns a {@link ByteBuf} that reads from the provided {@link ByteBuf}, decompresses the data
 * and wraps the underlying payload in a new buffer.
 * Note that src is modified by this call.
 *
 * @return
 *      New buffer with the decompressed payload.
 * @throws IOException
 */
public static ByteBuf fromEnvelopedBuf(ByteBuf src, StatsLogger statsLogger) throws IOException {
    byte version = src.readByte();
    if (version != CURRENT_VERSION) {
        throw new IOException(String.format("Version mismatch while reading. Received: %d," + " Required: %d",
                version, CURRENT_VERSION));
    }
    int flags = src.readInt();
    int codecCode = flags & COMPRESSION_CODEC_MASK;
    int originDataLen = src.readInt();
    int actualDataLen = src.readInt();
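    // Zero-copy view of the compressed payload; released in the finally block below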
    ByteBuf compressedBuf = src.slice(src.readerIndex(), actualDataLen);
    ByteBuf decompressedBuf;
    try {
        if (Type.NONE.code() == codecCode && originDataLen != actualDataLen) {
            throw new IOException("Inconsistent data length found for a non-compressed entry : compressed = "
                    + originDataLen + ", actual = " + actualDataLen);
        }
        CompressionCodec codec = CompressionUtils.getCompressionCodec(Type.of(codecCode));
        decompressedBuf = codec.decompress(compressedBuf, originDataLen);
    } finally {
        compressedBuf.release();
    }
    return decompressedBuf;
}