Example usage for io.netty.buffer ByteBufAllocator buffer

List of usage examples for io.netty.buffer ByteBufAllocator buffer

Introduction

On this page you can find example usages of io.netty.buffer.ByteBufAllocator.buffer.

Prototype

ByteBuf buffer(int initialCapacity);

Document

Allocate a ByteBuf with the given initial capacity.
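
A minimal sketch of the allocate/use/release cycle, assuming the default allocator. ByteBuf instances are reference-counted, so whoever ends up owning the buffer must release it:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;

ByteBuf buf = ByteBufAllocator.DEFAULT.buffer(256); // initial capacity of 256 bytes; the buffer grows on demand
try {
    buf.writeBytes(new byte[] { 1, 2, 3 });
    // ... read from or hand off the buffer here ...
} finally {
    buf.release(); // decrement the reference count; pooled memory is returned here
}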

Usage

From source file:com.github.milenkovicm.kafka.connection.DataKafkaBroker.java

License:Apache License

public static ByteBuf createMessageSet(ByteBufAllocator allocator, ByteBuf key, int partition,
        ByteBuf message) {

    // MESSAGE SET SIZE
    //
    //  message
    //  key
    // --
    // message+key
    final int messageKeySize = Convert.sizeOfBytes(message) + Convert.sizeOfBytes(key); // magic byte and attributes are accounted for below

    // TOTAL SIZE OF PARTITION MESSAGE
    // ALLOCATE
    //
    // 4 - partition
    // 4 - length
    // 18 - message set size
    // --
    // 26
    ByteBuf buffer = allocator.buffer(26 + messageKeySize);
    Convert.encodeInteger(partition, buffer); // partition id

    // MESSAGE SET SIZE
    //
    //  8 - offset
    //  4 - msg size
    //  4 - crc
    //  1 - magic byte
    //  1 - attributes
    // --
    // 18
    Convert.encodeInteger(messageKeySize + 18, buffer); // message set size
    Convert.encodeLong(0, buffer); // offset (MAY BE ZERO ALL THE TIMES FOR PRODUCER)
    Convert.encodeInteger(messageKeySize + 6, buffer); // message size ( +4 crc size)
    Convert.encodeInteger(0, buffer); //CRC (will be calculated later)
    Convert.encodeByte(0, buffer); // magic bytes
    Convert.encodeByte(0, buffer); // attributes
    Convert.encodeBytes(key, buffer);
    Convert.encodeBytes(message, buffer);

    return buffer;
}
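
A sketch of how this helper might be invoked. The allocator choice and the key/value payloads are placeholder assumptions, and the releases assume the Convert helpers do not consume their inputs:

ByteBufAllocator alloc = PooledByteBufAllocator.DEFAULT;
ByteBuf key = Unpooled.copiedBuffer("key", StandardCharsets.UTF_8);
ByteBuf value = Unpooled.copiedBuffer("value", StandardCharsets.UTF_8);

ByteBuf messageSet = DataKafkaBroker.createMessageSet(alloc, key, 0, value);
// ... hand messageSet to the channel, which then becomes responsible for releasing it ...
key.release();
value.release();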

From source file:com.github.milenkovicm.kafka.handler.CompositeProducerHandler.java

License:Apache License

ByteBuf creteProduceRequest(ByteBufAllocator allocator, ByteBuf messageSet, String topic) {
    final int messageSetSize = messageSet.readableBytes();

    // A bit of hardcoded logic follows:
    // the total length is the length of all the fields;
    // the sizes of the arrays and strings for the
    // topic and client id are pre-calculated.
    final int totalLength = 22 + messageSetSize + Convert.sizeOfString(topicNameEncoded)
            + Convert.sizeOfString(clientIdEncoded);
    // 18 + clientId.length
    ByteBuf header = allocator
            .buffer(24 + Convert.sizeOfString(topicNameEncoded) + Convert.sizeOfString(clientIdEncoded));
    createMessageHeader(totalLength, header);

    updateCrc(messageSet);
    return Unpooled.wrappedBuffer(header, messageSet);

}

From source file:com.github.milenkovicm.kafka.handler.CopyProducerHandler.java

License:Apache License

ByteBuf creteProduceRequest(ByteBufAllocator allocator, ByteBuf messageSet, String topic) {
    final int messageSetSize = messageSet.readableBytes();
    // A bit of hardcoded logic follows:
    // the total length is the length of all the fields;
    // the sizes of the arrays and strings for the
    // topic and client id are pre-calculated.
    final int totalLength = 22 + messageSetSize + Convert.sizeOfString(topicNameEncoded)
            + Convert.sizeOfString(clientIdEncoded);

    ByteBuf buffer = allocator.buffer(totalLength + 4); // msg length + size of length field
    createMessageHeader(totalLength, buffer);
    updateCrc(messageSet);
    buffer.writeBytes(messageSet);

    return buffer;
}

From source file:com.github.milenkovicm.kafka.handler.MetadataHandler.java

License:Apache License

ByteBuf createMetadataRequest(ByteBufAllocator allocator, String topic, int correlation) {
    final int totalLength = 12 + Convert.sizeOfString(topic) + Convert.sizeOfString(clientId);
    // +4 as total message length does not include length field
    ByteBuf buffer = allocator.buffer(totalLength + 4);
    Convert.encodeInteger(totalLength, buffer);// length
    Convert.encodeShort(Api.Key.METADATA_REQUEST.value, buffer);//api key
    Convert.encodeShort(Api.VERSION, buffer); // api version
    Convert.encodeInteger(correlation, buffer); // correlation id
    Convert.encodeString(clientIdEncoded, buffer); //clientid
    Convert.encodeStringArray(buffer, topic);

    return buffer;
}

From source file:com.ibasco.agql.protocols.valve.source.query.handlers.SourceQueryPacketAssembler.java

License:Open Source License

/**
 * Process split-packet data.
 *
 * @param data
 *         The {@link ByteBuf} containing the split-packet data
 * @param allocator
 *         The {@link ByteBufAllocator} used to create/allocate pooled buffers
 *
 * @return a non-null {@link ByteBuf} once all split-packets have been assembled, or {@code null} if the
 *         response is not yet complete
 *
 * @throws Exception
 */
private ByteBuf processSplitPackets(ByteBuf data, ByteBufAllocator allocator, InetSocketAddress senderAddress)
        throws Exception {
    int packetCount, packetNumber, requestId, splitSize, packetChecksum = 0;
    boolean isCompressed;

    //Start processing
    requestId = data.readIntLE();
    //the response is compressed if the most significant bit of the request id is set
    isCompressed = ((requestId & 0x80000000) != 0);
    //The total number of packets in the response.
    packetCount = data.readByte();
    //The number of the packet. Starts at 0.
    packetNumber = data.readByte();

    //Create our key for this request (request id + sender ip)
    final SplitPacketKey key = new SplitPacketKey(requestId, senderAddress);

    log.debug("Processing split packet {}", key);

    log.debug(
            "Split Packet Received = (AbstractRequest {}, Packet Number {}, Packet Count {}, Is Compressed: {})",
            requestId, packetNumber, packetCount, isCompressed);

    //Try to retrieve the split packet container for this request (if existing)
    //If request is not yet on the map, create and retrieve
    SplitPacketContainer splitPackets = this.requestMap.computeIfAbsent(key,
            k -> new SplitPacketContainer(packetCount));

    //As per protocol specs, the size is only present in the first packet of the response and only if the response is being compressed.
    //Split size: the maximum size of a packet before packet switching occurs. The default value is 1248 bytes (0x04E0).
    if (isCompressed) {
        splitSize = data.readIntLE();
        packetChecksum = data.readIntLE();
    } else {
        splitSize = data.readShortLE();
    }

    //TODO: Handle compressed split packets
    int bufferSize = Math.min(splitSize, data.readableBytes());
    byte[] splitPacket = new byte[bufferSize];
    data.readBytes(splitPacket); //transfer the split data into this buffer

    //Add the split packet to the container
    splitPackets.addPacket(packetNumber, splitPacket);

    //Have we received all packets for this request?
    if (splitPackets.isComplete()) {
        log.debug(
                "Split Packets have all been successfully received from AbstractRequest {}. Re-assembling packets.",
                requestId);

        //Retrieve total split packets received based on their length
        int packetSize = splitPackets.getPacketSize();
        //Allocate a new buffer to store the re-assembled packets
        final ByteBuf packetBuffer = allocator.buffer(packetSize);
        boolean done = false;
        try {
            //Start re-assembling split-packets from the container
            done = reassembleSplitPackets(splitPackets, packetBuffer, isCompressed, splitSize, packetChecksum);
        } catch (Exception e) {
            //If an error occurs during re-assembly, make sure we release the allocated buffer
            packetBuffer.release();
            throw e;
        } finally {
            if (done)
                requestMap.remove(key);
        }

        return packetBuffer;
    }

    //Return null, indicating that we still don't have a complete packet
    return null;
}

From source file:com.liferay.sync.engine.lan.server.file.SyncChunkedFile.java

License:Open Source License

@Override
public ByteBuf readChunk(ByteBufAllocator byteBufAllocator) throws Exception {

    long offset = _offset;

    if (offset >= _endOffset) {
        return null;
    }

    int chunkSize = (int) Math.min((long) _chunkSize, _endOffset - offset);

    ByteBuf byteBuf = byteBufAllocator.buffer(chunkSize);

    boolean release = true;

    try {
        FileTime currentFileTime = Files.getLastModifiedTime(_path, LinkOption.NOFOLLOW_LINKS);

        long currentTime = currentFileTime.toMillis();

        if (currentTime != _modifiedTime) {
            throw new Exception("File modified during transfer: " + _path);
        }

        int bytesRead = 0;

        if (_closeAggressively || (_fileChannel == null)) {
            _fileChannel = FileChannel.open(_path);

            _fileChannel.position(_offset);
        }

        while (true) {
            int localBytesRead = byteBuf.writeBytes(_fileChannel, chunkSize - bytesRead);

            if (localBytesRead >= 0) {
                bytesRead += localBytesRead;

                if (bytesRead != chunkSize) {
                    continue;
                }
            }

            _offset += bytesRead;

            release = false;

            return byteBuf;
        }
    } finally {
        if (_closeAggressively && (_fileChannel != null)) {
            _fileChannel.close();
        }

        if (release) {
            byteBuf.release();
        }
    }
}

From source file:com.netflix.ribbon.examples.rx.common.RxMovieTransformer.java

License:Apache License

@Override
public ByteBuf call(Movie movie, ByteBufAllocator byteBufAllocator) {
    byte[] bytes = movie.toString().getBytes(Charset.defaultCharset());
    ByteBuf byteBuf = byteBufAllocator.buffer(bytes.length);
    byteBuf.writeBytes(bytes);
    return byteBuf;
}
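
Reading the serialized movie back out is the mirror image. A small sketch, assuming a `transformer` and `movie` are in scope and that both sides use the same default charset:

ByteBuf byteBuf = transformer.call(movie, ByteBufAllocator.DEFAULT);
String serialized = byteBuf.toString(Charset.defaultCharset()); // decodes the readable bytes back to text
byteBuf.release();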

From source file:com.spotify.folsom.client.Request.java

License:Apache License

protected static ByteBuf toBuffer(final ByteBufAllocator alloc, ByteBuffer dst, int extra) {
    // TODO (dano): write directly to target buffer
    dst.flip();
    final ByteBuf buffer = alloc.buffer(dst.remaining() + extra);
    buffer.writeBytes(dst);
    return buffer;
}
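
The flip-then-copy step is the easy part to get wrong. A standalone sketch of the same pattern, assuming a heap ByteBuffer that has just been filled:

ByteBuffer src = ByteBuffer.allocate(64);
src.put("hello".getBytes(StandardCharsets.UTF_8));
src.flip(); // switch from writing to reading: limit = position, position = 0

ByteBuf copy = ByteBufAllocator.DEFAULT.buffer(src.remaining() + 2); // spare room, like `extra` above
copy.writeBytes(src); // copies remaining() bytes and advances src's position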

From source file:com.spotify.netty4.handler.codec.zmtp.ZMTPMessage.java

License:Apache License

/**
 * Convenience method for writing a {@link ZMTPMessage} to a {@link ByteBuf}.
 */
public ByteBuf write(final ByteBufAllocator alloc, final ZMTPVersion version) {
    final ZMTPMessageEncoder encoder = new ZMTPMessageEncoder();
    final ZMTPEstimator estimator = ZMTPEstimator.create(version);
    encoder.estimate(this, estimator);
    final ByteBuf out = alloc.buffer(estimator.size());
    final ZMTPWriter writer = ZMTPWriter.create(version);
    writer.reset(out);
    encoder.encode(this, writer);
    return out;
}

From source file:com.uber.tchannel.codecs.CodecUtils.java

License:Open Source License

public static int writeArg(ByteBufAllocator allocator, ByteBuf arg, int writableBytes, List<ByteBuf> bufs) {
    if (writableBytes <= TFrame.FRAME_SIZE_LENGTH) {
        throw new UnsupportedOperationException(
                "writableBytes must be larger than " + TFrame.FRAME_SIZE_LENGTH);
    }

    int readableBytes = arg.readableBytes();
    int headerSize = TFrame.FRAME_SIZE_LENGTH;
    int chunkLength = Math.min(readableBytes + headerSize, writableBytes);
    ByteBuf sizeBuf = allocator.buffer(TFrame.FRAME_SIZE_LENGTH);
    bufs.add(sizeBuf);

    // Write the size of the `arg`
    sizeBuf.writeShort(chunkLength - headerSize);
    if (readableBytes == 0) {
        return TFrame.FRAME_SIZE_LENGTH;
    } else {
        bufs.add(arg.readSlice(chunkLength - headerSize).retain());
        return chunkLength;
    }
}
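
A sketch of how writeArg might be driven to split one argument across several frames. The 4096-byte per-frame budget and the payload size are assumptions, not TChannel constants:

List<ByteBuf> bufs = new ArrayList<>();
ByteBuf arg = Unpooled.wrappedBuffer(new byte[10000]); // placeholder payload

while (arg.readableBytes() > 0) {
    // each call appends a size-prefix buffer plus a retained slice of `arg`
    CodecUtils.writeArg(ByteBufAllocator.DEFAULT, arg, 4096, bufs);
}
// the caller must eventually release every buffer in `bufs`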