Example usage for io.netty.buffer ByteBufOutputStream ByteBufOutputStream(ByteBuf)

Introduction

This page collects example usages of the io.netty.buffer.ByteBufOutputStream constructor, ByteBufOutputStream(ByteBuf), drawn from open-source projects.

Prototype

public ByteBufOutputStream(ByteBuf buffer) 

Documentation

Creates a new stream which writes data to the specified buffer.
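
Before the project-specific examples below, here is a minimal, self-contained sketch of the constructor in use. The class name is illustrative and assumes only netty-buffer on the classpath; note that ByteBufOutputStream also implements java.io.DataOutput, which is why the examples that follow can call methods such as writeInt and writeByte on it.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.Unpooled;

import java.io.IOException;

public class ByteBufOutputStreamExample {
    public static void main(String[] args) throws IOException {
        ByteBuf buf = Unpooled.buffer();
        // The stream writes straight through to buf; closing the stream
        // does not release the buffer.
        try (ByteBufOutputStream out = new ByteBufOutputStream(buf)) {
            out.writeInt(42);      // DataOutput method
            out.writeUTF("hello"); // modified UTF-8, length-prefixed
        }
        System.out.println("bytes written: " + buf.readableBytes());
        buf.release(); // the caller still owns the buffer
    }
}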

Usage

From source file: org.apache.flink.runtime.query.netty.message.KvStateRequestSerializer.java

License: Apache License

/**
 * Allocates a buffer and serializes the KvState request failure into it.
 *
 * @param alloc ByteBuf allocator for the buffer to serialize message into
 * @param requestId ID of the request responding to
 * @param cause Failure cause
 * @return Serialized KvState request failure message
 * @throws IOException Serialization failures are forwarded
 */
public static ByteBuf serializeKvStateRequestFailure(ByteBufAllocator alloc, long requestId, Throwable cause)
        throws IOException {

    ByteBuf buf = alloc.ioBuffer();

    // Frame length is set at the end
    buf.writeInt(0);

    writeHeader(buf, KvStateRequestType.REQUEST_FAILURE);

    // Message
    buf.writeLong(requestId);

    try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
            ObjectOutputStream out = new ObjectOutputStream(bbos)) {

        out.writeObject(cause);
    }

    // Set frame length
    int frameLength = buf.readableBytes() - 4;
    buf.setInt(0, frameLength);

    return buf;
}
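
The frame layout written above is a 4-byte length placeholder, the header, the request ID, and finally the Java-serialized Throwable; once everything is in the buffer, the placeholder is patched with buf.setInt(0, frameLength). For illustration only, a decode counterpart (hypothetical, not part of Flink's API) could use the matching ByteBufInputStream:

// Hypothetical decode helper, assuming the 4-byte frame length and the
// header have already been consumed from buf, and that ByteBufInputStream
// and java.io.ObjectInputStream are imported.
static Throwable deserializeFailure(ByteBuf buf) throws IOException, ClassNotFoundException {
    long requestId = buf.readLong(); // a real caller would match this to a pending request
    try (ByteBufInputStream bbis = new ByteBufInputStream(buf);
            ObjectInputStream in = new ObjectInputStream(bbis)) {
        return (Throwable) in.readObject();
    }
}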

From source file: org.apache.flink.runtime.query.netty.message.KvStateRequestSerializer.java

License: Apache License

/**
 * Allocates a buffer and serializes the server failure into it.
 *
 * <p>The cause must not be or contain any user types as causes.
 *
 * @param alloc ByteBuf allocator for the buffer to serialize message into
 * @param cause Failure cause
 * @return Serialized server failure message
 * @throws IOException Serialization failures are forwarded
 */
public static ByteBuf serializeServerFailure(ByteBufAllocator alloc, Throwable cause) throws IOException {
    ByteBuf buf = alloc.ioBuffer();

    // Frame length is set at end
    buf.writeInt(0);

    writeHeader(buf, KvStateRequestType.SERVER_FAILURE);

    try (ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
            ObjectOutputStream out = new ObjectOutputStream(bbos)) {

        out.writeObject(cause);
    }

    // Set frame length
    int frameLength = buf.readableBytes() - 4;
    buf.setInt(0, frameLength);

    return buf;
}
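
This variant uses the same reserve-and-patch framing but writes no request ID, since a server failure is not tied to any single request. The Javadoc's warning about user types follows from the transport: the Throwable travels via plain Java serialization, so any user class in its cause chain would have to be present on the receiving classpath.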

From source file: org.apache.giraph.comm.netty.handler.RequestEncoder.java

License: Apache License

@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    if (!(msg instanceof WritableRequest)) {
        throw new IllegalArgumentException("encode: Got a message of type " + msg.getClass());
    }

    // Encode the request
    if (LOG.isDebugEnabled()) {
        startEncodingNanoseconds = TIME.getNanoseconds();
    }

    ByteBuf buf;
    WritableRequest request = (WritableRequest) msg;
    int requestSize = request.getSerializedSize();
    if (requestSize == WritableRequest.UNKNOWN_SIZE) {
        buf = ctx.alloc().buffer(bufferStartingSize);
    } else {
        requestSize += SIZE_OF_INT + SIZE_OF_BYTE;
        buf = ctx.alloc().buffer(requestSize);
    }
    ByteBufOutputStream output = new ByteBufOutputStream(buf);

    // This will later be filled with the correct size of serialized request
    output.writeInt(0);
    output.writeByte(request.getType().ordinal());
    try {
        request.write(output);
    } catch (IndexOutOfBoundsException e) {
        LOG.error("write: Most likely the size of request was not properly "
                + "specified (this buffer is too small) - see getSerializedSize() " + "in "
                + request.getType().getRequestClass());
        throw new IllegalStateException(e);
    }
    output.flush();
    output.close();

    // Set the correct size at the end
    buf.setInt(0, buf.writerIndex() - SIZE_OF_INT);
    if (LOG.isDebugEnabled()) {
        LOG.debug("write: Client " + request.getClientId() + ", " + "requestId " + request.getRequestId()
                + ", size = " + buf.readableBytes() + ", " + request.getType() + " took "
                + Times.getNanosSince(TIME, startEncodingNanoseconds) + " ns");
    }
    ctx.write(buf, promise);
}
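
This is the same reserve-then-patch framing as in the Flink examples: writeInt(0) reserves four bytes that buf.setInt(0, buf.writerIndex() - SIZE_OF_INT) later overwrites with the payload size. Since ByteBufOutputStream writes directly into the backing buffer, flush() has nothing to buffer and close() does not release the ByteBuf, so the buffer remains valid for ctx.write(buf, promise), after which Netty releases it downstream.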

From source file: org.apache.giraph.comm.netty.handler.ResponseEncoder.java

License: Apache License

@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    if (LOG.isDebugEnabled()) {
        LOG.debug("write(" + ctx + "," + msg);
    }

    if (!(msg instanceof WritableRequest)) {
        throw new IllegalArgumentException("encode: cannot encode message of type " + msg.getClass()
                + " since it is not an instance of an implementation of " + " WritableRequest.");
    }
    @SuppressWarnings("unchecked")
    WritableRequest writableRequest = (WritableRequest) msg;

    ByteBuf buf = ctx.alloc().buffer(10);
    ByteBufOutputStream output = new ByteBufOutputStream(buf);

    if (LOG.isDebugEnabled()) {
        LOG.debug("encode: Encoding a message of type " + msg.getClass());
    }

    // Space is reserved now to be filled later by the serialize request size
    output.writeInt(0);
    // write type of object.
    output.writeByte(writableRequest.getType().ordinal());
    // write the object itself.
    writableRequest.write(output);

    output.flush();
    output.close();

    // Set the correct size at the end.
    buf.setInt(0, buf.writerIndex() - SIZE_OF_INT);

    if (LOG.isDebugEnabled()) {
        LOG.debug("encode: Encoding a message of type " + msg.getClass());
    }
    /*if[HADOOP_NON_SECURE]
    else[HADOOP_NON_SECURE]*/
    if (writableRequest.getType() == RequestType.SASL_COMPLETE_REQUEST) {
        // We are sending to the client a SASL_COMPLETE response (created by
        // the SaslServer handler). The SaslServer handler has removed itself
        // from the pipeline after creating this response, and now it's time for
        // the ResponseEncoder to remove itself also.
        if (LOG.isDebugEnabled()) {
            LOG.debug("encode: Removing RequestEncoder handler: no longer needed," + " since client: "
                    + ctx.channel().remoteAddress() + " has " + "completed authenticating.");
        }
        ctx.pipeline().remove(this);
    }
    /*end[HADOOP_NON_SECURE]*/
    ctx.write(buf, promise);
}
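
The /*if[HADOOP_NON_SECURE] ... end[HADOOP_NON_SECURE]*/ comments are munge preprocessor directives used by Giraph's build to compile different code for secure and non-secure Hadoop; the SASL block they bracket is live code in the secure build, not commented-out leftovers.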

From source file: org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.java

License: Apache License

private static void requestWriteBlock(Channel channel, Enum<?> storageType,
        OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
    OpWriteBlockProto proto = STORAGE_TYPE_SETTER.set(writeBlockProtoBuilder, storageType).build();
    int protoLen = proto.getSerializedSize();
    ByteBuf buffer = channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
    buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    buffer.writeByte(Op.WRITE_BLOCK.code);
    proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
    channel.writeAndFlush(buffer);
}
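
The allocation size is exact: 3 bytes for the 2-byte transfer version (writeShort) plus the 1-byte opcode (writeByte), then computeRawVarint32Size(protoLen) bytes for the varint length prefix that writeDelimitedTo emits, followed by the protoLen-byte message itself. The ByteBufOutputStream is created inline purely to adapt protobuf's OutputStream-based API onto the ByteBuf.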

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannel.java

License: Apache License

/**
 * Write request to channel
 *
 * @param call    to write
 */
private void writeRequest(final AsyncCall call) {
    try {
        final RPCProtos.RequestHeader.Builder requestHeaderBuilder = RPCProtos.RequestHeader.newBuilder();
        requestHeaderBuilder.setCallId(call.id).setMethodName(call.method.getName())
                .setRequestParam(call.param != null);

        if (Trace.isTracing()) {
            Span s = Trace.currentSpan();
            requestHeaderBuilder.setTraceInfo(
                    TracingProtos.RPCTInfo.newBuilder().setParentId(s.getSpanId()).setTraceId(s.getTraceId()));
        }

        ByteBuffer cellBlock = client.buildCellBlock(call.controller.cellScanner());
        if (cellBlock != null) {
            final RPCProtos.CellBlockMeta.Builder cellBlockBuilder = RPCProtos.CellBlockMeta.newBuilder();
            cellBlockBuilder.setLength(cellBlock.limit());
            requestHeaderBuilder.setCellBlockMeta(cellBlockBuilder.build());
        }
        // Only pass priority if there is one. Let zero be the same as no priority.
        if (call.controller.getPriority() != 0) {
            requestHeaderBuilder.setPriority(call.controller.getPriority());
        }

        RPCProtos.RequestHeader rh = requestHeaderBuilder.build();

        int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(rh, call.param);
        if (cellBlock != null) {
            totalSize += cellBlock.remaining();
        }

        ByteBuf b = channel.alloc().directBuffer(4 + totalSize);
        try (ByteBufOutputStream out = new ByteBufOutputStream(b)) {
            IPCUtil.write(out, rh, call.param, cellBlock);
        }

        channel.writeAndFlush(b).addListener(new CallWriteListener(this, call.id));
    } catch (IOException e) {
        close(e);
    }
}
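
The try-with-resources block here is safe because ByteBufOutputStream.close() does not release the underlying ByteBuf: b remains valid for channel.writeAndFlush(b), and Netty releases it once the write completes.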

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcChannelImpl.java

License: Apache License

/**
 * Write request to channel
 * @param call to write
 */
private void writeRequest(final AsyncCall call) {
    try {
        final RPCProtos.RequestHeader.Builder requestHeaderBuilder = RPCProtos.RequestHeader.newBuilder();
        requestHeaderBuilder.setCallId(call.id).setMethodName(call.method.getName())
                .setRequestParam(call.param != null);

        if (Trace.isTracing()) {
            Span s = Trace.currentSpan();
            requestHeaderBuilder.setTraceInfo(
                    TracingProtos.RPCTInfo.newBuilder().setParentId(s.getSpanId()).setTraceId(s.getTraceId()));
        }

        ByteBuffer cellBlock = client.buildCellBlock(call.cellScanner());
        if (cellBlock != null) {
            final RPCProtos.CellBlockMeta.Builder cellBlockBuilder = RPCProtos.CellBlockMeta.newBuilder();
            cellBlockBuilder.setLength(cellBlock.limit());
            requestHeaderBuilder.setCellBlockMeta(cellBlockBuilder.build());
        }
        // Only pass priority if there is one. Let zero be the same as no priority.
        if (call.getPriority() != PayloadCarryingRpcController.PRIORITY_UNSET) {
            requestHeaderBuilder.setPriority(call.getPriority());
        }
        requestHeaderBuilder
                .setTimeout(call.rpcTimeout > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) call.rpcTimeout);

        RPCProtos.RequestHeader rh = requestHeaderBuilder.build();

        int totalSize = IPCUtil.getTotalSizeWhenWrittenDelimited(rh, call.param);
        if (cellBlock != null) {
            totalSize += cellBlock.remaining();
        }

        ByteBuf b = channel.alloc().directBuffer(4 + totalSize);
        try (ByteBufOutputStream out = new ByteBufOutputStream(b)) {
            call.callStats.setRequestSizeBytes(IPCUtil.write(out, rh, call.param, cellBlock));
        }

        channel.writeAndFlush(b).addListener(new CallWriteListener(this, call.id));
    } catch (IOException e) {
        close(e);
    }
}
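
This is a near-identical variant of the previous example; the differences are that call.rpcTimeout is clamped into the header's int timeout field and the serialized request size is recorded through call.callStats, while the ByteBufOutputStream usage is unchanged.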

From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

License: Apache License

NettyRpcConnection(NettyRpcClient rpcClient, ConnectionId remoteId) throws IOException {
    super(rpcClient.conf, AbstractRpcClient.WHEEL_TIMER, remoteId, rpcClient.clusterId,
            rpcClient.userProvider.isHBaseSecurityEnabled(), rpcClient.codec, rpcClient.compressor);
    this.rpcClient = rpcClient;
    byte[] connectionHeaderPreamble = getConnectionHeaderPreamble();
    this.connectionHeaderPreamble = Unpooled.directBuffer(connectionHeaderPreamble.length)
            .writeBytes(connectionHeaderPreamble);
    ConnectionHeader header = getConnectionHeader();
    this.connectionHeaderWithLength = Unpooled.directBuffer(4 + header.getSerializedSize());
    this.connectionHeaderWithLength.writeInt(header.getSerializedSize());
    header.writeTo(new ByteBufOutputStream(this.connectionHeaderWithLength));
}
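
Here ByteBufOutputStream bridges protobuf's writeTo(OutputStream) onto a ByteBuf: the 4-byte length is written with writeInt directly on the buffer, and the ConnectionHeader body streams in behind it, leaving connectionHeaderWithLength ready to send as-is.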

From source file: org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.java

License: Apache License

private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise promise) throws IOException {
    id2Call.put(call.id, call);
    ByteBuf cellBlock = cellBlockBuilder.buildCellBlock(codec, compressor, call.cells, ctx.alloc());
    CellBlockMeta cellBlockMeta;
    if (cellBlock != null) {
        CellBlockMeta.Builder cellBlockMetaBuilder = CellBlockMeta.newBuilder();
        cellBlockMetaBuilder.setLength(cellBlock.writerIndex());
        cellBlockMeta = cellBlockMetaBuilder.build();
    } else {
        cellBlockMeta = null;
    }
    RequestHeader requestHeader = IPCUtil.buildRequestHeader(call, cellBlockMeta);
    int sizeWithoutCellBlock = IPCUtil.getTotalSizeWhenWrittenDelimited(requestHeader, call.param);
    int totalSize = cellBlock != null ? sizeWithoutCellBlock + cellBlock.writerIndex() : sizeWithoutCellBlock;
    ByteBuf buf = ctx.alloc().buffer(sizeWithoutCellBlock + 4);
    buf.writeInt(totalSize);
    ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
    requestHeader.writeDelimitedTo(bbos);
    if (call.param != null) {
        call.param.writeDelimitedTo(bbos);
    }
    if (cellBlock != null) {
        ChannelPromise withoutCellBlockPromise = ctx.newPromise();
        ctx.write(buf, withoutCellBlockPromise);
        ChannelPromise cellBlockPromise = ctx.newPromise();
        ctx.write(cellBlock, cellBlockPromise);
        PromiseCombiner combiner = new PromiseCombiner();
        combiner.addAll(withoutCellBlockPromise, cellBlockPromise);
        combiner.finish(promise);
    } else {
        ctx.write(buf, promise);
    }
}
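
When a cell block is present, the header buffer and the cell-block buffer are written separately and their promises merged with PromiseCombiner, so the caller's promise completes only once both underlying writes have.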

From source file: org.apache.hadoop.hbase.util.FanOutOneBlockAsyncDFSOutputHelper.java

License: Apache License

private static void requestWriteBlock(Channel channel, Enum<?> storageType,
        OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException {
    // TODO: SASL negotiation. should be done using a netty Handler.
    OpWriteBlockProto proto = STORAGE_TYPE_SETTER.set(writeBlockProtoBuilder, storageType).build();
    int protoLen = proto.getSerializedSize();
    ByteBuf buffer = channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen);
    buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
    buffer.writeByte(Op.WRITE_BLOCK.code);
    proto.writeDelimitedTo(new ByteBufOutputStream(buffer));
    channel.writeAndFlush(buffer);
}