Example usage for io.netty.channel ChannelHandlerContext alloc

List of usage examples for io.netty.channel ChannelHandlerContext alloc

Introduction

This page lists example usages of the io.netty.channel ChannelHandlerContext.alloc() method.

Prototype

ByteBufAllocator alloc();

Document

Returns the assigned ByteBufAllocator, which will be used to allocate ByteBufs.
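
For orientation, here is a minimal sketch of the usual pattern: a handler asks the channel's configured allocator for a buffer via ctx.alloc(), fills it, and passes it down the pipeline. The handler name and echo behavior are hypothetical, chosen only to illustrate the call.

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

// Hypothetical echo-style handler illustrating ctx.alloc().
public class AllocExampleHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        ByteBuf in = (ByteBuf) msg;
        try {
            // Allocate a response buffer from the channel's ByteBufAllocator
            // (pooled and typically direct by default).
            ByteBuf out = ctx.alloc().buffer(in.readableBytes());
            out.writeBytes(in);
            // Netty releases 'out' once it has been flushed to the socket.
            ctx.writeAndFlush(out);
        } finally {
            // The last consumer of the inbound buffer must release it.
            in.release();
        }
    }
}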

Usage

From source file:org.apache.drill.exec.rpc.SaslEncryptionHandler.java

License:Apache License

public void encode(ChannelHandlerContext ctx, ByteBuf msg, List<Object> out) throws IOException {

    if (!ctx.channel().isOpen()) {
        logger.debug("In " + RpcConstants.SASL_ENCRYPTION_HANDLER + " and channel is not open. "
                + "So releasing msg memory before encryption.");
        msg.release();
        return;
    }

    try {
        // If encryption is enabled then this handler will always get a ByteBuf of type CompositeByteBuf
        assert (msg instanceof CompositeByteBuf);

        final CompositeByteBuf cbb = (CompositeByteBuf) msg;
        final int numComponents = cbb.numComponents();

        // Get all the components inside the Composite ByteBuf for encryption
        for (int currentIndex = 0; currentIndex < numComponents; ++currentIndex) {
            final ByteBuf component = cbb.component(currentIndex);

            // Each component ByteBuf size should not be greater than wrapSizeLimit since ChunkCreationHandler
            // will break the RPC message into chunks of wrapSizeLimit.
            if (component.readableBytes() > wrapSizeLimit) {
                throw new RpcException(
                        String.format("Component Chunk size: %d is greater than the wrapSizeLimit: %d",
                                component.readableBytes(), wrapSizeLimit));
            }

            // Uncomment the code below if msg can contain both direct and heap ByteBufs. Currently Drill only
            // supports direct ByteBufs, so the condition would always be false. If the msgs are always heap
            // ByteBufs, then also remove the allocation of origMsgBuffer from the constructor.
            /*
            if (component.hasArray()) {
                origMsg = component.array();
            } else {
                if (RpcConstants.EXTRA_DEBUGGING) {
                    logger.trace("The input bytebuf is not backed by a byte array so allocating a new one");
                }
            */
            final byte[] origMsg = origMsgBuffer;
            component.getBytes(component.readerIndex(), origMsg, 0, component.readableBytes());
            //} // closes the else branch of the commented-out block

            if (logger.isTraceEnabled()) {
                logger.trace("Trying to encrypt chunk of size: {} with wrapSizeLimit: {}",
                        component.readableBytes(), wrapSizeLimit);
            }

            // Length to encrypt will be the component length, not the origMsg length, since the latter can be greater.
            final byte[] wrappedMsg = saslCodec.wrap(origMsg, 0, component.readableBytes());

            if (logger.isTraceEnabled()) {
                logger.trace("Successfully encrypted message, original size: {} Final Size: {}",
                        component.readableBytes(), wrappedMsg.length);
            }

            // Allocate a direct buffer for the encrypted byte array plus 4 octets for the length of the
            // encrypted message. A direct buffer is preferred because a heap buffer would later be copied by
            // the channel into a temporary direct buffer cached per thread, sized to the largest message sent.
            final ByteBuf encryptedBuf = ctx.alloc()
                    .buffer(wrappedMsg.length + RpcConstants.LENGTH_FIELD_LENGTH);

            // Per SASL RFCs 2222/4422 the first 4 octets must hold the length of the encrypted buffer in
            // network byte order. The SASL framework provided by the JDK doesn't do this by default and leaves
            // it up to the application, whereas the Cyrus SASL implementation of sasl_encode does take care of it.
            lengthOctets.putInt(wrappedMsg.length);
            encryptedBuf.writeBytes(lengthOctets.array());

            // reset the position for re-use in next round
            lengthOctets.rewind();

            // Write the encrypted bytes inside the buffer
            encryptedBuf.writeBytes(wrappedMsg);

            // Update the msg and component reader index
            msg.skipBytes(component.readableBytes());
            component.skipBytes(component.readableBytes());

            // Add the encrypted buffer into the output to send it on wire.
            out.add(encryptedBuf);
        }
    } catch (OutOfMemoryException e) {
        logger.warn("Failure allocating buffer on incoming stream due to memory limits.");
        msg.resetReaderIndex();
        outOfMemoryHandler.handle();
    } catch (IOException e) {
        logger.error(
                "Something went wrong while wrapping the message: {} with MaxRawWrapSize: {}, ChunkMode: {} "
                        + "and error: {}",
                msg, wrapSizeLimit, e.getMessage());
        throw e;
    }
}
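
Worth noting in this example: the outbound buffer is allocated through ctx.alloc() rather than Unpooled, which keeps it in the channel's pooled, direct allocator, and the handler writes the 4-octet length prefix before the wrapped payload because the JDK's SASL wrap() leaves framing to the application.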

From source file:org.apache.flink.runtime.io.network.netty.OutboundEnvelopeEncoder.java

License:Apache License

@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    Envelope env = (Envelope) msg;

    ByteBuf buf = ctx.alloc().directBuffer();

    encode(env, buf);

    if (buf.isReadable()) {
        ctx.write(buf, promise);
    } else {
        buf.release();
        ctx.write(Unpooled.EMPTY_BUFFER, promise);
    }
}
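
This encoder also shows the release discipline that pairs with ctx.alloc(): if nothing was encoded into the allocated buffer, the handler releases it explicitly and writes Unpooled.EMPTY_BUFFER instead, so the promise is still fulfilled without leaking the unused allocation.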

From source file:org.apache.flink.runtime.query.netty.KvStateClientTest.java

License:Apache License

/**
 * Multiple threads concurrently fire queries.
 */
@Test
public void testConcurrentQueries() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    ExecutorService executor = null;
    KvStateClient client = null;
    Channel serverChannel = null;

    final byte[] serializedResult = new byte[1024];
    ThreadLocalRandom.current().nextBytes(serializedResult);

    try {
        int numQueryTasks = 4;
        final int numQueriesPerTask = 1024;

        executor = Executors.newFixedThreadPool(numQueryTasks);

        client = new KvStateClient(1, stats);

        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                ByteBuf buf = (ByteBuf) msg;
                assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
                KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);

                buf.release();

                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(ctx.alloc(),
                        request.getRequestId(), serializedResult);

                ctx.channel().writeAndFlush(response);
            }
        });

        final KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);

        final KvStateClient finalClient = client;
        Callable<List<Future<byte[]>>> queryTask = new Callable<List<Future<byte[]>>>() {
            @Override
            public List<Future<byte[]>> call() throws Exception {
                List<Future<byte[]>> results = new ArrayList<>(numQueriesPerTask);

                for (int i = 0; i < numQueriesPerTask; i++) {
                    results.add(finalClient.getKvState(serverAddress, new KvStateID(), new byte[0]));
                }

                return results;
            }
        };

        // Submit query tasks
        List<java.util.concurrent.Future<List<Future<byte[]>>>> futures = new ArrayList<>();
        for (int i = 0; i < numQueryTasks; i++) {
            futures.add(executor.submit(queryTask));
        }

        // Verify results
        for (java.util.concurrent.Future<List<Future<byte[]>>> future : futures) {
            List<Future<byte[]>> results = future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            for (Future<byte[]> result : results) {
                byte[] actual = Await.result(result, deadline.timeLeft());
                assertArrayEquals(serializedResult, actual);
            }
        }

        int totalQueries = numQueryTasks * numQueriesPerTask;

        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && stats.getNumSuccessful() != totalQueries) {
            Thread.sleep(100);
        }

        assertEquals(totalQueries, stats.getNumRequests());
        assertEquals(totalQueries, stats.getNumSuccessful());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        if (client != null) {
            client.shutDown();
        }

        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}

From source file:org.apache.flink.runtime.query.netty.KvStateServerHandler.java

License:Apache License

@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    KvStateRequest request = null;

    try {
        ByteBuf buf = (ByteBuf) msg;
        KvStateRequestType msgType = KvStateRequestSerializer.deserializeHeader(buf);

        if (msgType == KvStateRequestType.REQUEST) {
            // ------------------------------------------------------------
            // Request
            // ------------------------------------------------------------
            request = KvStateRequestSerializer.deserializeKvStateRequest(buf);

            stats.reportRequest();

            InternalKvState<?> kvState = registry.getKvState(request.getKvStateId());

            if (kvState != null) {
                // Execute actual query async, because it is possibly
                // blocking (e.g. file I/O).
                //
                // A submission failure is not treated as fatal.
                queryExecutor.submit(new AsyncKvStateQueryTask(ctx, request, kvState, stats));
            } else {
                ByteBuf unknown = KvStateRequestSerializer.serializeKvStateRequestFailure(ctx.alloc(),
                        request.getRequestId(), new UnknownKvStateID(request.getKvStateId()));

                ctx.writeAndFlush(unknown);

                stats.reportFailedRequest();
            }
        } else {
            // ------------------------------------------------------------
            // Unexpected
            // ------------------------------------------------------------
            ByteBuf failure = KvStateRequestSerializer.serializeServerFailure(ctx.alloc(),
                    new IllegalArgumentException("Unexpected message type " + msgType
                            + ". KvStateServerHandler expects " + KvStateRequestType.REQUEST + " messages."));

            ctx.writeAndFlush(failure);
        }
    } catch (Throwable t) {
        String stringifiedCause = ExceptionUtils.stringifyException(t);

        ByteBuf err;
        if (request != null) {
            String errMsg = "Failed to handle incoming request with ID " + request.getRequestId()
                    + ". Caused by: " + stringifiedCause;
            err = KvStateRequestSerializer.serializeKvStateRequestFailure(ctx.alloc(), request.getRequestId(),
                    new RuntimeException(errMsg));

            stats.reportFailedRequest();
        } else {
            String errMsg = "Failed to handle incoming message. Caused by: " + stringifiedCause;
            err = KvStateRequestSerializer.serializeServerFailure(ctx.alloc(), new RuntimeException(errMsg));
        }

        ctx.writeAndFlush(err);
    } finally {
        // IMPORTANT: We have to always recycle the incoming buffer.
        // Otherwise we will leak memory out of Netty's buffer pool.
        //
        // If any operation ever holds on to the buffer, it is the
        // responsibility of that operation to retain the buffer and
        // release it later.
        ReferenceCountUtil.release(msg);
    }
}
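
The finally block is the flip side of allocation: the inbound buffer is always recycled via ReferenceCountUtil.release(msg), while the response buffers produced by ctx.alloc() are handed to ctx.writeAndFlush(), which releases them once the write completes.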

From source file:org.apache.flink.runtime.query.netty.KvStateServerHandler.java

License:Apache License

@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
    String stringifiedCause = ExceptionUtils.stringifyException(cause);
    String msg = "Exception in server pipeline. Caused by: " + stringifiedCause;

    ByteBuf err = KvStateRequestSerializer.serializeServerFailure(ctx.alloc(), new RuntimeException(msg));

    ctx.writeAndFlush(err).addListener(ChannelFutureListener.CLOSE);
}

From source file:org.apache.giraph.comm.netty.handler.RequestEncoder.java

License:Apache License

@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    if (!(msg instanceof WritableRequest)) {
        throw new IllegalArgumentException("encode: Got a message of type " + msg.getClass());
    }

    // Encode the request
    if (LOG.isDebugEnabled()) {
        startEncodingNanoseconds = TIME.getNanoseconds();
    }

    ByteBuf buf;
    WritableRequest request = (WritableRequest) msg;
    int requestSize = request.getSerializedSize();
    if (requestSize == WritableRequest.UNKNOWN_SIZE) {
        buf = ctx.alloc().buffer(bufferStartingSize);
    } else {
        requestSize += SIZE_OF_INT + SIZE_OF_BYTE;
        buf = ctx.alloc().buffer(requestSize);
    }
    ByteBufOutputStream output = new ByteBufOutputStream(buf);

    // This will later be filled with the correct size of serialized request
    output.writeInt(0);
    output.writeByte(request.getType().ordinal());
    try {
        request.write(output);
    } catch (IndexOutOfBoundsException e) {
        LOG.error("write: Most likely the size of request was not properly "
                + "specified (this buffer is too small) - see getSerializedSize() " + "in "
                + request.getType().getRequestClass());
        throw new IllegalStateException(e);
    }
    output.flush();
    output.close();

    // Set the correct size at the end
    buf.setInt(0, buf.writerIndex() - SIZE_OF_INT);
    if (LOG.isDebugEnabled()) {
        LOG.debug("write: Client " + request.getClientId() + ", " + "requestId " + request.getRequestId()
                + ", size = " + buf.readableBytes() + ", " + request.getType() + " took "
                + Times.getNanosSince(TIME, startEncodingNanoseconds) + " ns");
    }
    ctx.write(buf, promise);
}
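
A detail worth calling out: the encoder reserves the length slot by writing a placeholder int of 0, serializes the request, then back-patches the real size with buf.setInt(0, buf.writerIndex() - SIZE_OF_INT), avoiding a second serialization pass just to learn the length.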

From source file:org.apache.giraph.comm.netty.handler.RequestServerHandler.java

License:Apache License

@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    if (LOG.isTraceEnabled()) {
        LOG.trace("messageReceived: Got " + msg.getClass());
    }

    WritableRequest request = (WritableRequest) msg;

    // Simulate a closed connection on the first request (if desired)
    if (closeFirstRequest && !ALREADY_CLOSED_FIRST_REQUEST) {
        LOG.info("messageReceived: Simulating closing channel on first " + "request " + request.getRequestId()
                + " from " + request.getClientId());
        setAlreadyClosedFirstRequest();
        ctx.close();
        return;
    }

    // Only execute this request exactly once
    int alreadyDone = 1;
    if (workerRequestReservedMap.reserveRequest(request.getClientId(), request.getRequestId())) {
        if (LOG.isDebugEnabled()) {
            startProcessingNanoseconds = TIME.getNanoseconds();
        }
        processRequest((R) request);
        if (LOG.isDebugEnabled()) {
            LOG.debug("messageReceived: Processing client " + request.getClientId() + ", " + "requestId "
                    + request.getRequestId() + ", " + request.getType() + " took "
                    + Times.getNanosSince(TIME, startProcessingNanoseconds) + " ns");
        }
        alreadyDone = 0;
    } else {
        LOG.info("messageReceived: Request id " + request.getRequestId() + " from client "
                + request.getClientId() + " was already processed, " + "not processing again.");
    }

    // Send the response with the request id
    ByteBuf buffer = ctx.alloc().buffer(RESPONSE_BYTES);
    buffer.writeInt(myTaskInfo.getTaskId());
    buffer.writeLong(request.getRequestId());
    buffer.writeByte(alreadyDone);

    ctx.write(buffer);
}
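
Here the response layout is fixed, one writeInt, one writeLong, and one writeByte, so ctx.alloc().buffer(RESPONSE_BYTES) can size the buffer exactly, presumably 13 bytes (4 + 8 + 1), with no resizing on write.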

From source file:org.apache.giraph.comm.netty.handler.ResponseEncoder.java

License:Apache License

@Override
public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
    if (LOG.isDebugEnabled()) {
        LOG.debug("write(" + ctx + "," + msg);
    }/*  w w  w .java 2  s .  co m*/

    if (!(msg instanceof WritableRequest)) {
        throw new IllegalArgumentException("encode: cannot encode message of type " + msg.getClass()
                + " since it is not an instance of an implementation of WritableRequest.");
    }
    @SuppressWarnings("unchecked")
    WritableRequest writableRequest = (WritableRequest) msg;

    ByteBuf buf = ctx.alloc().buffer(10);
    ByteBufOutputStream output = new ByteBufOutputStream(buf);

    if (LOG.isDebugEnabled()) {
        LOG.debug("encode: Encoding a message of type " + msg.getClass());
    }

    // Space is reserved now to be filled later by the serialize request size
    output.writeInt(0);
    // write type of object.
    output.writeByte(writableRequest.getType().ordinal());
    // write the object itself.
    writableRequest.write(output);

    output.flush();
    output.close();

    // Set the correct size at the end.
    buf.setInt(0, buf.writerIndex() - SIZE_OF_INT);

    if (LOG.isDebugEnabled()) {
        LOG.debug("encode: Encoding a message of type " + msg.getClass());
    }
    /*if[HADOOP_NON_SECURE]
    ctx.write(buf, promise);
    else[HADOOP_NON_SECURE]*/
    if (writableRequest.getType() == RequestType.SASL_COMPLETE_REQUEST) {
        // We are sending to the client a SASL_COMPLETE response (created by
        // the SaslServer handler). The SaslServer handler has removed itself
        // from the pipeline after creating this response, and now it's time for
        // the ResponseEncoder to remove itself also.
        if (LOG.isDebugEnabled()) {
            LOG.debug("encode: Removing RequestEncoder handler: no longer needed," + " since client: "
                    + ctx.channel().remoteAddress() + " has " + "completed authenticating.");
        }
        ctx.pipeline().remove(this);
    }
    ctx.write(buf, promise);
    /*end[HADOOP_NON_SECURE]*/
}

From source file:org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.java

License:Apache License

private void writeRequest(ChannelHandlerContext ctx, Call call, ChannelPromise promise) throws IOException {
    id2Call.put(call.id, call);
    ByteBuf cellBlock = cellBlockBuilder.buildCellBlock(codec, compressor, call.cells, ctx.alloc());
    CellBlockMeta cellBlockMeta;
    if (cellBlock != null) {
        CellBlockMeta.Builder cellBlockMetaBuilder = CellBlockMeta.newBuilder();
        cellBlockMetaBuilder.setLength(cellBlock.writerIndex());
        cellBlockMeta = cellBlockMetaBuilder.build();
    } else {
        cellBlockMeta = null;
    }
    RequestHeader requestHeader = IPCUtil.buildRequestHeader(call, cellBlockMeta);
    int sizeWithoutCellBlock = IPCUtil.getTotalSizeWhenWrittenDelimited(requestHeader, call.param);
    int totalSize = cellBlock != null ? sizeWithoutCellBlock + cellBlock.writerIndex() : sizeWithoutCellBlock;
    ByteBuf buf = ctx.alloc().buffer(sizeWithoutCellBlock + 4);
    buf.writeInt(totalSize);
    ByteBufOutputStream bbos = new ByteBufOutputStream(buf);
    requestHeader.writeDelimitedTo(bbos);
    if (call.param != null) {
        call.param.writeDelimitedTo(bbos);
    }
    if (cellBlock != null) {
        ChannelPromise withoutCellBlockPromise = ctx.newPromise();
        ctx.write(buf, withoutCellBlockPromise);
        ChannelPromise cellBlockPromise = ctx.newPromise();
        ctx.write(cellBlock, cellBlockPromise);
        PromiseCombiner combiner = new PromiseCombiner();
        combiner.addAll(withoutCellBlockPromise, cellBlockPromise);
        combiner.finish(promise);
    } else {
        ctx.write(buf, promise);
    }
}
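
When a request spans two buffers (the header buffer and the cell block), the handler issues two ctx.write() calls with separate promises and merges them through a PromiseCombiner, so the caller's promise completes only after both buffers have been written.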

From source file:org.apache.hadoop.hbase.security.CryptoAESWrapHandler.java

License:Apache License

@Override
public void flush(ChannelHandlerContext ctx) throws Exception {
    if (queue.isEmpty()) {
        return;
    }
    ByteBuf buf = null;
    try {
        ChannelPromise promise = ctx.newPromise();
        int readableBytes = queue.readableBytes();
        buf = queue.remove(readableBytes, promise);
        byte[] bytes = new byte[readableBytes];
        buf.readBytes(bytes);
        byte[] wrapperBytes = cryptoAES.wrap(bytes, 0, bytes.length);
        ChannelPromise lenPromise = ctx.newPromise();
        ctx.write(ctx.alloc().buffer(4).writeInt(wrapperBytes.length), lenPromise);
        ChannelPromise contentPromise = ctx.newPromise();
        ctx.write(Unpooled.wrappedBuffer(wrapperBytes), contentPromise);
        PromiseCombiner combiner = new PromiseCombiner();
        combiner.addAll(lenPromise, contentPromise);
        combiner.finish(promise);
        ctx.flush();
    } finally {
        if (buf != null) {
            ReferenceCountUtil.safeRelease(buf);
        }
    }
}
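
The contrast here is instructive: the 4-byte length prefix is taken from the pooled allocator via ctx.alloc().buffer(4), while the wrapped payload is sent zero-copy with Unpooled.wrappedBuffer(wrapperBytes); as in the HBase example above, a PromiseCombiner ties the two writes back to a single promise.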