Example usage for io.netty.channel ChannelInboundHandlerAdapter: the ChannelInboundHandlerAdapter() constructor

Introduction

This page collects usage examples of the io.netty.channel.ChannelInboundHandlerAdapter constructor, ChannelInboundHandlerAdapter().

Prototype

ChannelInboundHandlerAdapter()
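
For orientation, here is a minimal, self-contained sketch (not taken from any of the source files below; class and message names are illustrative) that creates an anonymous subclass through the no-argument constructor and drives it with an EmbeddedChannel:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.embedded.EmbeddedChannel;

public class InboundAdapterSketch {
    public static void main(String[] args) {
        // Anonymous subclass via the no-argument constructor; only channelRead is
        // overridden, every other callback keeps its pass-through default.
        EmbeddedChannel channel = new EmbeddedChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                System.out.println("read: " + msg);
                ctx.fireChannelRead(msg); // forward to the next handler
            }
        });

        channel.writeInbound("ping");             // drive the pipeline without a real socket
        String forwarded = channel.readInbound();  // "ping", forwarded unchanged
        System.out.println(forwarded);
        channel.finish();
    }
}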

Usage

From source file:net.epsilony.utils.codec.modbus.SimpModbusMasterChannelInitializer.java

License:Open Source License

@Override
protected void initChannel(SocketChannel ch) throws Exception {
    channel = ch;
    responseEventBus = new ResponseEventBus();
    responseEventBus.getEventBus().register(this);
    ch.pipeline().addLast(new ModbusMasterCodec(transectionId -> {
        Entry entry = requestRecorder.get(transectionId);
        if (null == entry || entry.trieved) {
            return null;
        }
        entry.trieved = true;
        return entry.request;
    }), responseEventBus, new ChannelInboundHandlerAdapter() {

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            super.channelInactive(ctx);
        }

    });
}

From source file:net.openhft.performance.tests.third.party.frameworks.netty.NettyEchoServer.java

License:Apache License

public void run() throws InterruptedException {
    @NotNull
    EventLoopGroup bossGroup = new NioEventLoopGroup(); // (1)
    @NotNull
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        @NotNull
        ServerBootstrap b = new ServerBootstrap(); // (2)
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) // (3)
                .childHandler(new ChannelInitializer<SocketChannel>() { // (4)
                    @Override
                    public void initChannel(@NotNull SocketChannel ch) {
                        ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                            // echo server
                            @Override
                            public void channelRead(@NotNull ChannelHandlerContext ctx, Object msg) { // (2)
                                ctx.write(msg); // (1)
                                ctx.flush(); // (2)
                            }

                            @Override
                            public void exceptionCaught(@NotNull ChannelHandlerContext ctx,
                                    @NotNull Throwable cause) { // (4)
                                // Close the connection when an exception is raised.
                                cause.printStackTrace();
                                ctx.close();
                            }
                        });
                    }
                }).option(ChannelOption.SO_BACKLOG, 128) // (5)
                .childOption(ChannelOption.SO_KEEPALIVE, true); // (6)

        // Bind and start to accept incoming connections.
        ChannelFuture f = b.bind(port).sync(); // (7)

        // Wait until the server socket is closed.
        // In this example, this does not happen, but you can do that to gracefully
        // shut down your server.
        f.channel().closeFuture().sync();
    } finally {
        workerGroup.shutdownGracefully();
        bossGroup.shutdownGracefully();
    }
}
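
For completeness, a minimal client sketch for the echo server above (not part of the original file; the method name runEchoClient and the assumption that port matches the server's port are illustrative):

public void runEchoClient(int port) throws InterruptedException {
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioSocketChannel.class).handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel ch) {
                ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) {
                        // Send one message; the echo server writes it straight back.
                        ctx.writeAndFlush(Unpooled.copiedBuffer("hello", CharsetUtil.UTF_8));
                    }

                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) {
                        ByteBuf in = (ByteBuf) msg;
                        System.out.println("echoed: " + in.toString(CharsetUtil.UTF_8));
                        in.release();
                        ctx.close(); // one round trip is enough for this sketch
                    }
                });
            }
        });

        // Connect, wait for the echoed reply, then let the channel close.
        b.connect("localhost", port).sync().channel().closeFuture().sync();
    } finally {
        group.shutdownGracefully();
    }
}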

From source file:org.apache.activemq.artemis.tests.unit.core.remoting.impl.netty.NettyConnectionTest.java

License:Apache License

private static EmbeddedChannel createChannel() {
    return new EmbeddedChannel(new ChannelInboundHandlerAdapter());
}
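
The bare adapter above adds no behaviour of its own: every inbound message is forwarded along the pipeline, so the EmbeddedChannel acts as a simple pass-through. A short sketch (illustrative only, not from the test) of how such a channel reacts:

EmbeddedChannel channel = createChannel();
channel.writeInbound("some message");      // enters the pipeline head
Object forwarded = channel.readInbound();  // the adapter forwarded it unchanged
assert "some message".equals(forwarded);
channel.finish();                          // closes the channel; true if data was still pending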

From source file:org.apache.flink.runtime.io.network.netty.ClientTransportErrorHandlingTest.java

License:Apache License

/**
 * Verifies that unexpected remote closes are reported as an instance of
 * {@link RemoteTransportException}.
 */
@Test
public void testExceptionOnRemoteClose() throws Exception {

    NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[] {
                    // Close on read
                    new ChannelInboundHandlerAdapter() {
                        @Override
                        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {

                            ctx.channel().close();
                        }
                    } };
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(mock(ResultPartitionProvider.class),
                    mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };

    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());

    Channel ch = connect(serverAndClient);

    PartitionRequestClientHandler handler = getClientHandler(ch);

    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(),
            createRemoteInputChannel() };

    final CountDownLatch sync = new CountDownLatch(rich.length);

    Answer<Void> countDownLatch = new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    };

    for (RemoteInputChannel r : rich) {
        doAnswer(countDownLatch).when(r).onError(any(Throwable.class));
        handler.addInputChannel(r);
    }

    // Write something to trigger close by server
    ch.writeAndFlush(Unpooled.buffer().writerIndex(16));

    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                + " ms to be notified about remote connection close.");
    }

    // All the registered channels should be notified.
    for (RemoteInputChannel r : rich) {
        verify(r).onError(isA(RemoteTransportException.class));
    }

    shutdown(serverAndClient);
}

From source file:org.apache.flink.runtime.io.network.netty.OutboundConnectionQueueTest.java

License:Apache License

private void initTest(boolean autoTriggerWrite) {
    controller = new TestControlHandler(autoTriggerWrite);
    verifier = new TestVerificationHandler();

    channel = Mockito.spy(new EmbeddedChannel(new ChannelInboundHandlerAdapter() {
        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
            exception = cause;
            super.exceptionCaught(ctx, cause);
        }
    }));

    connectionManager = Mockito.mock(NetworkConnectionManager.class);

    receiver = Mockito.mock(RemoteReceiver.class);

    queue = new OutboundConnectionQueue(channel, receiver, connectionManager, 0);

    channel.pipeline().addFirst("Test Control Handler", controller);
    channel.pipeline().addFirst("Test Verification Handler", verifier);

    exception = null;

    // The testing pipeline looks as follows:
    // - Test Verification Handler [OUT]
    // - Test Control Handler [IN]
    // - Idle State Handler [IN/OUT] [added by OutboundConnectionQueue]
    // - Outbound queue (SUT) [IN] [added by OutboundConnectionQueue]
    // - Exception setter [IN] [EmbeddedChannel constructor]
}

From source file:org.apache.flink.runtime.io.network.netty.OutboundConnectionQueueTest.java

License:Apache License

/**
 * Verifies that concurrent enqueue and close events are handled
 * correctly.
 */
private void doTestConcurrentEnqueueAndClose(final int numProducers, final int numEnvelopesPerProducer,
        final int minSleepTimeMs, final int maxSleepTimeMs) throws Exception {

    final InetAddress bindHost = InetAddress.getLocalHost();
    final int bindPort = 20000;

    // Testing concurrent enqueue and close requires real TCP channels,
    // because Netty's testing EmbeddedChannel does not implement the
    // same threading model as the NioEventLoopGroup (for example there
    // is no difference between being IN and OUTSIDE of the event loop
    // thread).

    final ServerBootstrap in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(1)).channel(NioServerSocketChannel.class).localAddress(bindHost, bindPort)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelInboundHandlerAdapter());
                }
            });

    final Bootstrap out = new Bootstrap();
    out.group(new NioEventLoopGroup(1)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelOutboundHandlerAdapter());
                }
            }).option(ChannelOption.TCP_NODELAY, false).option(ChannelOption.SO_KEEPALIVE, true);

    in.bind().sync();

    // --------------------------------------------------------------------

    // The testing pipeline looks as follows:
    // - Test Verification Handler [OUT]
    // - Test Control Handler [IN]
    // - Idle State Handler [IN/OUT] [added by OutboundConnectionQueue]
    // - Outbound queue (SUT) [IN] [added by OutboundConnectionQueue]

    channel = out.connect(bindHost, bindPort).sync().channel();

    queue = new OutboundConnectionQueue(channel, receiver, connectionManager, 0);

    controller = new TestControlHandler(true);
    verifier = new TestVerificationHandler();

    channel.pipeline().addFirst("Test Control Handler", controller);
    channel.pipeline().addFirst("Test Verification Handler", verifier);

    // --------------------------------------------------------------------

    final Random rand = new Random(RANDOM_SEED);

    // Every producer works on their local reference of the queue and only
    // updates it to the new channel when enqueue returns false, which
    // should only happen if the channel has been closed.
    final ConcurrentMap<ChannelID, OutboundConnectionQueue> producerQueues = new ConcurrentHashMap<ChannelID, OutboundConnectionQueue>();

    final ChannelID[] ids = new ChannelID[numProducers];

    for (int i = 0; i < numProducers; i++) {
        ids[i] = new ChannelID();

        producerQueues.put(ids[i], queue);
    }

    final CountDownLatch receivedAllEnvelopesLatch = verifier.waitForEnvelopes(numEnvelopesPerProducer - 1,
            ids);

    final List<Channel> closedChannels = new ArrayList<Channel>();

    // --------------------------------------------------------------------

    final Runnable closer = new Runnable() {
        @Override
        public void run() {
            while (receivedAllEnvelopesLatch.getCount() != 0) {
                try {
                    controller.fireIdle();

                    // Test two idle events arriving "closely"
                    // after each other
                    if (rand.nextBoolean()) {
                        controller.fireIdle();
                    }

                    Thread.sleep(minSleepTimeMs / 2);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };

    final Runnable[] producers = new Runnable[numProducers];

    for (int i = 0; i < numProducers; i++) {
        final int index = i;

        producers[i] = new Runnable() {
            @Override
            public void run() {
                final JobID jid = new JobID();
                final ChannelID cid = ids[index];

                for (int j = 0; j < numEnvelopesPerProducer; j++) {
                    OutboundConnectionQueue localQueue = producerQueues.get(cid);

                    try {
                        // This code path is handled by the NetworkConnectionManager
                        // in production to enqueue the envelope either to the current
                        // channel or a new one if it was closed.
                        while (!localQueue.enqueue(new Envelope(j, jid, cid))) {
                            synchronized (lock) {
                                if (localQueue == queue) {
                                    closedChannels.add(channel);

                                    channel = out.connect(bindHost, bindPort).sync().channel();

                                    queue = new OutboundConnectionQueue(channel, receiver, connectionManager,
                                            0);

                                    channel.pipeline().addFirst("Test Control Handler", controller);
                                    channel.pipeline().addFirst("Test Verification Handler", verifier);
                                }
                            }

                            producerQueues.put(cid, queue);
                            localQueue = queue;
                        }

                        int sleepTime = rand.nextInt((maxSleepTimeMs - minSleepTimeMs) + 1) + minSleepTimeMs;
                        Thread.sleep(sleepTime);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        };
    }

    for (int i = 0; i < numProducers; i++) {
        new Thread(producers[i], "Producer " + i).start();
    }

    new Thread(closer, "Closer").start();

    // --------------------------------------------------------------------

    while (receivedAllEnvelopesLatch.getCount() != 0) {
        receivedAllEnvelopesLatch.await();
    }

    // Final close, if the last close didn't make it.
    synchronized (lock) {
        if (channel != null) {
            controller.fireIdle();
        }
    }

    verifier.waitForClose();

    // If the producers do not sleep after each envelope, the close
    // should not make it through and no channel should have been
    // added to the list of closed channels
    if (minSleepTimeMs == 0 && maxSleepTimeMs == 0) {
        Assert.assertEquals(0, closedChannels.size());
    }

    for (Channel ch : closedChannels) {
        Assert.assertFalse(ch.isOpen());
    }

    System.out.println(closedChannels.size() + " channels were closed during execution.");

    out.group().shutdownGracefully().sync();
    in.group().shutdownGracefully().sync();
}

From source file:org.apache.flink.runtime.io.network.netty.ServerTransportErrorHandlingTest.java

License:Apache License

/**
 * Verifies remote closes trigger the release of all resources.
 */
@Test
public void testRemoteClose() throws Exception {
    final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);

    final CountDownLatch sync = new CountDownLatch(1);

    final ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);

    when(partitionManager.createSubpartitionView(any(ResultPartitionID.class), anyInt(),
            any(BufferProvider.class))).thenReturn(new InfiniteSubpartitionView(outboundBuffers, sync));

    NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new PartitionRequestProtocol(partitionManager, mock(TaskEventDispatcher.class),
                    mock(NetworkBufferPool.class)).getServerChannelHandlers();
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[] { new NettyMessageEncoder(),
                    // Close on read
                    new ChannelInboundHandlerAdapter() {
                        @Override
                        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {

                            ctx.channel().close();
                        }
                    } };
        }
    };

    NettyServerAndClient serverAndClient = null;

    try {
        serverAndClient = initServerAndClient(protocol, createConfig());

        Channel ch = connect(serverAndClient);

        // Write something to trigger close by server
        ch.writeAndFlush(new PartitionRequest(new ResultPartitionID(), 0, new InputChannelID()));

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                    + " ms to be notified about released partition.");
        }
    } finally {
        shutdown(serverAndClient);
    }
}

From source file:org.apache.flink.runtime.query.netty.KvStateClientTest.java

License:Apache License

/**
 * Tests simple queries, of which half succeed and half fail.
 */
@Test
public void testSimpleRequests() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    KvStateClient client = null;
    Channel serverChannel = null;

    try {
        client = new KvStateClient(1, stats);

        // Random result
        final byte[] expected = new byte[1024];
        ThreadLocalRandom.current().nextBytes(expected);

        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();

        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.add((ByteBuf) msg);
            }
        });

        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);

        List<Future<byte[]>> futures = new ArrayList<>();

        int numQueries = 1024;

        for (int i = 0; i < numQueries; i++) {
            futures.add(client.getKvState(serverAddress, new KvStateID(), new byte[0]));
        }

        // Respond to messages
        Exception testException = new RuntimeException("Expected test Exception");

        for (int i = 0; i < numQueries; i++) {
            ByteBuf buf = received.poll(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            assertNotNull("Receive timed out", buf);

            Channel ch = channel.get();
            assertNotNull("Channel not active", ch);

            assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
            KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);

            buf.release();

            if (i % 2 == 0) {
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(serverChannel.alloc(),
                        request.getRequestId(), expected);

                ch.writeAndFlush(response);
            } else {
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestFailure(
                        serverChannel.alloc(), request.getRequestId(), testException);

                ch.writeAndFlush(response);
            }
        }

        for (int i = 0; i < numQueries; i++) {
            if (i % 2 == 0) {
                byte[] serializedResult = Await.result(futures.get(i), deadline.timeLeft());
                assertArrayEquals(expected, serializedResult);
            } else {
                try {
                    Await.result(futures.get(i), deadline.timeLeft());
                    fail("Did not throw expected Exception");
                } catch (RuntimeException ignored) {
                    // Expected
                }
            }
        }

        assertEquals(numQueries, stats.getNumRequests());
        int expectedRequests = numQueries / 2;

        // Counts can take some time to propagate
        while (deadline.hasTimeLeft()
                && (stats.getNumSuccessful() != expectedRequests || stats.getNumFailed() != expectedRequests)) {
            Thread.sleep(100);
        }

        assertEquals(expectedRequests, stats.getNumSuccessful());
        assertEquals(expectedRequests, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}

From source file:org.apache.flink.runtime.query.netty.KvStateClientTest.java

License:Apache License

/**
 * Multiple threads concurrently fire queries.
 */
@Test
public void testConcurrentQueries() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    ExecutorService executor = null;
    KvStateClient client = null;
    Channel serverChannel = null;

    final byte[] serializedResult = new byte[1024];
    ThreadLocalRandom.current().nextBytes(serializedResult);

    try {
        int numQueryTasks = 4;
        final int numQueriesPerTask = 1024;

        executor = Executors.newFixedThreadPool(numQueryTasks);

        client = new KvStateClient(1, stats);

        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                ByteBuf buf = (ByteBuf) msg;
                assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
                KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);

                buf.release();

                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(ctx.alloc(),
                        request.getRequestId(), serializedResult);

                ctx.channel().writeAndFlush(response);
            }
        });

        final KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);

        final KvStateClient finalClient = client;
        Callable<List<Future<byte[]>>> queryTask = new Callable<List<Future<byte[]>>>() {
            @Override
            public List<Future<byte[]>> call() throws Exception {
                List<Future<byte[]>> results = new ArrayList<>(numQueriesPerTask);

                for (int i = 0; i < numQueriesPerTask; i++) {
                    results.add(finalClient.getKvState(serverAddress, new KvStateID(), new byte[0]));
                }

                return results;
            }
        };

        // Submit query tasks
        List<java.util.concurrent.Future<List<Future<byte[]>>>> futures = new ArrayList<>();
        for (int i = 0; i < numQueryTasks; i++) {
            futures.add(executor.submit(queryTask));
        }

        // Verify results
        for (java.util.concurrent.Future<List<Future<byte[]>>> future : futures) {
            List<Future<byte[]>> results = future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            for (Future<byte[]> result : results) {
                byte[] actual = Await.result(result, deadline.timeLeft());
                assertArrayEquals(serializedResult, actual);
            }
        }

        int totalQueries = numQueryTasks * numQueriesPerTask;

        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && stats.getNumSuccessful() != totalQueries) {
            Thread.sleep(100);
        }

        assertEquals(totalQueries, stats.getNumRequests());
        assertEquals(totalQueries, stats.getNumSuccessful());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        if (client != null) {
            client.shutDown();
        }

        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}

From source file:org.apache.flink.runtime.query.netty.KvStateClientTest.java

License:Apache License

/**
 * Tests that a server failure closes the connection and removes it from
 * the established connections.
 */
@Test
public void testFailureClosesChannel() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    KvStateClient client = null;
    Channel serverChannel = null;

    try {
        client = new KvStateClient(1, stats);

        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();

        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.add((ByteBuf) msg);
            }
        });

        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);

        // Requests
        List<Future<byte[]>> futures = new ArrayList<>();
        futures.add(client.getKvState(serverAddress, new KvStateID(), new byte[0]));
        futures.add(client.getKvState(serverAddress, new KvStateID(), new byte[0]));

        ByteBuf buf = received.poll(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        assertNotNull("Receive timed out", buf);
        buf.release();

        buf = received.poll(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        assertNotNull("Receive timed out", buf);
        buf.release();

        assertEquals(1, stats.getNumConnections());

        Channel ch = channel.get();
        assertNotNull("Channel not active", ch);

        // Respond with failure
        ch.writeAndFlush(KvStateRequestSerializer.serializeServerFailure(serverChannel.alloc(),
                new RuntimeException("Expected test server failure")));

        try {
            Await.result(futures.remove(0), deadline.timeLeft());
            fail("Did not throw expected server failure");
        } catch (RuntimeException ignored) {
            // Expected
        }

        try {
            Await.result(futures.remove(0), deadline.timeLeft());
            fail("Did not throw expected server failure");
        } catch (RuntimeException ignored) {
            // Expected
        }

        assertEquals(0, stats.getNumConnections());

        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != 0 || stats.getNumFailed() != 2)) {
            Thread.sleep(100);
        }

        assertEquals(2, stats.getNumRequests());
        assertEquals(0, stats.getNumSuccessful());
        assertEquals(2, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}