Example usage for io.netty.channel.ChannelInboundHandlerAdapter: the ChannelInboundHandlerAdapter() constructor

Introduction

On this page you can find example usages of the io.netty.channel.ChannelInboundHandlerAdapter constructor, collected from open-source projects.

Prototype

ChannelInboundHandlerAdapter()
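The class has only the implicit no-argument constructor, so it is almost always instantiated as an anonymous subclass that overrides just the channel callbacks of interest, as the examples below show. A minimal sketch of this pattern (the class name, handler name, and log messages are illustrative, not taken from any of the examples):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;

public class LoggingHandlerExample {

    // Builds the most common form of usage: an anonymous subclass of
    // ChannelInboundHandlerAdapter overriding only the callbacks of interest.
    public static ChannelInboundHandlerAdapter newLoggingHandler() {
        return new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                System.out.println("Channel active: " + ctx.channel());
                super.channelActive(ctx); // the adapter's default forwards the event downstream
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                System.out.println("Read: " + msg);
                ctx.fireChannelRead(msg); // pass the message on to the next inbound handler
            }
        };
    }
}

Such a handler would typically be installed with something like ch.pipeline().addLast("logger", LoggingHandlerExample.newLoggingHandler()), mirroring the addLast calls in the examples that follow.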

Usage

From source file: org.apache.flink.runtime.query.netty.KvStateClientTest.java

License: Apache License

/**
 * Tests that closing the server channel closes the connection and removes it
 * from the established connections.
 */
@Test
public void testServerClosesChannel() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();

    KvStateClient client = null;
    Channel serverChannel = null;

    try {
        client = new KvStateClient(1, stats);

        final AtomicBoolean received = new AtomicBoolean();
        final AtomicReference<Channel> channel = new AtomicReference<>();

        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.set(true);
            }
        });

        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);

        // Requests
        Future<byte[]> future = client.getKvState(serverAddress, new KvStateID(), new byte[0]);

        while (!received.get() && deadline.hasTimeLeft()) {
            Thread.sleep(50);
        }
        assertTrue("Receive timed out", received.get());

        assertEquals(1, stats.getNumConnections());

        channel.get().close().await(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        try {
            Await.result(future, deadline.timeLeft());
            fail("Did not throw expected server failure");
        } catch (ClosedChannelException ignored) {
            // Expected
        }

        assertEquals(0, stats.getNumConnections());

        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && (stats.getNumSuccessful() != 0 || stats.getNumFailed() != 1)) {
            Thread.sleep(100);
        }

        assertEquals(1, stats.getNumRequests());
        assertEquals(0, stats.getNumSuccessful());
        assertEquals(1, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }

        if (serverChannel != null) {
            serverChannel.close();
        }

        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}

From source file: org.apache.flink.runtime.query.netty.KvStateServerTest.java

License: Apache License

/**
 * Tests a simple successful query via a SocketChannel.
 */
@Test
public void testSimpleRequest() throws Exception {
    KvStateServer server = null;
    Bootstrap bootstrap = null;
    try {
        KvStateRegistry registry = new KvStateRegistry();
        KvStateRequestStats stats = new AtomicKvStateRequestStats();

        server = new KvStateServer(InetAddress.getLocalHost(), 0, 1, 1, registry, stats);
        server.start();

        KvStateServerAddress serverAddress = server.getAddress();
        int numKeyGroups = 1;
        AbstractStateBackend abstractBackend = new MemoryStateBackend();
        DummyEnvironment dummyEnv = new DummyEnvironment("test", 1, 0);
        dummyEnv.setKvStateRegistry(registry);
        AbstractKeyedStateBackend<Integer> backend = abstractBackend.createKeyedStateBackend(dummyEnv,
                new JobID(), "test_op", IntSerializer.INSTANCE, numKeyGroups, new KeyGroupRange(0, 0),
                registry.createTaskRegistry(new JobID(), new JobVertexID()));

        final KvStateServerHandlerTest.TestRegistryListener registryListener = new KvStateServerHandlerTest.TestRegistryListener();

        registry.registerListener(registryListener);

        ValueStateDescriptor<Integer> desc = new ValueStateDescriptor<>("any", IntSerializer.INSTANCE);
        desc.setQueryable("vanilla");

        ValueState<Integer> state = backend.getPartitionedState(VoidNamespace.INSTANCE,
                VoidNamespaceSerializer.INSTANCE, desc);

        // Update KvState
        int expectedValue = 712828289;

        int key = 99812822;
        backend.setCurrentKey(key);
        state.update(expectedValue);

        // Request
        byte[] serializedKeyAndNamespace = KvStateRequestSerializer.serializeKeyAndNamespace(key,
                IntSerializer.INSTANCE, VoidNamespace.INSTANCE, VoidNamespaceSerializer.INSTANCE);

        // Connect to the server
        final BlockingQueue<ByteBuf> responses = new LinkedBlockingQueue<>();
        bootstrap = createBootstrap(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4),
                new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                        responses.add((ByteBuf) msg);
                    }
                });

        Channel channel = bootstrap.connect(serverAddress.getHost(), serverAddress.getPort()).sync().channel();

        long requestId = Integer.MAX_VALUE + 182828L;

        assertTrue(registryListener.registrationName.equals("vanilla"));
        ByteBuf request = KvStateRequestSerializer.serializeKvStateRequest(channel.alloc(), requestId,
                registryListener.kvStateId, serializedKeyAndNamespace);

        channel.writeAndFlush(request);

        ByteBuf buf = responses.poll(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);

        assertEquals(KvStateRequestType.REQUEST_RESULT, KvStateRequestSerializer.deserializeHeader(buf));
        KvStateRequestResult response = KvStateRequestSerializer.deserializeKvStateRequestResult(buf);

        assertEquals(requestId, response.getRequestId());
        int actualValue = KvStateRequestSerializer.deserializeValue(response.getSerializedResult(),
                IntSerializer.INSTANCE);
        assertEquals(expectedValue, actualValue);
    } finally {
        if (server != null) {
            server.shutDown();
        }

        if (bootstrap != null) {
            EventLoopGroup group = bootstrap.group();
            if (group != null) {
                group.shutdownGracefully();
            }
        }
    }
}

From source file: org.apache.giraph.comm.netty.NettyServer.java

License: Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4,
                    receiveBufferSize, receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler", new AuthorizeServerHandler(),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debuggability from the port number alone.
    // Round up the max number of workers to the next power of 10 and use
    // it as a constant to increase the port number with.
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt " + "is true, port "
                        + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to " + maxPoolSize
                + " threads on bind attempt " + bindAttempts + " with sendBufferSize = " + sendBufferSize
                + " receiveBufferSize = " + receiveBufferSize);
    }
}

From source file: org.apache.hive.spark.client.rpc.Rpc.java

License: Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host,
        int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, rpcConf.getServerConnectTimeoutMs(),
            TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}

From source file: org.apache.hive.spark.client.rpc.Rpc.java

License: Apache License

private Rpc(RpcConfiguration config, Channel channel, EventExecutorGroup egroup) {
    Preconditions.checkArgument(channel != null);
    Preconditions.checkArgument(egroup != null);
    this.config = config;
    this.channel = channel;
    this.channelLock = new Object();
    this.dispatcher = null;
    this.egroup = egroup;
    this.listeners = Lists.newLinkedList();
    this.rpcClosed = new AtomicBoolean();
    this.rpcId = new AtomicLong();

    // Note: this does not work for embedded channels.
    channel.pipeline().addLast("monitor", new ChannelInboundHandlerAdapter() {
        @Override
        public void channelInactive(ChannelHandlerContext ctx) {
            close();
        }
    });
}

From source file: org.apache.spark.sql.hive.thriftserver.rsc.Rpc.java

License: Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(final RSCConf config, final EventLoopGroup eloop, String host, int port,
        final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    int connectTimeoutMs = (int) config.getTimeAsMs(RSCConf.Entry.RPC_CLIENT_CONNECT_TIMEOUT);

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask,
            config.getTimeAsMs(RSCConf.Entry.RPC_CLIENT_HANDSHAKE_TIMEOUT), TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(config, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(config, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}

From source file: org.betawares.jorre.Server.java

License: Open Source License

/**
 * Starts the Server with the specified {@link Connection} settings.
 *
 * @param connection  a {@link Connection} instance specifying the connection settings
 * 
 * @throws Exception  thrown if there is an error starting the server
 */
public void start(Connection connection) throws Exception {

    SslContext sslCtx;

    if (connection.isSSL()) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build();
    } else {
        sslCtx = null;
    }

    bossGroup = new NioEventLoopGroup();
    workerGroup = new NioEventLoopGroup();

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .handler(new LoggingHandler(LogLevel.DEBUG)).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) {
                    if (sslCtx != null) {
                        ch.pipeline().addLast(sslCtx.newHandler(ch.alloc()));
                    }
                    ch.pipeline()
                            .addLast(new ObjectDecoder(10 * 1024 * 1024, ClassResolvers.cacheDisabled(null)));
                    ch.pipeline().addLast(encoder);
                    ch.pipeline().addLast("idleStateHandler", new IdleStateHandler(connection.getIdleTimeout(),
                            connection.getIdlePingTime(), 0, TimeUnit.MILLISECONDS));
                    ch.pipeline().addLast(handlersExecutor, "heartbeatHandler",
                            new ServerHeartbeatHandler(Server.this));
                    ch.pipeline().addLast("pingMessageHandler", pingMessageHandler);
                    ch.pipeline().addLast("pongMessageHandler", pongMessageHandler);

                    ch.pipeline().addLast("connectionHandler", new ChannelInboundHandlerAdapter() {
                        @Override
                        public void channelActive(ChannelHandlerContext ctx) throws Exception {
                            clients.add(ctx.channel());
                            ctx.pipeline().remove(this);
                            super.channelActive(ctx);
                        }
                    });
                    ch.pipeline().addLast(handlersExecutor, "serverMessageHandler", serverRequestHandler);
                    ch.pipeline().addLast("exceptionHandler", exceptionHandler);
                }
            });
    bootstrap.bind(connection.getPort()).sync();

}

From source file: org.beykery.wormhole.WormRegistry.java

License: Apache License

public WormRegistry(int port, int workerSize) {
    if (port <= 0 || workerSize <= 0) {
        throw new IllegalArgumentException("port and workerSize must be positive");
    }
    services = new HashMap<>();
    workers = new NioEventLoopGroup(workerSize);
    final NioEventLoopGroup nioEventLoopGroup = new NioEventLoopGroup();
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.channel(NioDatagramChannel.class);
    bootstrap.group(nioEventLoopGroup);
    bootstrap.handler(new ChannelInitializer<NioDatagramChannel>() {

        @Override
        protected void initChannel(NioDatagramChannel ch) throws Exception {
            ChannelPipeline cp = ch.pipeline();
            cp.addLast(new ChannelInboundHandlerAdapter() {
                @Override
                public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                    DatagramPacket dp = (DatagramPacket) msg;
                    WormRegistry.this.onMessage(dp);
                }

                @Override
                public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
                    WormRegistry.this.onException(cause);
                }
            });
        }
    });
    ChannelFuture sync = bootstrap.bind(port).syncUninterruptibly();
    channel = (NioDatagramChannel) sync.channel();
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            nioEventLoopGroup.shutdownGracefully();
        }
    }));
}

From source file: org.dcache.pool.movers.NettyTransferService.java

License: Open Source License

protected void initChannel(Channel ch) throws Exception {
    ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
        @Override
        public void channelActive(ChannelHandlerContext ctx) throws Exception {
            openChannels.add(ctx.channel());
            super.channelActive(ctx);
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            super.channelInactive(ctx);
            openChannels.remove(ctx.channel());
            conditionallyStopServer();
        }
    });
}

From source file: org.eclipse.milo.opcua.stack.client.ClientChannelManager.java

License: Open Source License

private void disconnect(ClientSecureChannel secureChannel, CompletableFuture<Unit> disconnected) {
    RequestHeader requestHeader = new RequestHeader(NodeId.NULL_VALUE, DateTime.now(), uint(0), uint(0), null,
            uint(0), null);

    secureChannel.getChannel().pipeline().addFirst(new ChannelInboundHandlerAdapter() {
        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            logger.debug("channelInactive(), disconnect complete");
            disconnected.complete(Unit.VALUE);
        }
    });

    logger.debug("Sending CloseSecureChannelRequest...");
    CloseSecureChannelRequest request = new CloseSecureChannelRequest(requestHeader);
    secureChannel.getChannel().pipeline().fireUserEventTriggered(request);
}