Example usage for io.netty.bootstrap ServerBootstrap childOption

List of usage examples for io.netty.bootstrap ServerBootstrap childOption

Introduction

On this page you can find example usages of io.netty.bootstrap ServerBootstrap childOption.

Prototype

public <T> ServerBootstrap childOption(ChannelOption<T> childOption, T value) 

Document

Allows specifying a ChannelOption that is applied to the Channel instances once they are created (after the acceptor has accepted the Channel).
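
For orientation, here is a minimal, self-contained sketch (the port 8080 and the LoggingHandler pipeline are placeholders, not taken from the examples below) that contrasts option, which configures the parent server channel, with childOption, which configures each accepted connection:

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;

public class ChildOptionExample {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup boss = new NioEventLoopGroup(1);
        EventLoopGroup workers = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(boss, workers)
                    .channel(NioServerSocketChannel.class)
                    // option(...) applies to the server (parent) channel
                    .option(ChannelOption.SO_BACKLOG, 128)
                    // childOption(...) applies to every accepted child channel
                    .childOption(ChannelOption.SO_KEEPALIVE, true)
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        public void initChannel(SocketChannel ch) {
                            ch.pipeline().addLast(new LoggingHandler(LogLevel.INFO));
                        }
                    });
            b.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            workers.shutdownGracefully();
        }
    }
}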

Usage

From source file:com.dinstone.rpc.netty.server.NettyServer.java

License:Apache License

public void start() {
    bossGroup = new NioEventLoopGroup();
    workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap boot = new ServerBootstrap();
        boot.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {

                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new RpcProtocolDecoder(true));
                        ch.pipeline().addLast(new RpcProtocolEncoder(true));
                        ch.pipeline().addLast(new NettyServerHandler(handler));
                    }
                });
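        // SO_BACKLOG applies to the server (parent) channel; SO_KEEPALIVE to each accepted child channel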
        boot.option(ChannelOption.SO_BACKLOG, 128);
        boot.childOption(ChannelOption.SO_KEEPALIVE, true);

        int port = config.getInt(Constants.SERVICE_PORT, Constants.DEFAULT_SERVICE_PORT);
        InetSocketAddress localAddress = new InetSocketAddress(port);
        String host = config.get(Constants.SERVICE_HOST);
        if (host != null) {
            localAddress = new InetSocketAddress(host, port);
        }
        LOG.info("RPC service works on " + localAddress);

        boot.bind(localAddress).sync();
    } catch (InterruptedException e) {
        throw new RpcException(500, "Server can't bind to the specified local address ", e);
    }
}

From source file:com.diwayou.hybrid.remoting.netty.NettyRemotingServer.java

License:Apache License

@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(//
            nettyServerConfig.getServerWorkerThreads(), //
            new ThreadFactory() {

                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyServerWorkerThread_" + this.threadIndex.incrementAndGet());
                }
            });

    ServerBootstrap childHandler = //
            this.serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupWorker)
                    .channel(NioServerSocketChannel.class)
                    //
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    //
                    .option(ChannelOption.SO_REUSEADDR, true)
                    //
                    .option(ChannelOption.SO_KEEPALIVE, false)
                    //
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    //
                    .option(ChannelOption.SO_SNDBUF, nettyServerConfig.getServerSocketSndBufSize())
                    //
                    .option(ChannelOption.SO_RCVBUF, nettyServerConfig.getServerSocketRcvBufSize())
                    //
                    .localAddress(new InetSocketAddress(this.nettyServerConfig.getListenPort()))
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        public void initChannel(SocketChannel ch) throws Exception {
                            ch.pipeline().addLast(
                                    //
                                    defaultEventExecutorGroup, //
                                    new NettyEncoder(), //
                                    new NettyDecoder(), //
                                    new IdleStateHandler(0, 0,
                                            nettyServerConfig.getServerChannelMaxIdleTimeSeconds()), //
                                    new NettyConnetManageHandler(), //
                                    new NettyServerHandler());
                        }
                    });

    if (nettyServerConfig.isServerPooledByteBufAllocatorEnable()) {
        // use the pooled ByteBuf allocator for accepted child channels when enabled
        childHandler.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }

    try {
        ChannelFuture sync = this.serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        this.port = addr.getPort();
    } catch (InterruptedException e1) {
        throw new RuntimeException("this.serverBootstrap.bind().sync() InterruptedException", e1);
    }

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }

    // scan the response table once per second (after a 3 second initial delay)
    this.timer.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
}

From source file:com.farsunset.cim.sdk.server.handler.CIMNioSocketAcceptor.java

License:Apache License

public void bind() throws IOException {

    /**
     * register the websocket protocol handler
     */
    innerHandlerMap.put(WEBSOCKET_HANDLER_KEY, new WebsocketHandler());

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(new NioEventLoopGroup(), new NioEventLoopGroup());
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {

            ch.pipeline().addLast(new ServerMessageDecoder());
            ch.pipeline().addLast(new ServerMessageEncoder());
            ch.pipeline().addLast(new LoggingHandler(LogLevel.INFO));
            ch.pipeline().addLast(new IdleStateHandler(READ_IDLE_TIME, WRITE_IDLE_TIME, 0));
            ch.pipeline().addLast(CIMNioSocketAcceptor.this);
        }
    });

    bootstrap.bind(port);
}

From source file:com.github.thinker0.mesos.MesosHealthCheckerServer.java

License:Apache License

/**
 * Initializes the server, socket, and channel.
 *
 * @param loopGroup          The event loop group.
 * @param serverChannelClass The socket channel class.
 * @throws InterruptedException on interruption.
 */
private void start(final EventLoopGroup loopGroup, final Class<? extends ServerChannel> serverChannelClass)
        throws InterruptedException {

    try {
        final InetSocketAddress inet = new InetSocketAddress(port);

        final ServerBootstrap b = new ServerBootstrap();
        b.option(ChannelOption.SO_BACKLOG, 1024);
        b.option(ChannelOption.SO_REUSEADDR, true);
        b.option(ChannelOption.SO_LINGER, 0);
        b.group(loopGroup).channel(serverChannelClass).childHandler(new WebServerInitializer());
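        // use a pooled (direct-preferring) ByteBuf allocator for accepted child channels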
        b.childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
        b.childOption(ChannelOption.SO_REUSEADDR, true);

        // final Channel ch = b.bind(inet).sync().channel();
        // ch.closeFuture().sync();
        b.bind(inet);
        logger.info("Listening for Admin on {}", inet);
    } catch (Throwable t) {
        logger.warn(t.getMessage(), t);
    } finally {
        // loopGroup.shutdownGracefully().sync();
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.DuplexPingPongServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length < 4) {
        System.err.println("usage: <serverHostname> <serverPort> <ssl=Y/N> <nodelay=Y/N>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    boolean secure = "Y".equals(args[2]);
    boolean nodelay = "Y".equals(args[3]);
    long runDuration = 0;
    if (args.length > 4) {
        runDuration = Long.parseLong(args[4]);
    }

    log.info("DuplexPingPongServer " + serverHostname + ":" + serverPort + " ssl=" + (secure ? "Y" : "N")
            + " nodelay=" + (nodelay ? "Y" : "N"));

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    RpcServerCallExecutor executor = new ThreadPoolCallExecutor(3, 200);

    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);
    serverFactory.setRpcServerCallExecutor(executor);
    if (secure) {
        RpcSSLContext sslCtx = new RpcSSLContext();
        sslCtx.setKeystorePassword("changeme");
        sslCtx.setKeystorePath("./lib/server.keystore");
        sslCtx.setTruststorePassword("changeme");
        sslCtx.setTruststorePath("./lib/truststore");
        sslCtx.init();

        serverFactory.setSslContext(sslCtx);
    }

    NullLogger logger = new NullLogger();
    serverFactory.setLogger(logger);

    RpcTimeoutExecutor timeoutExecutor = new TimeoutExecutor(1, 5);
    RpcTimeoutChecker timeoutChecker = new TimeoutChecker();
    timeoutChecker.setTimeoutExecutor(timeoutExecutor);
    timeoutChecker.startChecking(serverFactory.getRpcClientRegistry());

    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    // we give the server a blocking and non blocking (pong capable) Ping Service
    BlockingService bPingService = BlockingPingService
            .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(bPingService);

    Service nbPingService = NonBlockingPingService
            .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(nbPingService);

    // Configure the server to provide a Pong Service in both blocking and non-blocking varieties
    BlockingService bPongService = BlockingPongService
            .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongServer());
    serverFactory.getRpcServiceRegistry().registerService(bPongService);

    Service nbPongService = NonBlockingPongService
            .newReflectiveService(new PingPongServiceFactory.NonBlockingPongServer());
    serverFactory.getRpcServiceRegistry().registerService(nbPongService);

    // Configure the server.
    ServerBootstrap bootstrap = new ServerBootstrap();
    NioEventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    NioEventLoopGroup workers = new NioEventLoopGroup(16,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
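    // 1 MiB socket buffers: option(...) for the server channel, childOption(...) for each accepted connection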
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, nodelay);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    // Bind and start to accept incoming connections.
    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(executor);
    shutdownHandler.addResource(timeoutChecker);
    shutdownHandler.addResource(timeoutExecutor);

    bootstrap.bind();

    log.info("Serving " + serverInfo);

    if (runDuration > 0) {
        Thread.sleep(runDuration);
        System.exit(0);
    } else {
        while (true) {
            try {
                log.info("Sleeping 60s before retesting clients.");
                Thread.sleep(60000);
                new ShortTests().execute(serverFactory.getRpcClientRegistry());
            } catch (Throwable e) {
                log.warn("Throwable.", e);
            }
        }
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.nonrpc.StatusServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: <serverHostname> <serverPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    // RPC payloads are uncompressed when logged - so reduce logging
    CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
    logger.setLogRequestProto(false);
    logger.setLogResponseProto(false);

    // Configure the server.
    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);
    RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(10, 10);
    serverFactory.setRpcServerCallExecutor(rpcExecutor);
    serverFactory.setLogger(logger);

    final RpcCallback<PingPong.Status> clientStatusCallback = new RpcCallback<PingPong.Status>() {

        @Override
        public void run(PingPong.Status parameter) {
            log.info("Received " + parameter);
        }

    };
    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);

            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);

            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    ServerBootstrap bootstrap = new ServerBootstrap();
    EventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    EventLoopGroup workers = new NioEventLoopGroup(16,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(rpcExecutor);

    // Bind and start to accept incoming connections.
    bootstrap.bind();
    log.info("Serving " + bootstrap);

    while (true) {

        List<RpcClientChannel> clients = serverFactory.getRpcClientRegistry().getAllClients();
        for (RpcClientChannel client : clients) {

            PingPong.Status serverStatus = PingPong.Status.newBuilder()
                    .setMessage("Server " + serverFactory.getServerInfo() + " OK@" + System.currentTimeMillis())
                    .build();

            ChannelFuture oobSend = client.sendOobMessage(serverStatus);
            if (!oobSend.isDone()) {
                log.info("Waiting for completion.");
                oobSend.syncUninterruptibly();
            }
            if (!oobSend.isSuccess()) {
                log.warn("OobMessage send failed.", oobSend.cause());
            }

        }
        log.info("Sleeping 5s before sending serverStatus to all clients.");

        Thread.sleep(5000);
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.simple.SimpleServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: <serverHostname> <serverPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    // RPC payloads are uncompressed when logged - so reduce logging
    CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
    logger.setLogRequestProto(false);
    logger.setLogResponseProto(false);

    // Configure the server.
    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);

    ExtensionRegistry r = ExtensionRegistry.newInstance();
    PingPong.registerAllExtensions(r);
    serverFactory.setExtensionRegistry(r);

    RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(10, 10);
    serverFactory.setRpcServerCallExecutor(rpcExecutor);
    serverFactory.setLogger(logger);

    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    // we give the server a blocking and non blocking (pong capable) Ping Service
    BlockingService bPingService = BlockingPingService
            .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(true, bPingService);

    Service nbPingService = NonBlockingPingService
            .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(true, nbPingService);

    ServerBootstrap bootstrap = new ServerBootstrap();
    EventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    EventLoopGroup workers = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(rpcExecutor);

    // Bind and start to accept incoming connections.
    bootstrap.bind();
    log.info("Serving " + bootstrap);

    while (true) {

        List<RpcClientChannel> clients = serverFactory.getRpcClientRegistry().getAllClients();
        log.info("Number of clients=" + clients.size());

        Thread.sleep(5000);
    }
}

From source file:com.ibm.crail.datanode.netty.server.NettyServer.java

License:Apache License

public void run() {
    /* start the netty server */
    EventLoopGroup acceptGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap boot = new ServerBootstrap();
        boot.group(acceptGroup, workerGroup);
        /* we use sockets */
        boot.channel(NioServerSocketChannel.class);
        /* for new incoming connection */
        boot.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel ch) throws Exception {
                LOG.info("TID: " + Thread.currentThread().getId()
                        + " , a new client connection has arrived from : " + ch.remoteAddress().toString());
                /* incoming pipeline */
                ch.pipeline().addLast(new RdmaDecoderRx(), /* this makes full RDMA messages */
                        new IncomingRequestHandler(ch, dataNode));
                /* outgoing pipeline */
                //ch.pipeline().addLast(new RdmaEncoderTx());
            }
        });
        /* general optimization settings */
        boot.option(ChannelOption.SO_BACKLOG, 1024);
        boot.childOption(ChannelOption.SO_KEEPALIVE, true);

        /* now we bind the server and start */
        ChannelFuture f = boot.bind(this.inetSocketAddress.getAddress(), this.inetSocketAddress.getPort())
                .sync();
        LOG.info("Datanode binded to : " + this.inetSocketAddress);
        /* at this point we are bound and ready */
        f.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        workerGroup.shutdownGracefully();
        acceptGroup.shutdownGracefully();
        LOG.info("Datanode at " + this.inetSocketAddress + " is shutdown");
    }
}

From source file:com.ibm.crail.namenode.rpc.netty.NettyNameNode.java

License:Apache License

public void run(final RpcNameNodeService service) {
    /* here we run the incoming RPC service */
    InetSocketAddress inetSocketAddress = CrailUtils.getNameNodeAddress();
    LOG.info("Starting the NettyNamenode service at : " + inetSocketAddress);
    /* start the netty server */
    EventLoopGroup acceptGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap boot = new ServerBootstrap();
        boot.group(acceptGroup, workerGroup);
        /* we use sockets */
        boot.channel(NioServerSocketChannel.class);
        /* for new incoming connection */
        boot.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel ch) throws Exception {
                LOG.info("A new connection has arrived from : " + ch.remoteAddress().toString());
                /* incoming pipeline */
                ch.pipeline().addLast("RequestDecoder", new RequestDecoder());
                ch.pipeline().addLast("NNProcessor", new NamenodeProcessor(service));
                /* outgoing pipeline */
                ch.pipeline().addLast("ResponseEncoder", new ResponseEncoder());
            }
        });
        /* general optimization settings */
        boot.option(ChannelOption.SO_BACKLOG, 1024);
        boot.childOption(ChannelOption.SO_KEEPALIVE, true);

        /* now we bind the server and start */
        ChannelFuture f = boot.bind(inetSocketAddress.getAddress(), inetSocketAddress.getPort()).sync();
        /* at this point we are bound and ready */
        f.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        workerGroup.shutdownGracefully();
        acceptGroup.shutdownGracefully();
        LOG.info("Netty namenode at " + inetSocketAddress + " is shutdown");
    }
}

From source file:com.ict.dtube.remoting.netty.NettyRemotingServer.java

License:Apache License

@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(//
            nettyServerConfig.getServerWorkerThreads(), //
            new ThreadFactory() {

                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyServerWorkerThread_" + this.threadIndex.incrementAndGet());
                }
            });

    ServerBootstrap childHandler = //
            this.serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupWorker)
                    .channel(NioServerSocketChannel.class)
                    //
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    //
                    .option(ChannelOption.SO_REUSEADDR, true)
                    //
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    //
                    .childOption(ChannelOption.SO_SNDBUF, NettySystemConfig.SocketSndbufSize)
                    //
                    .childOption(ChannelOption.SO_RCVBUF, NettySystemConfig.SocketRcvbufSize)

                    .localAddress(new InetSocketAddress(this.nettyServerConfig.getListenPort()))
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        public void initChannel(SocketChannel ch) throws Exception {
                            ch.pipeline().addLast(
                                    //
                                    defaultEventExecutorGroup, //
                                    new NettyEncoder(), //
                                    new NettyDecoder(), //
                                    new IdleStateHandler(0, 0,
                                            nettyServerConfig.getServerChannelMaxIdleTimeSeconds()), //
                                    new NettyConnetManageHandler(), //
                                    new NettyServerHandler());
                        }
                    });

    if (NettySystemConfig.NettyPooledByteBufAllocatorEnable) {
        // use the pooled ByteBuf allocator for accepted child channels when enabled
        childHandler.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }

    try {
        ChannelFuture sync = this.serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        this.port = addr.getPort();
    } catch (InterruptedException e1) {
        throw new RuntimeException("this.serverBootstrap.bind().sync() InterruptedException", e1);
    }

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }

    // scan the response table once per second (after a 3 second initial delay)
    this.timer.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
}