Example usage for io.netty.bootstrap.ServerBootstrap: the ServerBootstrap() constructor

List of usage examples for the io.netty.bootstrap.ServerBootstrap no-argument constructor, ServerBootstrap().

Introduction

On this page you can find example usages of the io.netty.bootstrap.ServerBootstrap no-argument constructor, ServerBootstrap(), drawn from open-source projects.

Prototype

public ServerBootstrap() 
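
Before the project-specific sources below, here is a minimal, self-contained sketch of the typical pattern built around this no-argument constructor: create boss and worker event loop groups, configure the bootstrap, bind, and release the groups on shutdown. The port number and the echo-style handler are illustrative assumptions, not taken from any of the sources on this page.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public final class MinimalEchoServer {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);   // accepts incoming connections
        EventLoopGroup workerGroup = new NioEventLoopGroup();  // handles accepted channels
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
             .channel(NioServerSocketChannel.class)
             .option(ChannelOption.SO_BACKLOG, 128)
             .childOption(ChannelOption.SO_KEEPALIVE, true)
             .childHandler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 protected void initChannel(SocketChannel ch) {
                     // Echo every received message back to the sender.
                     ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                         @Override
                         public void channelRead(ChannelHandlerContext ctx, Object msg) {
                             ctx.writeAndFlush(msg);
                         }
                     });
                 }
             });
            // Bind and block until the server channel is closed.
            b.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}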

Usage

From source file:com.github.zk1931.jzab.NettyTransport.java

License:Apache License

/**
 * Constructs a NettyTransport object.
 *
 * @param hostPort "hostname:port" string. The netty transport binds to the
 *                 port specified in the string.
 * @param receiver receiver callback.
 * @param sslParam Ssl parameters.
 * @param dir the directory used to store the received file.
 */
public NettyTransport(String hostPort, final Receiver receiver, ZabConfig.SslParameters sslParam,
        final File dir) throws InterruptedException, GeneralSecurityException, IOException {
    super(receiver);
    this.keyStore = sslParam.getKeyStore();
    this.trustStore = sslParam.getTrustStore();
    this.keyStorePassword = sslParam.getKeyStorePassword() != null
            ? sslParam.getKeyStorePassword().toCharArray()
            : null;
    this.trustStorePassword = sslParam.getTrustStorePassword() != null
            ? sslParam.getTrustStorePassword().toCharArray()
            : null;
    this.dir = dir;
    if (isSslEnabled()) {
        initSsl();
    }

    this.hostPort = hostPort;
    String[] address = hostPort.split(":", 2);
    int port = Integer.parseInt(address[1]);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).option(ChannelOption.SO_BACKLOG, 128)
            .option(ChannelOption.SO_REUSEADDR, true).childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.TCP_NODELAY, true).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    if (isSslEnabled()) {
                        SSLEngine engine = serverContext.createSSLEngine();
                        engine.setUseClientMode(false);
                        engine.setNeedClientAuth(true);
                        ch.pipeline().addLast(new SslHandler(engine));
                    }
                    // Incoming handlers
                    ch.pipeline().addLast(new MainHandler());
                    ch.pipeline().addLast(new ServerHandshakeHandler());
                    ch.pipeline().addLast(new NotifyHandler());
                    ch.pipeline().addLast(new ServerErrorHandler());
                }
            });

    // Travis build fails once in a while because it fails to bind to a port.
    // This is most likely a transient failure. Retry binding up to 5 times with
    // a 1 second sleep in between before giving up.
    int bindRetryCount = 5;
    for (int i = 0;; i++) {
        try {
            channel = b.bind(port).sync().channel();
            LOG.info("Server started: {}", hostPort);
            return;
        } catch (Exception ex) {
            if (i >= bindRetryCount) {
                throw ex;
            }
            LOG.debug("Failed to bind to {}. Retrying after 1 second.", hostPort);
            Thread.sleep(1000);
        }
    }
}
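
The constructor above uses bossGroup and workerGroup fields and a channel field whose creation and teardown fall outside the quoted excerpt. Below is a minimal sketch of how such fields are typically declared and released; the field names mirror the excerpt, while the shutdown method itself is a hypothetical addition for illustration.

// Hypothetical companion code; the actual jzab class declares and releases
// these resources elsewhere. Field names mirror the excerpt above.
private final EventLoopGroup bossGroup = new NioEventLoopGroup();
private final EventLoopGroup workerGroup = new NioEventLoopGroup();
private Channel channel;

public void shutdown() {
    // Close the listening channel first, then release both event loop groups.
    if (channel != null) {
        channel.close().syncUninterruptibly();
    }
    bossGroup.shutdownGracefully();
    workerGroup.shutdownGracefully();
}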

From source file:com.github.zk1931.jzab.transport.NettyTransport.java

License:Apache License

/**
 * Constructs a NettyTransport object.
 *
 * @param hostPort "hostname:port" string. The netty transport binds to the
 *                 port specified in the string.
 * @param receiver receiver callback.
 * @param sslParam Ssl parameters.
 * @param dir the directory used to store the received file.
 */
public NettyTransport(String hostPort, final Receiver receiver, SslParameters sslParam, final File dir)
        throws InterruptedException, GeneralSecurityException, IOException {
    super(receiver);
    this.keyStore = sslParam.getKeyStore();
    this.trustStore = sslParam.getTrustStore();
    this.keyStorePassword = sslParam.getKeyStorePassword() != null
            ? sslParam.getKeyStorePassword().toCharArray()
            : null;
    this.trustStorePassword = sslParam.getTrustStorePassword() != null
            ? sslParam.getTrustStorePassword().toCharArray()
            : null;
    this.dir = dir;
    if (isSslEnabled()) {
        initSsl();
    }

    this.hostPort = hostPort;
    String[] address = hostPort.split(":", 2);
    int port = Integer.parseInt(address[1]);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).option(ChannelOption.SO_BACKLOG, 128)
            .option(ChannelOption.SO_REUSEADDR, true).childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.TCP_NODELAY, true).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    if (isSslEnabled()) {
                        SSLEngine engine = serverContext.createSSLEngine();
                        engine.setUseClientMode(false);
                        engine.setNeedClientAuth(true);
                        ch.pipeline().addLast(new SslHandler(engine));
                    }
                    // Incoming handlers
                    ch.pipeline().addLast(new MainHandler());
                    ch.pipeline().addLast(new ServerHandshakeHandler());
                    ch.pipeline().addLast(new NotifyHandler());
                    ch.pipeline().addLast(new ErrorHandler());
                    // Outgoing handlers.
                    ch.pipeline().addLast("frameEncoder", new LengthFieldPrepender(4));
                }
            });

    // Travis build fails once in a while because it fails to bind to a port.
    // This is most likely a transient failure. Retry binding up to 5 times with
    // a 1 second sleep in between before giving up.
    int bindRetryCount = 5;
    for (int i = 0;; i++) {
        try {
            channel = b.bind(port).sync().channel();
            LOG.info("Server started: {}", hostPort);
            return;
        } catch (Exception ex) {
            if (i >= bindRetryCount) {
                throw ex;
            }
            LOG.debug("Failed to bind to {}. Retrying after 1 second.", hostPort);
            Thread.sleep(1000);
        }
    }
}

From source file:com.google.cloud.pubsub.proxy.moquette.NettyAcceptor.java

License:Open Source License

private void initFactory(String host, int port, final PipelineInitializer pipeliner) {
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline pipeline = ch.pipeline();
                    try {
                        pipeliner.init(pipeline);
                    } catch (Throwable th) {
                        LOG.error("Severe error during pipeline creation", th);
                        throw th;
                    }
                }
            }).option(ChannelOption.SO_BACKLOG, 128).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.TCP_NODELAY, true).childOption(ChannelOption.SO_KEEPALIVE, true);
    try {
        // Bind and start to accept incoming connections.
        ChannelFuture future = bootstrap.bind(host, port);
        LOG.info("Server bound host: {}, port: {}", host, port);
        future.sync();
    } catch (InterruptedException ex) {
        LOG.error(null, ex);
    }
}

From source file:com.google.devtools.build.lib.remote.blobstore.http.HttpBlobStoreTest.java

License:Open Source License

private ServerSocketChannel startServer(ChannelHandler handler) throws Exception {
    EventLoopGroup eventLoop = new NioEventLoopGroup(1);
    ServerBootstrap sb = new ServerBootstrap().group(eventLoop).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<NioSocketChannel>() {
                @Override
                protected void initChannel(NioSocketChannel ch) {
                    ch.pipeline().addLast(new HttpServerCodec());
                    ch.pipeline().addLast(new HttpObjectAggregator(1000));
                    ch.pipeline().addLast(handler);
                }
            });
    return ((ServerSocketChannel) sb.bind("localhost", 0).sync().channel());
}
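
Because this test binds to port 0, the operating system assigns a free ephemeral port. A hypothetical caller would typically read the chosen port back from the returned server channel's local address; startServer and handler come from the excerpt, while actualPort is an assumed variable name.

// Hypothetical caller: port 0 lets the OS pick a free port, which the test
// can read back from the server channel's local address.
ServerSocketChannel server = startServer(handler);
int actualPort = server.localAddress().getPort();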

From source file:com.googlecode.protobuf.pro.duplex.example.DuplexPingPongServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length < 4) {
        System.err.println("usage: <serverHostname> <serverPort> <ssl=Y/N> <nodelay=Y/N>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    boolean secure = "Y".equals(args[2]);
    boolean nodelay = "Y".equals(args[3]);
    long runDuration = 0;
    if (args.length > 4) {
        runDuration = Long.parseLong(args[4]);
    }

    log.info("DuplexPingPongServer " + serverHostname + ":" + serverPort + " ssl=" + (secure ? "Y" : "N")
            + " nodelay=" + (nodelay ? "Y" : "N"));

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    RpcServerCallExecutor executor = new ThreadPoolCallExecutor(3, 200);

    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);
    serverFactory.setRpcServerCallExecutor(executor);
    if (secure) {
        RpcSSLContext sslCtx = new RpcSSLContext();
        sslCtx.setKeystorePassword("changeme");
        sslCtx.setKeystorePath("./lib/server.keystore");
        sslCtx.setTruststorePassword("changeme");
        sslCtx.setTruststorePath("./lib/truststore");
        sslCtx.init();

        serverFactory.setSslContext(sslCtx);
    }

    NullLogger logger = new NullLogger();
    serverFactory.setLogger(logger);

    RpcTimeoutExecutor timeoutExecutor = new TimeoutExecutor(1, 5);
    RpcTimeoutChecker timeoutChecker = new TimeoutChecker();
    timeoutChecker.setTimeoutExecutor(timeoutExecutor);
    timeoutChecker.startChecking(serverFactory.getRpcClientRegistry());

    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    // we give the server a blocking and a non-blocking (pong capable) Ping Service
    BlockingService bPingService = BlockingPingService
            .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(bPingService);

    Service nbPingService = NonBlockingPingService
            .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(nbPingService);

    // Configure the server to provide a Pong Service in both blocking and non-blocking varieties
    BlockingService bPongService = BlockingPongService
            .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongServer());
    serverFactory.getRpcServiceRegistry().registerService(bPongService);

    Service nbPongService = NonBlockingPongService
            .newReflectiveService(new PingPongServiceFactory.NonBlockingPongServer());
    serverFactory.getRpcServiceRegistry().registerService(nbPongService);

    // Configure the server.
    ServerBootstrap bootstrap = new ServerBootstrap();
    NioEventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    NioEventLoopGroup workers = new NioEventLoopGroup(16,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, nodelay);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    // Bind and start to accept incoming connections.
    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(executor);
    shutdownHandler.addResource(timeoutChecker);
    shutdownHandler.addResource(timeoutExecutor);

    bootstrap.bind();

    log.info("Serving " + serverInfo);

    if (runDuration > 0) {
        Thread.sleep(runDuration);
        System.exit(0);
    } else {
        while (true) {
            try {
                log.info("Sleeping 60s before retesting clients.");
                Thread.sleep(60000);
                new ShortTests().execute(serverFactory.getRpcClientRegistry());
            } catch (Throwable e) {
                log.warn("Throwable.", e);
            }
        }
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.nonrpc.StatusServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: <serverHostname> <serverPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    // RPC payloads are uncompressed when logged - so reduce logging
    CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
    logger.setLogRequestProto(false);
    logger.setLogResponseProto(false);

    // Configure the server.
    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);
    RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(10, 10);
    serverFactory.setRpcServerCallExecutor(rpcExecutor);
    serverFactory.setLogger(logger);

    final RpcCallback<PingPong.Status> clientStatusCallback = new RpcCallback<PingPong.Status>() {

        @Override
        public void run(PingPong.Status parameter) {
            log.info("Received " + parameter);
        }

    };
    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);

            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);

            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
            clientChannel.setOobMessageCallback(Status.getDefaultInstance(), clientStatusCallback);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    ServerBootstrap bootstrap = new ServerBootstrap();
    EventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    EventLoopGroup workers = new NioEventLoopGroup(16,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(rpcExecutor);

    // Bind and start to accept incoming connections.
    bootstrap.bind();
    log.info("Serving " + bootstrap);

    while (true) {

        List<RpcClientChannel> clients = serverFactory.getRpcClientRegistry().getAllClients();
        for (RpcClientChannel client : clients) {

            PingPong.Status serverStatus = PingPong.Status.newBuilder()
                    .setMessage("Server " + serverFactory.getServerInfo() + " OK@" + System.currentTimeMillis())
                    .build();

            ChannelFuture oobSend = client.sendOobMessage(serverStatus);
            if (!oobSend.isDone()) {
                log.info("Waiting for completion.");
                oobSend.syncUninterruptibly();
            }
            if (!oobSend.isSuccess()) {
                log.warn("OobMessage send failed.", oobSend.cause());
            }

        }
        log.info("Sleeping 5s before sending serverStatus to all clients.");

        Thread.sleep(5000);
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.simple.SimpleServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: <serverHostname> <serverPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    // RPC payloads are uncompressed when logged - so reduce logging
    CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
    logger.setLogRequestProto(false);
    logger.setLogResponseProto(false);

    // Configure the server.
    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);

    ExtensionRegistry r = ExtensionRegistry.newInstance();
    PingPong.registerAllExtensions(r);
    serverFactory.setExtensionRegistry(r);

    RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(10, 10);
    serverFactory.setRpcServerCallExecutor(rpcExecutor);
    serverFactory.setLogger(logger);

    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    // we give the server a blocking and a non-blocking (pong capable) Ping Service
    BlockingService bPingService = BlockingPingService
            .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(true, bPingService);

    Service nbPingService = NonBlockingPingService
            .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(true, nbPingService);

    ServerBootstrap bootstrap = new ServerBootstrap();
    EventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    EventLoopGroup workers = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(rpcExecutor);

    // Bind and start to accept incoming connections.
    bootstrap.bind();
    log.info("Serving " + bootstrap);

    while (true) {

        List<RpcClientChannel> clients = serverFactory.getRpcClientRegistry().getAllClients();
        log.info("Number of clients=" + clients.size());

        Thread.sleep(5000);
    }
}

From source file:com.grillecube.engine.network.server.ServerNetwork.java

public void start() throws Exception {
    this._bossgroup = new NioEventLoopGroup();
    this._workergroup = new NioEventLoopGroup();
    this._bootstrap = new ServerBootstrap();
    this._bootstrap.group(this._bossgroup, this._workergroup);
    this._bootstrap.channel(NioServerSocketChannel.class);
    this._bootstrap.childHandler(this);
    this._bootstrap.option(ChannelOption.SO_BACKLOG, 128);
    this._bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    this._channel = this._bootstrap.bind(this._port).sync(); // (7)
    Logger.get().log(Level.FINE, "Listening on " + this._port);
    this.stop();
}

From source file:com.grillecube.server.network.ServerNetwork.java

public void start() throws Exception {
    this.bossgroup = new NioEventLoopGroup();
    this.workergroup = new NioEventLoopGroup();
    this.bootstrap = new ServerBootstrap();
    this.bootstrap.group(this.bossgroup, this.workergroup);
    this.bootstrap.channel(NioServerSocketChannel.class);
    this.bootstrap.childHandler(this);
    this.bootstrap.option(ChannelOption.SO_BACKLOG, 128);
    this.bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    this.channel = this.bootstrap.bind(this.port).sync(); // (7)
    Logger.get().log(Level.FINE, "Listening on " + this.port);
    this.stop();
}
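
Both grillecube variants store the result of bind(...).sync(), which is a ChannelFuture rather than a Channel, and call stop() immediately after logging. A more conventional sketch of the same start/stop split keeps the bound Channel and defers teardown to a separate call; the field and method names below are assumptions for illustration, not the project's actual API.

// Hypothetical variant: keep the bound Channel instead of the ChannelFuture
// and leave teardown to a separate stop() call.
public void start() throws Exception {
    this.bossgroup = new NioEventLoopGroup();
    this.workergroup = new NioEventLoopGroup();
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(this.bossgroup, this.workergroup)
             .channel(NioServerSocketChannel.class)
             .childHandler(this)
             .option(ChannelOption.SO_BACKLOG, 128)
             .childOption(ChannelOption.SO_KEEPALIVE, true);
    this.channel = bootstrap.bind(this.port).sync().channel();
    Logger.get().log(Level.FINE, "Listening on " + this.port);
}

public void stop() {
    if (this.channel != null) {
        this.channel.close().syncUninterruptibly();
    }
    this.workergroup.shutdownGracefully();
    this.bossgroup.shutdownGracefully();
}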

From source file:com.gw.monitor.alphaenvmonitor.monitor.MonitorServer.java

License:Apache License

public void run() throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .childHandler(new MonitorServerInitializer());

        b.bind(port).sync().channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}