Example usage for io.netty.channel ChannelOption SO_BACKLOG

List of usage examples for io.netty.channel ChannelOption SO_BACKLOG

Introduction

This page collects example usages of io.netty.channel.ChannelOption#SO_BACKLOG, the option that sets the maximum queue length for incoming connection requests (the backlog argument of the underlying listen call).

Prototype

public static final ChannelOption<Integer> SO_BACKLOG
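
Before the collected examples, here is a minimal, self-contained sketch (assuming Netty 4.x; the port 8080 and the class name BacklogExample are illustrative placeholders, not taken from the examples below) showing where SO_BACKLOG is normally applied: it is set through option() on the server (parent) channel, because it governs the accept queue of the listening socket rather than the accepted child channels.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LogLevel;
import io.netty.handler.logging.LoggingHandler;

public class BacklogExample {
    public static void main(String[] args) throws Exception {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    // SO_BACKLOG applies to the parent (server) channel,
                    // so it is set with option(), not childOption().
                    .option(ChannelOption.SO_BACKLOG, 128)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Placeholder handler; real servers add their codec/business handlers here.
                            ch.pipeline().addLast(new LoggingHandler(LogLevel.INFO));
                        }
                    });
            // Bind and block until the server channel is closed.
            ChannelFuture f = b.bind(8080).sync();
            f.channel().closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}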

Usage

From source file:com.hazelcast.openshift.TunnelServer.java

License:Open Source License

@Override
protected ServerBootstrap createBootstrap(int localPort) throws Exception {
    SslContext sslContext;
    if (!ssl) {
        sslContext = null;

    } else {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslContext = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build();
    }

    System.out.println("Creating serverside http-socket: (" + localPort + ") => (" + forwardHost + ":"
            + forwardPort + ")");
    return new ServerBootstrap().option(ChannelOption.SO_BACKLOG, 20).group(getBossGroup(), getWorkerGroup())
            .channel(NioServerSocketChannel.class).childHandler(new ChannelInitializer<SocketChannel>() {

                @Override
                protected void initChannel(SocketChannel channel) throws Exception {
                    ChannelPipeline pipeline = channel.pipeline();
                    if (sslContext != null) {
                        pipeline.addLast("ssl", sslContext.newHandler(channel.alloc()));
                    }
                    pipeline.addLast("http-codec", new HttpServerCodec());
                    pipeline.addLast(new TunnelClientAcceptor(getWorkerGroup(), forwardHost, forwardPort));
                }
            });

}

From source file:com.heliosapm.streams.onramp.OnRampBoot.java

License:Apache License

/**
 * Creates a new OnRampBoot.
 * @param appConfig  The application configuration
 */
public OnRampBoot(final Properties appConfig) {
    final String jmxmpUri = ConfigurationHelper.getSystemThenEnvProperty("jmx.jmxmp.uri",
            "jmxmp://0.0.0.0:1893", appConfig);
    JMXHelper.fireUpJMXMPServer(jmxmpUri);
    MessageForwarder.initialize(appConfig);
    port = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.port", 8091, appConfig);
    bindInterface = ConfigurationHelper.getSystemThenEnvProperty("onramp.network.bind", "0.0.0.0", appConfig);
    bindSocket = new InetSocketAddress(bindInterface, port);
    workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.worker_threads", CORES * 2,
            appConfig);
    connectTimeout = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sotimeout", 0, appConfig);
    backlog = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.backlog", 3072, appConfig);
    writeSpins = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.writespins", 16, appConfig);
    recvBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.recbuffer", 43690, appConfig);
    sendBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sendbuffer", 8192, appConfig);
    disableEpoll = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.epoll.disable", false,
            appConfig);
    async = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.async_io", true, appConfig);
    tcpNoDelay = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.tcp_no_delay", true,
            appConfig);
    keepAlive = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.keep_alive", true,
            appConfig);
    reuseAddress = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.reuse_address", true,
            appConfig);
    tcpPipelineFactory = new PipelineFactory(appConfig);
    udpPipelineFactory = new UDPPipelineFactory();
    tcpServerBootstrap.handler(new LoggingHandler(getClass(), LogLevel.INFO));
    tcpServerBootstrap.childHandler(tcpPipelineFactory);
    // Set the child options
    tcpServerBootstrap.childOption(ChannelOption.ALLOCATOR, BufferManager.getInstance().getAllocator());
    tcpServerBootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    tcpServerBootstrap.childOption(ChannelOption.SO_KEEPALIVE, keepAlive);
    tcpServerBootstrap.childOption(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.childOption(ChannelOption.SO_SNDBUF, sendBuffer);
    tcpServerBootstrap.childOption(ChannelOption.WRITE_SPIN_COUNT, writeSpins);
    // Set the server options
    tcpServerBootstrap.option(ChannelOption.SO_BACKLOG, backlog);
    tcpServerBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    tcpServerBootstrap.option(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.option(ChannelOption.SO_TIMEOUT, connectTimeout);

    final StringBuilder tcpUri = new StringBuilder("tcp");
    final StringBuilder udpUri = new StringBuilder("udp");
    if (IS_LINUX && !disableEpoll) {
        bossExecutorThreadFactory = new ExecutorThreadFactory("EpollServerBoss", true);
        bossGroup = new EpollEventLoopGroup(1, (ThreadFactory) bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("EpollServerWorker", true);
        workerGroup = new EpollEventLoopGroup(workerThreads, (ThreadFactory) workerExecutorThreadFactory);
        tcpChannelType = EpollServerSocketChannel.class;
        udpChannelType = EpollDatagramChannel.class;
        tcpUri.append("epoll");
        udpUri.append("epoll");
    } else {
        bossExecutorThreadFactory = new ExecutorThreadFactory("NioServerBoss", true);
        bossGroup = new NioEventLoopGroup(1, bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("NioServerWorker", true);
        workerGroup = new NioEventLoopGroup(workerThreads, workerExecutorThreadFactory);
        tcpChannelType = NioServerSocketChannel.class;
        udpChannelType = NioDatagramChannel.class;
        tcpUri.append("nio");
        udpUri.append("nio");
    }

    tcpUri.append("://").append(bindInterface).append(":").append(port);
    udpUri.append("://").append(bindInterface).append(":").append(port);
    URI u = null;
    try {
        u = new URI(tcpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed TCP server URI const: [{}]. Programmer Error", tcpUri, e);
    }
    tcpServerURI = u;
    try {
        u = new URI(udpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed UDP server URI const: [{}]. Programmer Error", udpUri, e);
    }
    udpServerURI = u;

    log.info(">>>>> Starting OnRamp TCP Listener on [{}]...", tcpServerURI);
    log.info(">>>>> Starting OnRamp UDP Listener on [{}]...", udpServerURI);
    final ChannelFuture cf = tcpServerBootstrap.channel(tcpChannelType).group(bossGroup, workerGroup)
            .bind(bindSocket).awaitUninterruptibly()
            .addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp TCP Listener on [{}] Started", tcpServerURI);
                };
            }).awaitUninterruptibly();
    final ChannelFuture ucf = udpBootstrap.channel(udpChannelType).group(workerGroup)
            .option(ChannelOption.SO_BROADCAST, true).handler(new UDPPipelineFactory()).bind(bindSocket)
            .awaitUninterruptibly().addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp UDP Listener on [{}] Started", udpServerURI);
                };
            }).awaitUninterruptibly();

    tcpServerChannel = cf.channel();
    udpServerChannel = ucf.channel();
    tcpCloseFuture = tcpServerChannel.closeFuture();
    udpCloseFuture = udpServerChannel.closeFuture();
    Runtime.getRuntime().addShutdownHook(shutdownHook);

}

From source file:com.heren.turtle.entry.channel.MessageReceiveServer.java

License:Open Source License

public void bind(int port, boolean needToFilter) {
    System.out.println("netty start!");
    EventLoopGroup bossGroup = new NioEventLoopGroup(6);
    EventLoopGroup workGroup = new NioEventLoopGroup(12);
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 1024)
                .childHandler(new MessageReceiveInitializer(needToFilter));
        // bind the port and wait synchronously for the bind to complete
        ChannelFuture f = b.bind(port).sync();
        // wait until the server channel is closed
        f.channel().closeFuture().sync();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        bossGroup.shutdownGracefully();
        workGroup.shutdownGracefully();
    }
}

From source file:com.hiido.eagle.hes.network.FileServer.java

License:Apache License

public void run() throws Exception {
    // Configure the server.
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100).handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        //ch.pipeline().addLast(new ChunkedWriteHandler());
                        ch.pipeline().addLast(new FileHandler());
                    }
                });

        // Start the server.
        ChannelFuture f = b.bind(port).sync();

        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.hiido.eagle.hes.transfer.FileTransferServer.java

License:Apache License

public void start() throws Exception {

    logger.info("Start server at host:{} port:{}, the number of work:{}", host, port, workNum);

    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup(workNum);
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100).handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {

                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new FileHandler());
                    }
                });

        ChannelFuture f = null;
        if (host == null) {
            f = b.bind(port).sync();
        } else {
            f = b.bind(host, port).sync();
        }

        logger.info("Server bound host:{} port:{} successfully", host, port);

        f.channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.hipishare.chat.test.EchoServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    // Configure SSL.
    final SslContext sslCtx;
    if (SSL) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build();
    } else {
        sslCtx = null;
    }

    // Configure the server.
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100).handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ChannelPipeline p = ch.pipeline();
                        if (sslCtx != null) {
                            p.addLast(sslCtx.newHandler(ch.alloc()));
                        }
                        //p.addLast(new LoggingHandler(LogLevel.INFO));
                        p.addLast(new IdleStateHandler(30, 10, 0, TimeUnit.SECONDS));
                        p.addLast(new HeartBeatHandler());
                        p.addLast(new EchoServerHandler());
                    }
                });

        // Start the server.
        ChannelFuture f = b.bind(PORT).sync();

        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.hop.hhxx.example.sctp.multihoming.SctpMultiHomingEchoServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    // Configure the server.
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioSctpServerChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100).handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SctpChannel>() {
                    @Override
                    public void initChannel(SctpChannel ch) throws Exception {
                        ch.pipeline().addLast(
                                //                             new LoggingHandler(LogLevel.INFO),
                                new SctpEchoServerHandler());
                    }
                });

        InetSocketAddress localAddress = new InetSocketAddress(SERVER_PRIMARY_HOST, SERVER_PORT);
        InetAddress localSecondaryAddress = InetAddress.getByName(SERVER_SECONDARY_HOST);

        // Bind the server to primary address.
        ChannelFuture bindFuture = b.bind(localAddress).sync();

        //Get the underlying sctp channel
        SctpServerChannel channel = (SctpServerChannel) bindFuture.channel();

        //Bind the secondary address
        ChannelFuture connectFuture = channel.bindAddress(localSecondaryAddress).sync();

        // Wait until the connection is closed.
        connectFuture.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.hxr.javatone.concurrency.netty.official.filetransfer.FileServer.java

License:Apache License

public void run() throws Exception {
    // Configure the server.
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100).handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new StringEncoder(CharsetUtil.UTF_8),
                                new LineBasedFrameDecoder(8192), new StringDecoder(CharsetUtil.UTF_8),
                                new FileHandler());
                    }
                });

        // Start the server.
        ChannelFuture f = b.bind(port).sync();

        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.hxr.javatone.nettyguide.d12.server.NettyServer.java

License:Apache License

public void bind() throws Exception {
    // Configure the NIO boss and worker event loop groups
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).option(ChannelOption.SO_BACKLOG, 100)
            .handler(new LoggingHandler(LogLevel.DEBUG)).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws IOException {
                    ch.pipeline().addLast(new NettyMessageDecoder(1024 * 1024, 4, 4));
                    ch.pipeline().addLast(new NettyMessageEncoder());
                    ch.pipeline().addLast("readTimeoutHandler", new ReadTimeoutHandler(50));
                    ch.pipeline().addLast(new LoginAuthRespHandler());
                    ch.pipeline().addLast("HeartBeatHandler", new HeartBeatRespHandler());
                }
            });

    // Bind to the configured address and wait until the server channel is closed
    b.bind(NettyConstant.REMOTEIP, NettyConstant.PORT).sync().channel().closeFuture().sync().channel();
    System.out.println("Netty server start ok : " + (NettyConstant.REMOTEIP + " : " + NettyConstant.PORT));
}

From source file:com.ibm.crail.datanode.netty.server.NettyServer.java

License:Apache License

public void run() {
    /* start the netty server */
    EventLoopGroup acceptGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap boot = new ServerBootstrap();
        boot.group(acceptGroup, workerGroup);
        /* we use sockets */
        boot.channel(NioServerSocketChannel.class);
        /* for new incoming connection */
        boot.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel ch) throws Exception {
                LOG.info("TID: " + Thread.currentThread().getId()
                        + " , a new client connection has arrived from : " + ch.remoteAddress().toString());
                /* incoming pipeline */
                ch.pipeline().addLast(new RdmaDecoderRx(), /* this makes full RDMA messages */
                        new IncomingRequestHandler(ch, dataNode));
                /* outgoing pipeline */
                //ch.pipeline().addLast(new RdmaEncoderTx());
            }
        });
        /* general optimization settings */
        boot.option(ChannelOption.SO_BACKLOG, 1024);
        boot.childOption(ChannelOption.SO_KEEPALIVE, true);

        /* now we bind the server and start */
        ChannelFuture f = boot.bind(this.inetSocketAddress.getAddress(), this.inetSocketAddress.getPort())
                .sync();
        LOG.info("Datanode bound to : " + this.inetSocketAddress);
        /* at this point we are bound and ready */
        f.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        workerGroup.shutdownGracefully();
        acceptGroup.shutdownGracefully();
        LOG.info("Datanode at " + this.inetSocketAddress + " is shutdown");
    }
}