Example usage for io.netty.channel ChannelOption SO_KEEPALIVE

List of usage examples for io.netty.channel ChannelOption SO_KEEPALIVE

Introduction

On this page you can find example usages of io.netty.channel.ChannelOption.SO_KEEPALIVE, collected from open-source projects.

Prototype

public static final ChannelOption<Boolean> SO_KEEPALIVE

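Before the project-specific examples below, here is a minimal sketch of typical client-side usage. It is illustrative only: the method name, host, port, and the LoggingHandler used as a stand-in pipeline handler are assumptions, not taken from any of the projects listed under Usage.

public static void connectWithKeepAlive(String host, int port) throws Exception {
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(group)
                 .channel(NioSocketChannel.class)
                 // SO_KEEPALIVE asks the OS to send TCP keepalive probes on idle connections
                 .option(ChannelOption.SO_KEEPALIVE, true)
                 .handler(new LoggingHandler(LogLevel.INFO)); // placeholder handler
        Channel channel = bootstrap.connect(host, port).sync().channel();
        channel.closeFuture().sync();
    } finally {
        group.shutdownGracefully();
    }
}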

Usage

From source file:com.grillecube.engine.network.client.ClientNetwork.java

public void start(String host, int port) throws Exception {
    this._workergroup = new NioEventLoopGroup();
    this._bootstrap = new Bootstrap(); // (1)
    this._bootstrap.group(this._workergroup); // (2)
    this._bootstrap.channel(NioSocketChannel.class); // (3)
    this._bootstrap.option(ChannelOption.SO_KEEPALIVE, true); // (4)
    this._bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            ch.pipeline().addLast(new TimeClientHandler());
        }
    });

    // Start the client.
    try {
        this._channel = this._bootstrap.connect(host, port).sync(); // (5)
    } catch (Exception exception) {
        // args are: quiettime, timeout, time unit
        this._workergroup.shutdownGracefully(0, 10, TimeUnit.SECONDS);
        Logger.get().log(Level.ERROR, "Couldnt connect to host: " + host + ":" + port);
        exception.printStackTrace(Logger.get().getPrintStream());

        this._bootstrap = null;
        this._channel = null;
        this._workergroup = null;
    }
}

From source file:com.grillecube.engine.network.server.ServerNetwork.java

public void start() throws Exception {
    this._bossgroup = new NioEventLoopGroup();
    this._workergroup = new NioEventLoopGroup();
    this._bootstrap = new ServerBootstrap();
    this._bootstrap.group(this._bossgroup, this._workergroup);
    this._bootstrap.channel(NioServerSocketChannel.class);
    this._bootstrap.childHandler(this);
    this._bootstrap.option(ChannelOption.SO_BACKLOG, 128);
    this._bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    this._channel = this._bootstrap.bind(this._port).sync(); // (7)
    Logger.get().log(Level.FINE, "Listening on " + this._port);
    this.stop();
}
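
Note that in this server example (and the near-identical one that follows) SO_BACKLOG is set through option(), which applies to the listening parent channel, whereas SO_KEEPALIVE is set through childOption(), so it is applied to every accepted child connection rather than to the server socket itself.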

From source file:com.grillecube.server.network.ServerNetwork.java

public void start() throws Exception {
    this.bossgroup = new NioEventLoopGroup();
    this.workergroup = new NioEventLoopGroup();
    this.bootstrap = new ServerBootstrap();
    this.bootstrap.group(this.bossgroup, this.workergroup);
    this.bootstrap.channel(NioServerSocketChannel.class);
    this.bootstrap.childHandler(this);
    this.bootstrap.option(ChannelOption.SO_BACKLOG, 128);
    this.bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    this.channel = this.bootstrap.bind(this.port).sync(); // (7)
    Logger.get().log(Level.FINE, "Listening on " + this.port);
    this.stop();
}

From source file:com.gxkj.demo.netty.socksproxy.SocksServerConnectHandler.java

License:Apache License

@Override
public void messageReceived(final ChannelHandlerContext ctx, final SocksCmdRequest request) throws Exception {
    Promise<Channel> promise = ctx.executor().newPromise();
    promise.addListener(new GenericFutureListener<Future<Channel>>() {
        @Override
        public void operationComplete(final Future<Channel> future) throws Exception {
            final Channel outboundChannel = future.getNow();
            if (future.isSuccess()) {
                ctx.channel().writeAndFlush(new SocksCmdResponse(SocksCmdStatus.SUCCESS, request.addressType()))
                        .addListener(new ChannelFutureListener() {
                            @Override
                            public void operationComplete(ChannelFuture channelFuture) throws Exception {
                                ctx.pipeline().remove(getName());
                                outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                                ctx.channel().pipeline().addLast(new RelayHandler(outboundChannel));
                            }
                        });
            } else {
                ctx.channel()
                        .writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                SocksServerUtils.closeOnFlush(ctx.channel());
            }
        }
    });

    final Channel inboundChannel = ctx.channel();
    b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
            .handler(new DirectClientInitializer(promise));

    b.connect(request.host(), request.port()).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                // Connection established use handler provided results
            } else {
                // Close the connection if the connection attempt has failed.
                ctx.channel()
                        .writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                SocksServerUtils.closeOnFlush(ctx.channel());
            }
        }
    });
}
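
In this excerpt, b is presumably a Bootstrap held as a field of the handler class (its declaration is outside the snippet). It reuses the inbound channel's event loop to open the outbound connection, and SO_KEEPALIVE is enabled so that a dead outbound peer is eventually detected by TCP keepalive rather than left to relay into a stale connection.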

From source file:com.hazelcast.simulator.protocol.connector.ClientConnector.java

License:Open Source License

private Bootstrap getBootstrap() {
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(group).channel(NioSocketChannel.class)
            .remoteAddress(new InetSocketAddress(remoteHost, remotePort))
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECT_TIMEOUT_MILLIS)
            .option(ChannelOption.SO_KEEPALIVE, true).handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) {
                    pipelineConfigurator.configureClientPipeline(channel.pipeline(), remoteAddress, futureMap);
                }
            });
    return bootstrap;
}

From source file:com.heliosapm.streams.forwarder.HttpJsonMetricForwarder.java

License:Apache License

/**
 * Starts the forwarder
 * @throws Exception thrown on any error
 */
public void start() throws Exception {
    log.info(">>>>> Starting HttpJsonMetricForwarder [{}]....", beanName);
    threadPool = new JMXManagedThreadPool(EXECUTOR_OBJECT_NAME, beanName, workerThreads, workerThreads * 2, 1,
            60000, 100, 99, true);
    eventLoopGroup = new NioEventLoopGroup(workerThreads, (Executor) threadPool);
    outboundHandler = new HttpJsonOutboundHandler(1500, endpointHost, endpointUri);
    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(this)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000) // FIXME: config
            .option(ChannelOption.SO_SNDBUF, 64000) // FIXME: config
            .option(ChannelOption.SO_KEEPALIVE, true);
    senderChannel = bootstrap.connect(endpointHost, endpointPort).sync().channel(); // FIXME: short timeout, reconnect loop
    consumerProperties.put("bootstrap.servers", kafkaBootstrapServers);
    consumerProperties.put("group.id", kafkaGroupId);
    consumerProperties.put("enable.auto.commit", "" + kafkaAutoCommit);
    consumerProperties.put("auto.commit.interval.ms", "" + kafkaAutoCommitInterval);
    consumerProperties.put("session.timeout.ms", "" + 30000);
    consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("value.deserializer", HeliosSerdes.STREAMED_METRIC_VALUE_DESER.getClass().getName());
    subscriberThread = new Thread(this, "KafkaSubscriberThread-" + beanName);
    subscriberThread.setDaemon(true);
    subThreadActive.set(true);
    log.info("[{}] Subscriber Thread Starting...", beanName);
    subscriberThread.start();
    log.info("<<<<< HttpJsonMetricForwarder [{}] Started.", beanName);
}

From source file:com.heliosapm.streams.onramp.OnRampBoot.java

License:Apache License

/**
 * Creates a new OnRampBoot
 * @param appConfig  The application configuration
 */
public OnRampBoot(final Properties appConfig) {
    final String jmxmpUri = ConfigurationHelper.getSystemThenEnvProperty("jmx.jmxmp.uri",
            "jmxmp://0.0.0.0:1893", appConfig);
    JMXHelper.fireUpJMXMPServer(jmxmpUri);
    MessageForwarder.initialize(appConfig);
    port = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.port", 8091, appConfig);
    bindInterface = ConfigurationHelper.getSystemThenEnvProperty("onramp.network.bind", "0.0.0.0", appConfig);
    bindSocket = new InetSocketAddress(bindInterface, port);
    workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.worker_threads", CORES * 2,
            appConfig);
    connectTimeout = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sotimeout", 0, appConfig);
    backlog = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.backlog", 3072, appConfig);
    writeSpins = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.writespins", 16, appConfig);
    recvBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.recbuffer", 43690, appConfig);
    sendBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sendbuffer", 8192, appConfig);
    disableEpoll = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.epoll.disable", false,
            appConfig);
    async = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.async_io", true, appConfig);
    tcpNoDelay = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.tcp_no_delay", true,
            appConfig);
    keepAlive = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.keep_alive", true,
            appConfig);
    reuseAddress = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.reuse_address", true,
            appConfig);
    tcpPipelineFactory = new PipelineFactory(appConfig);
    udpPipelineFactory = new UDPPipelineFactory();
    tcpServerBootstrap.handler(new LoggingHandler(getClass(), LogLevel.INFO));
    tcpServerBootstrap.childHandler(tcpPipelineFactory);
    // Set the child options
    tcpServerBootstrap.childOption(ChannelOption.ALLOCATOR, BufferManager.getInstance().getAllocator());
    tcpServerBootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    tcpServerBootstrap.childOption(ChannelOption.SO_KEEPALIVE, keepAlive);
    tcpServerBootstrap.childOption(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.childOption(ChannelOption.SO_SNDBUF, sendBuffer);
    tcpServerBootstrap.childOption(ChannelOption.WRITE_SPIN_COUNT, writeSpins);
    // Set the server options
    tcpServerBootstrap.option(ChannelOption.SO_BACKLOG, backlog);
    tcpServerBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    tcpServerBootstrap.option(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.option(ChannelOption.SO_TIMEOUT, connectTimeout);

    final StringBuilder tcpUri = new StringBuilder("tcp");
    final StringBuilder udpUri = new StringBuilder("udp");
    if (IS_LINUX && !disableEpoll) {
        bossExecutorThreadFactory = new ExecutorThreadFactory("EpollServerBoss", true);
        bossGroup = new EpollEventLoopGroup(1, (ThreadFactory) bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("EpollServerWorker", true);
        workerGroup = new EpollEventLoopGroup(workerThreads, (ThreadFactory) workerExecutorThreadFactory);
        tcpChannelType = EpollServerSocketChannel.class;
        udpChannelType = EpollDatagramChannel.class;
        tcpUri.append("epoll");
        udpUri.append("epoll");
    } else {
        bossExecutorThreadFactory = new ExecutorThreadFactory("NioServerBoss", true);
        bossGroup = new NioEventLoopGroup(1, bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("NioServerWorker", true);
        workerGroup = new NioEventLoopGroup(workerThreads, workerExecutorThreadFactory);
        tcpChannelType = NioServerSocketChannel.class;
        udpChannelType = NioDatagramChannel.class;
        tcpUri.append("nio");
        udpUri.append("nio");
    }

    tcpUri.append("://").append(bindInterface).append(":").append(port);
    udpUri.append("://").append(bindInterface).append(":").append(port);
    URI u = null;
    try {
        u = new URI(tcpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed TCP server URI const: [{}]. Programmer Error", tcpUri, e);
    }
    tcpServerURI = u;
    try {
        u = new URI(udpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed UDP server URI const: [{}]. Programmer Error", udpUri, e);
    }
    udpServerURI = u;

    log.info(">>>>> Starting OnRamp TCP Listener on [{}]...", tcpServerURI);
    log.info(">>>>> Starting OnRamp UDP Listener on [{}]...", udpServerURI);
    final ChannelFuture cf = tcpServerBootstrap.channel(tcpChannelType).group(bossGroup, workerGroup)
            .bind(bindSocket).awaitUninterruptibly()
            .addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp TCP Listener on [{}] Started", tcpServerURI);
                };
            }).awaitUninterruptibly();
    final ChannelFuture ucf = udpBootstrap.channel(udpChannelType).group(workerGroup)
            .option(ChannelOption.SO_BROADCAST, true).handler(new UDPPipelineFactory()).bind(bindSocket)
            .awaitUninterruptibly().addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp UDP Listener on [{}] Started", udpServerURI);
                };
            }).awaitUninterruptibly();

    tcpServerChannel = cf.channel();
    udpServerChannel = ucf.channel();
    tcpCloseFuture = tcpServerChannel.closeFuture();
    udpCloseFuture = udpServerChannel.closeFuture();
    Runtime.getRuntime().addShutdownHook(shutdownHook);

}

From source file:com.hop.hhxx.example.http2.helloworld.client.Http2Client.java

License:Apache License

public static void main(String[] args) throws Exception {
    // Configure SSL.
    final SslContext sslCtx;
    if (SSL) {
        SslProvider provider = OpenSsl.isAlpnSupported() ? SslProvider.OPENSSL : SslProvider.JDK;
        sslCtx = SslContextBuilder.forClient().sslProvider(provider)
                /* NOTE: the cipher filter may not include all ciphers required by the HTTP/2 specification.
                 * Please refer to the HTTP/2 specification for cipher requirements. */
                .ciphers(Http2SecurityUtil.CIPHERS, SupportedCipherSuiteFilter.INSTANCE)
                .trustManager(InsecureTrustManagerFactory.INSTANCE)
                .applicationProtocolConfig(new ApplicationProtocolConfig(Protocol.ALPN,
                        // NO_ADVERTISE is currently the only mode supported by both OpenSsl and JDK providers.
                        SelectorFailureBehavior.NO_ADVERTISE,
                        // ACCEPT is currently the only mode supported by both OpenSsl and JDK providers.
                        SelectedListenerFailureBehavior.ACCEPT, ApplicationProtocolNames.HTTP_2,
                        ApplicationProtocolNames.HTTP_1_1))
                .build();
    } else {
        sslCtx = null;
    }

    EventLoopGroup workerGroup = new NioEventLoopGroup();
    Http2ClientInitializer initializer = new Http2ClientInitializer(sslCtx, Integer.MAX_VALUE);

    try {
        // Configure the client.
        Bootstrap b = new Bootstrap();
        b.group(workerGroup);
        b.channel(NioSocketChannel.class);
        b.option(ChannelOption.SO_KEEPALIVE, true);
        b.remoteAddress(HOST, PORT);
        b.handler(initializer);

        // Start the client.
        Channel channel = b.connect().syncUninterruptibly().channel();
        System.out.println("Connected to [" + HOST + ':' + PORT + ']');

        // Wait for the HTTP/2 upgrade to occur.
        Http2SettingsHandler http2SettingsHandler = initializer.settingsHandler();
        http2SettingsHandler.awaitSettings(5, TimeUnit.SECONDS);

        HttpResponseHandler responseHandler = initializer.responseHandler();
        int streamId = 3;
        HttpScheme scheme = SSL ? HttpScheme.HTTPS : HttpScheme.HTTP;
        AsciiString hostName = new AsciiString(HOST + ':' + PORT);
        System.err.println("Sending request(s)...");
        if (URL != null) {
            // Create a simple GET request.
            FullHttpRequest request = new DefaultFullHttpRequest(HTTP_1_1, GET, URL);
            request.headers().add(HttpHeaderNames.HOST, hostName);
            request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), scheme.name());
            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.DEFLATE);
            responseHandler.put(streamId, channel.writeAndFlush(request), channel.newPromise());
            streamId += 2;
        }
        if (URL2 != null) {
            // Create a simple POST request with a body.
            FullHttpRequest request = new DefaultFullHttpRequest(HTTP_1_1, POST, URL2,
                    Unpooled.copiedBuffer(URL2DATA.getBytes(CharsetUtil.UTF_8)));
            request.headers().add(HttpHeaderNames.HOST, hostName);
            request.headers().add(HttpConversionUtil.ExtensionHeaderNames.SCHEME.text(), scheme.name());
            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.GZIP);
            request.headers().add(HttpHeaderNames.ACCEPT_ENCODING, HttpHeaderValues.DEFLATE);
            responseHandler.put(streamId, channel.writeAndFlush(request), channel.newPromise());
            streamId += 2;
        }
        responseHandler.awaitResponses(5, TimeUnit.SECONDS);
        System.out.println("Finished HTTP/2 request(s)");

        // Wait until the connection is closed.
        channel.close().syncUninterruptibly();
    } finally {
        workerGroup.shutdownGracefully();
    }
}
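
Note that in this HTTP/2 client, SO_KEEPALIVE enables the TCP-level keepalive socket option on the connection; it is unrelated to HTTP keep-alive headers or HTTP/2 PING frames, which operate at the protocol layer.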

From source file:com.hop.hhxx.example.socksproxy.SocksServerConnectHandler.java

License:Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksMessage message) throws Exception {
    if (message instanceof Socks4CommandRequest) {
        final Socks4CommandRequest request = (Socks4CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks4CommandResponse(Socks4CommandStatus.SUCCESS));

                    responseFuture.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(ctx.channel()));
                            ctx.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new io.netty.example.socksproxy.DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established use handler provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else if (message instanceof Socks5CommandRequest) {
        final Socks5CommandRequest request = (Socks5CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(
                            Socks5CommandStatus.SUCCESS, request.dstAddrType()));

                    responseFuture.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(ctx.channel()));
                            ctx.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established use handler provided results
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else {
        ctx.close();
    }
}

From source file:com.ibasco.agql.protocols.valve.source.query.SourceRconMessenger.java

License:Open Source License

@Override
protected Transport<SourceRconRequest> createTransportService() {
    NettyPooledTcpTransport<SourceRconRequest> transport = new NettyPooledTcpTransport<>(ChannelType.NIO_TCP);
    transport.setChannelInitializer(new SourceRconChannelInitializer(this));
    transport.addChannelOption(ChannelOption.SO_SNDBUF, 1048576 * 4);
    transport.addChannelOption(ChannelOption.SO_RCVBUF, 1048576 * 4);
    transport.addChannelOption(ChannelOption.SO_KEEPALIVE, true);
    transport.addChannelOption(ChannelOption.TCP_NODELAY, true);
    return transport;
}