Example usage for io.netty.channel ChannelOption SO_SNDBUF


Introduction

This page lists example usages of io.netty.channel ChannelOption SO_SNDBUF, the channel option that requests a send buffer size (in bytes) for a channel's underlying socket.

Prototype

ChannelOption<Integer> SO_SNDBUF
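
All of the examples below pass an int buffer size to this option, either on a client Bootstrap, as a ServerBootstrap child option, or through a transport wrapper. A minimal, self-contained sketch follows; the 64 KiB size, ephemeral server port, and empty channel initializers are illustrative assumptions rather than values taken from the examples on this page.

import java.net.InetSocketAddress;

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class SndBufExample {
    public static void main(String[] args) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            // Server side: SO_SNDBUF applies to the accepted (child) channels, so use childOption().
            ServerBootstrap server = new ServerBootstrap().group(group).channel(NioServerSocketChannel.class)
                    .childOption(ChannelOption.SO_SNDBUF, 64 * 1024)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) { /* add handlers here */ }
                    });
            Channel serverChannel = server.bind(0).sync().channel();
            int port = ((InetSocketAddress) serverChannel.localAddress()).getPort();

            // Client side: request a 64 KiB send buffer for the outbound connection.
            Bootstrap client = new Bootstrap().group(group).channel(NioSocketChannel.class)
                    .option(ChannelOption.SO_SNDBUF, 64 * 1024)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) { /* add handlers here */ }
                    });
            client.connect("127.0.0.1", port).sync().channel().close().sync();
            serverChannel.close().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}

The value is only a request: the operating system may round or cap it (on Linux, for instance, at net.core.wmem_max).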


Usage

From source file:com.googlecode.protobuf.pro.duplex.example.simple.TimeoutTestingClient.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("usage: <serverHostname> <serverPort> <clientHostname> <clientPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    String clientHostname = args[2];
    int clientPort = Integer.parseInt(args[3]);

    PeerInfo client = new PeerInfo(clientHostname, clientPort);
    PeerInfo server = new PeerInfo(serverHostname, serverPort);

    try {
        DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();
        // force the use of a local port
        // - normally you don't need this
        clientFactory.setClientInfo(client);

        ExtensionRegistry r = ExtensionRegistry.newInstance();
        PingPong.registerAllExtensions(r);
        clientFactory.setExtensionRegistry(r);

        clientFactory.setConnectResponseTimeoutMillis(10000);
        RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(3, 10);
        clientFactory.setRpcServerCallExecutor(rpcExecutor);

        // RPC payloads are uncompressed when logged - so reduce logging
        CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
        logger.setLogRequestProto(false);
        logger.setLogResponseProto(false);
        clientFactory.setRpcLogger(logger);

        // Set up the event pipeline factory.
        // Set up an RPC event listener - it just logs what happens
        RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

        final RpcConnectionEventListener listener = new RpcConnectionEventListener() {

            @Override
            public void connectionReestablished(RpcClientChannel clientChannel) {
                log.info("connectionReestablished " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionOpened(RpcClientChannel clientChannel) {
                log.info("connectionOpened " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionLost(RpcClientChannel clientChannel) {
                log.info("connectionLost " + clientChannel);
            }

            @Override
            public void connectionChanged(RpcClientChannel clientChannel) {
                log.info("connectionChanged " + clientChannel);
                channel = clientChannel;
            }
        };
        rpcEventNotifier.addEventListener(listener);
        clientFactory.registerConnectionEventListener(rpcEventNotifier);

        // Configure the client to provide a Pong Service in both blocking and non-blocking varieties
        BlockingService bPongService = BlockingPongService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(bPongService);

        Service nbPongService = NonBlockingPongService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPongService);

        // We give the client a blocking and a non-blocking (pong-capable) Ping Service
        BlockingService bPingService = BlockingPingService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(bPingService);

        Service nbPingService = NonBlockingPingService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPingService);

        Bootstrap bootstrap = new Bootstrap();
        EventLoopGroup workers = new NioEventLoopGroup(16,
                new RenamingThreadFactoryProxy("workers", Executors.defaultThreadFactory()));

        bootstrap.group(workers);
        bootstrap.handler(clientFactory);
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
        bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
        bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

        RpcClientConnectionWatchdog watchdog = new RpcClientConnectionWatchdog(clientFactory, bootstrap);
        rpcEventNotifier.addEventListener(watchdog);
        watchdog.start();

        CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
        shutdownHandler.addResource(workers);
        shutdownHandler.addResource(rpcExecutor);

        clientFactory.peerWith(server, bootstrap);

        while (channel != null) {
            callNonBlockingServerPingWithBlockingClientTimeoutBeforeFinish();
            Thread.sleep(10000);
        }

    } catch (Exception e) {
        log.warn("Failure.", e);
    } finally {
        System.exit(0);
    }
}

From source file:com.heliosapm.streams.forwarder.HttpJsonMetricForwarder.java

License:Apache License

/**
 * Starts the forwarder.
 * @throws Exception thrown on any error
 */
public void start() throws Exception {
    log.info(">>>>> Starting HttpJsonMetricForwarder [{}]....", beanName);
    threadPool = new JMXManagedThreadPool(EXECUTOR_OBJECT_NAME, beanName, workerThreads, workerThreads * 2, 1,
            60000, 100, 99, true);
    eventLoopGroup = new NioEventLoopGroup(workerThreads, (Executor) threadPool);
    outboundHandler = new HttpJsonOutboundHandler(1500, endpointHost, endpointUri);
    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(this)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000) // FIXME: config
            .option(ChannelOption.SO_SNDBUF, 64000) // FIXME: config
            .option(ChannelOption.SO_KEEPALIVE, true);
    senderChannel = bootstrap.connect(endpointHost, endpointPort).sync().channel(); // FIXME: short timeout, reconnect loop
    consumerProperties.put("bootstrap.servers", kafkaBootstrapServers);
    consumerProperties.put("group.id", kafkaGroupId);
    consumerProperties.put("enable.auto.commit", "" + kafkaAutoCommit);
    consumerProperties.put("auto.commit.interval.ms", "" + kafkaAutoCommitInterval);
    consumerProperties.put("session.timeout.ms", "" + 30000);
    consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("value.deserializer", HeliosSerdes.STREAMED_METRIC_VALUE_DESER.getClass().getName());
    subscriberThread = new Thread(this, "KafkaSubscriberThread-" + beanName);
    subscriberThread.setDaemon(true);
    subThreadActive.set(true);
    log.info("[{}] Subscriber Thread Starting...", beanName);
    subscriberThread.start();
    log.info("<<<<< HttpJsonMetricForwarder [{}] Started.", beanName);
}

From source file:com.heliosapm.streams.onramp.OnRampBoot.java

License:Apache License

/**
 * Creates a new OnRampBoot.
 * @param appConfig  The application configuration
 */
public OnRampBoot(final Properties appConfig) {
    final String jmxmpUri = ConfigurationHelper.getSystemThenEnvProperty("jmx.jmxmp.uri",
            "jmxmp://0.0.0.0:1893", appConfig);
    JMXHelper.fireUpJMXMPServer(jmxmpUri);
    MessageForwarder.initialize(appConfig);
    port = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.port", 8091, appConfig);
    bindInterface = ConfigurationHelper.getSystemThenEnvProperty("onramp.network.bind", "0.0.0.0", appConfig);
    bindSocket = new InetSocketAddress(bindInterface, port);
    workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.worker_threads", CORES * 2,
            appConfig);
    connectTimeout = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sotimeout", 0, appConfig);
    backlog = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.backlog", 3072, appConfig);
    writeSpins = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.writespins", 16, appConfig);
    recvBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.recbuffer", 43690, appConfig);
    sendBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sendbuffer", 8192, appConfig);
    disableEpoll = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.epoll.disable", false,
            appConfig);
    async = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.async_io", true, appConfig);
    tcpNoDelay = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.tcp_no_delay", true,
            appConfig);
    keepAlive = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.keep_alive", true,
            appConfig);
    reuseAddress = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.reuse_address", true,
            appConfig);
    tcpPipelineFactory = new PipelineFactory(appConfig);
    udpPipelineFactory = new UDPPipelineFactory();
    tcpServerBootstrap.handler(new LoggingHandler(getClass(), LogLevel.INFO));
    tcpServerBootstrap.childHandler(tcpPipelineFactory);
    // Set the child options
    tcpServerBootstrap.childOption(ChannelOption.ALLOCATOR, BufferManager.getInstance().getAllocator());
    tcpServerBootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    tcpServerBootstrap.childOption(ChannelOption.SO_KEEPALIVE, keepAlive);
    tcpServerBootstrap.childOption(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.childOption(ChannelOption.SO_SNDBUF, sendBuffer);
    tcpServerBootstrap.childOption(ChannelOption.WRITE_SPIN_COUNT, writeSpins);
    // Set the server options
    tcpServerBootstrap.option(ChannelOption.SO_BACKLOG, backlog);
    tcpServerBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    tcpServerBootstrap.option(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.option(ChannelOption.SO_TIMEOUT, connectTimeout);

    final StringBuilder tcpUri = new StringBuilder("tcp");
    final StringBuilder udpUri = new StringBuilder("udp");
    if (IS_LINUX && !disableEpoll) {
        bossExecutorThreadFactory = new ExecutorThreadFactory("EpollServerBoss", true);
        bossGroup = new EpollEventLoopGroup(1, (ThreadFactory) bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("EpollServerWorker", true);
        workerGroup = new EpollEventLoopGroup(workerThreads, (ThreadFactory) workerExecutorThreadFactory);
        tcpChannelType = EpollServerSocketChannel.class;
        udpChannelType = EpollDatagramChannel.class;
        tcpUri.append("epoll");
        udpUri.append("epoll");
    } else {
        bossExecutorThreadFactory = new ExecutorThreadFactory("NioServerBoss", true);
        bossGroup = new NioEventLoopGroup(1, bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("NioServerWorker", true);
        workerGroup = new NioEventLoopGroup(workerThreads, workerExecutorThreadFactory);
        tcpChannelType = NioServerSocketChannel.class;
        udpChannelType = NioDatagramChannel.class;
        tcpUri.append("nio");
        udpUri.append("nio");
    }

    tcpUri.append("://").append(bindInterface).append(":").append(port);
    udpUri.append("://").append(bindInterface).append(":").append(port);
    URI u = null;
    try {
        u = new URI(tcpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed TCP server URI const: [{}]. Programmer Error", tcpUri, e);
    }
    tcpServerURI = u;
    try {
        u = new URI(udpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed UDP server URI const: [{}]. Programmer Error", udpUri, e);
    }
    udpServerURI = u;

    log.info(">>>>> Starting OnRamp TCP Listener on [{}]...", tcpServerURI);
    log.info(">>>>> Starting OnRamp UDP Listener on [{}]...", udpServerURI);
    final ChannelFuture cf = tcpServerBootstrap.channel(tcpChannelType).group(bossGroup, workerGroup)
            .bind(bindSocket).awaitUninterruptibly()
            .addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp TCP Listener on [{}] Started", tcpServerURI);
                };
            }).awaitUninterruptibly();
    final ChannelFuture ucf = udpBootstrap.channel(udpChannelType).group(workerGroup)
            .option(ChannelOption.SO_BROADCAST, true).handler(new UDPPipelineFactory()).bind(bindSocket)
            .awaitUninterruptibly().addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp UDP Listener on [{}] Started", udpServerURI);
                };
            }).awaitUninterruptibly();

    tcpServerChannel = cf.channel();
    udpServerChannel = ucf.channel();
    tcpCloseFuture = tcpServerChannel.closeFuture();
    udpCloseFuture = udpServerChannel.closeFuture();
    Runtime.getRuntime().addShutdownHook(shutdownHook);

}

From source file:com.ibasco.agql.protocols.valve.source.query.SourceQueryMessenger.java

License:Open Source License

@Override
protected Transport<SourceServerRequest> createTransportService() {
    NettyPooledUdpTransport<SourceServerRequest> transport = new NettyPooledUdpTransport<>(ChannelType.NIO_UDP);
    transport.setChannelInitializer(new SourceQueryChannelInitializer(this));
    transport.addChannelOption(ChannelOption.SO_SNDBUF, 1048576);
    transport.addChannelOption(ChannelOption.SO_RCVBUF, 1048576);
    transport.addChannelOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(1400));
    return transport;
}

From source file:com.ibasco.agql.protocols.valve.source.query.SourceRconMessenger.java

License:Open Source License

@Override
protected Transport<SourceRconRequest> createTransportService() {
    NettyPooledTcpTransport<SourceRconRequest> transport = new NettyPooledTcpTransport<>(ChannelType.NIO_TCP);
    transport.setChannelInitializer(new SourceRconChannelInitializer(this));
    transport.addChannelOption(ChannelOption.SO_SNDBUF, 1048576 * 4);
    transport.addChannelOption(ChannelOption.SO_RCVBUF, 1048576 * 4);
    transport.addChannelOption(ChannelOption.SO_KEEPALIVE, true);
    transport.addChannelOption(ChannelOption.TCP_NODELAY, true);
    return transport;
}

From source file:com.ibasco.agql.protocols.valve.steam.master.MasterServerMessenger.java

License:Open Source License

@Override
protected Transport<MasterServerRequest> createTransportService() {
    NettyPooledUdpTransport<MasterServerRequest> transport = new NettyPooledUdpTransport<>(ChannelType.NIO_UDP);
    //Set our channel initializer
    transport.setChannelInitializer(new MasterServerChannelInitializer(this));
    //Channel Options
    transport.addChannelOption(ChannelOption.SO_SNDBUF, 1048576);
    transport.addChannelOption(ChannelOption.SO_RCVBUF, 1048576 * 8);
    return transport;
}

From source file:com.ict.dtube.remoting.netty.NettyRemotingClient.java

License:Apache License

@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(//
            nettyClientConfig.getClientWorkerThreads(), //
            new ThreadFactory() {

                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyClientWorkerThread_" + this.threadIndex.incrementAndGet());
                }
            });

    Bootstrap handler = this.bootstrap.group(this.eventLoopGroupWorker).channel(NioSocketChannel.class)//
            //
            .option(ChannelOption.TCP_NODELAY, true)
            //
            .option(ChannelOption.SO_SNDBUF, NettySystemConfig.SocketSndbufSize)
            //
            .option(ChannelOption.SO_RCVBUF, NettySystemConfig.SocketRcvbufSize)
            //
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(//
                            defaultEventExecutorGroup, //
                            new NettyEncoder(), //
                            new NettyDecoder(), //
                            new IdleStateHandler(0, 0, nettyClientConfig.getClientChannelMaxIdleTimeSeconds()), //
                            new NettyConnetManageHandler(), //
                            new NettyClientHandler());
                }
            });

    // Scan the response table for timed-out requests once per second (after a 3 second delay)
    this.timer.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            try {
                NettyRemotingClient.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }
}

From source file:com.ict.dtube.remoting.netty.NettyRemotingServer.java

License:Apache License

@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(//
            nettyServerConfig.getServerWorkerThreads(), //
            new ThreadFactory() {

                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyServerWorkerThread_" + this.threadIndex.incrementAndGet());
                }
            });

    ServerBootstrap childHandler = //
            this.serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupWorker)
                    .channel(NioServerSocketChannel.class)
                    //
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    //
                    .option(ChannelOption.SO_REUSEADDR, true)
                    //
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    //
                    .childOption(ChannelOption.SO_SNDBUF, NettySystemConfig.SocketSndbufSize)
                    //
                    .childOption(ChannelOption.SO_RCVBUF, NettySystemConfig.SocketRcvbufSize)

                    .localAddress(new InetSocketAddress(this.nettyServerConfig.getListenPort()))
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        public void initChannel(SocketChannel ch) throws Exception {
                            ch.pipeline().addLast(
                                    //
                                    defaultEventExecutorGroup, //
                                    new NettyEncoder(), //
                                    new NettyDecoder(), //
                                    new IdleStateHandler(0, 0,
                                            nettyServerConfig.getServerChannelMaxIdleTimeSeconds()), //
                                    new NettyConnetManageHandler(), //
                                    new NettyServerHandler());
                        }
                    });

    if (NettySystemConfig.NettyPooledByteBufAllocatorEnable) {
        // Use the pooled ByteBuf allocator for the accepted child channels
        childHandler.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }

    try {
        ChannelFuture sync = this.serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        this.port = addr.getPort();
    } catch (InterruptedException e1) {
        throw new RuntimeException("this.serverBootstrap.bind().sync() InterruptedException", e1);
    }

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }

    // Scan the response table for timed-out requests once per second (after a 3 second delay)
    this.timer.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
}

From source file:com.jfastnet.peers.netty.KryoNettyPeer.java

License:Apache License

@Override
public boolean start() {
    group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioDatagramChannel.class).option(ChannelOption.SO_BROADCAST, true)
                .option(ChannelOption.SO_SNDBUF, config.socketSendBufferSize)
                .option(ChannelOption.SO_RCVBUF, config.socketReceiveBufferSize)
                .option(ChannelOption.RCVBUF_ALLOCATOR,
                        new FixedRecvByteBufAllocator(config.receiveBufferAllocator))
                .handler(channelHandler != null ? channelHandler : new UdpHandler());

        channel = b.bind(config.bindPort).sync().channel();

    } catch (Exception e) {
        log.error("Couldn't start server.", e);
        return false;
    }
    return true;
}

From source file:com.look.remoting.netty.NettyRemotingClient.java

License:Apache License

@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(//
            nettyClientConfig.getClientWorkerThreads(), //
            new ThreadFactory() {

                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyClientWorkerThread_" + this.threadIndex.incrementAndGet());
                }
            });

    Bootstrap handler = this.bootstrap.group(this.eventLoopGroupWorker).channel(NioSocketChannel.class)//
            //
            .option(ChannelOption.TCP_NODELAY, true)
            //
            .option(ChannelOption.SO_KEEPALIVE, false)
            //
            .option(ChannelOption.SO_SNDBUF, nettyClientConfig.getClientSocketSndBufSize())
            //
            .option(ChannelOption.SO_RCVBUF, nettyClientConfig.getClientSocketRcvBufSize())
            //
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(//
                            defaultEventExecutorGroup, //
                            new NettyEncoder(), //
                            new NettyDecoder(), //
                            new IdleStateHandler(0, 0, nettyClientConfig.getClientChannelMaxIdleTimeSeconds()), //
                            new NettyConnetManageHandler(), //
                            new NettyClientHandler());
                }
            });

    this.timer.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            try {
                NettyRemotingClient.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }
}