List of usage examples for io.netty.channel ChannelOption SO_RCVBUF
ChannelOption SO_RCVBUF
The examples below, collected from several open source projects, show how this option is set on Netty Bootstrap and ServerBootstrap instances to control the size of the socket receive buffer.
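Before the project code, here is a minimal, self-contained sketch of the option in isolation. It is not taken from any of the projects listed; the class name, the 64 KB buffer size, and port 8080 are arbitrary illustrative values. option(...) sets SO_RCVBUF on the listening (parent) channel, while childOption(...) applies it to each accepted connection.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class RcvbufExample {
    public static void main(String[] args) throws Exception {
        EventLoopGroup boss = new NioEventLoopGroup(1);
        EventLoopGroup worker = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(boss, worker)
                    .channel(NioServerSocketChannel.class)
                    // SO_RCVBUF on the parent channel sizes the listen socket's receive buffer
                    .option(ChannelOption.SO_RCVBUF, 64 * 1024)
                    // childOption applies the same setting to every accepted connection
                    .childOption(ChannelOption.SO_RCVBUF, 64 * 1024)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // add application handlers to ch.pipeline() here
                        }
                    });
            b.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}

Note that SO_RCVBUF is only a hint to the operating system; the kernel may round or cap the value it actually applies.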
From source file: org.wso2.carbon.transport.http.netty.listener.ServerConnectorBootstrap.java
License: Open Source License

public void addSocketConfiguration(ServerBootstrapConfiguration serverBootstrapConfiguration) {
    // Set other serverBootstrap parameters
    serverBootstrap.option(ChannelOption.SO_BACKLOG, serverBootstrapConfiguration.getSoBackLog());
    serverBootstrap.childOption(ChannelOption.TCP_NODELAY, serverBootstrapConfiguration.isTcpNoDelay());
    serverBootstrap.option(ChannelOption.SO_KEEPALIVE, serverBootstrapConfiguration.isKeepAlive());
    serverBootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, serverBootstrapConfiguration.getConnectTimeOut());
    serverBootstrap.option(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());
    serverBootstrap.option(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReceiveBufferSize());
    serverBootstrap.childOption(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReceiveBufferSize());
    serverBootstrap.childOption(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());

    log.debug("Netty Server Socket BACKLOG " + serverBootstrapConfiguration.getSoBackLog());
    log.debug("Netty Server Socket TCP_NODELAY " + serverBootstrapConfiguration.isTcpNoDelay());
    log.debug("Netty Server Socket SO_KEEPALIVE " + serverBootstrapConfiguration.isKeepAlive());
    log.debug("Netty Server Socket CONNECT_TIMEOUT_MILLIS " + serverBootstrapConfiguration.getConnectTimeOut());
    log.debug("Netty Server Socket SO_SNDBUF " + serverBootstrapConfiguration.getSendBufferSize());
    log.debug("Netty Server Socket SO_RCVBUF " + serverBootstrapConfiguration.getReceiveBufferSize());
    log.debug("Netty Server Socket SO_RCVBUF " + serverBootstrapConfiguration.getReceiveBufferSize());
    log.debug("Netty Server Socket SO_SNDBUF " + serverBootstrapConfiguration.getSendBufferSize());
}
From source file: org.wso2.carbon.transport.http.netty.listener.ServerConnectorController.java
License: Open Source License

public void start() {
    Set<TransportProperty> transportPropertiesSet = transportsConfiguration.getTransportProperties();
    Map<String, Object> transportProperties = new HashMap<>();
    if (transportPropertiesSet != null && !transportPropertiesSet.isEmpty()) {
        transportProperties = transportPropertiesSet.stream()
                .collect(Collectors.toMap(TransportProperty::getName, TransportProperty::getValue));
    }

    // Create Bootstrap Configuration from listener parameters
    ServerBootstrapConfiguration.createBootStrapConfiguration(transportProperties);
    ServerBootstrapConfiguration serverBootstrapConfiguration = ServerBootstrapConfiguration.getInstance();

    // Create Boss Group - boss group is for accepting channels
    EventLoopGroup bossGroup = HTTPTransportContextHolder.getInstance().getBossGroup();
    if (bossGroup == null) {
        int bossGroupSize = Util.getIntProperty(transportProperties, Constants.SERVER_BOOTSTRAP_BOSS_GROUP_SIZE,
                Runtime.getRuntime().availableProcessors());
        bossGroup = new NioEventLoopGroup(bossGroupSize);
        HTTPTransportContextHolder.getInstance().setBossGroup(bossGroup);
    }

    // Create Worker Group - worker group is for processing IO
    EventLoopGroup workerGroup = HTTPTransportContextHolder.getInstance().getWorkerGroup();
    if (workerGroup == null) {
        int workerGroupSize = Util.getIntProperty(transportProperties, Constants.SERVER_BOOTSTRAP_WORKER_GROUP_SIZE,
                Runtime.getRuntime().availableProcessors() * 2);
        workerGroup = new NioEventLoopGroup(workerGroupSize);
        HTTPTransportContextHolder.getInstance().setWorkerGroup(workerGroup);
    }

    // Set Handler Executor
    HTTPTransportContextHolder.getInstance().setHandlerExecutor(new HandlerExecutor());

    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class);

    // Register Channel initializer
    handler = new HTTPServerChannelInitializer();
    handler.setupConnectionManager(transportProperties);
    bootstrap.childHandler(handler);

    int bufferSize = Util.getIntProperty(transportProperties, Constants.OUTPUT_CONTENT_BUFFER_SIZE, 0);
    if (bufferSize != 0) {
        BufferFactory.createInstance(bufferSize);
    }

    // Set other bootstrap parameters
    bootstrap.option(ChannelOption.SO_BACKLOG, serverBootstrapConfiguration.getSoBackLog());
    log.debug("Netty Server Socket BACKLOG " + serverBootstrapConfiguration.getSoBackLog());
    bootstrap.childOption(ChannelOption.TCP_NODELAY, serverBootstrapConfiguration.isTcpNoDelay());
    log.debug("Netty Server Socket TCP_NODELAY " + serverBootstrapConfiguration.isTcpNoDelay());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, serverBootstrapConfiguration.isKeepAlive());
    log.debug("Netty Server Socket SO_KEEPALIVE " + serverBootstrapConfiguration.isKeepAlive());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, serverBootstrapConfiguration.getConnectTimeOut());
    log.debug("Netty Server Socket CONNECT_TIMEOUT_MILLIS " + serverBootstrapConfiguration.getConnectTimeOut());
    bootstrap.option(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());
    log.debug("Netty Server Socket SO_SNDBUF " + serverBootstrapConfiguration.getSendBufferSize());
    bootstrap.option(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReceiveBufferSize());
    log.debug("Netty Server Socket SO_RCVBUF " + serverBootstrapConfiguration.getReceiveBufferSize());
    bootstrap.childOption(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReceiveBufferSize());
    log.debug("Netty Server Socket SO_RCVBUF " + serverBootstrapConfiguration.getReceiveBufferSize());
    bootstrap.childOption(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());
    log.debug("Netty Server Socket SO_SNDBUF " + serverBootstrapConfiguration.getSendBufferSize());

    initialized = true;
}
From source file: p2p_server.P2p_server.java

public void run() throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    List<ChannelFuture> futures = new ArrayList<>();
    SelfSignedCertificate ssc = new SelfSignedCertificate();
    SslContext sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build();
    try {
        ServerBootstrap appboot = new ServerBootstrap();
        appboot.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 8192).childHandler(new AppChildChannelHandler(sslCtx));
        appboot.option(ChannelOption.SO_REUSEADDR, true);
        appboot.option(ChannelOption.TCP_NODELAY, true);
        appboot.childOption(ChannelOption.SO_KEEPALIVE, true);
        appboot.childOption(ChannelOption.SO_RCVBUF, 512);
        appboot.childOption(ChannelOption.SO_SNDBUF, 512);

        ServerBootstrap devboot = new ServerBootstrap();
        devboot.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 8192).childHandler(new DevChildChannelHandler(sslCtx));
        devboot.option(ChannelOption.SO_REUSEADDR, true);
        devboot.option(ChannelOption.TCP_NODELAY, true);
        devboot.childOption(ChannelOption.SO_KEEPALIVE, true);
        devboot.childOption(ChannelOption.SO_RCVBUF, 512);
        devboot.childOption(ChannelOption.SO_SNDBUF, 512);

        // ChannelFuture f = boostrap.bind(port).sync();
        futures.add(devboot.bind(5560));
        futures.add(appboot.bind(5561));
        for (ChannelFuture f : futures) {
            f.sync();
        }
        for (ChannelFuture f : futures) {
            f.channel().closeFuture().sync();
        }
        // f.channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file: picoview.collectd.CollectClient.java
License: Apache License

@Override
public void initialize() throws RuntimeException {
    NetworkInterface nic;
    try {
        if (StringUtils.isBlank(nicName)) {
            nic = NetworkInterface.getByIndex(0);
        } else {
            nic = NetworkInterface.getByName(nicName);
        }
    } catch (SocketException exep) {
        throw new RuntimeException("unable to determine network interface to use", exep);
    }

    Bootstrap bs = new Bootstrap();
    bs.option(ChannelOption.SO_BROADCAST, true);
    bs.option(ChannelOption.SO_REUSEADDR, true);
    bs.option(ChannelOption.IP_MULTICAST_LOOP_DISABLED, false);
    bs.option(ChannelOption.SO_RCVBUF, 2048);
    bs.option(ChannelOption.IP_MULTICAST_TTL, 255);
    bs.group(new NioEventLoopGroup());
    bs.channelFactory(new ChannelFactory<Channel>() {
        public Channel newChannel() {
            return new NioDatagramChannel(InternetProtocolFamily.IPv4);
        }
    });
    bs.handler(new ChannelInitializer<DatagramChannel>() {
        @Override
        public void initChannel(DatagramChannel channel) throws Exception {
            channel.pipeline().addLast(new CollectChannelHandler());
        }
    });

    if (StringUtils.isBlank(multicastHost)) {
        multicastHost = "239.192.74.66";
    }
    if (multicastPort <= 0) {
        multicastPort = 25826;
    }

    try {
        DatagramChannel dch = (DatagramChannel) bs.bind(multicastPort).sync().channel();
        ChannelFuture cf = dch.joinGroup(new InetSocketAddress(multicastHost, multicastPort), nic).sync();
        if (!cf.isSuccess()) {
            throw new RuntimeException("unable to join multicast group");
        }
    } catch (InterruptedException exep) {
        throw new RuntimeException("unable to setup network for collect client", exep);
    }
}
From source file: playground.gregor.vis.CASimVisClient.java
License: Open Source License

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.out.println("usage: VisClient <server> <port>");
        System.exit(-1);
    }
    String serv = args[0];
    int port = Integer.parseInt(args[1]);

    PeerInfo server = new PeerInfo(serv, port);
    DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();

    // in case client acts as server, which is the reason behind a duplex connection indeed.
    RpcServerCallExecutor executor = new ThreadPoolCallExecutor(3, 100);
    clientFactory.setRpcServerCallExecutor(executor);
    clientFactory.setConnectResponseTimeoutMillis(1000);
    // clientFactory.getRpcServiceRegistry().registerService();

    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(new NioEventLoopGroup());
    bootstrap.handler(clientFactory);
    bootstrap.channel(NioSocketChannel.class);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

    RpcClientChannel channel = clientFactory.peerWith(server, bootstrap);
    BlockingInterface visService = ProtoFrame.FrameServerService.newBlockingStub(channel);
    RpcController cntr = channel.newRpcController();
    clientFactory.getRpcServiceRegistry().registerService(
            ProtoFrame.FrameServerService.newReflectiveBlockingService(new BlockingVisServiceImpl(null, null)));

    new CASimVisClient(visService, cntr).run();

    // channel.close();
    // executor.shutdown();
    // System.exit(0);
}
From source file: playground.gregor.vis.VisServer.java
License: Open Source License

private void initServer() {
    PeerInfo server = new PeerInfo("localhost", 9090);
    RpcServerCallExecutor executor = new ThreadPoolCallExecutor(3, 200);

    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(server);
    serverFactory.setRpcServerCallExecutor(executor);

    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(
            new NioEventLoopGroup(0, new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory())),
            new NioEventLoopGroup(0, new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory())));
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(server.getPort());
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);

    serverFactory.getRpcServiceRegistry()
            .registerService(ProtoFrame.FrameServerService.newReflectiveBlockingService(this.service));

    bootstrap.bind();
}
From source file: qunar.tc.qmq.netty.client.AbstractNettyClient.java
License: Apache License

public synchronized void start(NettyClientConfig config) {
    if (started.get()) {
        return;
    }
    initHandler();
    Bootstrap bootstrap = new Bootstrap();
    eventLoopGroup = new NioEventLoopGroup(1, new DefaultThreadFactory(clientName + "-boss"));
    eventExecutors = new DefaultEventExecutorGroup(config.getClientWorkerThreads(),
            new DefaultThreadFactory(clientName + "-worker"));
    connectManager = new NettyConnectManageHandler(bootstrap, config.getConnectTimeoutMillis());
    bootstrap.group(this.eventLoopGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_KEEPALIVE, false)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeoutMillis())
            .option(ChannelOption.SO_SNDBUF, config.getClientSocketSndBufSize())
            .option(ChannelOption.SO_RCVBUF, config.getClientSocketRcvBufSize())
            .handler(newChannelInitializer(config, eventExecutors, connectManager));
    started.set(true);
}
From source file: ratpack.server.internal.DefaultRatpackServer.java
License: Apache License

protected Channel buildChannel(final ServerConfig serverConfig, final ChannelHandler handlerAdapter)
        throws InterruptedException {

    SslContext sslContext = serverConfig.getNettySslContext();
    this.useSsl = sslContext != null;

    ServerBootstrap serverBootstrap = new ServerBootstrap();

    serverConfig.getConnectTimeoutMillis().ifPresent(i -> {
        serverBootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, i);
        serverBootstrap.childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, i);
    });
    serverConfig.getMaxMessagesPerRead().ifPresent(i -> {
        FixedRecvByteBufAllocator allocator = new FixedRecvByteBufAllocator(i);
        serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, allocator);
        serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, allocator);
    });
    serverConfig.getReceiveBufferSize().ifPresent(i -> {
        serverBootstrap.option(ChannelOption.SO_RCVBUF, i);
        serverBootstrap.childOption(ChannelOption.SO_RCVBUF, i);
    });
    serverConfig.getWriteSpinCount().ifPresent(i -> {
        serverBootstrap.option(ChannelOption.WRITE_SPIN_COUNT, i);
        serverBootstrap.childOption(ChannelOption.WRITE_SPIN_COUNT, i);
    });
    serverConfig.getConnectQueueSize().ifPresent(i -> serverBootstrap.option(ChannelOption.SO_BACKLOG, i));

    return serverBootstrap.group(execController.getEventLoopGroup())
            .channel(ChannelImplDetector.getServerSocketChannelImpl())
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline pipeline = ch.pipeline();
                    new ConnectionIdleTimeout(pipeline, serverConfig.getIdleTimeout());
                    if (sslContext != null) {
                        SSLEngine sslEngine = sslContext.newEngine(PooledByteBufAllocator.DEFAULT);
                        pipeline.addLast("ssl", new SslHandler(sslEngine));
                    }
                    pipeline.addLast("decoder", new HttpRequestDecoder(serverConfig.getMaxInitialLineLength(),
                            serverConfig.getMaxHeaderSize(), serverConfig.getMaxChunkSize(), false));
                    pipeline.addLast("encoder", new HttpResponseEncoder());
                    pipeline.addLast("deflater", new IgnorableHttpContentCompressor());
                    pipeline.addLast("chunkedWriter", new ChunkedWriteHandler());
                    pipeline.addLast("adapter", handlerAdapter);
                    ch.config().setAutoRead(false);
                }
            })
            .bind(buildSocketAddress(serverConfig)).sync().channel();
}
From source file: reactor.io.net.impl.netty.tcp.NettyTcpClient.java
License: Apache License

/**
 * Creates a new NettyTcpClient that will use the given {@code env} for configuration and the given
 * {@code reactor} to send events. The number of IO threads used by the client is configured by the
 * environment's {@code reactor.tcp.ioThreadCount} property. In its absence the number of IO threads will be
 * equal to the {@link Environment#PROCESSORS number of available processors}. The client will connect to the
 * given {@code connectAddress}, configuring its socket using the given {@code opts}. The given {@code codec}
 * will be used for encoding and decoding of data.
 *
 * @param env            The configuration environment
 * @param dispatcher     The dispatcher used to send events
 * @param connectAddress The address the client will connect to
 * @param options        The configuration options for the client's socket
 * @param sslOptions     The SSL configuration options for the client's socket
 * @param codec          The codec used to encode and decode data
 */
public NettyTcpClient(Environment env, Dispatcher dispatcher, InetSocketAddress connectAddress,
        final ClientSocketOptions options, final SslOptions sslOptions, Codec<Buffer, IN, OUT> codec) {
    super(env, dispatcher, connectAddress, options, sslOptions, codec);
    this.connectAddress = connectAddress;

    if (options instanceof NettyClientSocketOptions) {
        this.nettyOptions = (NettyClientSocketOptions) options;
    } else {
        this.nettyOptions = null;
    }
    if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) {
        this.ioGroup = nettyOptions.eventLoopGroup();
    } else {
        int ioThreadCount = env != null
                ? env.getIntProperty("reactor.tcp.ioThreadCount", Environment.PROCESSORS)
                : Environment.PROCESSORS;
        this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-tcp-io"));
    }

    this.bootstrap = new Bootstrap().group(ioGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.SO_RCVBUF, options.rcvbuf())
            .option(ChannelOption.SO_SNDBUF, options.sndbuf())
            .option(ChannelOption.SO_KEEPALIVE, options.keepAlive())
            .option(ChannelOption.SO_LINGER, options.linger())
            .option(ChannelOption.TCP_NODELAY, options.tcpNoDelay())
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .option(ChannelOption.AUTO_READ, sslOptions != null)
            //.remoteAddress(this.connectAddress)
            ;

    this.connectionSupplier = new Supplier<ChannelFuture>() {
        @Override
        public ChannelFuture get() {
            if (started.get()) {
                return bootstrap.connect(getConnectAddress());
            } else {
                return null;
            }
        }
    };
}
From source file: reactor.io.net.impl.netty.tcp.NettyTcpServer.java
License: Apache License

protected NettyTcpServer(Environment env, Dispatcher dispatcher, InetSocketAddress listenAddress,
        final ServerSocketOptions options, final SslOptions sslOptions, Codec<Buffer, IN, OUT> codec) {
    super(env, dispatcher, listenAddress, options, sslOptions, codec);

    if (options instanceof NettyServerSocketOptions) {
        this.nettyOptions = (NettyServerSocketOptions) options;
    } else {
        this.nettyOptions = null;
    }

    int selectThreadCount = getDefaultEnvironment().getIntProperty("reactor.tcp.selectThreadCount",
            Environment.PROCESSORS / 2);
    int ioThreadCount = getDefaultEnvironment().getIntProperty("reactor.tcp.ioThreadCount",
            Environment.PROCESSORS);
    this.selectorGroup = new NioEventLoopGroup(selectThreadCount,
            new NamedDaemonThreadFactory("reactor-tcp-select"));
    if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) {
        this.ioGroup = nettyOptions.eventLoopGroup();
    } else {
        this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-tcp-io"));
    }

    this.bootstrap = new ServerBootstrap().group(selectorGroup, ioGroup)
            .channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, options.backlog())
            .option(ChannelOption.SO_RCVBUF, options.rcvbuf())
            .option(ChannelOption.SO_SNDBUF, options.sndbuf())
            .option(ChannelOption.SO_REUSEADDR, options.reuseAddr())
            .localAddress((null == listenAddress ? new InetSocketAddress(0) : listenAddress))
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.AUTO_READ, sslOptions != null);
}