List of usage examples for io.netty.channel ChannelOption SO_SNDBUF
ChannelOption SO_SNDBUF
To view the source code for io.netty.channel ChannelOption SO_SNDBUF, click the Source Link below.
From source file:p2p_server.P2p_server.java
public void run() throws Exception { EventLoopGroup bossGroup = new NioEventLoopGroup(); EventLoopGroup workerGroup = new NioEventLoopGroup(); List<ChannelFuture> futures = new ArrayList<>(); SelfSignedCertificate ssc = new SelfSignedCertificate(); SslContext sslCtx = SslContextBuilder.forServer(ssc.certificate(), ssc.privateKey()).build(); try {/*w w w .j a va 2s .com*/ ServerBootstrap appboot = new ServerBootstrap(); appboot.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, 8192).childHandler(new AppChildChannelHandler(sslCtx)); appboot.option(ChannelOption.SO_REUSEADDR, true); appboot.option(ChannelOption.TCP_NODELAY, true); appboot.childOption(ChannelOption.SO_KEEPALIVE, true); appboot.childOption(ChannelOption.SO_RCVBUF, 512); appboot.childOption(ChannelOption.SO_SNDBUF, 512); ServerBootstrap devboot = new ServerBootstrap(); devboot.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, 8192).childHandler(new DevChildChannelHandler(sslCtx)); devboot.option(ChannelOption.SO_REUSEADDR, true); devboot.option(ChannelOption.TCP_NODELAY, true); devboot.childOption(ChannelOption.SO_KEEPALIVE, true); devboot.childOption(ChannelOption.SO_RCVBUF, 512); devboot.childOption(ChannelOption.SO_SNDBUF, 512); //ChannelFuture f = boostrap.bind(port).sync(); futures.add(devboot.bind(5560)); futures.add(appboot.bind(5561)); for (ChannelFuture f : futures) { f.sync(); } for (ChannelFuture f : futures) { f.channel().closeFuture().sync(); } // ??? // f.channel().closeFuture().sync(); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } }
From source file:playground.gregor.vis.CASimVisClient.java
License:Open Source License
public static void main(String[] args) throws Exception { if (args.length != 2) { System.out.println("usage: VisClient <server> <port>"); System.exit(-1);/*from w w w . j a va2 s .c om*/ } String serv = args[0]; int port = Integer.parseInt(args[1]); PeerInfo server = new PeerInfo(serv, port); DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory(); // in case client acts as server, which is the reason behind a duplex // connection indeed. RpcServerCallExecutor executor = new ThreadPoolCallExecutor(3, 100); clientFactory.setRpcServerCallExecutor(executor); clientFactory.setConnectResponseTimeoutMillis(1000); // clientFactory.getRpcServiceRegistry().registerService(); Bootstrap bootstrap = new Bootstrap(); bootstrap.group(new NioEventLoopGroup()); bootstrap.handler(clientFactory); bootstrap.channel(NioSocketChannel.class); bootstrap.option(ChannelOption.TCP_NODELAY, true); bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000); bootstrap.option(ChannelOption.SO_SNDBUF, 1048576); bootstrap.option(ChannelOption.SO_RCVBUF, 1048576); RpcClientChannel channel = clientFactory.peerWith(server, bootstrap); BlockingInterface visService = ProtoFrame.FrameServerService.newBlockingStub(channel); RpcController cntr = channel.newRpcController(); clientFactory.getRpcServiceRegistry().registerService( ProtoFrame.FrameServerService.newReflectiveBlockingService(new BlockingVisServiceImpl(null, null))); new CASimVisClient(visService, cntr).run(); // // channel.close(); // executor.shutdown(); // System.exit(0); }
From source file:playground.gregor.vis.VisServer.java
License:Open Source License
private void initServer() { PeerInfo server = new PeerInfo("localhost", 9090); RpcServerCallExecutor executor = new ThreadPoolCallExecutor(3, 200); DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(server); serverFactory.setRpcServerCallExecutor(executor); ServerBootstrap bootstrap = new ServerBootstrap(); bootstrap.group(//from w w w. j ava 2 s . com new NioEventLoopGroup(0, new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory())), new NioEventLoopGroup(0, new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()))); bootstrap.channel(NioServerSocketChannel.class); bootstrap.childHandler(serverFactory); bootstrap.localAddress(server.getPort()); bootstrap.option(ChannelOption.SO_SNDBUF, 1048576); bootstrap.option(ChannelOption.SO_RCVBUF, 1048576); bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576); bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576); bootstrap.option(ChannelOption.TCP_NODELAY, true); serverFactory.getRpcServiceRegistry() .registerService(ProtoFrame.FrameServerService.newReflectiveBlockingService(this.service)); bootstrap.bind(); }
From source file:qunar.tc.qmq.netty.client.AbstractNettyClient.java
License:Apache License
public synchronized void start(NettyClientConfig config) { if (started.get()) { return;// w ww. j av a 2 s. c om } initHandler(); Bootstrap bootstrap = new Bootstrap(); eventLoopGroup = new NioEventLoopGroup(1, new DefaultThreadFactory(clientName + "-boss")); eventExecutors = new DefaultEventExecutorGroup(config.getClientWorkerThreads(), new DefaultThreadFactory(clientName + "-worker")); connectManager = new NettyConnectManageHandler(bootstrap, config.getConnectTimeoutMillis()); bootstrap.group(this.eventLoopGroup).channel(NioSocketChannel.class).option(ChannelOption.TCP_NODELAY, true) .option(ChannelOption.SO_KEEPALIVE, false) .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeoutMillis()) .option(ChannelOption.SO_SNDBUF, config.getClientSocketSndBufSize()) .option(ChannelOption.SO_RCVBUF, config.getClientSocketRcvBufSize()) .handler(newChannelInitializer(config, eventExecutors, connectManager)); started.set(true); }
From source file:reactor.io.net.impl.netty.tcp.NettyTcpClient.java
License:Apache License
/** * Creates a new NettyTcpClient that will use the given {@code env} for configuration and the given {@code * reactor} to//from w ww .ja va 2 s . c om * send events. The number of IO threads used by the client is configured by the environment's {@code * reactor.tcp.ioThreadCount} property. In its absence the number of IO threads will be equal to the {@link * Environment#PROCESSORS number of available processors}. </p> The client will connect to the given {@code * connectAddress}, configuring its socket using the given {@code opts}. The given {@code codec} will be used for * encoding and decoding of data. * * @param env The configuration environment * @param dispatcher The dispatcher used to send events * @param connectAddress The address the client will connect to * @param options The configuration options for the client's socket * @param sslOptions The SSL configuration options for the client's socket * @param codec The codec used to encode and decode data */ public NettyTcpClient(Environment env, Dispatcher dispatcher, InetSocketAddress connectAddress, final ClientSocketOptions options, final SslOptions sslOptions, Codec<Buffer, IN, OUT> codec) { super(env, dispatcher, connectAddress, options, sslOptions, codec); this.connectAddress = connectAddress; if (options instanceof NettyClientSocketOptions) { this.nettyOptions = (NettyClientSocketOptions) options; } else { this.nettyOptions = null; } if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) { this.ioGroup = nettyOptions.eventLoopGroup(); } else { int ioThreadCount = env != null ? 
env.getIntProperty("reactor.tcp.ioThreadCount", Environment.PROCESSORS) : Environment.PROCESSORS; this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-tcp-io")); } this.bootstrap = new Bootstrap().group(ioGroup).channel(NioSocketChannel.class) .option(ChannelOption.SO_RCVBUF, options.rcvbuf()).option(ChannelOption.SO_SNDBUF, options.sndbuf()) .option(ChannelOption.SO_KEEPALIVE, options.keepAlive()) .option(ChannelOption.SO_LINGER, options.linger()) .option(ChannelOption.TCP_NODELAY, options.tcpNoDelay()) .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .option(ChannelOption.AUTO_READ, sslOptions != null) //.remoteAddress(this.connectAddress) ; this.connectionSupplier = new Supplier<ChannelFuture>() { @Override public ChannelFuture get() { if (started.get()) { return bootstrap.connect(getConnectAddress()); } else { return null; } } }; }
From source file:reactor.io.net.impl.netty.tcp.NettyTcpServer.java
License:Apache License
protected NettyTcpServer(Environment env, Dispatcher dispatcher, InetSocketAddress listenAddress, final ServerSocketOptions options, final SslOptions sslOptions, Codec<Buffer, IN, OUT> codec) { super(env, dispatcher, listenAddress, options, sslOptions, codec); if (options instanceof NettyServerSocketOptions) { this.nettyOptions = (NettyServerSocketOptions) options; } else {//from ww w .j a v a2 s . com this.nettyOptions = null; } int selectThreadCount = getDefaultEnvironment().getIntProperty("reactor.tcp.selectThreadCount", Environment.PROCESSORS / 2); int ioThreadCount = getDefaultEnvironment().getIntProperty("reactor.tcp.ioThreadCount", Environment.PROCESSORS); this.selectorGroup = new NioEventLoopGroup(selectThreadCount, new NamedDaemonThreadFactory("reactor-tcp-select")); if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) { this.ioGroup = nettyOptions.eventLoopGroup(); } else { this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-tcp-io")); } this.bootstrap = new ServerBootstrap().group(selectorGroup, ioGroup).channel(NioServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, options.backlog()) .option(ChannelOption.SO_RCVBUF, options.rcvbuf()).option(ChannelOption.SO_SNDBUF, options.sndbuf()) .option(ChannelOption.SO_REUSEADDR, options.reuseAddr()) .localAddress((null == listenAddress ? new InetSocketAddress(0) : listenAddress)) .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .childOption(ChannelOption.AUTO_READ, sslOptions != null); }
From source file:reactor.io.net.impl.netty.udp.NettyDatagramServer.java
License:Apache License
public NettyDatagramServer(@Nonnull Environment env, @Nonnull Dispatcher dispatcher, @Nullable InetSocketAddress listenAddress, @Nullable final NetworkInterface multicastInterface, @Nonnull final ServerSocketOptions options, @Nullable Codec<Buffer, IN, OUT> codec) { super(env, dispatcher, listenAddress, multicastInterface, options, codec); if (options instanceof NettyServerSocketOptions) { this.nettyOptions = (NettyServerSocketOptions) options; } else {//from www . j ava 2s . co m this.nettyOptions = null; } if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) { this.ioGroup = nettyOptions.eventLoopGroup(); } else { int ioThreadCount = getDefaultEnvironment().getIntProperty("reactor.udp.ioThreadCount", Environment.PROCESSORS); this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-udp-io")); } final InternetProtocolFamily family = toNettyFamily(options.protocolFamily()); this.bootstrap = new Bootstrap().group(ioGroup).option(ChannelOption.SO_RCVBUF, options.rcvbuf()) .option(ChannelOption.SO_SNDBUF, options.sndbuf()) .option(ChannelOption.SO_REUSEADDR, options.reuseAddr()).option(ChannelOption.AUTO_READ, false) .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.timeout()) .channelFactory(new ChannelFactory<Channel>() { @Override public Channel newChannel() { return new NioDatagramChannel(family); } }); if (null != listenAddress) { bootstrap.localAddress(listenAddress); } else { bootstrap.localAddress(NetUtil.LOCALHOST, 3000); } if (null != multicastInterface) { bootstrap.option(ChannelOption.IP_MULTICAST_IF, multicastInterface); } }
From source file:reactor.io.net.netty.tcp.NettyTcpClient.java
License:Apache License
/** * Creates a new NettyTcpClient that will use the given {@code env} for configuration and the given {@code * reactor} to//from w ww. j a v a2 s . c om * send events. The number of IO threads used by the client is configured by the environment's {@code * reactor.tcp.ioThreadCount} property. In its absence the number of IO threads will be equal to the {@link * Environment#PROCESSORS number of available processors}. </p> The client will connect to the given {@code * connectAddress}, configuring its socket using the given {@code opts}. The given {@code codec} will be used for * encoding and decoding of data. * * @param env The configuration environment * @param reactor The reactor used to send events * @param connectAddress The address the client will connect to * @param options The configuration options for the client's socket * @param sslOptions The SSL configuration options for the client's socket * @param codec The codec used to encode and decode data * @param consumers The consumers that will interact with the connection */ public NettyTcpClient(@Nonnull Environment env, @Nonnull EventBus reactor, @Nonnull InetSocketAddress connectAddress, @Nonnull final ClientSocketOptions options, @Nullable final SslOptions sslOptions, @Nullable Codec<Buffer, IN, OUT> codec, @Nonnull Collection<Consumer<NetChannel<IN, OUT>>> consumers) { super(env, reactor, connectAddress, options, sslOptions, codec, consumers); this.connectAddress = connectAddress; if (options instanceof NettyClientSocketOptions) { this.nettyOptions = (NettyClientSocketOptions) options; } else { this.nettyOptions = null; } if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) { this.ioGroup = nettyOptions.eventLoopGroup(); } else { int ioThreadCount = env.getProperty("reactor.tcp.ioThreadCount", Integer.class, Environment.PROCESSORS); this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-tcp-io")); } this.bootstrap = new 
Bootstrap().group(ioGroup).channel(NioSocketChannel.class) .option(ChannelOption.SO_RCVBUF, options.rcvbuf()).option(ChannelOption.SO_SNDBUF, options.sndbuf()) .option(ChannelOption.SO_KEEPALIVE, options.keepAlive()) .option(ChannelOption.SO_LINGER, options.linger()) .option(ChannelOption.TCP_NODELAY, options.tcpNoDelay()).remoteAddress(this.connectAddress) .handler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(final SocketChannel ch) throws Exception { ch.config().setConnectTimeoutMillis(options.timeout()); if (null != sslOptions) { SSLEngine ssl = new SSLEngineSupplier(sslOptions, true).get(); if (log.isDebugEnabled()) { log.debug("SSL enabled using keystore {}", (null != sslOptions.keystoreFile() ? sslOptions.keystoreFile() : "<DEFAULT>")); } ch.pipeline().addLast(new SslHandler(ssl)); } if (null != nettyOptions && null != nettyOptions.pipelineConfigurer()) { nettyOptions.pipelineConfigurer().accept(ch.pipeline()); } ch.pipeline().addLast(createChannelHandlers(ch)); } }); this.connectionSupplier = new Supplier<ChannelFuture>() { @Override public ChannelFuture get() { if (!closing) { return bootstrap.connect(getConnectAddress()); } else { return null; } } }; }
From source file:reactor.io.net.netty.tcp.NettyTcpServer.java
License:Apache License
protected NettyTcpServer(@Nonnull Environment env, @Nonnull EventBus reactor, @Nullable InetSocketAddress listenAddress, final ServerSocketOptions options, final SslOptions sslOptions, @Nullable Codec<Buffer, IN, OUT> codec, @Nonnull Collection<Consumer<NetChannel<IN, OUT>>> consumers) { super(env, reactor, listenAddress, options, sslOptions, codec, consumers); if (options instanceof NettyServerSocketOptions) { this.nettyOptions = (NettyServerSocketOptions) options; } else {/*w w w . j a v a 2 s.co m*/ this.nettyOptions = null; } int selectThreadCount = env.getProperty("reactor.tcp.selectThreadCount", Integer.class, Environment.PROCESSORS / 2); int ioThreadCount = env.getProperty("reactor.tcp.ioThreadCount", Integer.class, Environment.PROCESSORS); this.selectorGroup = new NioEventLoopGroup(selectThreadCount, new NamedDaemonThreadFactory("reactor-tcp-select")); if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) { this.ioGroup = nettyOptions.eventLoopGroup(); } else { this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-tcp-io")); } this.bootstrap = new ServerBootstrap().group(selectorGroup, ioGroup).channel(NioServerSocketChannel.class) .option(ChannelOption.SO_BACKLOG, options.backlog()) .option(ChannelOption.SO_RCVBUF, options.rcvbuf()).option(ChannelOption.SO_SNDBUF, options.sndbuf()) .option(ChannelOption.SO_REUSEADDR, options.reuseAddr()) .localAddress((null == listenAddress ? 
new InetSocketAddress(3000) : listenAddress)) .childHandler(new ChannelInitializer<SocketChannel>() { @Override public void initChannel(final SocketChannel ch) throws Exception { SocketChannelConfig config = ch.config(); config.setReceiveBufferSize(options.rcvbuf()); config.setSendBufferSize(options.sndbuf()); config.setKeepAlive(options.keepAlive()); config.setReuseAddress(options.reuseAddr()); config.setSoLinger(options.linger()); config.setTcpNoDelay(options.tcpNoDelay()); if (log.isDebugEnabled()) { log.debug("CONNECT {}", ch); } if (null != sslOptions) { SSLEngine ssl = new SSLEngineSupplier(sslOptions, false).get(); if (log.isDebugEnabled()) { log.debug("SSL enabled using keystore {}", (null != sslOptions.keystoreFile() ? sslOptions.keystoreFile() : "<DEFAULT>")); } ch.pipeline().addLast(new SslHandler(ssl)); } if (null != nettyOptions && null != nettyOptions.pipelineConfigurer()) { nettyOptions.pipelineConfigurer().accept(ch.pipeline()); } ch.pipeline().addLast(createChannelHandlers(ch)); ch.closeFuture().addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { if (log.isDebugEnabled()) { log.debug("CLOSE {}", ch); } close(ch); } }); } }); }
From source file:reactor.io.net.netty.udp.NettyDatagramServer.java
License:Apache License
public NettyDatagramServer(@Nonnull Environment env, @Nonnull EventBus reactor, @Nullable InetSocketAddress listenAddress, @Nullable final NetworkInterface multicastInterface, @Nonnull final ServerSocketOptions options, @Nullable Codec<Buffer, IN, OUT> codec, Collection<Consumer<NetChannel<IN, OUT>>> consumers) { super(env, reactor, listenAddress, multicastInterface, options, codec, consumers); if (options instanceof NettyServerSocketOptions) { this.nettyOptions = (NettyServerSocketOptions) options; } else {//from w ww .j a v a2 s.c o m this.nettyOptions = null; } if (null != nettyOptions && null != nettyOptions.eventLoopGroup()) { this.ioGroup = nettyOptions.eventLoopGroup(); } else { int ioThreadCount = env.getProperty("reactor.udp.ioThreadCount", Integer.class, Environment.PROCESSORS); this.ioGroup = new NioEventLoopGroup(ioThreadCount, new NamedDaemonThreadFactory("reactor-udp-io")); } final NettyNetChannelInboundHandler inboundHandler = new NettyNetChannelInboundHandler() { @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { super.channelRead(ctx, ((DatagramPacket) msg).content()); } }; this.bootstrap = new Bootstrap().group(ioGroup).option(ChannelOption.SO_RCVBUF, options.rcvbuf()) .option(ChannelOption.SO_SNDBUF, options.sndbuf()) .option(ChannelOption.SO_REUSEADDR, options.reuseAddr()) .channelFactory(new ChannelFactory<Channel>() { @Override public Channel newChannel() { final NioDatagramChannel ch = new NioDatagramChannel(); DatagramChannelConfig config = ch.config(); config.setReceiveBufferSize(options.rcvbuf()); config.setSendBufferSize(options.sndbuf()); config.setReuseAddress(options.reuseAddr()); if (null != multicastInterface) { config.setNetworkInterface(multicastInterface); } if (null != nettyOptions && null != nettyOptions.pipelineConfigurer()) { nettyOptions.pipelineConfigurer().accept(ch.pipeline()); } ch.closeFuture().addListener(new ChannelFutureListener() { @Override public void 
operationComplete(ChannelFuture future) throws Exception { if (log.isInfoEnabled()) { log.info("CLOSE {}", ch); } close(ch); } }); netChannel = (NettyNetChannel<IN, OUT>) select(ch); inboundHandler.setNetChannel(netChannel); ch.pipeline().addLast(new ChannelOutboundHandlerAdapter() { @Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { super.write(ctx, msg, promise); } }); return ch; } }).handler(inboundHandler); if (null != listenAddress) { bootstrap.localAddress(listenAddress); } else { bootstrap.localAddress(NetUtil.LOCALHOST, 3000); } if (null != multicastInterface) { bootstrap.option(ChannelOption.IP_MULTICAST_IF, multicastInterface); } }