List of usage examples for io.netty.channel ChannelOption RCVBUF_ALLOCATOR
ChannelOption.RCVBUF_ALLOCATOR sets the RecvByteBufAllocator a channel uses, which decides how large a ByteBuf Netty allocates for each read from the socket. FixedRecvByteBufAllocator always allocates buffers of one fixed size; AdaptiveRecvByteBufAllocator starts from an initial size and grows or shrinks it based on how much data previous reads actually returned. The examples below show how several open source projects configure this option on Bootstrap and ServerBootstrap.
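Before the project examples, a minimal self-contained sketch of the option itself. This is not taken from any of the projects below; the class name, port (9999), empty initializer, and buffer sizes are placeholder assumptions.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public final class RcvbufAllocatorSketch {
    public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup boss = new NioEventLoopGroup(1);
        NioEventLoopGroup workers = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap()
                    .group(boss, workers)
                    .channel(NioServerSocketChannel.class)
                    // Fixed strategy: every read on an accepted connection gets an 8 KiB buffer.
                    .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(8192))
                    // Adaptive alternative (commented out): min 64 B, initial 1 KiB, max 64 KiB,
                    // resized according to how much data recent reads actually produced.
                    //.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                    //        new AdaptiveRecvByteBufAllocator(64, 1024, 65536))
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // add protocol handlers here
                        }
                    });
            b.bind(9999).sync().channel().closeFuture().sync(); // placeholder port
        } finally {
            boss.shutdownGracefully();
            workers.shutdownGracefully();
        }
    }
}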
From source file:org.graylog2.plugin.inputs.transports.AbstractTcpTransport.java
License:Open Source License
protected ServerBootstrap getBootstrap(MessageInput input) {
    final LinkedHashMap<String, Callable<? extends ChannelHandler>> parentHandlers = getChannelHandlers(input);
    final LinkedHashMap<String, Callable<? extends ChannelHandler>> childHandlers = getChildChannelHandlers(input);

    childEventLoopGroup = eventLoopGroupFactory.create(workerThreads, localRegistry, "workers");

    return new ServerBootstrap()
            .group(parentEventLoopGroup, childEventLoopGroup)
            .channelFactory(new ServerSocketChannelFactory(nettyTransportConfiguration.getType()))
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(8192))
            .option(ChannelOption.SO_RCVBUF, getRecvBufferSize())
            .childOption(ChannelOption.SO_RCVBUF, getRecvBufferSize())
            .childOption(ChannelOption.SO_KEEPALIVE, tcpKeepalive)
            .handler(getChannelInitializer(parentHandlers))
            .childHandler(getChannelInitializer(childHandlers));
}
From source file:org.midonet.util.netty.ServerFrontEnd.java
License:Apache License
@Override
protected void doStart() {
    try {
        if (datagram) {
            log.info("Starting Netty UDP server on port {}", port);
            Bootstrap boot = new Bootstrap();
            boot.group(wrkr).channel(NioDatagramChannel.class).handler(adapter);
            if (rcvbufSize != null)
                boot.option(ChannelOption.SO_RCVBUF, rcvbufSize)
                    .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(rcvbufSize));
            sock = boot.bind(port).sync();
        } else {
            log.info("Starting Netty TCP server on port {}", port);
            ServerBootstrap boot = new ServerBootstrap();
            boot.group(boss, wrkr).channel(NioServerSocketChannel.class).childHandler(adapter)
                .option(ChannelOption.SO_BACKLOG, 128)
                .option(ChannelOption.SO_KEEPALIVE, true);
            sock = boot.bind(port).sync();
        }
        log.info("Netty server started");
        notifyStarted();
    } catch (InterruptedException e) {
        log.warn("Netty server start interrupted");
        Thread.currentThread().interrupt();
        notifyFailed(e);
    }
}
From source file:org.opendaylight.groupbasedpolicy.jsonrpc.RpcServer.java
License:Open Source License
public void start() {
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        logger.debug("New Passive channel created : " + ch.toString());
                        InetAddress address = ch.remoteAddress().getAddress();
                        int port = ch.remoteAddress().getPort();
                        String identifier = address.getHostAddress() + ":" + port;
                        ch.pipeline().addLast(new LoggingHandler(LogLevel.INFO),
                                new JsonRpcDecoder(100000),
                                new StringEncoder(CharsetUtil.UTF_8));
                        handleNewConnection(identifier, ch);
                        logger.warn("Connected Node : " + identifier);
                    }
                });
        b.option(ChannelOption.TCP_NODELAY, true);
        b.option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(65535, 65535, 65535));
        // Start the server.
        ChannelFuture f = b.bind(identity, listenPort).sync();
        String id = f.channel().localAddress().toString();
        logger.warn("Connected Node : " + id);
        this.channel = f.channel();
        // Wait until the server socket is closed.
        f.channel().closeFuture().sync();
    } catch (InterruptedException e) {
        logger.error("Thread interrupted", e);
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
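Note on this and the following OpenDaylight examples: AdaptiveRecvByteBufAllocator(65535, 65535, 65535) passes the same value for the minimum, initial, and maximum buffer sizes, which pins the otherwise adaptive allocator to a constant 64 KiB read buffer, effectively the same as new FixedRecvByteBufAllocator(65535).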
From source file:org.opendaylight.ovsdb.lib.impl.OvsdbConnectionService.java
License:Open Source License
@Override
public OvsdbClient connectWithSsl(final InetAddress address, final int port, final SSLContext sslContext) {
    try {
        Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(new NioEventLoopGroup());
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(65535, 65535, 65535));

        bootstrap.handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel channel) throws Exception {
                if (sslContext != null) {
                    /* First add ssl handler if ssl context is given */
                    SSLEngine engine = sslContext.createSSLEngine(address.toString(), port);
                    engine.setUseClientMode(true);
                    channel.pipeline().addLast("ssl", new SslHandler(engine));
                }
                channel.pipeline().addLast(
                        //new LoggingHandler(LogLevel.INFO),
                        new JsonRpcDecoder(100000),
                        new StringEncoder(CharsetUtil.UTF_8),
                        new ExceptionHandler());
            }
        });

        ChannelFuture future = bootstrap.connect(address, port).sync();
        Channel channel = future.channel();
        OvsdbClient client = getChannelClient(channel, ConnectionType.ACTIVE,
                Executors.newFixedThreadPool(NUM_THREADS));
        return client;
    } catch (InterruptedException e) {
        System.out.println("Thread was interrupted during connect");
    }
    return null;
}
From source file:org.opendaylight.ovsdb.lib.impl.OvsdbConnectionService.java
License:Open Source License
/**
 * OVSDB passive listening thread that uses a Netty ServerBootstrap to accept
 * passive connections with SSL and handle channel callbacks.
 */
private static void ovsdbManagerWithSsl(int port, final SSLContext sslContext) {
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap serverBootstrap = new ServerBootstrap();
        serverBootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel channel) throws Exception {
                        logger.debug("New Passive channel created : {}", channel);
                        if (sslContext != null) {
                            /* Add SSL handler first if SSL context is provided */
                            SSLEngine engine = sslContext.createSSLEngine();
                            engine.setUseClientMode(false); // work in a server mode
                            engine.setNeedClientAuth(true); // need client authentication
                            channel.pipeline().addLast("ssl", new SslHandler(engine));
                        }
                        channel.pipeline().addLast(new JsonRpcDecoder(100000),
                                new StringEncoder(CharsetUtil.UTF_8),
                                new ExceptionHandler());
                        handleNewPassiveConnection(channel);
                    }
                });
        serverBootstrap.option(ChannelOption.TCP_NODELAY, true);
        serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(65535, 65535, 65535));
        // Start the server.
        ChannelFuture channelFuture = serverBootstrap.bind(port).sync();
        Channel serverListenChannel = channelFuture.channel();
        // Wait until the server socket is closed.
        serverListenChannel.closeFuture().sync();
    } catch (InterruptedException e) {
        logger.error("Thread interrupted", e);
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file:org.opendaylight.ovsdb.plugin.ConnectionService.java
License:Open Source License
@Override
public Node connect(String identifier, Map<ConnectionConstants, String> params) {
    InetAddress address;
    Integer port;
    try {
        address = InetAddress.getByName(params.get(ConnectionConstants.ADDRESS));
    } catch (Exception e) {
        logger.error("Unable to resolve " + params.get(ConnectionConstants.ADDRESS), e);
        return null;
    }

    try {
        port = Integer.parseInt(params.get(ConnectionConstants.PORT));
        if (port == 0)
            port = defaultOvsdbPort;
    } catch (Exception e) {
        port = defaultOvsdbPort;
    }

    try {
        Bootstrap bootstrap = new Bootstrap();
        bootstrap.group(new NioEventLoopGroup());
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(65535, 65535, 65535));

        bootstrap.handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel channel) throws Exception {
                if (handlers == null) {
                    channel.pipeline().addLast(
                            //new LoggingHandler(LogLevel.INFO),
                            new JsonRpcDecoder(10000000),
                            new StringEncoder(CharsetUtil.UTF_8));
                } else {
                    for (ChannelHandler handler : handlers) {
                        channel.pipeline().addLast(handler);
                    }
                }
            }
        });

        ChannelFuture future = bootstrap.connect(address, port).sync();
        Channel channel = future.channel();
        return handleNewConnection(identifier, channel, this);
    } catch (InterruptedException e) {
        logger.error("Thread was interrupted during connect", e);
    } catch (ExecutionException e) {
        logger.error("ExecutionException in handleNewConnection for identifier " + identifier, e);
    }
    return null;
}
From source file:org.opendaylight.ovsdb.plugin.ConnectionService.java
License:Open Source License
private void ovsdbManager() {
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 100)
                .handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel channel) throws Exception {
                        logger.debug("New Passive channel created : " + channel.toString());
                        InetAddress address = channel.remoteAddress().getAddress();
                        int port = channel.remoteAddress().getPort();
                        String identifier = address.getHostAddress() + ":" + port;
                        channel.pipeline().addLast(new LoggingHandler(LogLevel.INFO),
                                new JsonRpcDecoder(10000000),
                                new StringEncoder(CharsetUtil.UTF_8));
                        Node node = handleNewConnection(identifier, channel, ConnectionService.this);
                        logger.debug("Connected Node : " + node.toString());
                    }
                });
        b.option(ChannelOption.TCP_NODELAY, true);
        b.option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(65535, 65535, 65535));
        // Start the server.
        ChannelFuture f = b.bind(ovsdbListenPort).sync();
        serverListenChannel = f.channel();
        // Wait until the server socket is closed.
        serverListenChannel.closeFuture().sync();
    } catch (InterruptedException e) {
        logger.error("Thread interrupted", e);
    } finally {
        // Shut down all event loops to terminate all threads.
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
From source file:org.opendaylight.sxp.core.service.ConnectFacade.java
License:Open Source License
/**
 * Create new Connection to Peer.
 *
 * @param node       SxpNode containing Security options
 * @param connection SxpConnection containing connection details
 * @param hf         HandlerFactory providing handling of communication
 * @return ChannelFuture callback
 */
public static ChannelFuture createClient(SxpNode node, SxpConnection connection, final HandlerFactory hf) {
    if (!Epoll.isAvailable()) {
        throw new UnsupportedOperationException(Epoll.unavailabilityCause().getCause());
    }
    Bootstrap bootstrap = new Bootstrap();
    if (connection.getPassword() != null && !connection.getPassword().isEmpty()) {
        bootstrap.option(EpollChannelOption.TCP_MD5SIG,
                Collections.singletonMap(connection.getDestination().getAddress(),
                        connection.getPassword().getBytes(StandardCharsets.US_ASCII)));
    }
    bootstrap.channel(EpollSocketChannel.class);
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Configuration.NETTY_CONNECT_TIMEOUT_MILLIS);
    // Size the receive buffers to the maximum SXP message length.
    RecvByteBufAllocator recvByteBufAllocator = new FixedRecvByteBufAllocator(
            Configuration.getConstants().getMessageLengthMax());
    bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.localAddress(node.getSourceIp().getHostAddress(), 0);
    bootstrap.group(eventLoopGroup);
    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            ch.pipeline().addLast(hf.getDecoders());
            ch.pipeline().addLast(hf.getEncoders());
        }
    });
    return bootstrap.connect(connection.getDestination());
}
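Note that EpollChannelOption.TCP_MD5SIG, used here to attach a TCP MD5 signature password to the peer address, is only supported by Netty's native epoll transport, which is why this method fails fast when Epoll.isAvailable() returns false. Sizing the FixedRecvByteBufAllocator to the protocol's maximum message length guarantees any single message fits in one read buffer.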
From source file:org.restcomm.imscf.common.lwcomm.service.impl.LwCommListener.java
License:Open Source License
void start() {
    // TODO: handle AS-resolved pools
    int receiveTransportThreads = config.getReceiveTransportPoolConfig().getMaxThreads();
    int receiveWorkerThreads = config.getReceiveWorkerPoolConfig().getMaxThreads();
    // Netty 4.0 does not handle parallel UDP servers well.
    // See: https://github.com/netty/netty/issues/1706
    // We differentiate two listener modes:
    //
    // a) NIO
    // ------
    // In this case a simple NioEventLoopGroup is used. The NioEventLoopGroup is given
    // "receiveTransportThreads" number of threads. The user listener is called in a
    // different executor which has receiveWorkerThreads number of threads. This does
    // not work well with Netty 4.0 but is still implemented here in case it is fixed
    // in a future Netty version (the problem is that regardless of the nThreads
    // parameter of NioEventLoopGroup, only one thread is used for incoming packet
    // processing...).
    //
    // b) EPOLL
    // --------
    // The solution offered in the link above:
    // 1) Use the epoll transport (Linux only)
    // 2) Turn on the SO_REUSEPORT option
    // 3) Create multiple datagram channels bound to the same port
    // According to http://stackoverflow.com/questions/3261965/so-reuseport-on-linux
    // this only works on Linux with kernel 3.9+ or RHEL 6.5+ -- if epoll is not
    // available, we fall back to NIO mode.
    LwCommServiceImpl.LOGGER.info(
            "Starting LwCommListener. Receive transport threads: {}, receive worker threads: {}",
            receiveTransportThreads, receiveWorkerThreads);
    Configuration.ListenerMode listenerMode = config.getListenerMode();
    LwCommServiceImpl.LOGGER.info("Listener mode configured is {}", config.getListenerMode());
    if (listenerMode == Configuration.ListenerMode.EPOLL && !Epoll.isAvailable()) {
        LwCommServiceImpl.LOGGER
                .warn("Listener mode EPOLL is configured but is not available. Falling back to NIO mode.");
        listenerMode = Configuration.ListenerMode.NIO;
    }
    Bootstrap b = new Bootstrap();
    b.group(receiveTransportGroup);
    if (receiveTransportGroup instanceof EpollEventLoopGroup) {
        b.channel(EpollDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        b.option(EpollChannelOption.SO_REUSEPORT, true);
    } else {
        b.channel(NioDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }
    channels = new HashSet<Channel>();
    b.handler(new ChannelInitializer<DatagramChannel>() {
        protected void initChannel(DatagramChannel channel) throws Exception {
            LwCommServiceImpl.LOGGER.info("Initializing channel: '{}'", channel);
            channels.add(channel);
            channel.pipeline().addLast(channelHandler);
        }
    });
    // TODO FIXME: hardcoded 256K limit for receive buffer!
    b.option(ChannelOption.SO_RCVBUF, 256 * 1024);
    b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(10240));
    InetAddress host = null;
    int port = config.getLocalNode().getPort();
    try {
        host = InetAddress.getByName(config.getLocalNode().getHost());
        ChannelFuture future;
        if (listenerMode == ListenerMode.NIO) {
            future = b.bind(host, port).sync();
            if (!future.isSuccess()) {
                LwCommServiceImpl.LOGGER.error("Error while binding socket to {}:{}", host, port);
            } else {
                LwCommServiceImpl.LOGGER.info("Binding socket to {}:{} - SUCCESS", host, port);
            }
        } else {
            for (int i = 0; i < receiveTransportThreads; i++) {
                future = b.bind(host, port).sync();
                if (!future.isSuccess()) {
                    LwCommServiceImpl.LOGGER.error("Error while binding {} of {} socket to {}:{}", i + 1,
                            receiveTransportThreads, host, port);
                } else {
                    LwCommServiceImpl.LOGGER.info("Successfully bound socket {} of {} to {}:{} - {}", i + 1,
                            receiveTransportThreads, host, port, future.channel());
                }
            }
        }
    } catch (Exception e) {
        LwCommServiceImpl.LOGGER.error("Error while binding socket or getting local node address.", e);
    }
}
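The EPOLL branch above is the interesting part: with SO_REUSEPORT, several datagram channels can be bound to the same port so the kernel spreads incoming packets across them. Below is a condensed, self-contained sketch of just that pattern, not taken from the project above; it is Linux-only, and the class name, thread count, port, and no-op handler are placeholder assumptions.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.epoll.EpollChannelOption;
import io.netty.channel.epoll.EpollDatagramChannel;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.socket.DatagramPacket;

public final class ReusePortUdpSketch {
    public static void main(String[] args) throws InterruptedException {
        EpollEventLoopGroup group = new EpollEventLoopGroup(4); // native epoll, Linux only
        Bootstrap b = new Bootstrap()
                .group(group)
                .channel(EpollDatagramChannel.class)
                .option(EpollChannelOption.SO_REUSEPORT, true)
                .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(10240))
                .handler(new SimpleChannelInboundHandler<DatagramPacket>() {
                    @Override
                    protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) {
                        // process the datagram; msg is released automatically
                    }
                });
        // Bind one channel per event loop thread to the same UDP port;
        // the kernel load-balances incoming packets across the bound sockets.
        for (int i = 0; i < 4; i++) {
            b.bind(9999).sync(); // same port each time; 9999 is a placeholder
        }
    }
}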
From source file:org.vertx.java.core.net.impl.TCPSSLHelper.java
License:Open Source License
public void applyConnectionOptions(ServerBootstrap bootstrap) {
    bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    if (tcpSendBufferSize != -1) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, tcpSendBufferSize);
    }
    if (tcpReceiveBufferSize != -1) {
        bootstrap.childOption(ChannelOption.SO_RCVBUF, tcpReceiveBufferSize);
        bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(tcpReceiveBufferSize));
    }
    bootstrap.option(ChannelOption.SO_LINGER, soLinger);
    if (trafficClass != -1) {
        bootstrap.childOption(ChannelOption.IP_TOS, trafficClass);
    }
    bootstrap.childOption(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
    bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    bootstrap.option(ChannelOption.SO_BACKLOG, acceptBackLog);
}
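One distinction worth keeping in mind across all of these examples: ChannelOption.SO_RCVBUF configures the kernel's socket receive buffer, while ChannelOption.RCVBUF_ALLOCATOR configures the ByteBuf that Netty allocates for each read from that socket. The Vert.x code above sets both to the same value, presumably so that a single read can drain a full kernel buffer.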