List of usage examples for io.netty.channel ChannelOption SO_RCVBUF
ChannelOption SO_RCVBUF
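Before the examples, a minimal sketch of the option itself may help. ChannelOption.SO_RCVBUF maps to the kernel's SO_RCVBUF socket option and requests a size for the operating-system receive buffer; it is distinct from ChannelOption.RCVBUF_ALLOCATOR, which controls how large a ByteBuf Netty allocates for each read. The snippet below is illustrative only (the 64 KB size and the NIO transport are assumptions, not values taken from the examples that follow):

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;

Bootstrap bootstrap = new Bootstrap()
        .group(new NioEventLoopGroup())
        .channel(NioSocketChannel.class)
        // Ask the OS for a 64 KB socket receive buffer; the kernel may round or clamp this value.
        .option(ChannelOption.SO_RCVBUF, 64 * 1024)
        // Independently, fix the size of the ByteBuf Netty allocates per read.
        .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(64 * 1024));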
From source file:org.restcomm.imscf.common.lwcomm.service.impl.LwCommListener.java
License:Open Source License
void start() {
    // TODO: handle AS-resolved pools
    int receiveTransportThreads = config.getReceiveTransportPoolConfig().getMaxThreads();
    int receiveWorkerThreads = config.getReceiveWorkerPoolConfig().getMaxThreads();
    // Netty 4.0 does not handle parallel UDP servers well.
    // See: https://github.com/netty/netty/issues/1706
    // We differentiate two listener modes:
    //
    // a) NIO
    // ------
    // In this case a simple NioEventLoopGroup is used. The NioEventLoopGroup is given
    // "receiveTransportThreads" number of threads. The user listener will be called
    // in a different executor, which has receiveWorkerThreads number of threads.
    // This does not work well with Netty 4.0 but is still implemented here in case
    // it is fixed in a future Netty version (the problem is that regardless of the
    // nThreads parameter of NioEventLoopGroup, only one thread is used for incoming
    // packet processing...).
    //
    // b) EPOLL
    // --------
    // The solution offered in the link above:
    // 1) Use the epoll transport (Linux only)
    // 2) Turn on the SO_REUSEPORT option
    // 3) Create multiple datagram channels bound to the same port
    // According to http://stackoverflow.com/questions/3261965/so-reuseport-on-linux this
    // only works on Linux with kernel 3.9+ or RHEL 6.5+ -- if epoll is not available,
    // we fall back to NIO mode.
    LwCommServiceImpl.LOGGER.info(
            "Starting LwCommListener. Receive transport threads: {}, receive worker threads: {}",
            receiveTransportThreads, receiveWorkerThreads);
    Configuration.ListenerMode listenerMode = config.getListenerMode();
    LwCommServiceImpl.LOGGER.info("Listener mode configured is {}", config.getListenerMode());
    if (listenerMode == Configuration.ListenerMode.EPOLL && !Epoll.isAvailable()) {
        LwCommServiceImpl.LOGGER
                .warn("Listener mode EPOLL is configured but is not available. Falling back to NIO mode.");
        listenerMode = Configuration.ListenerMode.NIO;
    }
    Bootstrap b = new Bootstrap();
    b.group(receiveTransportGroup);
    if (receiveTransportGroup instanceof EpollEventLoopGroup) {
        b.channel(EpollDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        b.option(EpollChannelOption.SO_REUSEPORT, true);
    } else {
        b.channel(NioDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }
    channels = new HashSet<Channel>();
    b.handler(new ChannelInitializer<DatagramChannel>() {
        protected void initChannel(DatagramChannel channel) throws Exception {
            LwCommServiceImpl.LOGGER.info("Initializing channel: '{}'", channel);
            channels.add(channel);
            channel.pipeline().addLast(channelHandler);
        }
    });
    // TODO FIXME: hardcoded 256K limit for receive buffer!
    b.option(ChannelOption.SO_RCVBUF, 256 * 1024);
    b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(10240));
    InetAddress host = null;
    int port = config.getLocalNode().getPort();
    try {
        host = InetAddress.getByName(config.getLocalNode().getHost());
        ChannelFuture future;
        if (listenerMode == ListenerMode.NIO) {
            future = b.bind(host, port).sync();
            if (!future.isSuccess()) {
                LwCommServiceImpl.LOGGER.error("Error while binding socket to {}:{}", host, port);
            } else {
                LwCommServiceImpl.LOGGER.info("Binding socket to {}:{} - SUCCESS", host, port);
            }
        } else {
            for (int i = 0; i < receiveTransportThreads; i++) {
                future = b.bind(host, port).sync();
                if (!future.isSuccess()) {
                    LwCommServiceImpl.LOGGER.error("Error while binding socket {} of {} to {}:{}", i + 1,
                            receiveTransportThreads, host, port);
                } else {
                    LwCommServiceImpl.LOGGER.info("Successfully bound socket {} of {} to {}:{} - {}", i + 1,
                            receiveTransportThreads, host, port, future.channel());
                }
            }
        }
    } catch (Exception e) {
        LwCommServiceImpl.LOGGER.error("Error while binding socket or getting local node address.", e);
    }
}
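The 256K value above is only a request: Linux in particular clamps it to net.core.rmem_max (and then doubles the granted value for bookkeeping), so it can be worth logging what the socket actually received. A small sketch, assuming a bound channel taken from one of the bind() futures above:

// future.channel() is a bound DatagramChannel; getOption queries the live socket setting.
Integer effectiveRcvBuf = future.channel().config().getOption(ChannelOption.SO_RCVBUF);
LwCommServiceImpl.LOGGER.info("Effective SO_RCVBUF: {}", effectiveRcvBuf);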
From source file:org.teiid.transport.SocketListener.java
License:Apache License
public SocketListener(final InetSocketAddress address, final int inputBufferSize, final int outputBufferSize,
        int maxWorkers, final SSLConfiguration config, final ClientServiceRegistryImpl csr,
        final StorageManager storageManager) {
    if (config != null) {
        this.isClientEncryptionEnabled = config.isClientEncryptionEnabled();
    }
    this.csr = csr;
    NamedThreadFactory nettyPool = new NamedThreadFactory("NIO"); //$NON-NLS-1$
    if (LogManager.isMessageToBeRecorded(LogConstants.CTX_TRANSPORT, MessageLevel.DETAIL)) {
        LogManager.logDetail(LogConstants.CTX_TRANSPORT,
                "server = " + address.getAddress() + " binding to port:" + address.getPort()); //$NON-NLS-1$ //$NON-NLS-2$
    }
    if (maxWorkers == 0) {
        maxWorkers = Math.max(4, PropertiesUtils.getIntProperty(System.getProperties(),
                "io.netty.eventLoopThreads", 2 * Runtime.getRuntime().availableProcessors())); //$NON-NLS-1$
    }
    EventLoopGroup workers = new NioEventLoopGroup(maxWorkers, nettyPool);
    bootstrap = new ServerBootstrap();
    bootstrap.group(workers).channel(NioServerSocketChannel.class);
    this.channelHandler = createChannelHandler();
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            ChannelPipeline pipeline = ch.pipeline();
            configureChannelPipeline(pipeline, config, storageManager);
        }
    });
    if (inputBufferSize != 0) {
        bootstrap.childOption(ChannelOption.SO_RCVBUF, Integer.valueOf(inputBufferSize));
    }
    if (outputBufferSize != 0) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, Integer.valueOf(outputBufferSize));
    }
    bootstrap.childOption(ChannelOption.TCP_NODELAY, Boolean.TRUE);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, Boolean.TRUE);
    ChannelFuture future = bootstrap.bind(address);
    future.syncUninterruptibly();
    this.serverChannel = future.channel();
}
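Note that this example sets SO_RCVBUF via childOption rather than option. On a ServerBootstrap, option(...) configures the listening (parent) channel while childOption(...) configures each accepted connection; the receive buffer usually matters on the children. Setting it on the parent as well can matter for TCP window scaling, because accepted sockets inherit the listener's buffer size at accept time, before Netty applies the childOption. A sketch of the distinction (the 128 KB value is illustrative):

// Applies to the listening socket; accepted sockets inherit it when the connection is accepted.
serverBootstrap.option(ChannelOption.SO_RCVBUF, 128 * 1024);
// Applies to every accepted (child) connection after accept.
serverBootstrap.childOption(ChannelOption.SO_RCVBUF, 128 * 1024);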
From source file:org.vertx.java.core.net.impl.TCPSSLHelper.java
License:Open Source License
public void applyConnectionOptions(ServerBootstrap bootstrap) {
    bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    if (tcpSendBufferSize != -1) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, tcpSendBufferSize);
    }
    if (tcpReceiveBufferSize != -1) {
        bootstrap.childOption(ChannelOption.SO_RCVBUF, tcpReceiveBufferSize);
        bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(tcpReceiveBufferSize));
    }
    bootstrap.option(ChannelOption.SO_LINGER, soLinger);
    if (trafficClass != -1) {
        bootstrap.childOption(ChannelOption.IP_TOS, trafficClass);
    }
    bootstrap.childOption(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
    bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    bootstrap.option(ChannelOption.SO_BACKLOG, acceptBackLog);
}
From source file:org.vertx.java.core.net.impl.TCPSSLHelper.java
License:Open Source License
public void applyConnectionOptions(Bootstrap bootstrap) {
    bootstrap.option(ChannelOption.TCP_NODELAY, tcpNoDelay);
    if (tcpSendBufferSize != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, tcpSendBufferSize);
    }
    if (tcpReceiveBufferSize != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, tcpReceiveBufferSize);
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(tcpReceiveBufferSize));
    }
    bootstrap.option(ChannelOption.SO_LINGER, soLinger);
    if (trafficClass != -1) {
        bootstrap.option(ChannelOption.IP_TOS, trafficClass);
    }
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeout);
    bootstrap.option(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
}
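Both vert.x variants pair SO_RCVBUF with a FixedRecvByteBufAllocator of the same size, so every read allocates exactly one buffer of that size. When the expected message size is unknown, Netty's default AdaptiveRecvByteBufAllocator grows and shrinks the per-read buffer based on observed read sizes; a sketch, with illustrative bounds:

// minimum, initial, and maximum per-read buffer sizes in bytes
bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(64, 2048, 65536));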
From source file:org.waarp.common.utility.WaarpNettyUtil.java
License:Open Source License
/**
 * Add default configuration for client bootstrap.
 *
 * @param bootstrap
 * @param group
 * @param timeout
 */
public static void setBootstrap(Bootstrap bootstrap, EventLoopGroup group, int timeout) {
    bootstrap.channel(NioSocketChannel.class);
    bootstrap.group(group);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_REUSEADDR, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeout);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
}
From source file:org.waarp.common.utility.WaarpNettyUtil.java
License:Open Source License
/**
 * Add default configuration for server bootstrap.
 *
 * @param bootstrap
 * @param groupBoss
 * @param groupWorker
 * @param timeout
 */
public static void setServerBootstrap(ServerBootstrap bootstrap, EventLoopGroup groupBoss,
        EventLoopGroup groupWorker, int timeout) {
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.group(groupBoss, groupWorker);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_REUSEADDR, true);
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.childOption(ChannelOption.SO_REUSEADDR, true);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
    bootstrap.childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeout);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
}
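One detail worth flagging in the server variant: CONNECT_TIMEOUT_MILLIS is applied as a childOption, but accepted channels never perform a connect, so the option is inert there; it is meaningful only on a client Bootstrap, as in the client variant above.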
From source file:org.wenxueliu.netty.client.ClientTest.java
License:Apache License
private void connectRetry(String ip, int port, ChannelFutureListener clientConnectionListener) {
    try {
        bootstrap = new Bootstrap().group(workerGroup).channel(NioSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, true)
                .option(ChannelOption.SO_REUSEADDR, true)
                .option(ChannelOption.SO_KEEPALIVE, true)
                .option(ChannelOption.SO_SNDBUF, SEND_BUFFER_SIZE)
                .option(ChannelOption.SO_RCVBUF, SEND_BUFFER_SIZE)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECT_TIMEOUT);
        pipelineFactory = new DefaultChannelInitializer(timer, this);
        bootstrap.handler(pipelineFactory);
        //.handler(new ChannelInitializer<SocketChannel>() {
        //    @Override
        //    protected void initChannel(SocketChannel channel) throws Exception {
        //        ChannelPipeline p = channel.pipeline();
        //        p.addLast(new MessageDecoder(),
        //                new StringEncoder(CharsetUtil.UTF_8),
        //                new IdleStateHandler(IDLE_TIMEOUT_SEC, 0, 0),
        //                new BootstrapTimeoutHandler(timer, 10),
        //                new ConnectionHandler());
        //    }
        //});
        bootstrap.remoteAddress(ip, port);
        ChannelFuture future = bootstrap.connect();
        future.awaitUninterruptibly();
        future.addListener(clientConnectionListener);
    } catch (Exception e) {
        log.warn("Connection to the server {}:{} failed", ip, port);
    }
}
From source file:org.wso2.carbon.protobuf.registry.internal.ProtobufRegistryActivator.java
License:Open Source License
public void start(BundleContext bundleContext) {
    log.info("/////////////////////////////////////");
    // load protobuf server configurations from pbs xml
    ProtobufConfiguration configuration = null;
    try {
        configuration = ProtobufConfigFactory.build();
    } catch (ProtobufConfigurationException e) {
        String msg = "Error while loading pbs xml file " + e.getLocalizedMessage();
        log.error(msg, e);
        return;
    }
    if (!configuration.isEnabled()) {
        log.debug("ProtobufServer is not enabled in pbs xml");
        return;
    }
    log.info("Starting ProtobufServer...");
    // gathering configurations into local variables
    ServerConfiguration carbonConfig = ServerConfiguration.getInstance();
    org.wso2.carbon.protobuf.registry.config.ServerConfiguration serverConfig = configuration
            .getServerConfiguration();
    ServerCallExecutorThreadPoolConfiguration callExecutorConfig = serverConfig
            .getServerCallExecutorThreadPoolConfiguration();
    TimeoutExecutorThreadPoolConfiguration timeoutExecutorConfig = serverConfig
            .getTimeoutExecutorThreadPoolConfiguration();
    TimeoutCheckerThreadPoolConfiguration timeoutCheckerConfig = serverConfig
            .getTimeoutCheckerThreadPoolConfiguration();
    LoggerConfiguration loggerConfig = serverConfig.getLoggerConfiguration();
    TransportConfiguration transportConfig = configuration.getTransportConfiguration();
    AcceptorsConfiguration acceptorsConfig = transportConfig.getAcceptorsConfiguration();
    ChannelHandlersConfiguration channelHandlersConfig = transportConfig.getChannelHandlersConfiguration();
    String hostName = carbonConfig.getFirstProperty("HostName");
    int port = serverConfig.getPort();
    int portOffset = Integer.parseInt(carbonConfig.getFirstProperty("Ports.Offset"));
    int effectivePort = port + portOffset;
    // server information
    PeerInfo serverInfo = new PeerInfo(hostName, effectivePort);
    int callExecutorCorePoolSize = callExecutorConfig.getCorePoolSize();
    int callExecutorMaxPoolSize = callExecutorConfig.getMaxPoolSize();
    int callExecutorMaxPoolTimeout = callExecutorConfig.getMaxPoolTimeout();
    int callExecutorWorkQueueCapacity = callExecutorConfig.getWorkQueueCapacity();
    // call executor
    RpcServerCallExecutor callExecutor = new ThreadPoolCallExecutor(callExecutorCorePoolSize,
            callExecutorMaxPoolSize, callExecutorMaxPoolTimeout, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(callExecutorWorkQueueCapacity), Executors.defaultThreadFactory());
    serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);
    serverFactory.setRpcServerCallExecutor(callExecutor);
    // if SSL encryption is enabled
    if (serverConfig.isSSLEnabled()) {
        // read keystore and truststore from carbon
        String keystorePassword = carbonConfig.getFirstProperty("Security.KeyStore.Password");
        String keystorePath = carbonConfig.getFirstProperty("Security.KeyStore.Location");
        String truststorePassword = carbonConfig.getFirstProperty("Security.TrustStore.Password");
        String truststorePath = carbonConfig.getFirstProperty("Security.TrustStore.Location");
        RpcSSLContext sslCtx = new RpcSSLContext();
        sslCtx.setKeystorePassword(keystorePassword);
        sslCtx.setKeystorePath(keystorePath);
        sslCtx.setTruststorePassword(truststorePassword);
        sslCtx.setTruststorePath(truststorePath);
        try {
            sslCtx.init();
        } catch (Exception e) {
            String msg = "Couldn't create SSL Context : " + e.getLocalizedMessage();
            log.error(msg, e);
            return;
        }
        serverFactory.setSslContext(sslCtx);
    }
    // Timeout Executor
    int timeoutExecutorCorePoolSize = timeoutExecutorConfig.getCorePoolSize();
    int timeoutExecutorMaxPoolSize = timeoutExecutorConfig.getMaxPoolSize();
    int timeoutExecutorKeepAliveTime = timeoutExecutorConfig.getKeepAliveTime();
    BlockingQueue<Runnable> timeoutExecutorWorkQueue = new ArrayBlockingQueue<Runnable>(
            timeoutExecutorCorePoolSize, false);
    ThreadFactory timeoutExecutorTF = new RenamingThreadFactoryProxy("timeout",
            Executors.defaultThreadFactory());
    RpcTimeoutExecutor timeoutExecutor = new TimeoutExecutor(timeoutExecutorCorePoolSize,
            timeoutExecutorMaxPoolSize, timeoutExecutorKeepAliveTime, TimeUnit.SECONDS,
            timeoutExecutorWorkQueue, timeoutExecutorTF);
    // Timeout Checker
    int timeoutCheckerSleepTimeMs = timeoutCheckerConfig.getPeriod();
    int timeoutCheckerCorePoolSize = timeoutCheckerConfig.getCorePoolSize();
    ThreadFactory timeoutCheckerTF = new RenamingThreadFactoryProxy("check", Executors.defaultThreadFactory());
    RpcTimeoutChecker timeoutChecker = new TimeoutChecker(timeoutCheckerSleepTimeMs,
            timeoutCheckerCorePoolSize, timeoutCheckerTF);
    timeoutChecker.setTimeoutExecutor(timeoutExecutor);
    timeoutChecker.startChecking(serverFactory.getRpcClientRegistry());
    // setup a RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {
        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("Protobuf connection Reestablished " + clientChannel);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("Protobuf connection Opened " + clientChannel);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("Protobuf connection Lost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("Protobuf connection Changed " + clientChannel);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);
    // ProtobufServer Logger
    boolean isLogReq = loggerConfig.isLogReqProtoEnabled();
    boolean isLogRes = loggerConfig.isLogResProtoEnabled();
    boolean isLogEve = loggerConfig.isLogEventProtoEnabled();
    CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
    logger.setLogRequestProto(isLogReq);
    logger.setLogResponseProto(isLogRes);
    logger.setLogEventProto(isLogEve);
    if (isLogReq || isLogRes || isLogEve) {
        serverFactory.setLogger(logger);
    } else {
        serverFactory.setLogger(null);
    }
    // Call acceptors parameters
    int acceptorsPoolSize = acceptorsConfig.getPoolSize();
    int acceptorsSendBufferSize = acceptorsConfig.getSendBufferSize();
    int acceptorsReceiverBufferSize = acceptorsConfig.getReceiverBufferSize();
    // Channel handlers parameters
    int channelHandlersPoolSize = channelHandlersConfig.getPoolSize();
    int channelHandlersSendBufferSize = channelHandlersConfig.getSendBufferSize();
    int channelHandlersReceiverBufferSize = channelHandlersConfig.getReceiverBufferSize();
    // enable Nagle's algorithm or not
    boolean tcpNoDelay = transportConfig.isTCPNoDelay();
    // boss and worker thread factories
    ThreadFactory bossTF = new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory());
    NioEventLoopGroup boss = new NioEventLoopGroup(acceptorsPoolSize, bossTF);
    ThreadFactory workersTF = new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory());
    NioEventLoopGroup workers = new NioEventLoopGroup(channelHandlersPoolSize, workersTF);
    // Configure the server.
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, acceptorsSendBufferSize);
    bootstrap.option(ChannelOption.SO_RCVBUF, acceptorsReceiverBufferSize);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, channelHandlersReceiverBufferSize);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, channelHandlersSendBufferSize);
    bootstrap.option(ChannelOption.TCP_NODELAY, tcpNoDelay);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());
    // To release resources on shutdown
    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(callExecutor);
    shutdownHandler.addResource(timeoutChecker);
    shutdownHandler.addResource(timeoutExecutor);
    // Bind and start to accept incoming connections.
    bootstrap.bind();
    log.info("ProtobufServer Serving " + serverInfo);
    // Register ProtobufServer Registry as an OSGi service
    ProtobufRegistry pbsRegistry = new ProtobufRegistryImpl(serverFactory);
    bundleContext.registerService(ProtobufRegistry.class.getName(), pbsRegistry, null);
}
From source file:org.wso2.carbon.transport.http.netty.listener.HTTPTransportListener.java
License:Open Source License
private void startTransport() {
    // Create Bootstrap Configuration from listener parameters
    ServerBootstrapConfiguration.createBootStrapConfiguration(transportProperties);
    ServerBootstrapConfiguration serverBootstrapConfiguration = ServerBootstrapConfiguration.getInstance();
    // boss group is for accepting channels
    EventLoopGroup bossGroup = HTTPTransportContextHolder.getInstance().getBossGroup();
    if (bossGroup == null) {
        bossGroup = new NioEventLoopGroup(
                bossGroupSize != 0 ? bossGroupSize : Runtime.getRuntime().availableProcessors());
        HTTPTransportContextHolder.getInstance().setBossGroup(bossGroup);
    }
    // worker group is for processing IO
    EventLoopGroup workerGroup = HTTPTransportContextHolder.getInstance().getWorkerGroup();
    if (workerGroup == null) {
        workerGroup = new NioEventLoopGroup(
                workerGroupSize != 0 ? workerGroupSize : Runtime.getRuntime().availableProcessors() * 2);
        HTTPTransportContextHolder.getInstance().setWorkerGroup(workerGroup);
    }
    log.debug("Netty Boss group size " + bossGroup);
    log.debug("Netty Worker group size " + workerGroup);
    bootstrap = new ServerBootstrap();
    bootstrap.option(ChannelOption.SO_BACKLOG, serverBootstrapConfiguration.getSoBackLog());
    log.debug("Netty Server Socket BACKLOG " + serverBootstrapConfiguration.getSoBackLog());
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class);
    addChannelInitializer();
    bootstrap.childOption(ChannelOption.TCP_NODELAY, serverBootstrapConfiguration.isTcpNoDelay());
    log.debug("Netty Server Socket TCP_NODELAY " + serverBootstrapConfiguration.isTcpNoDelay());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, serverBootstrapConfiguration.isKeepAlive());
    log.debug("Netty Server Socket SO_KEEPALIVE " + serverBootstrapConfiguration.isKeepAlive());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, serverBootstrapConfiguration.getConnectTimeOut());
    log.debug("Netty Server Socket CONNECT_TIMEOUT_MILLIS " + serverBootstrapConfiguration.getConnectTimeOut());
    bootstrap.option(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());
    log.debug("Netty Server Socket SO_SNDBUF " + serverBootstrapConfiguration.getSendBufferSize());
    bootstrap.option(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReciveBufferSize());
    log.debug("Netty Server Socket SO_RCVBUF " + serverBootstrapConfiguration.getReciveBufferSize());
    bootstrap.childOption(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReciveBufferSize());
    log.debug("Netty Server Socket SO_RCVBUF " + serverBootstrapConfiguration.getReciveBufferSize());
    bootstrap.childOption(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());
    log.debug("Netty Server Socket SO_SNDBUF " + serverBootstrapConfiguration.getSendBufferSize());
    if (defaultListenerConfig.isBindOnStartup()) {
        bindInterface(defaultListenerConfig);
    }
    TransportListenerManager transportListenerManager = HTTPTransportContextHolder.getInstance().getManager();
    if (transportListenerManager != null) {
        transportListenerManager.registerTransportListener(this);
    }
}
From source file:org.wso2.carbon.transport.http.netty.listener.NettyListener.java
License:Open Source License
private void startTransport() {
    ServerBootstrapConfiguration.createBootStrapConfiguration(nettyConfig.getParameters());
    ServerBootstrapConfiguration serverBootstrapConfiguration = ServerBootstrapConfiguration.getInstance();
    bossGroup = new NioEventLoopGroup(nettyConfig.getBossThreadPoolSize());
    workerGroup = new NioEventLoopGroup(nettyConfig.getWorkerThreadPoolSize());
    bootstrap = new ServerBootstrap();
    bootstrap.option(ChannelOption.SO_BACKLOG, serverBootstrapConfiguration.getSoBackLog());
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class);
    addChannelInitializer();
    bootstrap.childOption(ChannelOption.TCP_NODELAY, serverBootstrapConfiguration.isTcpNoDelay());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, serverBootstrapConfiguration.isKeepAlive());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, serverBootstrapConfiguration.getConnectTimeOut());
    bootstrap.option(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());
    bootstrap.option(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReciveBufferSize());
    bootstrap.childOption(ChannelOption.SO_RCVBUF, serverBootstrapConfiguration.getReciveBufferSize());
    bootstrap.childOption(ChannelOption.SO_SNDBUF, serverBootstrapConfiguration.getSendBufferSize());
    setupChannelInitializer();
    try {
        bootstrap.bind(new InetSocketAddress(nettyConfig.getHost(), nettyConfig.getPort())).sync();
        TransportListenerManager artifactDeployer = NettyTransportContextHolder.getInstance().getManager();
        if (artifactDeployer != null) {
            artifactDeployer.registerTransportListener(id, this);
        }
        log.info("Netty Listener starting on port " + nettyConfig.getPort());
    } catch (InterruptedException e) {
        log.error(e.getMessage(), e);
    }
}