List of usage examples for io.netty.channel.ChannelOption#SO_REUSEADDR
ChannelOption.SO_REUSEADDR
The examples below show how several open-source projects set ChannelOption.SO_REUSEADDR when configuring Netty Bootstrap and ServerBootstrap instances.
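Before the project examples, here is a minimal, self-contained sketch of the pattern they share: enabling SO_REUSEADDR on the acceptor socket through ServerBootstrap.option(...) so that a restarted server can rebind a port whose previous sockets are still in TIME_WAIT. The class name, the port 8080, and the LoggingHandler pipeline are illustrative placeholders, not taken from the projects below.

// Minimal sketch, assuming a plain NIO server with a logging-only pipeline.
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LoggingHandler;

public final class ReuseAddrServer {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    // Applies to the listening (parent) socket: allow rebinding the
                    // address even if sockets from a previous run linger in TIME_WAIT.
                    .option(ChannelOption.SO_REUSEADDR, true)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            ch.pipeline().addLast(new LoggingHandler());
                        }
                    });
            ChannelFuture f = b.bind(8080).sync();
            f.channel().closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}

On a ServerBootstrap, option(...) configures the listening channel while childOption(...) configures accepted connections, which is why the examples below typically set SO_REUSEADDR through option(...) and options such as SO_KEEPALIVE through childOption(...).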
From source file:org.apache.geode.redis.GeodeRedisServer.java
License:Apache License
/**
 * Helper method to start the server listening for connections. The server is bound to the port
 * specified by {@link GeodeRedisServer#serverPort}
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void startRedisServer() throws IOException, InterruptedException {
    ThreadFactory selectorThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-SelectorThread-" + counter.incrementAndGet());
            t.setDaemon(true);
            return t;
        }
    };

    ThreadFactory workerThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-WorkerThread-" + counter.incrementAndGet());
            return t;
        }
    };

    bossGroup = null;
    workerGroup = null;
    Class<? extends ServerChannel> socketClass = null;
    if (singleThreadPerConnection) {
        bossGroup = new OioEventLoopGroup(Integer.MAX_VALUE, selectorThreadFactory);
        workerGroup = new OioEventLoopGroup(Integer.MAX_VALUE, workerThreadFactory);
        socketClass = OioServerSocketChannel.class;
    } else {
        bossGroup = new NioEventLoopGroup(this.numSelectorThreads, selectorThreadFactory);
        workerGroup = new NioEventLoopGroup(this.numWorkerThreads, workerThreadFactory);
        socketClass = NioServerSocketChannel.class;
    }

    InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem();
    String pwd = system.getConfig().getRedisPassword();
    final byte[] pwdB = Coder.stringToBytes(pwd);

    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(socketClass).childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            if (logger.fineEnabled())
                logger.fine("GeodeRedisServer-Connection established with " + ch.remoteAddress());
            ChannelPipeline p = ch.pipeline();
            p.addLast(ByteToCommandDecoder.class.getSimpleName(), new ByteToCommandDecoder());
            p.addLast(ExecutionHandlerContext.class.getSimpleName(),
                    new ExecutionHandlerContext(ch, cache, regionCache, GeodeRedisServer.this, pwdB));
        }
    }).option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.SO_RCVBUF, getBufferSize())
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, GeodeRedisServer.connectTimeoutMillis)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    // Bind and start to accept incoming connections.
    ChannelFuture f = b.bind(new InetSocketAddress(getBindAddress(), serverPort)).sync();

    if (this.logger.infoEnabled()) {
        String logMessage = "GeodeRedisServer started {" + getBindAddress() + ":" + serverPort
                + "}, Selector threads: " + this.numSelectorThreads;
        if (this.singleThreadPerConnection)
            logMessage += ", One worker thread per connection";
        else
            logMessage += ", Worker threads: " + this.numWorkerThreads;
        this.logger.info(logMessage);
    }
    this.serverChannel = f.channel();
}
From source file:org.apache.hive.spark.client.rpc.RpcServer.java
License:Apache License
public RpcServer(Map<String, String> mapConf) throws IOException, InterruptedException {
    this.config = new RpcConfiguration(mapConf);
    this.group = new NioEventLoopGroup(this.config.getRpcThreadCount(),
            new ThreadFactoryBuilder().setNameFormat("RPC-Handler-%d").setDaemon(true).build());
    this.channel = new ServerBootstrap().group(group).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    SaslServerHandler saslHandler = new SaslServerHandler(config);
                    final Rpc newRpc = Rpc.createServer(saslHandler, config, ch, group);
                    saslHandler.rpc = newRpc;

                    Runnable cancelTask = new Runnable() {
                        @Override
                        public void run() {
                            LOG.warn("Timed out waiting for hello from client.");
                            newRpc.close();
                        }
                    };
                    saslHandler.cancelTask = group.schedule(cancelTask,
                            RpcServer.this.config.getServerConnectTimeoutMs(), TimeUnit.MILLISECONDS);
                }
            }).option(ChannelOption.SO_BACKLOG, 1).option(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true).bind(0).sync().channel();
    this.port = ((InetSocketAddress) channel.localAddress()).getPort();
    this.pendingClients = Maps.newConcurrentMap();
    this.address = this.config.getServerAddress();
}
From source file:org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient.java
License:Apache License
public void run() {
    if (!isRunning()) {
        // manually stopped
        return;
    }

    Bootstrap b;
    synchronized (this.sync) {
        if (this.active) {
            return;
        }
        state = STATUS_STARTING;
        handler = new StandbyClientHandler(this.store, observer, running, readTimeoutMs, autoClean);
        group = new NioEventLoopGroup();

        b = new Bootstrap();
        b.group(group);
        b.channel(NioSocketChannel.class);
        b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, readTimeoutMs);
        b.option(ChannelOption.TCP_NODELAY, true);
        b.option(ChannelOption.SO_REUSEADDR, true);
        b.option(ChannelOption.SO_KEEPALIVE, true);

        b.handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel ch) throws Exception {
                ChannelPipeline p = ch.pipeline();
                if (sslContext != null) {
                    p.addLast(sslContext.newHandler(ch.alloc()));
                }
                p.addLast("readTimeoutHandler", new ReadTimeoutHandler(readTimeoutMs, TimeUnit.MILLISECONDS));
                p.addLast(new StringEncoder(CharsetUtil.UTF_8));
                p.addLast(new SnappyFramedDecoder(true));
                p.addLast(new RecordIdDecoder(store));
                p.addLast(handler);
            }
        });
        state = STATUS_RUNNING;
        this.active = true;
    }

    try {
        long startTimestamp = System.currentTimeMillis();
        // Start the client.
        ChannelFuture f = b.connect(host, port).sync();
        // Wait until the connection is closed.
        f.channel().closeFuture().sync();
        this.failedRequests = 0;
        this.syncStartTimestamp = startTimestamp;
        this.syncEndTimestamp = System.currentTimeMillis();
        this.lastSuccessfulRequest = syncEndTimestamp / 1000;
    } catch (Exception e) {
        this.failedRequests++;
        log.error("Failed synchronizing state.", e);
    } finally {
        synchronized (this.sync) {
            this.active = false;
            shutdownNetty();
        }
    }
}
From source file:org.apache.jackrabbit.oak.plugins.segment.standby.server.StandbyServer.java
License:Apache License
public StandbyServer(int port, final SegmentStore store, String[] allowedClientIPRanges, boolean secure)
        throws CertificateException, SSLException {
    this.port = port;

    if (secure) {
        SelfSignedCertificate ssc = new SelfSignedCertificate();
        sslContext = SslContext.newServerContext(ssc.certificate(), ssc.privateKey());
    }

    observer = new CommunicationObserver("primary");
    handler = new StandbyServerHandler(store, observer, allowedClientIPRanges);
    bossGroup = new NioEventLoopGroup(1);
    workerGroup = new NioEventLoopGroup();

    final MBeanServer jmxServer = ManagementFactory.getPlatformMBeanServer();
    try {
        jmxServer.registerMBean(new StandardMBean(this, StandbyStatusMBean.class),
                new ObjectName(this.getMBeanName()));
    } catch (Exception e) {
        log.error("can't register standby status mbean", e);
    }

    b = new ServerBootstrap();
    b.group(bossGroup, workerGroup);
    b.channel(NioServerSocketChannel.class);

    b.option(ChannelOption.TCP_NODELAY, true);
    b.option(ChannelOption.SO_REUSEADDR, true);
    b.childOption(ChannelOption.TCP_NODELAY, true);
    b.childOption(ChannelOption.SO_REUSEADDR, true);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);

    b.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            ChannelPipeline p = ch.pipeline();
            if (sslContext != null) {
                p.addLast(sslContext.newHandler(ch.alloc()));
            }
            p.addLast(new LineBasedFrameDecoder(8192));
            p.addLast(new StringDecoder(CharsetUtil.UTF_8));
            p.addLast(new SnappyFramedEncoder());
            p.addLast(new RecordIdEncoder());
            p.addLast(new SegmentEncoder());
            p.addLast(new BlobEncoder());
            p.addLast(handler);
        }
    });
}
From source file:org.apache.reef.wake.remote.transport.netty.NettyMessagingTransport.java
License:Apache License
/**
 * Constructs a messaging transport
 *
 * @param hostAddress the server host address
 * @param port the server listening port; when it is 0, randomly assign a port number
 * @param clientStage the client-side stage that handles transport events
 * @param serverStage the server-side stage that handles transport events
 * @param numberOfTries the number of tries of connection
 * @param retryTimeout the timeout of reconnection
 */
public NettyMessagingTransport(final String hostAddress, int port, final EStage<TransportEvent> clientStage,
        final EStage<TransportEvent> serverStage, final int numberOfTries, final int retryTimeout) {

    if (port < 0) {
        throw new RemoteRuntimeException("Invalid server port: " + port);
    }

    this.numberOfTries = numberOfTries;
    this.retryTimeout = retryTimeout;
    this.clientEventListener = new NettyClientEventListener(this.addrToLinkRefMap, clientStage);
    this.serverEventListener = new NettyServerEventListener(this.addrToLinkRefMap, serverStage);

    this.serverBossGroup = new NioEventLoopGroup(SERVER_BOSS_NUM_THREADS,
            new DefaultThreadFactory(CLASS_NAME + "ServerBoss"));
    this.serverWorkerGroup = new NioEventLoopGroup(SERVER_WORKER_NUM_THREADS,
            new DefaultThreadFactory(CLASS_NAME + "ServerWorker"));
    this.clientWorkerGroup = new NioEventLoopGroup(CLIENT_WORKER_NUM_THREADS,
            new DefaultThreadFactory(CLASS_NAME + "ClientWorker"));

    this.clientBootstrap = new Bootstrap();
    this.clientBootstrap.group(this.clientWorkerGroup).channel(NioSocketChannel.class)
            .handler(new NettyChannelInitializer(new NettyDefaultChannelHandlerFactory("client",
                    this.clientChannelGroup, this.clientEventListener)))
            .option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.SO_KEEPALIVE, true);

    this.serverBootstrap = new ServerBootstrap();
    this.serverBootstrap.group(this.serverBossGroup, this.serverWorkerGroup)
            .channel(NioServerSocketChannel.class)
            .childHandler(new NettyChannelInitializer(new NettyDefaultChannelHandlerFactory("server",
                    this.serverChannelGroup, this.serverEventListener)))
            .option(ChannelOption.SO_BACKLOG, 128).option(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true);

    LOG.log(Level.FINE, "Binding to {0}", port);

    Channel acceptor = null;
    try {
        if (port > 0) {
            acceptor = this.serverBootstrap.bind(new InetSocketAddress(hostAddress, port)).sync().channel();
        } else {
            while (acceptor == null) {
                port = randPort.nextInt(PORT_START) + PORT_RANGE;
                LOG.log(Level.FINEST, "Try port {0}", port);
                try {
                    acceptor = this.serverBootstrap.bind(new InetSocketAddress(hostAddress, port)).sync()
                            .channel();
                } catch (final Exception ex) {
                    if (ex instanceof BindException) {
                        LOG.log(Level.FINEST, "The port {0} is already bound. Try again", port);
                    } else {
                        throw ex;
                    }
                }
            }
        }
    } catch (final Exception ex) {
        final RuntimeException transportException = new TransportRuntimeException(
                "Cannot bind to port " + port);
        LOG.log(Level.SEVERE, "Cannot bind to port " + port, ex);
        this.clientWorkerGroup.shutdownGracefully();
        this.serverBossGroup.shutdownGracefully();
        this.serverWorkerGroup.shutdownGracefully();
        throw transportException;
    }

    this.acceptor = acceptor;
    this.serverPort = port;
    this.localAddress = new InetSocketAddress(hostAddress, this.serverPort);

    LOG.log(Level.FINE, "Starting netty transport socket address: {0}", this.localAddress);
}
From source file:org.apache.rocketmq.remoting.netty.NettyRemotingServer.java
License:Apache License
@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(nettyServerConfig.getServerWorkerThreads(),
            new ThreadFactory() {
                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyServerCodecThread_" + this.threadIndex.incrementAndGet());
                }
            });

    ServerBootstrap childHandler = this.serverBootstrap
            .group(this.eventLoopGroupBoss, this.eventLoopGroupSelector).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, 1024).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_KEEPALIVE, false).childOption(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_SNDBUF, nettyServerConfig.getServerSocketSndBufSize())
            .option(ChannelOption.SO_RCVBUF, nettyServerConfig.getServerSocketRcvBufSize())
            .localAddress(new InetSocketAddress(this.nettyServerConfig.getListenPort()))
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(defaultEventExecutorGroup, new NettyEncoder(), new NettyDecoder(),
                            new IdleStateHandler(0, 0, nettyServerConfig.getServerChannelMaxIdleTimeSeconds()),
                            new NettyConnetManageHandler(), new NettyServerHandler());
                }
            });

    if (nettyServerConfig.isServerPooledByteBufAllocatorEnable()) {
        childHandler.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }

    try {
        ChannelFuture sync = this.serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        this.port = addr.getPort();
    } catch (InterruptedException e1) {
        throw new RuntimeException("this.serverBootstrap.bind().sync() InterruptedException", e1);
    }

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }

    this.timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
}
From source file:org.apache.spark.network.server.TransportServer.java
License:Apache License
private void init(String hostToBind, int portToBind) {

    IOMode ioMode = IOMode.valueOf(conf.ioMode());
    EventLoopGroup bossGroup = NettyUtils.createEventLoop(ioMode, conf.serverThreads(),
            conf.getModuleName() + "-server");
    EventLoopGroup workerGroup = bossGroup;

    PooledByteBufAllocator allocator = NettyUtils.createPooledByteBufAllocator(conf.preferDirectBufs(),
            true /* allowCache */, conf.serverThreads());

    bootstrap = new ServerBootstrap().group(bossGroup, workerGroup)
            .channel(NettyUtils.getServerChannelClass(ioMode)).option(ChannelOption.ALLOCATOR, allocator)
            .option(ChannelOption.SO_REUSEADDR, !SystemUtils.IS_OS_WINDOWS)
            .childOption(ChannelOption.ALLOCATOR, allocator);

    this.metrics = new NettyMemoryMetrics(allocator, conf.getModuleName() + "-server", conf);

    if (conf.backLog() > 0) {
        bootstrap.option(ChannelOption.SO_BACKLOG, conf.backLog());
    }

    if (conf.receiveBuf() > 0) {
        bootstrap.childOption(ChannelOption.SO_RCVBUF, conf.receiveBuf());
    }

    if (conf.sendBuf() > 0) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, conf.sendBuf());
    }

    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) {
            RpcHandler rpcHandler = appRpcHandler;
            for (TransportServerBootstrap bootstrap : bootstraps) {
                rpcHandler = bootstrap.doBootstrap(ch, rpcHandler);
            }
            context.initializePipeline(ch, rpcHandler);
        }
    });

    InetSocketAddress address = hostToBind == null ? new InetSocketAddress(portToBind)
            : new InetSocketAddress(hostToBind, portToBind);
    channelFuture = bootstrap.bind(address);
    channelFuture.syncUninterruptibly();

    port = ((InetSocketAddress) channelFuture.channel().localAddress()).getPort();
    logger.debug("Shuffle server started on port: {}", port);
}
From source file:org.apache.spark.sql.hive.thriftserver.rsc.RpcServer.java
License:Apache License
public RpcServer(RSCConf lconf) throws IOException, InterruptedException {
    this.config = lconf;
    this.group = new NioEventLoopGroup(this.config.getInt(RSCConf.Entry.RPC_MAX_THREADS),
            Utils.newDaemonThreadFactory("RPC-Handler-%d"));
    this.channel = new ServerBootstrap().group(group).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    SaslServerHandler saslHandler = new SaslServerHandler(config);
                    final Rpc newRpc = Rpc.createServer(saslHandler, config, ch, group);
                    saslHandler.rpc = newRpc;

                    Runnable cancelTask = new Runnable() {
                        @Override
                        public void run() {
                            LOG.warn("Timed out waiting for hello from client.");
                            newRpc.close();
                        }
                    };
                    saslHandler.cancelTask = group.schedule(cancelTask,
                            config.getTimeAsMs(RSCConf.Entry.RPC_CLIENT_HANDSHAKE_TIMEOUT),
                            TimeUnit.MILLISECONDS);
                }
            }).option(ChannelOption.SO_BACKLOG, 1).option(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true).bind(0).sync().channel();
    this.port = ((InetSocketAddress) channel.localAddress()).getPort();
    this.pendingClients = new ConcurrentHashMap<>();

    String address = config.get(RSCConf.Entry.RPC_SERVER_ADDRESS);
    if (address == null) {
        address = config.findLocalAddress();
    }
    this.address = address;
}
From source file:org.apache.tajo.rpc.NettyClientBase.java
License:Apache License
protected void init(ChannelInitializer<Channel> initializer) {
    this.bootstrap = new Bootstrap();
    this.bootstrap.channel(NioSocketChannel.class).handler(initializer)
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECTION_TIMEOUT)
            .option(ChannelOption.SO_RCVBUF, 1048576 * 10).option(ChannelOption.TCP_NODELAY, true);
}
From source file:org.apache.tajo.rpc.NettyServerBase.java
License:Apache License
public void init(ChannelInitializer<Channel> initializer, int workerNum) {
    for (RpcEventListener listener : listeners) {
        listener.onBeforeInit(this);
    }

    bootstrap = RpcChannelFactory.createServerChannelFactory(serviceName, workerNum);

    this.initializer = initializer;
    bootstrap.channel(NioServerSocketChannel.class).childHandler(initializer)
            .option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
            .childOption(ChannelOption.SO_RCVBUF, 1048576 * 10);

    for (RpcEventListener listener : listeners) {
        listener.onAfterInit(this);
    }
}