Usage examples for io.netty.channel.ChannelOption#SO_REUSEADDR

ChannelOption.SO_REUSEADDR corresponds to the standard SO_REUSEADDR socket option. When enabled, a channel may bind to a local address that is still in the TIME_WAIT state from an earlier connection, and on most platforms several datagram sockets may bind to the same local address, which multicast listeners rely on. The examples below show the option being configured on Bootstrap and ServerBootstrap instances in several open-source projects.
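Before the project examples, here is a minimal self-contained sketch (not drawn from any of the projects listed below) showing the usual way to enable the option on a TCP server so that a restarted process can rebind its port immediately. The class name ReuseAddrExample, the port 8080, and the empty ChannelInitializer are placeholders for illustration only.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public final class ReuseAddrExample {

    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap bootstrap = new ServerBootstrap()
                    .group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    // Allow rebinding even if a previous socket on this port is still in TIME_WAIT.
                    .option(ChannelOption.SO_REUSEADDR, true)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Pipeline intentionally left empty for this sketch;
                            // a real server would add its handlers here.
                        }
                    });
            ChannelFuture future = bootstrap.bind(8080).sync();
            future.channel().closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}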
From source file: com.whirvis.jraknet.client.RakNetClient.java
License: Open Source License
/**
 * Connects the client to a server.
 *
 * @param address
 *            the address of the server to connect to.
 * @throws NullPointerException
 *             if the <code>address</code> or the IP address of the
 *             <code>address</code> is <code>null</code>.
 * @throws IllegalStateException
 *             if the client is currently connected to a server.
 * @throws RakNetException
 *             if an error occurs during connection or login.
 */
public void connect(InetSocketAddress address) throws NullPointerException, IllegalStateException, RakNetException {
    if (address == null) {
        throw new NullPointerException("Address cannot be null");
    } else if (address.getAddress() == null) {
        throw new NullPointerException("IP address cannot be null");
    } else if (this.isConnected()) {
        throw new IllegalStateException("Client is currently connected to a server");
    } else if (listeners.isEmpty()) {
        log.warn("Client has no listeners");
    }

    // Initiate networking
    this.serverAddress = address;
    try {
        this.bootstrap = new Bootstrap();
        this.group = new NioEventLoopGroup();
        this.handler = new RakNetClientHandler(this);
        bootstrap.channel(NioDatagramChannel.class).group(group).handler(handler);
        bootstrap.option(ChannelOption.SO_BROADCAST, true).option(ChannelOption.SO_REUSEADDR, false);
        this.channel = (bindingAddress != null ? bootstrap.bind(bindingAddress) : bootstrap.bind(0)).sync()
                .channel();
        this.bindAddress = (InetSocketAddress) channel.localAddress();
        this.setMaximumTransferUnitSizes(DEFAULT_TRANSFER_UNIT_SIZES);
        log.debug("Initialized networking");
    } catch (InterruptedException e) {
        throw new RakNetException(e);
    }

    // Prepare connection
    MaximumTransferUnit[] units = MaximumTransferUnit.sort(maximumTransferUnits);
    for (MaximumTransferUnit unit : maximumTransferUnits) {
        unit.reset();
        log.debug("Reset maximum transfer unit with size of " + unit.getSize() + " bytes ("
                + (unit.getSize() * 8) + " bits)");
    }
    this.peerFactory = new PeerFactory(this, address, bootstrap, channel, units[0].getSize(),
            highestMaximumTransferUnitSize);
    log.debug("Reset maximum transfer units and created peer factory");
    peerFactory.startAssembly(units);

    // Send connection packet
    ConnectionRequest connectionRequest = new ConnectionRequest();
    connectionRequest.clientGuid = this.guid;
    connectionRequest.timestamp = System.currentTimeMillis() - timestamp;
    connectionRequest.encode();
    peer.sendMessage(Reliability.RELIABLE_ORDERED, connectionRequest);
    log.debug("Sent connection request to server");

    // Create and start peer update thread
    RakNetClient client = this;
    this.peerThread = new Thread(
            RakNetClient.class.getSimpleName() + "-Peer-Thread-" + Long.toHexString(guid).toUpperCase()) {

        @Override
        public void run() {
            while (peer != null && !this.isInterrupted()) {
                try {
                    Thread.sleep(0, 1); // Lower CPU usage
                } catch (InterruptedException e) {
                    this.interrupt(); // Interrupted during sleep
                    continue;
                }
                if (peer != null) {
                    if (!peer.isDisconnected()) {
                        try {
                            peer.update();
                        } catch (Throwable throwable) {
                            client.callEvent(listener -> listener.onPeerException(client, peer, throwable));
                            if (!peer.isDisconnected()) {
                                client.disconnect(throwable);
                            }
                        }
                    }
                }
            }
        }

    };
    peerThread.start();
    log.debug("Created and started peer update thread");
    log.info("Connected to server with address " + address);
}
From source file: com.whirvis.jraknet.discovery.DiscoveryThread.java
License: Open Source License
/**
 * Allocates a discovery thread.
 */
protected DiscoveryThread() {
    this.log = LogManager.getLogger(DiscoveryThread.class);
    this.bootstrap = new Bootstrap();
    this.group = new NioEventLoopGroup();
    this.handler = new DiscoveryHandler();
    bootstrap.channel(NioDatagramChannel.class).group(group).handler(handler);
    bootstrap.option(ChannelOption.SO_BROADCAST, true).option(ChannelOption.SO_REUSEADDR, false);
    try {
        this.channel = bootstrap.bind(0).sync().channel();
    } catch (InterruptedException e) {
        this.interrupt(); // Cause thread to immediately break out of loop
        Discovery.setDiscoveryMode(DiscoveryMode.DISABLED);
        log.error("Failed to bind channel necessary for broadcasting pings, disabled discovery system");
    }
    this.setName(log.getName());
}
From source file: com.whirvis.jraknet.server.RakNetServer.java
License: Open Source License
/**
 * Starts the server.
 *
 * @throws IllegalStateException
 *             if the server is already running.
 * @throws RakNetException
 *             if an error occurs during startup.
 */
public void start() throws IllegalStateException, RakNetException {
    if (running == true) {
        throw new IllegalStateException("Server is already running");
    } else if (listeners.isEmpty()) {
        log.warn("Server has no listeners");
    }
    try {
        this.bootstrap = new Bootstrap();
        this.group = new NioEventLoopGroup();
        this.handler = new RakNetServerHandler(this);
        bootstrap.handler(handler);

        // Create bootstrap and bind channel
        bootstrap.channel(NioDatagramChannel.class).group(group);
        bootstrap.option(ChannelOption.SO_BROADCAST, true).option(ChannelOption.SO_REUSEADDR, false)
                .option(ChannelOption.SO_SNDBUF, maximumTransferUnit)
                .option(ChannelOption.SO_RCVBUF, maximumTransferUnit);
        this.channel = (bindingAddress != null ? bootstrap.bind(bindingAddress) : bootstrap.bind(0)).sync()
                .channel();
        this.bindAddress = (InetSocketAddress) channel.localAddress();
        this.running = true;
        log.debug("Created and bound bootstrap");

        // Create and start peer update thread
        RakNetServer server = this;
        this.peerThread = new Thread(
                RakNetServer.class.getSimpleName() + "-Peer-Thread-" + Long.toHexString(guid).toUpperCase()) {

            @Override
            public void run() {
                HashMap<RakNetClientPeer, Throwable> disconnected = new HashMap<RakNetClientPeer, Throwable>();
                while (server.running == true && !this.isInterrupted()) {
                    try {
                        Thread.sleep(0, 1); // Lower CPU usage
                    } catch (InterruptedException e) {
                        this.interrupt(); // Interrupted during sleep
                        continue;
                    }
                    for (RakNetClientPeer peer : clients.values()) {
                        if (!peer.isDisconnected()) {
                            try {
                                peer.update();
                                if (peer.getPacketsReceivedThisSecond() >= RakNet.getMaxPacketsPerSecond()) {
                                    server.blockAddress(peer.getInetAddress(), "Too many packets",
                                            RakNet.MAX_PACKETS_PER_SECOND_BLOCK);
                                }
                            } catch (Throwable throwable) {
                                server.callEvent(listener -> listener.onPeerException(server, peer, throwable));
                                disconnected.put(peer, throwable);
                            }
                        }
                    }

                    /*
                     * Disconnect peers.
                     *
                     * This must be done here as simply removing a client from the
                     * clients map would be an incorrect way of disconnecting a
                     * client. This means that calling the disconnect() method is
                     * required. However, calling it while in the loop would cause
                     * a ConcurrentModificationException. To get around this, the
                     * clients that need to be disconnected are properly
                     * disconnected after the loop is finished. This is done simply
                     * by having them and their disconnect reason be put in a
                     * disconnection map.
                     */
                    if (disconnected.size() > 0) {
                        for (RakNetClientPeer peer : disconnected.keySet()) {
                            server.disconnect(peer, disconnected.get(peer));
                        }
                        disconnected.clear();
                    }
                }
            }

        };
        peerThread.start();
        log.debug("Created and started peer update thread");
        this.callEvent(listener -> listener.onStart(this));
    } catch (InterruptedException e) {
        this.running = false;
        throw new RakNetException(e);
    }
    log.info("Started server");
}
From source file: com.whizzosoftware.hobson.ssdp.SSDPPlugin.java
License: Open Source License
public void createSockets() {
    try {
        logger.debug("Using network interface: {}; local address: {}", nic, localAddress);
        if (nic == null) {
            logger.error("Unable to determine local NIC; discovery may not work properly");
        }

        if (nic != null) {
            Bootstrap clientBootstrap = new Bootstrap().group(eventLoopGroup)
                    .channelFactory(new ChannelFactory<Channel>() {
                        @Override
                        public Channel newChannel() {
                            return new NioDatagramChannel(InternetProtocolFamily.IPv4);
                        }
                    }).localAddress(groupAddress).option(ChannelOption.IP_MULTICAST_IF, nic)
                    .option(ChannelOption.SO_REUSEADDR, true).handler(new SSDPInboundHandler(this));
            clientBootstrap.bind().addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture channelFuture) throws Exception {
                    multicastChannel = (NioDatagramChannel) channelFuture.channel();
                    multicastChannel.joinGroup(groupAddress, nic);
                }
            });
        }

        Bootstrap serverBootstrap = new Bootstrap().group(eventLoopGroup)
                .channelFactory(new ChannelFactory<Channel>() {
                    @Override
                    public Channel newChannel() {
                        return new NioDatagramChannel(InternetProtocolFamily.IPv4);
                    }
                }).localAddress(localAddress).option(ChannelOption.IP_MULTICAST_IF, nic)
                .option(ChannelOption.SO_REUSEADDR, true).handler(new SSDPInboundHandler(this));
        serverBootstrap.bind().addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture channelFuture) throws Exception {
                localChannel = (NioDatagramChannel) channelFuture.channel();
                sendDiscoveryPacket();
            }
        });
    } catch (Exception e) {
        e.printStackTrace();
    }
}
From source file: com.wolfbe.configcenter.remoting.netty.NettyRemotingServer.java
License: Apache License
@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(
            nettyServerConfig.getServerWorkerThreads(),
            new ThreadFactory() {
                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyServerCodecThread_" + this.threadIndex.incrementAndGet());
                }
            });

    ServerBootstrap childHandler =
            this.serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupSelector)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    .option(ChannelOption.SO_REUSEADDR, true)
                    .option(ChannelOption.SO_KEEPALIVE, false)
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    .option(ChannelOption.SO_SNDBUF, nettyServerConfig.getServerSocketSndBufSize())
                    .option(ChannelOption.SO_RCVBUF, nettyServerConfig.getServerSocketRcvBufSize())
                    .localAddress(new InetSocketAddress(this.nettyServerConfig.getListenPort()))
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        public void initChannel(SocketChannel ch) throws Exception {
                            ch.pipeline().addLast(defaultEventExecutorGroup,
                                    new NettyEncoder(),
                                    new NettyDecoder(),
                                    new IdleStateHandler(0, 0, nettyServerConfig.getServerChannelMaxIdleTimeSeconds()),
                                    new NettyConnetManageHandler(),
                                    new NettyServerHandler());
                        }
                    });

    if (nettyServerConfig.isServerPooledByteBufAllocatorEnable()) {
        childHandler.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }

    try {
        ChannelFuture sync = this.serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        this.port = addr.getPort();
    } catch (InterruptedException e1) {
        throw new RuntimeException("this.serverBootstrap.bind().sync() InterruptedException", e1);
    }

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }

    this.timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
}
From source file: com.xyz.rpc.netty4.server.Netty4Server.java
License: Apache License
public void start(int listenPort, final ExecutorService ignore) throws Exception {
    if (!startFlag.compareAndSet(false, true)) {
        return;
    }
    bossGroup = new NioEventLoopGroup();
    ioGroup = new NioEventLoopGroup();
    businessGroup = new DefaultEventExecutorGroup(businessThreads);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, ioGroup).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.TCP_NODELAY,
                    Boolean.parseBoolean(System.getProperty("nfs.rpc.tcp.nodelay", "true")))
            .childOption(ChannelOption.SO_REUSEADDR,
                    Boolean.parseBoolean(System.getProperty("nfs.rpc.tcp.reuseaddress", "true")))
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast("decoder", new Netty4ProtocolDecoder());
                    ch.pipeline().addLast("encoder", new Netty4ProtocolEncoder());
                    ch.pipeline().addLast(businessGroup, "handler", new Netty4ServerHandler());
                }
            });
    b.bind(new InetSocketAddress("127.0.0.1", listenPort)).sync();
    LOGGER.warn("Server started,listen at: " + listenPort + ", businessThreads is " + businessThreads);
}
From source file: com.zaradai.distributor.messaging.netty.NettyServer.java
License: Apache License
private void configure(ServerBootstrap b) {
    b.option(ChannelOption.SO_BACKLOG, config.getAcceptBacklog());
    b.option(ChannelOption.SO_REUSEADDR, config.getReuseAddress());
    b.childOption(ChannelOption.TCP_NODELAY, config.getTcpNoDelay());
    b.childOption(ChannelOption.SO_KEEPALIVE, config.getKeepAlive());
}
From source file: com.ztesoft.zsmart.zmq.remoting.netty.NettyRemotingServer.java
License: Apache License
@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(
            nettyServerConfig.getServerWorkThreads(),
            new ThreadFactory() {
                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyServerWorkerThread_" + this.threadIndex.incrementAndGet());
                }
            });

    ServerBootstrap childHandler =
            this.serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupWorker)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    .option(ChannelOption.SO_REUSEADDR, true)
                    .option(ChannelOption.SO_KEEPALIVE, false)
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    .option(ChannelOption.SO_SNDBUF, nettyServerConfig.getServerSocketSndBufSize())
                    .option(ChannelOption.SO_RCVBUF, nettyServerConfig.getServerSocketRcvBufSize())
                    .localAddress(new InetSocketAddress(this.nettyServerConfig.getListenPort()))
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        public void initChannel(SocketChannel ch) throws Exception {
                            ch.pipeline().addLast(
                                    defaultEventExecutorGroup,
                                    new NettyEncoder(),
                                    new NettyDecoder(),
                                    new IdleStateHandler(0, 0, nettyServerConfig.getServerChannelMaxIdleTimeSeconds()),
                                    new NettyConnetManageHandler(),
                                    new NettyServerHandler());
                        }
                    });

    if (nettyServerConfig.isServerPooledByteBufAllocatorEnable()) {
        childHandler.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }

    try {
        ChannelFuture sync = this.serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        this.port = addr.getPort();
    } catch (InterruptedException e1) {
        throw new RuntimeException("this.serverBootstrap.bind().sync() InterruptedException", e1);
    }

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }

    // Scan the response table every second, starting after a three-second delay
    this.timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
}
From source file: darks.grid.network.GridMessageServer.java
License: Apache License
@Override
public boolean initialize() {
    try {
        NetworkConfig config = GridRuntime.config().getNetworkConfig();
        int bossNum = Runtime.getRuntime().availableProcessors() * config.getServerBossThreadDelta();
        int workerNum = config.getServerWorkerThreadNumber();
        bossGroup = new NioEventLoopGroup(bossNum);
        workerGroup = new NioEventLoopGroup(workerNum);
        super.initialize();
        bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, config.isTcpNodelay())
                .option(ChannelOption.SO_KEEPALIVE, config.isTcpKeepAlive())
                .option(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true))
                // .option(ChannelOption.SO_TIMEOUT, config.getRecvTimeout())
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeout())
                .option(ChannelOption.SO_REUSEADDR, config.isTcpReuseAddr())
                // .option(ChannelOption.SO_BACKLOG, config.getTcpBacklog())
                .option(ChannelOption.SO_SNDBUF, config.getTcpSendBufferSize())
                .option(ChannelOption.SO_RCVBUF, config.getTcpRecvBufferSize())
                .childOption(ChannelOption.TCP_NODELAY, config.isTcpNodelay())
                .childOption(ChannelOption.SO_KEEPALIVE, config.isTcpKeepAlive())
                .childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true))
                .childOption(ChannelOption.SO_TIMEOUT, config.getRecvTimeout())
                .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeout())
                .childOption(ChannelOption.SO_REUSEADDR, config.isTcpReuseAddr())
                .childOption(ChannelOption.SO_BACKLOG, config.getTcpBacklog())
                .childOption(ChannelOption.SO_SNDBUF, config.getTcpSendBufferSize())
                .childOption(ChannelOption.SO_RCVBUF, config.getTcpRecvBufferSize());
        bootstrap.childHandler(newChannelHandler());
        return true;
    } catch (Exception e) {
        log.error(e.getMessage(), e);
        return false;
    }
}
From source file: de.jackwhite20.comix.Comix.java
License: Open Source License
public void start() {
    System.setProperty("java.net.preferIPv4Stack", "true");
    ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.DISABLED);

    AnsiConsole.systemInstall();
    LogManager.getLogManager().reset();

    logger = new ComixLogger(consoleReader);
    logger.log(Level.INFO, "Comix", "------ Comix v.0.1 ------");

    loadConfig();

    logger.log(Level.INFO, "Load-Balancer", (targets.size() > 0) ? "Targets:" : "No Target Servers found!");
    targets.forEach(t -> logger.log(Level.INFO, "Load-Balancer",
            t.getName() + " - " + t.getHost() + ":" + t.getPort()));

    logger.log(Level.INFO, "Commands", "Registering commands...");
    registerCommands();

    logger.log(Level.INFO, "Comix", "Starting Comix on " + balancerHost + ":" + balancerPort + "...");

    balancingStrategy = new RoundRobinBalancingStrategy(targets);

    new Timer("CheckTargets").scheduleAtFixedRate(new CheckTargets(balancingStrategy), 0,
            TimeUnit.SECONDS.toMillis(comixConfig.getCheckTime()));

    bossGroup = new NioEventLoopGroup(1);
    workerGroup = new NioEventLoopGroup(comixConfig.getThreads());

    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, true)
                .option(ChannelOption.SO_BACKLOG, comixConfig.getBacklog())
                .option(ChannelOption.SO_REUSEADDR, true)
                .childOption(ChannelOption.TCP_NODELAY, true)
                .childOption(ChannelOption.AUTO_READ, false)
                .childOption(ChannelOption.SO_TIMEOUT, 4000)
                .childHandler(new ComixChannelInitializer());

        ChannelFuture f = bootstrap.bind(comixConfig.getPort()).sync();

        reload();

        logger.log(Level.INFO, "Comix", "Comix is started!");

        f.channel().closeFuture().sync();

        running = false;
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        shutdown();
    }
}