List of usage examples for io.netty.channel ChannelOption AUTO_READ
ChannelOption AUTO_READ
To view the source code for io.netty.channel ChannelOption AUTO_READ, click the Source Link.
From source file:me.bigteddy98.movingmotd.ClientSideConnection.java
License:Open Source License
/**
 * Called when a client connects to the proxy. Captures the inbound channel and
 * pipeline, then opens the matching outbound connection to the target server.
 * AUTO_READ is disabled on the outbound side so no data flows until the
 * connection attempt has completed.
 */
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
    this.networkManager.incomingChannel = ctx.channel();
    this.networkManager.clientsidePipeline = ctx.pipeline();

    // Reuse the inbound channel's event loop so both directions run on one thread.
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(networkManager.incomingChannel.eventLoop());
    bootstrap.channel(ctx.channel().getClass());
    bootstrap.handler(new ServerSideConnectionInitialization(networkManager));
    bootstrap.option(ChannelOption.AUTO_READ, false);

    ChannelFuture connectFuture = bootstrap.connect(this.toHostname, this.toPort);
    connectFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                // Backend link is up: start pulling data from the client.
                networkManager.incomingChannel.read();
            } else {
                // Could not reach the backend: drop the client connection.
                networkManager.incomingChannel.close();
            }
        }
    });
    this.outgoingChannel = connectFuture.channel();
}
From source file:me.bigteddy98.movingmotd.Main.java
License:Open Source License
/**
 * Spawns a daemon-style listener thread that binds the proxy's server socket.
 * Boss threads accept connections, worker threads service them; both thread
 * factories produce low-priority daemon threads inside a dedicated
 * ThreadGroup. AUTO_READ is disabled on child channels so each connection is
 * read explicitly once its backend link is ready.
 */
@Override
public void run() {
    nettyThreadGroup = new ThreadGroup(Thread.currentThread().getThreadGroup(), "NettyThreadGroup");
    new Thread(this.nettyThreadGroup, new Runnable() {
        @Override
        public void run() {
            final ThreadGroup group = new ThreadGroup(Main.this.nettyThreadGroup, "PortListener");

            // Acceptor pool.
            EventLoopGroup bossGroup = new NioEventLoopGroup(MAX_NETTY_BOSS_THREADS, new ThreadFactory() {
                private int counter = 0;
                private final String newName = group.getName() + "\\nettyboss";

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(group, r, newName + "\\" + counter++);
                    t.setPriority(Thread.NORM_PRIORITY - 1);
                    t.setDaemon(true);
                    return t;
                }
            });

            // I/O worker pool.
            EventLoopGroup workerGroup = new NioEventLoopGroup(MAX_NETTY_WORKER_THREADS, new ThreadFactory() {
                private int counter = 0;
                private final String newName = group.getName() + "\\nettyworker";

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(group, r, newName + "\\" + counter++);
                    t.setPriority(Thread.NORM_PRIORITY - 1);
                    t.setDaemon(true);
                    return t;
                }
            });

            try {
                ServerBootstrap bootstrap = new ServerBootstrap();
                bootstrap.group(bossGroup, workerGroup);
                bootstrap.channel(NioServerSocketChannel.class);
                bootstrap.childHandler(new ClientSideConnectionInitialization("localhost", toPort));
                // Reads are triggered manually after the backend connection succeeds.
                bootstrap.childOption(ChannelOption.AUTO_READ, false);
                // Blocks until the server channel is closed.
                bootstrap.bind(fromPort).sync().channel().closeFuture().sync();
            } catch (InterruptedException e) {
                e.printStackTrace();
                // Restore the interrupt flag so outer code can observe the interruption.
                Thread.currentThread().interrupt();
            } finally {
                bossGroup.shutdownGracefully();
                workerGroup.shutdownGracefully();
            }
        }
    }).start();
}
From source file:me.bigteddy98.slimeportal.Main.java
License:Open Source License
/**
 * Boots the wrapped server process, wires up console I/O bridging, and starts
 * the Netty proxy listener.
 *
 * Side effects: starts the child process, registers a shutdown hook that kills
 * it, spawns three threads (process-output reader, stdin command reader, Netty
 * listener), and calls System.exit(0) when the child's output stream ends.
 *
 * @throws Exception if the server process cannot be started
 */
public void run() throws Exception {
    SlimeLogger.info("Starting " + NAME + " version " + VERSION + " developed by " + AUTHOR + "!");
    SlimeLogger.info(
            "Starting server process using commandline " + Arrays.asList(processBuilder).toString() + "...");
    // Launch the wrapped server; merge its stderr into stdout so one reader suffices.
    ProcessBuilder builder = new ProcessBuilder(processBuilder);
    builder.redirectErrorStream(true);
    this.serverProcess = builder.start();
    this.processPrintWriter = new PrintWriter(this.serverProcess.getOutputStream());
    SlimeLogger.info("Server process started.");

    // Make sure the child process dies with the proxy.
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            serverProcess.destroy();
        }
    }));

    // Pump the child's console output into the logger, re-assembling lines that
    // were split across read() chunks.
    new Thread(new Runnable() {
        @Override
        public void run() {
            try (InputStream r = serverProcess.getInputStream()) {
                StringBuilder tmp = new StringBuilder();
                byte[] consoleOutput = new byte[1024];
                int read;
                while ((read = r.read(consoleOutput)) != -1) {
                    String consoleLog = new String(consoleOutput, 0, read);
                    // -1 keeps trailing empty strings so partial lines are preserved.
                    String[] c = consoleLog.split("\n", -1);
                    if (c.length != 0) {
                        if (c.length == 1) {
                            // No newline in this chunk: buffer and wait for more.
                            tmp.append(c[0]);
                        } else {
                            // Flush each completed line; keep the trailing fragment buffered.
                            for (int i = 0; i < c.length - 1; i++) {
                                tmp.append(c[i]);
                                SlimeLogger.info(tmp.toString());
                                tmp.setLength(0);
                            }
                            tmp.append(c[c.length - 1]);
                        }
                    }
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
            // Child process output ended: the server is gone, so exit the proxy too.
            SlimeLogger.warn("Server thread ended!");
            System.exit(0);
        }
    }, "Server Output Reader").start();

    // Forward console input lines as commands to the wrapped server.
    new Thread(new Runnable() {
        @Override
        public void run() {
            try (Scanner in = new Scanner(System.in)) {
                while (in.hasNextLine()) {
                    String newLine = in.nextLine();
                    executeCommand(newLine);
                }
            }
            SlimeLogger.warn("COMMAND LOOP ENDED, this shouldn't happen!");
        }
    }, "CommandReader").start();

    // Netty listener thread: proxies fromPort to the wrapped server on toPort.
    final ThreadGroup nettyListeners = new ThreadGroup(Thread.currentThread().getThreadGroup(),
            "Netty Listeners");
    new Thread(nettyListeners, new Runnable() {
        @Override
        public void run() {
            SlimeLogger.info("Started Netty Server at port " + fromPort + "...");
            final ThreadGroup group = new ThreadGroup(nettyListeners, "Listener-" + toPort);

            // Acceptor pool: low-priority daemon threads in the listener group.
            EventLoopGroup bossGroup = new NioEventLoopGroup(MAX_NETTY_BOSS_THREADS, new ThreadFactory() {
                private int threadCount = 0;
                private String newName = group.getName() + "\\boss";

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(group, r, newName + "\\" + threadCount++);
                    t.setPriority(Thread.NORM_PRIORITY - 1);
                    t.setDaemon(true);
                    return t;
                }
            });

            // I/O worker pool, same thread policy.
            EventLoopGroup workerGroup = new NioEventLoopGroup(MAX_NETTY_WORKER_THREADS, new ThreadFactory() {
                private int threadCount = 0;
                private String newName = group.getName() + "\\worker";

                @Override
                public Thread newThread(Runnable r) {
                    Thread t = new Thread(group, r, newName + "\\" + threadCount++);
                    t.setPriority(Thread.NORM_PRIORITY - 1);
                    t.setDaemon(true);
                    return t;
                }
            });

            try {
                ServerBootstrap bootstrab = new ServerBootstrap();
                bootstrab.group(bossGroup, workerGroup);
                bootstrab.channel(NioServerSocketChannel.class);
                bootstrab.childHandler(new ClientboundConnectionInitializer("localhost", toPort));
                // Reads are triggered explicitly once the backend link is established.
                bootstrab.childOption(ChannelOption.AUTO_READ, false);
                // Blocks this listener thread until the server channel closes.
                bootstrab.bind(fromPort).sync().channel().closeFuture().sync();
            } catch (InterruptedException e) {
                e.printStackTrace();
            } finally {
                bossGroup.shutdownGracefully();
                workerGroup.shutdownGracefully();
            }
        }
    }).start();
}
From source file:me.binf.socks5.client.proxy.HexDumpProxyFrontendHandler.java
License:Apache License
@Override public void channelActive(ChannelHandlerContext ctx) { final Channel inboundChannel = ctx.channel(); // Start the connection attempt. Bootstrap b = new Bootstrap(); b.group(inboundChannel.eventLoop()).channel(ctx.channel().getClass()) .handler(new HexDumpProxyBackendHandler(inboundChannel)).option(ChannelOption.AUTO_READ, false); ChannelFuture f = b.connect(remoteHost, remotePort); outboundChannel = f.channel();// ww w. ja v a2 s. c o m f.addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) { if (future.isSuccess()) { proxyService.noticeView("?" + remoteHost + ":" + remotePort + "?!"); inboundChannel.read(); } else { proxyService.noticeView("?" + remoteHost + ":" + remotePort + "!"); inboundChannel.close(); } } }); }
From source file:org.apache.hyracks.http.server.HttpServer.java
License:Apache License
protected void doStart() throws InterruptedException { /*/* w w w . ja v a 2s . c o m*/ * This is a hacky way to ensure that IServlets with more specific paths are checked first. * For example: * "/path/to/resource/" * is checked before * "/path/to/" * which in turn is checked before * "/path/" * Note that it doesn't work for the case where multiple paths map to a single IServlet */ Collections.sort(servlets, (l1, l2) -> l2.getPaths()[0].length() - l1.getPaths()[0].length()); ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(RECEIVE_BUFFER_SIZE)) .childOption(ChannelOption.AUTO_READ, Boolean.FALSE) .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT) .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WRITE_BUFFER_WATER_MARK) .handler(new LoggingHandler(LogLevel.DEBUG)).childHandler(new HttpServerInitializer(this)); channel = b.bind(port).sync().channel(); }
From source file:org.apache.pulsar.proxy.server.DirectProxyHandler.java
License:Apache License
public DirectProxyHandler(ProxyService service, ProxyConnection proxyConnection, String targetBrokerUrl, int protocolVersion, SslContext sslCtx) { this.authentication = proxyConnection.getClientAuthentication(); this.inboundChannel = proxyConnection.ctx().channel(); this.originalPrincipal = proxyConnection.clientAuthRole; this.clientAuthData = proxyConnection.clientAuthData; this.clientAuthMethod = proxyConnection.clientAuthMethod; this.protocolVersion = protocolVersion; this.sslCtx = sslCtx; ProxyConfiguration config = service.getConfiguration(); // Start the connection attempt. Bootstrap b = new Bootstrap(); // Tie the backend connection on the same thread to avoid context // switches when passing data between the 2 // connections b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT); b.group(inboundChannel.eventLoop()).channel(inboundChannel.getClass()).option(ChannelOption.AUTO_READ, false);//from w w w .j a v a 2s. com b.handler(new ChannelInitializer<SocketChannel>() { @Override protected void initChannel(SocketChannel ch) throws Exception { if (sslCtx != null) { ch.pipeline().addLast(TLS_HANDLER, sslCtx.newHandler(ch.alloc())); } ch.pipeline().addLast("frameDecoder", new LengthFieldBasedFrameDecoder(PulsarDecoder.MaxFrameSize, 0, 4, 0, 4)); ch.pipeline().addLast("proxyOutboundHandler", new ProxyBackendHandler(config, protocolVersion)); } }); URI targetBroker; try { // targetBrokerUrl is coming in the "hostname:6650" form, so we need // to extract host and port targetBroker = new URI("pulsar://" + targetBrokerUrl); } catch (URISyntaxException e) { log.warn("[{}] Failed to parse broker url '{}'", inboundChannel, targetBrokerUrl, e); inboundChannel.close(); return; } ChannelFuture f = b.connect(targetBroker.getHost(), targetBroker.getPort()); outboundChannel = f.channel(); f.addListener(future -> { if (!future.isSuccess()) { // Close the connection if the connection attempt has failed. 
inboundChannel.close(); return; } final ProxyBackendHandler cnx = (ProxyBackendHandler) outboundChannel.pipeline() .get("proxyOutboundHandler"); cnx.setRemoteHostName(targetBroker.getHost()); }); }
From source file:org.apache.qpid.proton.netty.ProtonNetty.java
License:Apache License
public static void main(String[] args) throws Exception { // Configure the bootstrap. EventLoopGroup bossGroup = new NioEventLoopGroup(1); EventLoopGroup workerGroup = new NioEventLoopGroup(); try {// w w w .j a va 2 s .com ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class) .childHandler(new ProtonNettyInitializer()).childOption(ChannelOption.AUTO_READ, false) .bind(PORT).sync().channel().closeFuture().sync(); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } }
From source file:org.asynchttpclient.netty.channel.ChannelManager.java
License:Open Source License
/**
 * Wires up the client's channel infrastructure: SSL engine factory, channel
 * pool, connection-limit bookkeeping, event loop group, and the HTTP/WebSocket
 * bootstraps.
 *
 * @param config     client configuration supplying pools, limits, and factories
 * @param nettyTimer timer used by the default channel pool
 * @throws RuntimeException if the SSL engine factory fails to initialize
 */
public ChannelManager(final AsyncHttpClientConfig config, Timer nettyTimer) {
    this.config = config;
    // Fall back to the default SSL engine factory when none is configured.
    this.sslEngineFactory = config.getSslEngineFactory() != null ? config.getSslEngineFactory()
            : new DefaultSslEngineFactory();
    try {
        this.sslEngineFactory.init(config);
    } catch (SSLException e) {
        throw new RuntimeException("Could not initialize sslEngineFactory", e);
    }

    // Channel pool: user-supplied, else keep-alive pool, else a no-op pool.
    ChannelPool channelPool = config.getChannelPool();
    if (channelPool == null) {
        if (config.isKeepAlive()) {
            channelPool = new DefaultChannelPool(config, nettyTimer);
        } else {
            channelPool = NoopChannelPool.INSTANCE;
        }
    }
    this.channelPool = channelPool;

    // Pre-built (stack-trimmed) exceptions reused when limits are exceeded.
    tooManyConnections = trimStackTrace(new TooManyConnectionsException(config.getMaxConnections()));
    tooManyConnectionsPerHost = trimStackTrace(
            new TooManyConnectionsPerHostException(config.getMaxConnectionsPerHost()));
    // Non-positive limits mean "unbounded".
    maxTotalConnectionsEnabled = config.getMaxConnections() > 0;
    maxConnectionsPerHostEnabled = config.getMaxConnectionsPerHost() > 0;

    if (maxTotalConnectionsEnabled || maxConnectionsPerHostEnabled) {
        // Override remove() so closing a channel releases its semaphore permits.
        openChannels = new DefaultChannelGroup("asyncHttpClient", GlobalEventExecutor.INSTANCE) {
            @Override
            public boolean remove(Object o) {
                boolean removed = super.remove(o);
                if (removed) {
                    if (maxTotalConnectionsEnabled)
                        freeChannels.release();
                    if (maxConnectionsPerHostEnabled) {
                        // Return the per-host permit for this channel's partition, if tracked.
                        Object partitionKey = channelId2PartitionKey.remove(Channel.class.cast(o));
                        if (partitionKey != null) {
                            Semaphore hostFreeChannels = freeChannelsPerHost.get(partitionKey);
                            if (hostFreeChannels != null)
                                hostFreeChannels.release();
                        }
                    }
                }
                return removed;
            }
        };
        freeChannels = new Semaphore(config.getMaxConnections());
    } else {
        openChannels = new DefaultChannelGroup("asyncHttpClient", GlobalEventExecutor.INSTANCE);
        freeChannels = null;
    }

    handshakeTimeout = config.getHandshakeTimeout();

    // check if external EventLoopGroup is defined
    ThreadFactory threadFactory = config.getThreadFactory() != null ? config.getThreadFactory()
            : new DefaultThreadFactory(config.getThreadPoolName());
    // We only shut the group down later if we created it ourselves.
    allowReleaseEventLoopGroup = config.getEventLoopGroup() == null;
    ChannelFactory<? extends Channel> channelFactory;
    if (allowReleaseEventLoopGroup) {
        if (config.isUseNativeTransport()) {
            eventLoopGroup = newEpollEventLoopGroup(threadFactory);
            channelFactory = getEpollSocketChannelFactory();
        } else {
            eventLoopGroup = new NioEventLoopGroup(0, threadFactory);
            channelFactory = NioSocketChannelFactory.INSTANCE;
        }
    } else {
        eventLoopGroup = config.getEventLoopGroup();
        if (eventLoopGroup instanceof OioEventLoopGroup)
            throw new IllegalArgumentException("Oio is not supported");
        // Match the channel factory to the externally supplied group's transport.
        if (eventLoopGroup instanceof NioEventLoopGroup) {
            channelFactory = NioSocketChannelFactory.INSTANCE;
        } else {
            channelFactory = getEpollSocketChannelFactory();
        }
    }

    httpBootstrap = newBootstrap(channelFactory, eventLoopGroup, config);
    wsBootstrap = newBootstrap(channelFactory, eventLoopGroup, config);

    // for reactive streams: reads must be driven by downstream demand.
    httpBootstrap.option(ChannelOption.AUTO_READ, false);
}
From source file:org.columbia.parikshan.duplicator.DuplicatorFrontendHandler.java
License:Apache License
@Override public void channelActive(ChannelHandlerContext ctx) { final Channel inboundChannel = ctx.channel(); // Start the connection attempt to SERVER 2 Bootstrap server2Bootstrap = new Bootstrap(); server2Bootstrap.group(inboundChannel.eventLoop()).channel(ctx.channel().getClass()) .handler(new DuplicatorBackendHandler(inboundChannel)).option(ChannelOption.AUTO_READ, false); ChannelFuture server2Future = server2Bootstrap.connect(remoteHost, remotePort); server2OutboundChannel = server2Future.channel(); server2Future.addListener(new ChannelFutureListener() { @Override/*w w w. ja v a2s .c om*/ public void operationComplete(ChannelFuture future) { if (future.isSuccess()) { // connection complete start to read first data inboundChannel.read(); } else { // Close the connection if the connection attempt has failed. inboundChannel.close(); } } }); // Start the connection attempt to SERVER 3 Bootstrap server3Bootstrap = new Bootstrap(); server3Bootstrap.group(inboundChannel.eventLoop()).channel(ctx.channel().getClass()) // You are only writing traffic to server 3 so you do not need to have a handler for the inbound traffic .handler(new DiscardServerHandler()) // EDIT .option(ChannelOption.AUTO_READ, false); ChannelFuture server3Future = server3Bootstrap.connect(remoteHost2, remotePort2); server3OutboundChannel = server3Future.channel(); //System.out.println("High Water Mark" + server3OutboundChannel.config().getWriteBufferHighWaterMark()); // Here we are going to add channels to channel group to save bytebuf work //channels.add(server2OutboundChannel); //channels.add(server3OutboundChannel); }
From source file:org.columbia.parikshan.proxy.NettyProxy.java
License:Apache License
public static void execute(int LOCAL_PORT, String REMOTE_HOST, int REMOTE_PORT) throws Exception { System.err.println("Proxying *:" + LOCAL_PORT + " to " + REMOTE_HOST + ':' + REMOTE_PORT + " ..."); LoggingHandler l;//from ww w. j av a2 s . c om if (Main.debug) l = new LoggingHandler(LogLevel.INFO); else l = new LoggingHandler(LogLevel.DEBUG); // Configure the bootstrap. EventLoopGroup bossGroup = new NioEventLoopGroup(1); EventLoopGroup workerGroup = new NioEventLoopGroup(); try { ServerBootstrap b = new ServerBootstrap(); b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).handler(l) .childHandler(new NettyProxyInitializer(REMOTE_HOST, REMOTE_PORT)) .childOption(ChannelOption.AUTO_READ, false).bind(LOCAL_PORT).sync().channel().closeFuture() .sync(); } finally { bossGroup.shutdownGracefully(); workerGroup.shutdownGracefully(); } }