Example usage for io.netty.channel ChannelOption AUTO_READ

List of usage examples for io.netty.channel ChannelOption AUTO_READ

Introduction

In this page you can find the example usage for io.netty.channel ChannelOption AUTO_READ.

Prototype

ChannelOption AUTO_READ

To view the source code for io.netty.channel ChannelOption AUTO_READ, click the Source Link below.

Click Source Link

Usage

From source file:org.r358.poolnetty.test.funcobs.LifecycleTest.java

License:Open Source License

/**
 * Verifies lease cancellation via the Future returned by {@code leaseAsync}:
 * with a pool of exactly one immortal connection already leased, a second
 * lease request must queue, time out on {@code get}, and be cancelable; the
 * cancellation must be observed by the lease listener and the pool listener.
 * After the first lease is yielded, a fresh async lease must succeed.
 */
@Test
public void testLeaseCancellationViaFuture() throws Exception {
    TestPoolProviderListener ppl = new TestPoolProviderListener();
    final ArrayList<Object> serverReceivedMessages = new ArrayList<>();
    // NOTE(review): testMessage is unused in this test.
    String testMessage = "The cat sat on the mat.";

    //
    // The simple server side for testing: records each value and echoes it back.
    //

    SimpleServer simpleServer = new SimpleServer("127.0.0.1", 1887, 10, new SimpleServerListener() {

        @Override
        public void newConnection(ChannelHandlerContext ctx) {
            // No per-connection setup needed for this test.
        }

        @Override
        public void newValue(ChannelHandlerContext ctx, String val) {
            serverReceivedMessages.add(val);
            ctx.writeAndFlush(val);
        }
    });

    simpleServer.start();

    final CountDownLatch leaseExpiredHandlerCalled = new CountDownLatch(1);

    //
    // Build the pool: one immortal connection and no ephemerals, so the
    // second lease request must wait behind the first.
    //

    NettyConnectionPoolBuilder ncb = new NettyConnectionPoolBuilder();
    ncb.withImmortalCount(1);
    ncb.withMaxEphemeralCount(0);
    ncb.withReaperIntervalMillis(1000); // Set short for testing..
    ncb.withLeaseExpiryHarvester(new FullPassSimpleLeaseReaper());
    ncb.withLeaseExpiredHandler(new LeaseExpiredHandler() {
        @Override
        public boolean closeExpiredLease(LeasedContext context, PoolProvider provider) {
            leaseExpiredHandlerCalled.countDown();
            return true; // Cause lease to be expired.
        }
    });

    final EventLoopGroup elg = new NioEventLoopGroup();

    //
    // Create the boot strap.
    //
    ncb.withBootstrapProvider(new BootstrapProvider() {
        @Override
        public Bootstrap createBootstrap(PoolProvider poolProvider) {
            Bootstrap bs = new Bootstrap();
            bs.group(elg);
            bs.channel(NioSocketChannel.class);
            bs.option(ChannelOption.SO_KEEPALIVE, true);
            bs.option(ChannelOption.AUTO_READ, true);
            return bs;
        }
    });

    //
    // Sets up the connection info and the channel initializer.
    //
    ncb.withConnectionInfoProvider(new ConnectionInfoProvider() {
        @Override
        public ConnectionInfo connectionInfo(PoolProvider poolProvider) {

            return new ConnectionInfo(new InetSocketAddress("127.0.0.1", 1887), null, new ChannelInitializer() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    ch.pipeline().addLast("decode", new SimpleInboundHandler(10));
                    ch.pipeline().addLast("encode", new SimpleOutboundHandler(10));
                }
            });

        }
    });

    //
    // Make the pool add listener and start.
    //
    NettyConnectionPool ncp = ncb.build();
    ncp.addListener(ppl);

    ncp.start(0, TimeUnit.SECONDS);

    //
    // Get the first lease; this consumes the pool's only connection.
    //
    LeasedChannel firstLease = ncp.lease(5, TimeUnit.SECONDS, "aardvarks");

    final AtomicBoolean failedInListener = new AtomicBoolean(false);

    // The second lease request must queue; its listener should later be
    // notified of failure once the request is canceled.
    Future<LeasedChannel> secondLease = ncp.leaseAsync(10, TimeUnit.SECONDS, "Erdferkel", new LeaseListener() {
        @Override
        public void leaseRequest(boolean success, LeasedChannel channel, Throwable th) {
            failedInListener.set(true);
        }
    });

    try {
        secondLease.get(1, TimeUnit.SECONDS);
        TestCase.fail();
    } catch (Exception ex) {
        TestCase.assertEquals(TimeoutException.class, ex.getClass());
    }

    secondLease.cancel(false); // The flag has no effect.

    firstLease.yield();

    //
    // Lease cancellation is asynchronous to the pool, but the detection of a canceled lease is done
    // 1. Before the lease logic executes on the lease request.
    // 2. After the lease logic executes, which will then cause an immediate yield to execute.
    //
    // However if the lease is pending it will be sitting in a holding queue and will be removed from there.
    //
    // The future listener event is driven from the future so calling cancel() on that will fire
    // the future listener on the thread that called cancel but the pool may fire leaseCanceled
    // potentially at some point in between because it is driven from the pools thread.
    //
    // Between those two sources of information the order of notification is indeterminate.

    //
    // The call to cancel() may also throw an IllegalStateException if the granting of the lease is in
    // progress at that moment.
    //
    // For testing sake we give it a moment to settle.

    Thread.sleep(500); // Excessive.

    TestCase.assertTrue(secondLease.isCancelled());
    TestCase.assertTrue(failedInListener.get());
    TestCase.assertTrue(ppl.getLeaseCanceled().await(5, TimeUnit.SECONDS));

    // With the first lease yielded, a new async lease should now be granted.
    secondLease = ncp.leaseAsync(10, TimeUnit.SECONDS, "Foo");

    try {
        LeasedChannel lc = secondLease.get(1, TimeUnit.SECONDS);
        TestCase.assertEquals("Foo", lc.getUserObject());
    } catch (Exception ex) {
        TestCase.fail();
    }

    ncp.stop(true);
    TestCase.assertTrue(ppl.getStoppedLatch().await(5, TimeUnit.SECONDS));

    simpleServer.stop();
}

From source file:org.r358.poolnetty.test.MultiConnectionTest.java

License:Open Source License

/**
 * Test the creation and leasing of connections and harvesting of expired leases.
 *
 * All immortal and ephemeral connections are leased out, the reaper is then
 * expected to expire every lease, the ephemeral connections to be closed, and
 * the immortal connections to be replaced, returning the pool to its idle state.
 *
 * @throws Exception on any unexpected test failure
 */
@Test
public void testHarvestingWithMultiples() throws Exception {

    final int maxImmortal = 5;
    final int maxEphemeral = 10;
    final int maxAll = maxEphemeral + maxImmortal;

    TestPoolProviderListener ppl = new TestPoolProviderListener();
    final ArrayList<Object> serverReceivedMessages = new ArrayList<>();
    // NOTE(review): testMessage is unused in this test.
    String testMessage = "The cat sat on the mat.";

    //
    // The simple server side for testing: records each value and echoes it back.
    //

    SimpleServer simpleServer = new SimpleServer("127.0.0.1", 1887, 10, new SimpleServerListener() {

        @Override
        public void newConnection(ChannelHandlerContext ctx) {
            // No per-connection setup needed for this test.
        }

        @Override
        public void newValue(ChannelHandlerContext ctx, String val) {
            serverReceivedMessages.add(val);
            ctx.writeAndFlush(val);
        }
    });

    simpleServer.start();

    final CountDownLatch leaseExpiredHandlerCalled = new CountDownLatch(1);

    //
    // Build the pool.
    //

    NettyConnectionPoolBuilder ncb = new NettyConnectionPoolBuilder();
    ncb.withImmortalCount(maxImmortal);
    ncb.withMaxEphemeralCount(maxEphemeral);
    ncb.withEphemeralLifespanMillis(5000);
    ncb.withReaperIntervalMillis(1000); // Set short for testing..
    ncb.withLeaseExpiryHarvester(new FullPassSimpleLeaseReaper());
    ncb.withLeaseExpiredHandler(new LeaseExpiredHandler() {
        @Override
        public boolean closeExpiredLease(LeasedContext context, PoolProvider provider) {
            leaseExpiredHandlerCalled.countDown();
            return true; // Cause lease to be expired.
        }
    });

    final EventLoopGroup elg = new NioEventLoopGroup();

    //
    // Create the boot strap.
    //
    ncb.withBootstrapProvider(new BootstrapProvider() {
        @Override
        public Bootstrap createBootstrap(PoolProvider poolProvider) {
            Bootstrap bs = new Bootstrap();
            bs.group(elg);
            bs.channel(NioSocketChannel.class);
            bs.option(ChannelOption.SO_KEEPALIVE, true);
            bs.option(ChannelOption.AUTO_READ, true);
            return bs;
        }
    });

    //
    // Sets up the connection info and the channel initializer.
    //
    ncb.withConnectionInfoProvider(new ConnectionInfoProvider() {
        @Override
        public ConnectionInfo connectionInfo(PoolProvider poolProvider) {

            return new ConnectionInfo(new InetSocketAddress("127.0.0.1", 1887), null, new ChannelInitializer() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    ch.pipeline().addLast("decode", new SimpleInboundHandler(10));
                    ch.pipeline().addLast("encode", new SimpleOutboundHandler(10));
                }
            });

        }
    });

    //
    // Make the pool add listener and start.
    //
    NettyConnectionPool ncp = ncb.build();
    ncp.addListener(ppl);

    // Latches counting pool lifecycle events observed via the listener below.
    final CountDownLatch leaseAll = new CountDownLatch(maxAll);

    final CountDownLatch harvestedLeases = new CountDownLatch(maxAll);

    final CountDownLatch closedConnections = new CountDownLatch(maxEphemeral);

    // maxAll initial connections plus maxImmortal replacements after harvesting.
    final CountDownLatch openedConnections = new CountDownLatch(maxAll + maxImmortal);

    ncp.addListener(new PoolProviderListenerAdapter() {

        @Override
        public void connectionCreated(PoolProvider provider, Channel channel, boolean immortal) {
            openedConnections.countDown();
        }

        @Override
        public void leaseGranted(PoolProvider provider, Channel channel, Object userObject) {
            leaseAll.countDown();
        }

        @Override
        public void leaseExpired(PoolProvider provider, Channel channel, Object userObject) {
            harvestedLeases.countDown();
        }

        @Override
        public void connectionClosed(PoolProvider provider, Channel channel) {
            closedConnections.countDown();
        }
    });

    TestCase.assertTrue(ncp.start(10, TimeUnit.SECONDS));

    // Pool starts with the immortal connections only; internal lists are
    // inspected reflectively via TestUtil.
    TestCase.assertEquals(5, ((List) TestUtil.getField(ncp, "immortalContexts")).size());
    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "ephemeralContexts")).size());

    //
    // Lease all 15 connections which is 5 immortal and 10 ephemeral.
    //

    List<LeasedChannel> leasedChannels = new ArrayList<>();

    for (int t = 0; t < 15; t++) {
        leasedChannels.add(ncp.lease(2, TimeUnit.SECONDS, t));
    }

    TestCase.assertTrue(leaseAll.await(5, TimeUnit.SECONDS));

    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "immortalContexts")).size());
    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "ephemeralContexts")).size());

    TestCase.assertEquals(maxAll, ((List) TestUtil.getField(ncp, "leasedContexts")).size());
    TestCase.assertEquals(maxAll, ((Set) TestUtil.getField(ncp, "leasedContextSet")).size());

    //
    // Over this period the leases should all expire and drop back to the pool.
    //

    TestCase.assertTrue(harvestedLeases.await(10, TimeUnit.SECONDS));

    //
    // Wait for ephemeral connections to be closed.
    //

    TestCase.assertTrue(closedConnections.await(10, TimeUnit.SECONDS));

    //
    // At this point we wait until the replace immortal connections are created.
    //

    // Total opened connections should be maxAll+maxImmortal.
    TestCase.assertTrue(openedConnections.await(10, TimeUnit.SECONDS));

    // Pool should be back to its idle state: immortals replenished, nothing leased.
    TestCase.assertEquals(5, ((List) TestUtil.getField(ncp, "immortalContexts")).size());

    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "ephemeralContexts")).size());

    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "leasedContexts")).size());
    TestCase.assertEquals(0, ((Set) TestUtil.getField(ncp, "leasedContextSet")).size());

    simpleServer.stop();

}

From source file:org.r358.poolnetty.test.MultiConnectionTest.java

License:Open Source License

/**
 * Verifies voluntary yielding: all immortal and ephemeral connections are
 * leased, every lease is then yielded back, after which the ephemeral
 * connections should be closed and the pool should return to its idle state
 * of immortal connections only.
 */
@Test
public void leaseAndYield() throws Exception {
    final int maxImmortal = 5;
    final int maxEphemeral = 10;
    final int maxAll = maxEphemeral + maxImmortal;

    TestPoolProviderListener ppl = new TestPoolProviderListener();
    final ArrayList<Object> serverReceivedMessages = new ArrayList<>();
    // NOTE(review): testMessage is unused in this test.
    String testMessage = "The cat sat on the mat.";

    //
    // The simple server side for testing: records each value and echoes it back.
    //

    SimpleServer simpleServer = new SimpleServer("127.0.0.1", 1887, 10, new SimpleServerListener() {

        @Override
        public void newConnection(ChannelHandlerContext ctx) {
            // No per-connection setup needed for this test.
        }

        @Override
        public void newValue(ChannelHandlerContext ctx, String val) {
            serverReceivedMessages.add(val);
            ctx.writeAndFlush(val);
        }
    });

    simpleServer.start();

    // NOTE(review): this latch is counted down by the expiry handler but
    // never awaited in this test.
    final CountDownLatch leaseExpiredHandlerCalled = new CountDownLatch(1);

    //
    // Build the pool.
    //

    NettyConnectionPoolBuilder ncb = new NettyConnectionPoolBuilder();
    ncb.withImmortalCount(maxImmortal);
    ncb.withMaxEphemeralCount(maxEphemeral);
    ncb.withEphemeralLifespanMillis(5000);
    ncb.withReaperIntervalMillis(1000); // Set short for testing..
    ncb.withLeaseExpiryHarvester(new FullPassSimpleLeaseReaper());
    ncb.withLeaseExpiredHandler(new LeaseExpiredHandler() {
        @Override
        public boolean closeExpiredLease(LeasedContext context, PoolProvider provider) {
            leaseExpiredHandlerCalled.countDown();
            return true; // Cause lease to be expired.
        }
    });

    final EventLoopGroup elg = new NioEventLoopGroup();

    //
    // Create the boot strap.
    //
    ncb.withBootstrapProvider(new BootstrapProvider() {
        @Override
        public Bootstrap createBootstrap(PoolProvider poolProvider) {
            Bootstrap bs = new Bootstrap();
            bs.group(elg);
            bs.channel(NioSocketChannel.class);
            bs.option(ChannelOption.SO_KEEPALIVE, true);
            bs.option(ChannelOption.AUTO_READ, true);
            return bs;
        }
    });

    //
    // Sets up the connection info and the channel initializer.
    //
    ncb.withConnectionInfoProvider(new ConnectionInfoProvider() {
        @Override
        public ConnectionInfo connectionInfo(PoolProvider poolProvider) {

            return new ConnectionInfo(new InetSocketAddress("127.0.0.1", 1887), null, new ChannelInitializer() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    ch.pipeline().addLast("decode", new SimpleInboundHandler(10));
                    ch.pipeline().addLast("encode", new SimpleOutboundHandler(10));
                }
            });

        }
    });

    //
    // Make the pool add listener and start.
    //
    NettyConnectionPool ncp = ncb.build();
    ncp.addListener(ppl);

    // Latches counting pool lifecycle events observed via the listener below.
    final CountDownLatch leaseAll = new CountDownLatch(maxAll);

    final CountDownLatch closedConnections = new CountDownLatch(maxEphemeral);

    final CountDownLatch openedConnections = new CountDownLatch(maxAll);

    final CountDownLatch yieldedConnections = new CountDownLatch(maxAll);

    ncp.addListener(new PoolProviderListenerAdapter() {

        @Override
        public void connectionCreated(PoolProvider provider, Channel channel, boolean immortal) {
            openedConnections.countDown();
        }

        @Override
        public void leaseGranted(PoolProvider provider, Channel channel, Object userObject) {
            leaseAll.countDown();
        }

        @Override
        public void leaseYield(PoolProvider provider, Channel channel, Object userObject) {
            yieldedConnections.countDown();
        }

        @Override
        public void connectionClosed(PoolProvider provider, Channel channel) {
            closedConnections.countDown();
        }
    });

    TestCase.assertTrue(ncp.start(10, TimeUnit.SECONDS));

    // Pool starts with the immortal connections only; internal lists are
    // inspected reflectively via TestUtil.
    TestCase.assertEquals(5, ((List) TestUtil.getField(ncp, "immortalContexts")).size());
    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "ephemeralContexts")).size());

    //
    // Lease all 15 connections which is 5 immortal and 10 ephemeral.
    //

    List<LeasedChannel> leasedChannels = new ArrayList<>();

    for (int t = 0; t < maxAll; t++) {
        leasedChannels.add(ncp.lease(2, TimeUnit.SECONDS, t));
    }

    TestCase.assertTrue(leaseAll.await(5, TimeUnit.SECONDS));

    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "immortalContexts")).size());
    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "ephemeralContexts")).size());

    TestCase.assertEquals(maxAll, ((List) TestUtil.getField(ncp, "leasedContexts")).size());
    TestCase.assertEquals(maxAll, ((Set) TestUtil.getField(ncp, "leasedContextSet")).size());

    // Yield every lease back to the pool.
    while (!leasedChannels.isEmpty()) {
        leasedChannels.remove(0).yield();
    }

    TestCase.assertTrue(yieldedConnections.await(10, TimeUnit.SECONDS));

    //
    // At this point we have yielded the leases so we need to wait for
    // the ephemeral connections to be closed.
    //

    //
    // Wait for ephemeral connections to be closed.
    //

    TestCase.assertTrue(closedConnections.await(20, TimeUnit.SECONDS));

    //
    // At this point we wait until the replace immortal connections are created.
    //

    // Total opened connections should be maxAll.
    TestCase.assertTrue(openedConnections.await(10, TimeUnit.SECONDS));

    // Pool should be back to its idle state: immortals only, nothing leased.
    TestCase.assertEquals(5, ((List) TestUtil.getField(ncp, "immortalContexts")).size());

    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "ephemeralContexts")).size());

    TestCase.assertEquals(0, ((List) TestUtil.getField(ncp, "leasedContexts")).size());
    TestCase.assertEquals(0, ((Set) TestUtil.getField(ncp, "leasedContextSet")).size());

    simpleServer.stop();
}

From source file:org.restlet.engine.netty.NettyServerHelper.java

License:Open Source License

/**
 * Starts the Netty-backed HTTP server: creates the boss/worker event loop
 * groups, configures the server bootstrap, builds the per-connection
 * pipeline, binds the listen port, and records the actual (ephemeral) port.
 *
 * Accepted child channels have AUTO_READ disabled; reads are driven by the
 * reactive-streams publisher/subscriber pair installed in the pipeline.
 *
 * @throws Exception if the superclass start or the bind fails
 */
@Override
public void start() throws Exception {
    super.start();
    setBossGroup(new NioEventLoopGroup());
    setWorkerGroup(new NioEventLoopGroup());
    setServerBootstrap(new ServerBootstrap());
    getServerBootstrap().option(ChannelOption.SO_BACKLOG, 1024);
    getServerBootstrap().group(getBossGroup(), getWorkerGroup()).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.AUTO_READ, false).localAddress(getHelped().getPort())
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ChannelPipeline pipeline = ch.pipeline();

                    // HTTP codec first, then the streams bridge handler.
                    pipeline.addLast(new HttpRequestDecoder(), new HttpResponseEncoder())
                            .addLast("serverStreamsHandler", new HttpStreamsServerHandler());

                    HandlerSubscriber<HttpResponse> subscriber = new HandlerSubscriber<>(ch.eventLoop(), 2, 4);
                    HandlerPublisher<HttpRequest> publisher = new HandlerPublisher<>(ch.eventLoop(),
                            HttpRequest.class);

                    pipeline.addLast("serverSubscriber", subscriber);
                    pipeline.addLast("serverPublisher", publisher);

                    // Wire requests into this helper and responses back out.
                    publisher.subscribe(NettyServerHelper.this);
                    NettyServerHelper.this.subscribe(subscriber);
                }
            });

    // Block until bound, then expose the channel and the real local port.
    setServerChannel(getServerBootstrap().bind().sync().channel());
    setEphemeralPort(((InetSocketAddress) getServerChannel().localAddress()).getPort());
    getLogger().info("Starting the Netty " + getProtocols() + " server on port " + getHelped().getPort());
}

From source file:org.royaldev.enterprise.proxy.ProxyFrontendHandler.java

License:Apache License

/**
 * Invoked when the inbound (client) channel becomes active. Opens the
 * outbound connection to the configured remote host and port on the same
 * event loop, with AUTO_READ disabled so reading from the client only
 * starts once the outbound link is established.
 */
@Override
public void channelActive(ChannelHandlerContext ctx) throws Exception {
    final Channel inbound = ctx.channel();

    // Build the outbound bootstrap, reusing the inbound channel's
    // event loop and transport class.
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(inbound.eventLoop());
    bootstrap.channel(inbound.getClass());
    bootstrap.handler(new ProxyBackendHandler(inbound));
    bootstrap.option(ChannelOption.AUTO_READ, false);

    // Begin the connection attempt and remember the outbound channel.
    ChannelFuture connectFuture = bootstrap.connect(remoteHost, remotePort);
    outboundChannel = connectFuture.channel();
    connectFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                // Outbound link is up: start reading from the client.
                inbound.read();
            } else {
                // Outbound connect failed: drop the client connection too.
                inbound.close();
            }
        }
    });
}

From source file:org.wso2.test.http.netty.proxy.DelayingProxy.java

License:Open Source License

/**
 * Configures and starts one proxy server channel per entry in the supplied
 * configuration. Each server socket is bound to its entry's inbound port
 * with AUTO_READ disabled on accepted child channels, so the delaying proxy
 * can drive reads explicitly.
 *
 * @param proxyConfig per-endpoint proxy configuration to bind
 */
public void initialize(ProxyConfig proxyConfig) {
    proxyConfigurator = new ProxyConfiguratorImpl(proxyConfig);
    // Configure the bootstrap: one acceptor thread, default-sized workers.
    bossGroup = new NioEventLoopGroup(1);
    workerGroup = new NioEventLoopGroup();

    List<Channel> serverChannels = new ArrayList<Channel>();

    // Shared scheduler used by the initializer to apply artificial delays.
    ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(10);

    try {
        for (ProxyConfigEntry proxyConfigEntry : proxyConfig.getProxyConfigs()) {
            ServerBootstrap b = new ServerBootstrap();
            Channel c = b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.INFO))
                    .childHandler(new DelayingProxyInitializer(proxyConfigEntry, scheduledExecutorService))
                    .childOption(ChannelOption.AUTO_READ, false).bind(proxyConfigEntry.getInboundPort()).sync()
                    .channel();
            serverChannels.add(c);
        }

    } catch (InterruptedException e) {
        // BUGFIX: the interrupt was previously swallowed silently. Restore
        // the thread's interrupt status so callers can observe that
        // initialization was interrupted mid-bind.
        Thread.currentThread().interrupt();
    }
}

From source file:org.wso2.test.http.netty.proxy.DelayingProxyFrontendHandler.java

License:Open Source License

/**
 * Invoked when the inbound (client) channel becomes active. Establishes the
 * outbound connection to the remote endpoint on the same event loop, with
 * AUTO_READ disabled so the first client read is only issued once the
 * outbound connection succeeds.
 */
@Override
public void channelActive(ChannelHandlerContext ctx) {
    final Channel inbound = ctx.channel();

    // Build the outbound bootstrap, reusing the inbound channel's
    // event loop and transport class.
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(inbound.eventLoop());
    bootstrap.channel(inbound.getClass());
    bootstrap.handler(new DelayingProxyBackendHandler(inbound));
    bootstrap.option(ChannelOption.AUTO_READ, false);

    // Begin the connection attempt and remember the outbound channel.
    ChannelFuture connectFuture = bootstrap.connect(remoteHost, remotePort);
    outboundChannel = connectFuture.channel();
    connectFuture.addListener(new ChannelFutureListener() {

        public void operationComplete(ChannelFuture future) {
            if (future.isSuccess()) {
                // Outbound link is up: start reading from the client.
                inbound.read();
            } else {
                // Outbound connect failed: drop the client connection too.
                inbound.close();
            }
        }
    });
}

From source file:org.wyb.sows.server.WebSocketServerHandler.java

License:Apache License

/**
 * Dispatches an incoming websocket frame.
 *
 * Close frames complete the close handshake; pings are answered with pongs.
 * Before a remote connection exists, the first text frame is treated as a
 * connect command naming the target host/port; once connected, binary frames
 * are relayed to the outbound channel. Any other frame type is rejected.
 *
 * @param ctx   handler context for the inbound websocket channel
 * @param frame the frame to process
 */
private void handleWebSocketFrame(ChannelHandlerContext ctx, WebSocketFrame frame) {

    // Check for closing frame
    if (frame instanceof CloseWebSocketFrame) {
        handshaker.close(ctx.channel(), (CloseWebSocketFrame) frame.retain());
        return;
    }
    if (frame instanceof PingWebSocketFrame) {
        ctx.channel().write(new PongWebSocketFrame(frame.content().retain()));
        return;
    }
    if (frame instanceof BinaryWebSocketFrame) {
        if (remoteConnect) {
            BinaryWebSocketFrame binFrame = (BinaryWebSocketFrame) frame;
            if (outboundChannel.isActive()) {
                // Relay payload to the target; read the next frame only after
                // the write completes (manual read because AUTO_READ is off).
                outboundChannel.writeAndFlush(binFrame.content().retain())
                        .addListener(new ChannelFutureListener() {
                            @Override
                            public void operationComplete(ChannelFuture future) {
                                if (future.isSuccess()) {
                                    ctx.channel().read();
                                } else {
                                    future.channel().close();
                                }
                            }
                        });
            }
        } else {
            // Binary data before a connect command is a protocol violation.
            ctx.close();
        }
    } else if (frame instanceof TextWebSocketFrame) {
        String request = ((TextWebSocketFrame) frame).text();
        if (!remoteConnect) {
            SowsConnectCmd cmd = new SowsConnectCmd();
            try {
                cmd.decode(request);
            } catch (UnsupportedEncodingException e) {
                e.printStackTrace();
                ctx.close();
                // BUGFIX: bail out on a malformed command instead of reading
                // undefined host/port fields from the failed decode below.
                return;
            }
            String host = cmd.getHost();
            int port = cmd.getPort();
            final Channel inboundChannel = ctx.channel();
            // invalid host
            if (host.equals("localhost") || host.equals("127.0.0.1")) {
                logger.warn("Invalid host. Target=" + host + ":" + port);
                WebSocketFrame frame2 = new TextWebSocketFrame(SowsStatusType.FAILED.stringValue());
                inboundChannel.writeAndFlush(frame2).addListener(ChannelFutureListener.CLOSE);
                // BUGFIX: without this return the proxy still attempted the
                // connection to the rejected target and set remoteConnect.
                return;
            }

            // Start the connection attempt.
            Bootstrap b = new Bootstrap();
            b.group(inboundChannel.eventLoop()).channel(ctx.channel().getClass())
                    .handler(new HexDumpProxyInitializer(inboundChannel, handshaker))
                    .option(ChannelOption.AUTO_READ, false);
            ChannelFuture f = b.connect(host, port);
            outboundChannel = f.channel();
            f.addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) {
                    if (future.isSuccess()) {
                        logger.info("Target connection is established. Target=" + host + ":" + port);
                        WebSocketFrame frame = new TextWebSocketFrame(SowsStatusType.SUCCESS.stringValue());
                        inboundChannel.writeAndFlush(frame);
                        // connection complete start to read first data
                        inboundChannel.read();
                    } else {
                        logger.warn("Not able to connect target. Target=" + host + ":" + port);
                        WebSocketFrame frame = new TextWebSocketFrame(SowsStatusType.FAILED.stringValue());
                        inboundChannel.writeAndFlush(frame).addListener(ChannelFutureListener.CLOSE);
                    }
                }
            });
            remoteConnect = true;
        } else {
            logger.warn("Unknown SowsCmd! " + request);
            ctx.close();
        }
    } else {
        // BUGFIX: the original guard tested '!(frame instanceof WebSocketFrame)',
        // which is always false for a non-null WebSocketFrame parameter, making
        // the unsupported-frame rejection unreachable. Reject here instead.
        throw new UnsupportedOperationException(
                String.format("%s frame types not supported", frame.getClass().getName()));
    }

}

From source file:org.z.core.test.HexDumpProxyFrontendHandler.java

License:Apache License

/**
 * Invoked when the inbound (client) channel becomes active. Connects to the
 * remote endpoint on the same event loop, with AUTO_READ disabled so the
 * first read from the client happens only after the outbound connection
 * succeeds.
 */
public void channelActive(ChannelHandlerContext ctx) {
    final Channel inbound = ctx.channel();

    // Build the outbound bootstrap, reusing the inbound channel's
    // event loop and transport class.
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(inbound.eventLoop());
    bootstrap.channel(inbound.getClass());
    bootstrap.handler(new HexDumpProxyBackendHandler(inbound));
    bootstrap.option(ChannelOption.AUTO_READ, false);

    // Begin the connection attempt and remember the outbound channel.
    ChannelFuture connectFuture = bootstrap.connect(remoteHost, remotePort);
    outboundChannel = connectFuture.channel();
    connectFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) {
            if (future.isSuccess()) {
                // Outbound link is up: start reading from the client.
                inbound.read();
            } else {
                // Outbound connect failed: drop the client connection too.
                inbound.close();
            }
        }
    });
}

From source file:proxyService.Proxy.java

License:Apache License

/**
 * Binds the proxy on LOCAL_PORT, forwards to the configured node, and blocks
 * until the server channel closes; both event loop groups are always shut
 * down on exit.
 */
public void run() {
    System.err.println("Proxying *:" + LOCAL_PORT + " to " + node.getIp() + ':' + node.getPort() + " ...");

    // Configure the bootstrap: one acceptor thread, default-sized workers.
    EventLoopGroup acceptorGroup = new NioEventLoopGroup(1);
    EventLoopGroup ioGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(acceptorGroup, ioGroup);
        bootstrap.channel(NioServerSocketChannel.class);
        bootstrap.handler(new LoggingHandler(LogLevel.INFO));
        bootstrap.childHandler(new ProxyInitializer(node));
        // Accepted connections read on demand, not automatically.
        bootstrap.childOption(ChannelOption.AUTO_READ, false);

        // Bind, then block until the server channel is closed.
        Channel serverChannel = bootstrap.bind(LOCAL_PORT).syncUninterruptibly().channel();
        serverChannel.closeFuture().syncUninterruptibly();

    } finally {
        acceptorGroup.shutdownGracefully();
        ioGroup.shutdownGracefully();
    }
}