Example usage for io.netty.channel ChannelOption TCP_NODELAY

List of usage examples for io.netty.channel ChannelOption TCP_NODELAY

Introduction

On this page you can find example usages of io.netty.channel.ChannelOption.TCP_NODELAY.

Prototype

public static final ChannelOption<Boolean> TCP_NODELAY


Usage
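
Before the project examples below, here is a minimal, self-contained sketch of the typical client-side usage: TCP_NODELAY is set on the Bootstrap before connecting, so Nagle's algorithm is disabled on the resulting socket. This sketch is not taken from any of the projects listed on this page; the host, port and LoggingHandler are placeholders.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.logging.LoggingHandler;

public class TcpNoDelayClientSketch {
    public static void main(String[] args) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap();
            b.group(group)
                    .channel(NioSocketChannel.class)
                    // Disable Nagle's algorithm so small writes are sent immediately.
                    .option(ChannelOption.TCP_NODELAY, true)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            ch.pipeline().addLast(new LoggingHandler()); // placeholder handler
                        }
                    });
            // Connect and block until the channel is closed.
            b.connect("localhost", 8080).sync().channel().closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}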

From source file:com.googlecode.protobuf.pro.duplex.example.simple.NPETestingClient.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("usage: <serverHostname> <serverPort> <clientHostname> <clientPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    String clientHostname = args[2];
    int clientPort = Integer.parseInt(args[3]);

    PeerInfo client = new PeerInfo(clientHostname, clientPort);
    PeerInfo server = new PeerInfo(serverHostname, serverPort);

    try {
        DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();
        // force the use of a local port
        // - normally you don't need this
        clientFactory.setClientInfo(client);

        ExtensionRegistry r = ExtensionRegistry.newInstance();
        PingPong.registerAllExtensions(r);
        clientFactory.setExtensionRegistry(r);

        clientFactory.setConnectResponseTimeoutMillis(10000);
        RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(3, 10);
        clientFactory.setRpcServerCallExecutor(rpcExecutor);

        // RPC payloads are uncompressed when logged - so reduce logging
        CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
        logger.setLogRequestProto(false);
        logger.setLogResponseProto(false);
        clientFactory.setRpcLogger(logger);

        // Set up the event pipeline factory.
        // Set up an RPC event listener - it just logs what happens
        RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

        final RpcConnectionEventListener listener = new RpcConnectionEventListener() {

            @Override
            public void connectionReestablished(RpcClientChannel clientChannel) {
                log.info("connectionReestablished " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionOpened(RpcClientChannel clientChannel) {
                log.info("connectionOpened " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionLost(RpcClientChannel clientChannel) {
                log.info("connectionLost " + clientChannel);
            }

            @Override
            public void connectionChanged(RpcClientChannel clientChannel) {
                log.info("connectionChanged " + clientChannel);
                channel = clientChannel;
            }
        };
        rpcEventNotifier.addEventListener(listener);
        clientFactory.registerConnectionEventListener(rpcEventNotifier);

        // Configure the client to provide a Pong service in both blocking and non-blocking varieties
        BlockingService bPongService = BlockingPongService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(bPongService);

        Service nbPongService = NonBlockingPongService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPongService);

        // We give the client a blocking and a non-blocking (pong-capable) Ping service
        BlockingService bPingService = BlockingPingService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(bPingService);

        Service nbPingService = NonBlockingPingService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPingService);

        Bootstrap bootstrap = new Bootstrap();
        EventLoopGroup workers = new NioEventLoopGroup(16,
                new RenamingThreadFactoryProxy("workers", Executors.defaultThreadFactory()));

        bootstrap.group(workers);
        bootstrap.handler(clientFactory);
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
        bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
        bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

        RpcClientConnectionWatchdog watchdog = new RpcClientConnectionWatchdog(clientFactory, bootstrap);
        rpcEventNotifier.addEventListener(watchdog);
        watchdog.start();

        CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
        shutdownHandler.addResource(workers);
        shutdownHandler.addResource(rpcExecutor);

        clientFactory.peerWith(server, bootstrap);

        while (channel != null) {

            callServerNPEinNonBlockingPingnoPong();
            callServerNPEinBlockingPingnoPong();
            callBlockingPingWithBlockingPongNPE();
            callBlockingPingWithNonBlockingPongNPE();
            Thread.sleep(10000);

        }

    } catch (Exception e) {
        log.warn("Failure.", e);
    } finally {
        System.exit(0);
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.simple.SimpleClient.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("usage: <serverHostname> <serverPort> <clientHostname> <clientPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    String clientHostname = args[2];
    int clientPort = Integer.parseInt(args[3]);

    PeerInfo client = new PeerInfo(clientHostname, clientPort);
    PeerInfo server = new PeerInfo(serverHostname, serverPort);

    try {
        DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();
        // force the use of a local port
        // - normally you don't need this
        clientFactory.setClientInfo(client);

        ExtensionRegistry r = ExtensionRegistry.newInstance();
        PingPong.registerAllExtensions(r);
        clientFactory.setExtensionRegistry(r);

        clientFactory.setConnectResponseTimeoutMillis(10000);
        RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(3, 10);
        clientFactory.setRpcServerCallExecutor(rpcExecutor);

        // RPC payloads are uncompressed when logged - so reduce logging
        CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
        logger.setLogRequestProto(false);
        logger.setLogResponseProto(false);
        clientFactory.setRpcLogger(logger);

        // Set up the event pipeline factory.
        // Set up an RPC event listener - it just logs what happens
        RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

        final RpcConnectionEventListener listener = new RpcConnectionEventListener() {

            @Override
            public void connectionReestablished(RpcClientChannel clientChannel) {
                log.info("connectionReestablished " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionOpened(RpcClientChannel clientChannel) {
                log.info("connectionOpened " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionLost(RpcClientChannel clientChannel) {
                log.info("connectionLost " + clientChannel);
            }

            @Override
            public void connectionChanged(RpcClientChannel clientChannel) {
                log.info("connectionChanged " + clientChannel);
                channel = clientChannel;
            }
        };
        rpcEventNotifier.addEventListener(listener);
        clientFactory.registerConnectionEventListener(rpcEventNotifier);

        Bootstrap bootstrap = new Bootstrap();
        EventLoopGroup workers = new NioEventLoopGroup(16,
                new RenamingThreadFactoryProxy("workers", Executors.defaultThreadFactory()));

        bootstrap.group(workers);
        bootstrap.handler(clientFactory);
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
        bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
        bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

        RpcClientConnectionWatchdog watchdog = new RpcClientConnectionWatchdog(clientFactory, bootstrap);
        rpcEventNotifier.addEventListener(watchdog);
        watchdog.start();

        CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
        shutdownHandler.addResource(workers);
        shutdownHandler.addResource(rpcExecutor);

        clientFactory.peerWith(server, bootstrap);

        while (channel != null) {

            BlockingPingService.BlockingInterface blockingService = BlockingPingService
                    .newBlockingStub(channel);
            final ClientRpcController controller = channel.newRpcController();
            controller.setTimeoutMs(0);

            Ping.Builder pingBuilder = Ping.newBuilder();
            pingBuilder.setSequenceNo(1);
            pingBuilder.setPingDurationMs(1000);
            pingBuilder.setPingPayload(ByteString.copyFromUtf8("Hello World!"));
            pingBuilder.setPingPercentComplete(false);
            pingBuilder.setPongRequired(false);
            pingBuilder.setPongBlocking(true);
            pingBuilder.setPongDurationMs(1000);
            pingBuilder.setPongTimeoutMs(0);
            pingBuilder.setPongPercentComplete(false);

            // set an extension value
            pingBuilder.setExtension(ExtendedPing.extendedIntField, 111);

            Ping ping = pingBuilder.build();
            try {
                Pong pong = blockingService.ping(controller, ping);

                Integer ext = pong.getExtension(ExtendedPong.extendedIntField);
                if (ext == null || ext != 111) {
                    log.warn("Extension not parsed. Value=", ext);
                }
            } catch (ServiceException e) {
                log.warn("Call failed.", e);
            }

            Thread.sleep(10000);

        }

    } catch (Exception e) {
        log.warn("Failure.", e);
    } finally {
        System.exit(0);
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.simple.SimpleReconnectingClient.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("usage: <serverHostname> <serverPort> <clientHostname> <clientPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    String clientHostname = args[2];
    int clientPort = Integer.parseInt(args[3]);

    //PeerInfo client = new PeerInfo(clientHostname, clientPort);
    PeerInfo server = new PeerInfo(serverHostname, serverPort);

    try {
        DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();
        // force the use of a local port
        // - normally you don't need this
        //clientFactory.setClientInfo(client);

        ExtensionRegistry r = ExtensionRegistry.newInstance();
        PingPong.registerAllExtensions(r);
        clientFactory.setExtensionRegistry(r);

        clientFactory.setConnectResponseTimeoutMillis(10000);
        RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(3, 10);
        clientFactory.setRpcServerCallExecutor(rpcExecutor);

        // RPC payloads are uncompressed when logged - so reduce logging
        CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
        logger.setLogRequestProto(false);
        logger.setLogResponseProto(false);
        clientFactory.setRpcLogger(logger);

        // Set up the event pipeline factory.
        // Set up an RPC event listener - it just logs what happens
        RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

        final RpcConnectionEventListener listener = new RpcConnectionEventListener() {

            @Override
            public void connectionReestablished(RpcClientChannel clientChannel) {
                log.info("connectionReestablished " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionOpened(RpcClientChannel clientChannel) {
                log.info("connectionOpened " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionLost(RpcClientChannel clientChannel) {
                log.info("connectionLost " + clientChannel);
            }

            @Override
            public void connectionChanged(RpcClientChannel clientChannel) {
                log.info("connectionChanged " + clientChannel);
                channel = clientChannel;
            }
        };
        rpcEventNotifier.addEventListener(listener);
        clientFactory.registerConnectionEventListener(rpcEventNotifier);

        Bootstrap bootstrap = new Bootstrap();
        EventLoopGroup workers = new NioEventLoopGroup(16,
                new RenamingThreadFactoryProxy("workers", Executors.defaultThreadFactory()));

        bootstrap.group(workers);
        bootstrap.handler(clientFactory);
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
        bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
        bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

        RpcClientConnectionWatchdog watchdog = new RpcClientConnectionWatchdog(clientFactory, bootstrap);
        watchdog.setThreadName("watchdog"); // #48
        rpcEventNotifier.addEventListener(watchdog);
        watchdog.start();

        CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
        shutdownHandler.addResource(workers);
        shutdownHandler.addResource(rpcExecutor);
        shutdownHandler.addResource(watchdog);

        clientFactory.peerWith(server, bootstrap);

        while (channel != null) {

            BlockingPingService.BlockingInterface blockingService = BlockingPingService
                    .newBlockingStub(channel);
            final ClientRpcController controller = channel.newRpcController();
            controller.setTimeoutMs(0);

            Ping.Builder pingBuilder = Ping.newBuilder();
            pingBuilder.setSequenceNo(1);
            pingBuilder.setPingDurationMs(1000);
            pingBuilder.setPingPayload(ByteString.copyFromUtf8("Hello World!"));
            pingBuilder.setPingPercentComplete(false);
            pingBuilder.setPongRequired(false);
            pingBuilder.setPongBlocking(true);
            pingBuilder.setPongDurationMs(1000);
            pingBuilder.setPongTimeoutMs(0);
            pingBuilder.setPongPercentComplete(false);

            // set an extension value
            pingBuilder.setExtension(ExtendedPing.extendedIntField, 111);

            Ping ping = pingBuilder.build();
            try {
                Pong pong = blockingService.ping(controller, ping);

                Integer ext = pong.getExtension(ExtendedPong.extendedIntField);
                if (ext == null || ext != 111) {
                    log.warn("Extension not parsed. Value=", ext);
                }

                channel.close();
            } catch (ServiceException e) {
                log.warn("Call failed.", e);
            }

            Thread.sleep(10000);

        }

    } catch (Exception e) {
        log.warn("Failure.", e);
    } finally {
        System.exit(0);
    }
}

From source file:com.googlecode.protobuf.pro.duplex.example.simple.SimpleServer.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.err.println("usage: <serverHostname> <serverPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);

    PeerInfo serverInfo = new PeerInfo(serverHostname, serverPort);

    // RPC payloads are uncompressed when logged - so reduce logging
    CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
    logger.setLogRequestProto(false);
    logger.setLogResponseProto(false);

    // Configure the server.
    DuplexTcpServerPipelineFactory serverFactory = new DuplexTcpServerPipelineFactory(serverInfo);

    ExtensionRegistry r = ExtensionRegistry.newInstance();
    PingPong.registerAllExtensions(r);
    serverFactory.setExtensionRegistry(r);

    RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(10, 10);
    serverFactory.setRpcServerCallExecutor(rpcExecutor);
    serverFactory.setLogger(logger);

    // Set up an RPC event listener - it just logs what happens
    RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();
    RpcConnectionEventListener listener = new RpcConnectionEventListener() {

        @Override
        public void connectionReestablished(RpcClientChannel clientChannel) {
            log.info("connectionReestablished " + clientChannel);
        }

        @Override
        public void connectionOpened(RpcClientChannel clientChannel) {
            log.info("connectionOpened " + clientChannel);
        }

        @Override
        public void connectionLost(RpcClientChannel clientChannel) {
            log.info("connectionLost " + clientChannel);
        }

        @Override
        public void connectionChanged(RpcClientChannel clientChannel) {
            log.info("connectionChanged " + clientChannel);
        }
    };
    rpcEventNotifier.setEventListener(listener);
    serverFactory.registerConnectionEventListener(rpcEventNotifier);

    // We give the server a blocking and a non-blocking (pong-capable) Ping service
    BlockingService bPingService = BlockingPingService
            .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(true, bPingService);

    Service nbPingService = NonBlockingPingService
            .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
    serverFactory.getRpcServiceRegistry().registerService(true, nbPingService);

    ServerBootstrap bootstrap = new ServerBootstrap();
    EventLoopGroup boss = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("boss", Executors.defaultThreadFactory()));
    EventLoopGroup workers = new NioEventLoopGroup(2,
            new RenamingThreadFactoryProxy("worker", Executors.defaultThreadFactory()));
    bootstrap.group(boss, workers);
    bootstrap.channel(NioServerSocketChannel.class);
    bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, 1048576);
    bootstrap.childOption(ChannelOption.SO_SNDBUF, 1048576);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.childHandler(serverFactory);
    bootstrap.localAddress(serverInfo.getPort());

    CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
    shutdownHandler.addResource(boss);
    shutdownHandler.addResource(workers);
    shutdownHandler.addResource(rpcExecutor);

    // Bind and start to accept incoming connections.
    bootstrap.bind();
    log.info("Serving " + bootstrap);

    while (true) {

        List<RpcClientChannel> clients = serverFactory.getRpcClientRegistry().getAllClients();
        log.info("Number of clients=" + clients.size());

        Thread.sleep(5000);
    }
}
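
A note on the server example above: ServerBootstrap.option(ChannelOption.TCP_NODELAY, true) applies the option to the listening (parent) channel, where a per-connection socket option like TCP_NODELAY is typically ignored. To disable Nagle's algorithm on each accepted connection, the option is usually set through childOption(...), as the OnRampBoot example further down does. A minimal sketch of that pattern, with a placeholder port and a placeholder LoggingHandler as the child handler (not taken from the example itself):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LoggingHandler;

public class TcpNoDelayServerSketch {
    public static void main(String[] args) throws Exception {
        EventLoopGroup boss = new NioEventLoopGroup(1);
        EventLoopGroup workers = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(boss, workers)
                    .channel(NioServerSocketChannel.class)
                    // Options for the parent (listening) channel.
                    .option(ChannelOption.SO_BACKLOG, 128)
                    // Options applied to every accepted (child) connection.
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    .childOption(ChannelOption.SO_KEEPALIVE, true)
                    .childHandler(new LoggingHandler()); // placeholder child handler
            // Bind and block until the server channel is closed.
            b.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            workers.shutdownGracefully();
        }
    }
}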

From source file:com.googlecode.protobuf.pro.duplex.example.simple.TimeoutTestingClient.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("usage: <serverHostname> <serverPort> <clientHostname> <clientPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    String clientHostname = args[2];
    int clientPort = Integer.parseInt(args[3]);

    PeerInfo client = new PeerInfo(clientHostname, clientPort);
    PeerInfo server = new PeerInfo(serverHostname, serverPort);

    try {
        DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();
        // force the use of a local port
        // - normally you don't need this
        clientFactory.setClientInfo(client);

        ExtensionRegistry r = ExtensionRegistry.newInstance();
        PingPong.registerAllExtensions(r);
        clientFactory.setExtensionRegistry(r);

        clientFactory.setConnectResponseTimeoutMillis(10000);
        RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(3, 10);
        clientFactory.setRpcServerCallExecutor(rpcExecutor);

        // RPC payloads are uncompressed when logged - so reduce logging
        CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
        logger.setLogRequestProto(false);
        logger.setLogResponseProto(false);
        clientFactory.setRpcLogger(logger);

        // Set up the event pipeline factory.
        // Set up an RPC event listener - it just logs what happens
        RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

        final RpcConnectionEventListener listener = new RpcConnectionEventListener() {

            @Override
            public void connectionReestablished(RpcClientChannel clientChannel) {
                log.info("connectionReestablished " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionOpened(RpcClientChannel clientChannel) {
                log.info("connectionOpened " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionLost(RpcClientChannel clientChannel) {
                log.info("connectionLost " + clientChannel);
            }

            @Override
            public void connectionChanged(RpcClientChannel clientChannel) {
                log.info("connectionChanged " + clientChannel);
                channel = clientChannel;
            }
        };
        rpcEventNotifier.addEventListener(listener);
        clientFactory.registerConnectionEventListener(rpcEventNotifier);

        // Configure the client to provide a Pong service in both blocking and non-blocking varieties
        BlockingService bPongService = BlockingPongService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(bPongService);

        Service nbPongService = NonBlockingPongService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPongService);

        // We give the client a blocking and a non-blocking (pong-capable) Ping service
        BlockingService bPingService = BlockingPingService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(bPingService);

        Service nbPingService = NonBlockingPingService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPingService);

        Bootstrap bootstrap = new Bootstrap();
        EventLoopGroup workers = new NioEventLoopGroup(16,
                new RenamingThreadFactoryProxy("workers", Executors.defaultThreadFactory()));

        bootstrap.group(workers);
        bootstrap.handler(clientFactory);
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
        bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
        bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

        RpcClientConnectionWatchdog watchdog = new RpcClientConnectionWatchdog(clientFactory, bootstrap);
        rpcEventNotifier.addEventListener(watchdog);
        watchdog.start();

        CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
        shutdownHandler.addResource(workers);
        shutdownHandler.addResource(rpcExecutor);

        clientFactory.peerWith(server, bootstrap);

        while (channel != null) {

            callNonBlockingServerPingWithBlockingClientTimeoutBeforeFinish();

            Thread.sleep(10000);

        }

    } catch (Exception e) {
        log.warn("Failure.", e);
    } finally {
        System.exit(0);
    }
}

From source file:com.gxkj.demo.netty.echo.EchoClient.java

License:Apache License

public void run() throws Exception {
    // Configure the client.
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioSocketChannel.class).option(ChannelOption.TCP_NODELAY, true)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(
                                //new LoggingHandler(LogLevel.INFO),
                                new EchoClientHandler(firstMessageSize));
                    }
                });

        // Start the client.
        ChannelFuture f = b.connect(host, port).sync();

        // Wait until the connection is closed.
        f.channel().closeFuture().sync();
    } finally {
        // Shut down the event loop to terminate all threads.
        group.shutdownGracefully();
    }
}

From source file:com.hazelcast.openshift.TunnelClientAcceptor.java

License:Open Source License

protected Bootstrap createBootstrap(Channel socket) {
    return new Bootstrap().channel(NioSocketChannel.class).group(workerGroup)
            .option(ChannelOption.TCP_NODELAY, true).handler(new ChannelInitializer<SocketChannel>() {

                @Override
                protected void initChannel(SocketChannel channel) throws Exception {
                    System.out.println("Configure plain-socket: (" + socket + ") => (" + forwardHost + ":"
                            + forwardPort + ")");
                    ChannelPipeline pipeline = channel.pipeline();
                    pipeline.addLast(new ProxyForwardHandler(socket));
                }
            });
}

From source file:com.hazelcast.openshift.TunnelServerConnector.java

License:Open Source License

protected Bootstrap createBootstrap(Channel socket, Promise promise) throws Exception {
    SslContext sslContext;
    if (!ssl) {
        sslContext = null;

    } else {
        sslContext = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
    }

    return new Bootstrap().channel(NioSocketChannel.class).group(workerGroup)
            .option(ChannelOption.TCP_NODELAY, true).handler(new ChannelInitializer<SocketChannel>() {

                @Override
                protected void initChannel(SocketChannel channel) throws Exception {
                    System.out.println(
                            "Configure plain-socket: (" + socket + ") => (" + httpHost + ":" + httpPort + ")");
                    ChannelPipeline pipeline = channel.pipeline();
                    if (sslContext != null) {
                        pipeline.addLast("ssl", sslContext.newHandler(channel.alloc()));
                    }
                    pipeline.addLast("http-codec", new HttpClientCodec());
                    pipeline.addLast(new TunnelServerAcceptor(socket, promise));
                }
            });
}

From source file:com.heelenyc.research.netty.basic.TimeClient.java

License:Apache License

/**
 * @param port
 * @param host
 * @throws Exception
 */
public void connect(int port, String host) throws Exception {
    // Configure the client NIO thread group
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioSocketChannel.class).option(ChannelOption.TCP_NODELAY, true)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline().addLast(new TimeClientHandler());
                    }
                });

        // Initiate the asynchronous connect operation
        ChannelFuture f = b.connect(host, port).sync();

        // Wait until the connection is closed
        f.channel().closeFuture().sync();
    } finally {
        // Gracefully shut down, releasing the NIO thread group
        group.shutdownGracefully();
    }
}

From source file:com.heliosapm.streams.onramp.OnRampBoot.java

License:Apache License

/**
 * Creates a new OnRampBoot
 * @param appConfig  The application configuration
 */
public OnRampBoot(final Properties appConfig) {
    final String jmxmpUri = ConfigurationHelper.getSystemThenEnvProperty("jmx.jmxmp.uri",
            "jmxmp://0.0.0.0:1893", appConfig);
    JMXHelper.fireUpJMXMPServer(jmxmpUri);
    MessageForwarder.initialize(appConfig);
    port = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.port", 8091, appConfig);
    bindInterface = ConfigurationHelper.getSystemThenEnvProperty("onramp.network.bind", "0.0.0.0", appConfig);
    bindSocket = new InetSocketAddress(bindInterface, port);
    workerThreads = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.worker_threads", CORES * 2,
            appConfig);
    connectTimeout = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sotimeout", 0, appConfig);
    backlog = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.backlog", 3072, appConfig);
    writeSpins = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.writespins", 16, appConfig);
    recvBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.recbuffer", 43690, appConfig);
    sendBuffer = ConfigurationHelper.getIntSystemThenEnvProperty("onramp.network.sendbuffer", 8192, appConfig);
    disableEpoll = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.epoll.disable", false,
            appConfig);
    async = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.async_io", true, appConfig);
    tcpNoDelay = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.tcp_no_delay", true,
            appConfig);
    keepAlive = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.keep_alive", true,
            appConfig);
    reuseAddress = ConfigurationHelper.getBooleanSystemThenEnvProperty("onramp.network.reuse_address", true,
            appConfig);
    tcpPipelineFactory = new PipelineFactory(appConfig);
    udpPipelineFactory = new UDPPipelineFactory();
    tcpServerBootstrap.handler(new LoggingHandler(getClass(), LogLevel.INFO));
    tcpServerBootstrap.childHandler(tcpPipelineFactory);
    // Set the child options
    tcpServerBootstrap.childOption(ChannelOption.ALLOCATOR, BufferManager.getInstance().getAllocator());
    tcpServerBootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    tcpServerBootstrap.childOption(ChannelOption.SO_KEEPALIVE, keepAlive);
    tcpServerBootstrap.childOption(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.childOption(ChannelOption.SO_SNDBUF, sendBuffer);
    tcpServerBootstrap.childOption(ChannelOption.WRITE_SPIN_COUNT, writeSpins);
    // Set the server options
    tcpServerBootstrap.option(ChannelOption.SO_BACKLOG, backlog);
    tcpServerBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    tcpServerBootstrap.option(ChannelOption.SO_RCVBUF, recvBuffer);
    tcpServerBootstrap.option(ChannelOption.SO_TIMEOUT, connectTimeout);

    final StringBuilder tcpUri = new StringBuilder("tcp");
    final StringBuilder udpUri = new StringBuilder("udp");
    if (IS_LINUX && !disableEpoll) {
        bossExecutorThreadFactory = new ExecutorThreadFactory("EpollServerBoss", true);
        bossGroup = new EpollEventLoopGroup(1, (ThreadFactory) bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("EpollServerWorker", true);
        workerGroup = new EpollEventLoopGroup(workerThreads, (ThreadFactory) workerExecutorThreadFactory);
        tcpChannelType = EpollServerSocketChannel.class;
        udpChannelType = EpollDatagramChannel.class;
        tcpUri.append("epoll");
        udpUri.append("epoll");
    } else {
        bossExecutorThreadFactory = new ExecutorThreadFactory("NioServerBoss", true);
        bossGroup = new NioEventLoopGroup(1, bossExecutorThreadFactory);
        workerExecutorThreadFactory = new ExecutorThreadFactory("NioServerWorker", true);
        workerGroup = new NioEventLoopGroup(workerThreads, workerExecutorThreadFactory);
        tcpChannelType = NioServerSocketChannel.class;
        udpChannelType = NioDatagramChannel.class;
        tcpUri.append("nio");
        udpUri.append("nio");
    }

    tcpUri.append("://").append(bindInterface).append(":").append(port);
    udpUri.append("://").append(bindInterface).append(":").append(port);
    URI u = null;
    try {
        u = new URI(tcpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed TCP server URI const: [{}]. Programmer Error", tcpUri, e);
    }
    tcpServerURI = u;
    try {
        u = new URI(udpUri.toString());
    } catch (URISyntaxException e) {
        log.warn("Failed UDP server URI const: [{}]. Programmer Error", udpUri, e);
    }
    udpServerURI = u;

    log.info(">>>>> Starting OnRamp TCP Listener on [{}]...", tcpServerURI);
    log.info(">>>>> Starting OnRamp UDP Listener on [{}]...", udpServerURI);
    final ChannelFuture cf = tcpServerBootstrap.channel(tcpChannelType).group(bossGroup, workerGroup)
            .bind(bindSocket).awaitUninterruptibly()
            .addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp TCP Listener on [{}] Started", tcpServerURI);
                };
            }).awaitUninterruptibly();
    final ChannelFuture ucf = udpBootstrap.channel(udpChannelType).group(workerGroup)
            .option(ChannelOption.SO_BROADCAST, true).handler(new UDPPipelineFactory()).bind(bindSocket)
            .awaitUninterruptibly().addListener(new GenericFutureListener<Future<? super Void>>() {
                public void operationComplete(final Future<? super Void> f) throws Exception {
                    log.info("<<<<< OnRamp UDP Listener on [{}] Started", udpServerURI);
                };
            }).awaitUninterruptibly();

    tcpServerChannel = cf.channel();
    udpServerChannel = ucf.channel();
    tcpCloseFuture = tcpServerChannel.closeFuture();
    udpCloseFuture = udpServerChannel.closeFuture();
    Runtime.getRuntime().addShutdownHook(shutdownHook);

}