Example usage for io.netty.channel ChannelOption CONNECT_TIMEOUT_MILLIS

List of usage examples for io.netty.channel ChannelOption CONNECT_TIMEOUT_MILLIS

Introduction

On this page you can find example usage for io.netty.channel ChannelOption CONNECT_TIMEOUT_MILLIS.

Prototype

public static final ChannelOption<Integer> CONNECT_TIMEOUT_MILLIS

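CONNECT_TIMEOUT_MILLIS bounds how long a single connect attempt may take: if the connection is not established within the configured number of milliseconds, the ChannelFuture returned by Bootstrap.connect(...) fails with a ConnectTimeoutException. The following sketch shows the option in isolation before the real-world examples below; it is a minimal illustration, and the host "example.com", port 80, and the 5000 ms timeout are placeholder values.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class ConnectTimeoutExample {
    public static void main(String[] args) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap bootstrap = new Bootstrap();
            bootstrap.group(group).channel(NioSocketChannel.class)
                    // fail the connect attempt after 5 seconds instead of
                    // waiting for the much longer OS-level TCP timeout
                    .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            ch.pipeline().addLast(new ChannelInboundHandlerAdapter());
                        }
                    });
            // sync() rethrows a ConnectTimeoutException if the timeout elapses
            ChannelFuture future = bootstrap.connect("example.com", 80).sync();
            future.channel().close().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}

Every example below makes the same option(ChannelOption.CONNECT_TIMEOUT_MILLIS, ...) call on a client Bootstrap, usually alongside other socket options such as TCP_NODELAY or SO_KEEPALIVE.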

Usage

From source file: com.googlecode.protobuf.pro.duplex.example.simple.TimeoutTestingClient.java

License: Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 4) {
        System.err.println("usage: <serverHostname> <serverPort> <clientHostname> <clientPort>");
        System.exit(-1);
    }
    String serverHostname = args[0];
    int serverPort = Integer.parseInt(args[1]);
    String clientHostname = args[2];
    int clientPort = Integer.parseInt(args[3]);

    PeerInfo client = new PeerInfo(clientHostname, clientPort);
    PeerInfo server = new PeerInfo(serverHostname, serverPort);

    try {
        DuplexTcpClientPipelineFactory clientFactory = new DuplexTcpClientPipelineFactory();
        // force the use of a local port
        // - normally you don't need this
        clientFactory.setClientInfo(client);

        ExtensionRegistry r = ExtensionRegistry.newInstance();
        PingPong.registerAllExtensions(r);
        clientFactory.setExtensionRegistry(r);

        clientFactory.setConnectResponseTimeoutMillis(10000);
        RpcServerCallExecutor rpcExecutor = new ThreadPoolCallExecutor(3, 10);
        clientFactory.setRpcServerCallExecutor(rpcExecutor);

        // RPC payloads are uncompressed when logged - so reduce logging
        CategoryPerServiceLogger logger = new CategoryPerServiceLogger();
        logger.setLogRequestProto(false);
        logger.setLogResponseProto(false);
        clientFactory.setRpcLogger(logger);

        // Set up the event pipeline factory.
        // set up an RPC event listener - it just logs what happens
        RpcConnectionEventNotifier rpcEventNotifier = new RpcConnectionEventNotifier();

        final RpcConnectionEventListener listener = new RpcConnectionEventListener() {

            @Override
            public void connectionReestablished(RpcClientChannel clientChannel) {
                log.info("connectionReestablished " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionOpened(RpcClientChannel clientChannel) {
                log.info("connectionOpened " + clientChannel);
                channel = clientChannel;
            }

            @Override
            public void connectionLost(RpcClientChannel clientChannel) {
                log.info("connectionLost " + clientChannel);
            }

            @Override
            public void connectionChanged(RpcClientChannel clientChannel) {
                log.info("connectionChanged " + clientChannel);
                channel = clientChannel;
            }
        };
        rpcEventNotifier.addEventListener(listener);
        clientFactory.registerConnectionEventListener(rpcEventNotifier);

        // Configure the client to provide a Pong Service in both blocking and non-blocking varieties
        BlockingService bPongService = BlockingPongService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(bPongService);

        Service nbPongService = NonBlockingPongService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPongService);

        // we give the client a blocking and non-blocking (pong-capable) Ping Service
        BlockingService bPingService = BlockingPingService
                .newReflectiveBlockingService(new PingPongServiceFactory.BlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(bPingService);

        Service nbPingService = NonBlockingPingService
                .newReflectiveService(new PingPongServiceFactory.NonBlockingPongingPingServer());
        clientFactory.getRpcServiceRegistry().registerService(nbPingService);

        Bootstrap bootstrap = new Bootstrap();
        EventLoopGroup workers = new NioEventLoopGroup(16,
                new RenamingThreadFactoryProxy("workers", Executors.defaultThreadFactory()));

        bootstrap.group(workers);
        bootstrap.handler(clientFactory);
        bootstrap.channel(NioSocketChannel.class);
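        // disable Nagle's algorithm, allow 10 seconds to connect, and use 1 MiB socket buffers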
        bootstrap.option(ChannelOption.TCP_NODELAY, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);
        bootstrap.option(ChannelOption.SO_SNDBUF, 1048576);
        bootstrap.option(ChannelOption.SO_RCVBUF, 1048576);

        RpcClientConnectionWatchdog watchdog = new RpcClientConnectionWatchdog(clientFactory, bootstrap);
        rpcEventNotifier.addEventListener(watchdog);
        watchdog.start();

        CleanShutdownHandler shutdownHandler = new CleanShutdownHandler();
        shutdownHandler.addResource(workers);
        shutdownHandler.addResource(rpcExecutor);

        clientFactory.peerWith(server, bootstrap);

        while (channel != null) {
            callNonBlockingServerPingWithBlockingClientTimeoutBeforeFinish();
            Thread.sleep(10000);
        }

    } catch (Exception e) {
        log.warn("Failure.", e);
    } finally {
        System.exit(0);
    }
}

From source file: com.graylog.splunk.output.senders.TCPSender.java

License: Open Source License

protected void createBootstrap(final EventLoopGroup workerGroup) {
    final Bootstrap bootstrap = new Bootstrap();
    final SplunkSenderThread senderThread = new SplunkSenderThread(queue);

    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
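            // fail the connect attempt after 5 seconds; the connect listener below schedules a reconnect on failure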
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000)
            .remoteAddress(new InetSocketAddress(hostname, port))
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new StringEncoder());

                    ch.pipeline().addLast(new SimpleChannelInboundHandler<ByteBuf>() {
                        @Override
                        protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception {
                            // we only send data, never read on the socket
                        }

                        @Override
                        public void channelActive(ChannelHandlerContext ctx) throws Exception {
                            senderThread.start(ctx.channel());
                        }

                        @Override
                        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
                            LOG.info("Channel disconnected.");
                            senderThread.stop();
                            scheduleReconnect(ctx.channel().eventLoop());
                        }

                        @Override
                        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause)
                                throws Exception {
                            LOG.error("Exception caught", cause);
                        }
                    });
                }
            });

    bootstrap.connect().addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                LOG.info("Connected.");
            } else {
                LOG.error("Connection failed: {}", future.cause().getMessage());
                scheduleReconnect(future.channel().eventLoop());
            }
        }
    });
}

From source file: com.gxkj.demo.netty.socksproxy.SocksServerConnectHandler.java

License: Apache License

@Override
public void messageReceived(final ChannelHandlerContext ctx, final SocksCmdRequest request) throws Exception {
    Promise<Channel> promise = ctx.executor().newPromise();
    promise.addListener(new GenericFutureListener<Future<Channel>>() {
        @Override
        public void operationComplete(final Future<Channel> future) throws Exception {
            final Channel outboundChannel = future.getNow();
            if (future.isSuccess()) {
                ctx.channel().writeAndFlush(new SocksCmdResponse(SocksCmdStatus.SUCCESS, request.addressType()))
                        .addListener(new ChannelFutureListener() {
                            @Override
                            public void operationComplete(ChannelFuture channelFuture) throws Exception {
                                ctx.pipeline().remove(getName());
                                outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                                ctx.channel().pipeline().addLast(new RelayHandler(outboundChannel));
                            }
                        });
            } else {
                ctx.channel()
                        .writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                SocksServerUtils.closeOnFlush(ctx.channel());
            }
        }
    });

    final Channel inboundChannel = ctx.channel();
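    // reuse the inbound channel's event loop for the outbound connection, keeping both channels on one thread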
    b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
            .handler(new DirectClientInitializer(promise));

    b.connect(request.host(), request.port()).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (future.isSuccess()) {
                // Connection established; the promise listener above takes over
            } else {
                // Close the connection if the connection attempt has failed.
                ctx.channel()
                        .writeAndFlush(new SocksCmdResponse(SocksCmdStatus.FAILURE, request.addressType()));
                SocksServerUtils.closeOnFlush(ctx.channel());
            }
        }
    });
}

From source file: com.hazelcast.simulator.protocol.connector.ClientConnector.java

License: Open Source License

private Bootstrap getBootstrap() {
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(group).channel(NioSocketChannel.class)
            .remoteAddress(new InetSocketAddress(remoteHost, remotePort))
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECT_TIMEOUT_MILLIS)
            .option(ChannelOption.SO_KEEPALIVE, true).handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) {
                    pipelineConfigurator.configureClientPipeline(channel.pipeline(), remoteAddress, futureMap);
                }
            });
    return bootstrap;
}

From source file: com.heliosapm.streams.forwarder.HttpJsonMetricForwarder.java

License: Apache License

/**
 * Starts the forwarder.
 * @throws Exception thrown on any error
 */
public void start() throws Exception {
    log.info(">>>>> Starting HttpJsonMetricForwarder [{}]....", beanName);
    threadPool = new JMXManagedThreadPool(EXECUTOR_OBJECT_NAME, beanName, workerThreads, workerThreads * 2, 1,
            60000, 100, 99, true);
    eventLoopGroup = new NioEventLoopGroup(workerThreads, (Executor) threadPool);
    outboundHandler = new HttpJsonOutboundHandler(1500, endpointHost, endpointUri);
    bootstrap.group(eventLoopGroup).channel(NioSocketChannel.class).handler(this)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000) // FIXME: config
            .option(ChannelOption.SO_SNDBUF, 64000) // FIXME: config
            .option(ChannelOption.SO_KEEPALIVE, true);
    senderChannel = bootstrap.connect(endpointHost, endpointPort).sync().channel(); // FIXME: short timeout, reconnect loop
    consumerProperties.put("bootstrap.servers", kafkaBootstrapServers);
    consumerProperties.put("group.id", kafkaGroupId);
    consumerProperties.put("enable.auto.commit", "" + kafkaAutoCommit);
    consumerProperties.put("auto.commit.interval.ms", "" + kafkaAutoCommitInterval);
    consumerProperties.put("session.timeout.ms", "" + 30000);
    consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("value.deserializer", HeliosSerdes.STREAMED_METRIC_VALUE_DESER.getClass().getName());
    subscriberThread = new Thread(this, "KafkaSubscriberThread-" + beanName);
    subscriberThread.setDaemon(true);
    subThreadActive.set(true);
    log.info("[{}] Subscriber Thread Starting...", beanName);
    subscriberThread.start();
    log.info("<<<<< HttpJsonMetricForwarder [{}] Started.", beanName);
}

From source file: com.heliosapm.streams.tracing.writers.NetWriter.java

License: Apache License

/**
 * {@inheritDoc}
 * @see com.heliosapm.streams.tracing.AbstractMetricWriter#configure(java.util.Properties)
 */
@Override
public void configure(final Properties config) {
    super.configure(config);
    remotes = ConfigurationHelper.getArraySystemThenEnvProperty(CONFIG_REMOTE_URIS, DEFAULT_REMOTE_URIS,
            config);
    Collections.addAll(remoteUris, remotes);
    channelGroupThreads = ConfigurationHelper.getIntSystemThenEnvProperty(CONFIG_EXEC_THREADS,
            DEFAULT_EXEC_THREADS, config);
    this.config.put("channelGroupThreads", channelGroupThreads);
    eventLoopThreads = ConfigurationHelper.getIntSystemThenEnvProperty(CONFIG_ELOOP_THREADS,
            DEFAULT_ELOOP_THREADS, config);
    this.config.put("eventLoopThreads", eventLoopThreads);
    eventExecutor = new UnorderedThreadPoolEventExecutor(channelGroupThreads, groupThreadFactory, this);
    channels = new DefaultChannelGroup(getClass().getSimpleName() + "Channels", eventExecutor);
    group = new NioEventLoopGroup(eventLoopThreads, eventLoopThreadFactory);
    bootstrap.group(group).channel(channelType).handler(getChannelInitializer());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000); // FIXME: config
    bootstrap.option(ChannelOption.ALLOCATOR, BufferManager.getInstance());
    this.config.put("connectTimeout", 5000);

    // FIXME: Tweaks for channel configuration

}

From source file: com.hop.hhxx.example.socksproxy.SocksServerConnectHandler.java

License: Apache License

@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksMessage message) throws Exception {
    if (message instanceof Socks4CommandRequest) {
        final Socks4CommandRequest request = (Socks4CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel()
                            .writeAndFlush(new DefaultSocks4CommandResponse(Socks4CommandStatus.SUCCESS));

                    responseFuture.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(ctx.channel()));
                            ctx.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new io.netty.example.socksproxy.DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; the promise listener above takes over
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                            new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else if (message instanceof Socks5CommandRequest) {
        final Socks5CommandRequest request = (Socks5CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {
            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(
                            Socks5CommandStatus.SUCCESS, request.dstAddrType()));

                    responseFuture.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(ctx.channel()));
                            ctx.pipeline()
                                    .addLast(new io.netty.example.socksproxy.RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });

        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop()).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000).option(ChannelOption.SO_KEEPALIVE, true)
                .handler(new DirectClientHandler(promise));

        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; the promise listener above takes over
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE,
                            request.dstAddrType()));
                    io.netty.example.socksproxy.SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else {
        ctx.close();
    }
}

From source file: com.ibasco.agql.core.transport.NettyTransport.java

License: Open Source License

public NettyTransport(ChannelType channelType, ExecutorService executor) {
    executorService = executor;
    bootstrap = new Bootstrap();

    //Make sure we have a type set
    if (channelType == null)
        throw new IllegalStateException("No channel type has been specified");

    //Pick the proper event loop group
    if (eventLoopGroup == null) {
        eventLoopGroup = createEventLoopGroup(channelType);
    }

    //Default Channel Options
    addChannelOption(ChannelOption.ALLOCATOR, allocator);
    addChannelOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WriteBufferWaterMark.DEFAULT);
    addChannelOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000);

    //Set resource leak detection if debugging is enabled
    if (log.isDebugEnabled())
        ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.ADVANCED);

    //Initialize bootstrap
    bootstrap.group(eventLoopGroup).channel(channelType.getChannelClass());
}

From source file: com.ibm.mqlight.api.impl.network.NettyNetworkService.java

License: Apache License

/**
 * Request a {@link Bootstrap} for obtaining a {@link Channel} and track
 * that the workerGroup is being used.
 *
 * @param secure
 *            a {@code boolean} indicating whether or not a secure channel
 *            will be required
 * @param sslEngine
 *            an {@link SSLEngine} if one should be used to secure the channel
 * @param handler a {@link ChannelHandler} to use for serving the requests.
 * @return a netty {@link Bootstrap} object suitable for obtaining a
 *         {@link Channel} from
 */
private static synchronized Bootstrap getBootstrap(final boolean secure, final SSLEngine sslEngine,
        final ChannelHandler handler) {
    final String methodName = "getBootstrap";
    logger.entry(methodName, secure, sslEngine);

    // lazily create the shared worker group and Bootstrap on first use
    ++useCount;
    if (useCount == 1) {
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        bootstrap = new Bootstrap();
        bootstrap.group(workerGroup);
        bootstrap.channel(NioSocketChannel.class);
        bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 30000);
        bootstrap.handler(handler);
    }

    final Bootstrap result;
    if (secure) {
        result = bootstrap.clone();
        result.handler(handler);
    } else {
        result = bootstrap;
    }

    logger.exit(methodName, result);

    return result;
}

From source file: com.kevinherron.GatewayHook.java

@Override
public void startup(LicenseState licenseState) {
    logger.info("startup()");

    try {
        Bootstrap bootstrap = new Bootstrap();

        bootstrap.group(eventLoop).channel(NioSocketChannel.class)
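                // connect gets 5 seconds, though the get(2, TimeUnit.SECONDS) below waits at most 2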
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000).option(ChannelOption.TCP_NODELAY, true)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel socketChannel) throws Exception {
                        logger.info("initChannel");
                    }
                });

        bootstrap.connect("localhost", 1234).get(2, TimeUnit.SECONDS);
    } catch (Throwable t) {
        logger.error("failed getting un-gettable endpoints: {}", t.getMessage(), t);
    }
}