Example usage for io.netty.channel ChannelOption CONNECT_TIMEOUT_MILLIS


Introduction

This page collects usage examples for io.netty.channel ChannelOption.CONNECT_TIMEOUT_MILLIS, the option that sets the maximum time, in milliseconds, that Netty waits for a channel connect() attempt to complete before failing the connect future.

Prototype

public static final ChannelOption<Integer> CONNECT_TIMEOUT_MILLIS

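Before the project-level examples below, here is a minimal, self-contained sketch of typical client-side usage. The endpoint 10.255.255.1:80 (a non-routable address chosen so the TCP handshake typically hangs) and the 5-second timeout are illustrative assumptions, not taken from any project on this page. The point to note is that when the timeout elapses, the connect future fails with io.netty.channel.ConnectTimeoutException.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ConnectTimeoutException;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class ConnectTimeoutExample {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap().group(group).channel(NioSocketChannel.class)
                    // Fail the connect attempt if the TCP handshake takes longer than 5 seconds.
                    .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Application handlers would be added to ch.pipeline() here.
                        }
                    });
            // A black-holed address means the handshake never completes, so the timeout fires.
            ChannelFuture f = b.connect("10.255.255.1", 80).awaitUninterruptibly();
            if (!f.isSuccess() && f.cause() instanceof ConnectTimeoutException) {
                System.err.println("Connect timed out: " + f.cause().getMessage());
            }
        } finally {
            group.shutdownGracefully();
        }
    }
}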

Usage

From source file: org.apache.drill.exec.rpc.BasicServer.java

License: Apache License

public BasicServer(final RpcConfig rpcMapping, ByteBufAllocator alloc, EventLoopGroup eventLoopGroup) {
    super(rpcMapping);
    this.eventLoopGroup = eventLoopGroup;

    b = new ServerBootstrap().channel(TransportCheck.getServerSocketChannel())
            .option(ChannelOption.SO_BACKLOG, 1000).option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 30 * 1000)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_RCVBUF, 1 << 17).option(ChannelOption.SO_SNDBUF, 1 << 17)
            .group(eventLoopGroup) //
            .childOption(ChannelOption.ALLOCATOR, alloc)

            // .handler(new LoggingHandler(LogLevel.INFO))

            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    //            logger.debug("Starting initialization of server connection.");
                    C connection = initRemoteConnection(ch);
                    ch.closeFuture().addListener(getCloseHandler(ch, connection));

                    final ChannelPipeline pipe = ch.pipeline();
                    pipe.addLast("protocol-decoder",
                            getDecoder(connection.getAllocator(), getOutOfMemoryHandler()));
                    pipe.addLast("message-decoder", new RpcDecoder("s-" + rpcConfig.getName()));
                    pipe.addLast("protocol-encoder", new RpcEncoder("s-" + rpcConfig.getName()));
                    pipe.addLast("handshake-handler", getHandshakeHandler(connection));

                    if (rpcMapping.hasTimeout()) {
                        pipe.addLast(TIMEOUT_HANDLER,
                                new LogggingReadTimeoutHandler(connection, rpcMapping.getTimeout()));
                    }

                    pipe.addLast("message-handler", new InboundHandler(connection));
                    pipe.addLast("exception-handler", new RpcExceptionHandler<C>(connection));

                    connect = true;
                    //            logger.debug("Server connection initialization completed.");
                }
            });

    //     if(TransportCheck.SUPPORTS_EPOLL){
    //       b.option(EpollChannelOption.SO_REUSEPORT, true); //
    //     }
}
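Note that on a ServerBootstrap, option(...) configures the parent (listening) channel, and a listening channel never issues connect() calls, so CONNECT_TIMEOUT_MILLIS is effectively inert here; it appears to be set alongside the other socket options simply for completeness.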

From source file: org.apache.dubbo.remoting.transport.netty4.NettyClient.java

License: Apache License

/**
 * Init bootstrap.
 *
 * @throws Throwable
 */
@Override
protected void doOpen() throws Throwable {
    final NettyClientHandler nettyClientHandler = new NettyClientHandler(getUrl(), this);
    bootstrap = new Bootstrap();
    bootstrap.group(nioEventLoopGroup).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            //.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getTimeout())
            .channel(NioSocketChannel.class);

    if (getConnectTimeout() < 3000) {
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000);
    } else {
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getConnectTimeout());
    }

    bootstrap.handler(new ChannelInitializer() {

        @Override
        protected void initChannel(Channel ch) throws Exception {
            int heartbeatInterval = UrlUtils.getHeartbeat(getUrl());
            NettyCodecAdapter adapter = new NettyCodecAdapter(getCodec(), getUrl(), NettyClient.this);
            ch.pipeline()//.addLast("logging",new LoggingHandler(LogLevel.INFO))//for debug
                    .addLast("decoder", adapter.getDecoder()).addLast("encoder", adapter.getEncoder())
                    .addLast("client-idle-handler", new IdleStateHandler(heartbeatInterval, 0, 0, MILLISECONDS))
                    .addLast("handler", nettyClientHandler);
            String socksProxyHost = ConfigUtils.getProperty(SOCKS_PROXY_HOST);
            if (socksProxyHost != null) {
                int socksProxyPort = Integer
                        .parseInt(ConfigUtils.getProperty(SOCKS_PROXY_PORT, DEFAULT_SOCKS_PROXY_PORT));
                Socks5ProxyHandler socks5ProxyHandler = new Socks5ProxyHandler(
                        new InetSocketAddress(socksProxyHost, socksProxyPort));
                ch.pipeline().addFirst(socks5ProxyHandler);
            }
        }
    });
}
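The if/else around CONNECT_TIMEOUT_MILLIS enforces a 3-second floor on whatever timeout the URL configures. A terser equivalent of the same clamp (a sketch, not Dubbo's code):

    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.max(3000, getConnectTimeout()));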

From source file: org.apache.flink.runtime.io.network.netty.NettyClient.java

License: Apache License

void init(final NettyProtocol protocol, NettyBufferPool nettyBufferPool) throws IOException {
    checkState(bootstrap == null, "Netty client has already been initialized.");

    long start = System.currentTimeMillis();

    bootstrap = new Bootstrap();

    // --------------------------------------------------------------------
    // Transport-specific configuration
    // --------------------------------------------------------------------

    switch (config.getTransportType()) {
    case NIO:
        initNioBootstrap();
        break;

    case EPOLL:
        initEpollBootstrap();
        break;

    case AUTO:
        if (Epoll.isAvailable()) {
            initEpollBootstrap();
            LOG.info("Transport type 'auto': using EPOLL.");
        } else {
            initNioBootstrap();
            LOG.info("Transport type 'auto': using NIO.");
        }
    }

    // --------------------------------------------------------------------
    // Configuration
    // --------------------------------------------------------------------

    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);

    // Timeout for new connections
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getClientConnectTimeoutSeconds() * 1000);

    // Pooled allocator for Netty's ByteBuf instances
    bootstrap.option(ChannelOption.ALLOCATOR, nettyBufferPool);

    // Receive and send buffer size
    int receiveAndSendBufferSize = config.getSendAndReceiveBufferSize();
    if (receiveAndSendBufferSize > 0) {
        bootstrap.option(ChannelOption.SO_SNDBUF, receiveAndSendBufferSize);
        bootstrap.option(ChannelOption.SO_RCVBUF, receiveAndSendBufferSize);
    }

    // --------------------------------------------------------------------
    // Child channel pipeline for accepted connections
    // --------------------------------------------------------------------

    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            channel.pipeline().addLast(protocol.getClientChannelHandlers());
        }
    });

    long end = System.currentTimeMillis();
    LOG.info("Successful initialization (took {} ms).", (end - start));
}
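Flink expresses the connect timeout in seconds, so it is multiplied by 1000 here to match the milliseconds that CONNECT_TIMEOUT_MILLIS expects. (The "accepted connections" heading appears to be carried over from the matching server code; on this client Bootstrap the initializer configures outbound connections.)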

From source file: org.apache.geode.redis.GeodeRedisServer.java

License: Apache License

/**
 * Helper method to start the server listening for connections. The server is bound to the port
 * specified by {@link GeodeRedisServer#serverPort}
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void startRedisServer() throws IOException, InterruptedException {
    ThreadFactory selectorThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-SelectorThread-" + counter.incrementAndGet());
            t.setDaemon(true);
            return t;
        }

    };

    ThreadFactory workerThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-WorkerThread-" + counter.incrementAndGet());
            return t;
        }

    };

    bossGroup = null;
    workerGroup = null;
    Class<? extends ServerChannel> socketClass = null;
    if (singleThreadPerConnection) {
        bossGroup = new OioEventLoopGroup(Integer.MAX_VALUE, selectorThreadFactory);
        workerGroup = new OioEventLoopGroup(Integer.MAX_VALUE, workerThreadFactory);
        socketClass = OioServerSocketChannel.class;
    } else {
        bossGroup = new NioEventLoopGroup(this.numSelectorThreads, selectorThreadFactory);
        workerGroup = new NioEventLoopGroup(this.numWorkerThreads, workerThreadFactory);
        socketClass = NioServerSocketChannel.class;
    }
    InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem();
    String pwd = system.getConfig().getRedisPassword();
    final byte[] pwdB = Coder.stringToBytes(pwd);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(socketClass).childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            if (logger.fineEnabled())
                logger.fine("GeodeRedisServer-Connection established with " + ch.remoteAddress());
            ChannelPipeline p = ch.pipeline();
            p.addLast(ByteToCommandDecoder.class.getSimpleName(), new ByteToCommandDecoder());
            p.addLast(ExecutionHandlerContext.class.getSimpleName(),
                    new ExecutionHandlerContext(ch, cache, regionCache, GeodeRedisServer.this, pwdB));
        }
    }).option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.SO_RCVBUF, getBufferSize())
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, GeodeRedisServer.connectTimeoutMillis)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    // Bind and start to accept incoming connections.
    ChannelFuture f = b.bind(new InetSocketAddress(getBindAddress(), serverPort)).sync();
    if (this.logger.infoEnabled()) {
        String logMessage = "GeodeRedisServer started {" + getBindAddress() + ":" + serverPort
                + "}, Selector threads: " + this.numSelectorThreads;
        if (this.singleThreadPerConnection)
            logMessage += ", One worker thread per connection";
        else
            logMessage += ", Worker threads: " + this.numWorkerThreads;
        this.logger.info(logMessage);
    }
    this.serverChannel = f.channel();
}
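Here CONNECT_TIMEOUT_MILLIS is set via childOption(...), i.e. on the accepted child channels. Since those channels are already connected by the time they exist, the option has no practical effect on the server side; the child options that matter for accepted sockets are SO_KEEPALIVE and the allocator.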

From source file: org.apache.giraph.comm.netty.NettyClient.java

License: Apache License

/**
 * Only constructor.
 *
 * @param context Context for progress
 * @param conf Configuration
 * @param myTaskInfo Current task info
 * @param exceptionHandler handler for uncaught exception. Will
 *                         terminate job.
 */
public NettyClient(Mapper<?, ?, ?, ?>.Context context, final ImmutableClassesGiraphConfiguration conf,
        TaskInfo myTaskInfo, final Thread.UncaughtExceptionHandler exceptionHandler) {
    this.context = context;
    this.myTaskInfo = myTaskInfo;
    this.channelsPerServer = GiraphConstants.CHANNELS_PER_SERVER.get(conf);
    sendBufferSize = CLIENT_SEND_BUFFER_SIZE.get(conf);
    receiveBufferSize = CLIENT_RECEIVE_BUFFER_SIZE.get(conf);

    limitNumberOfOpenRequests = conf.getBoolean(LIMIT_NUMBER_OF_OPEN_REQUESTS,
            LIMIT_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
    if (limitNumberOfOpenRequests) {
        maxNumberOfOpenRequests = conf.getInt(MAX_NUMBER_OF_OPEN_REQUESTS, MAX_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Limit number of open requests to " + maxNumberOfOpenRequests);
        }
    } else {
        maxNumberOfOpenRequests = -1;
    }

    maxRequestMilliseconds = MAX_REQUEST_MILLISECONDS.get(conf);

    maxConnectionFailures = NETTY_MAX_CONNECTION_FAILURES.get(conf);

    waitingRequestMsecs = WAITING_REQUEST_MSECS.get(conf);

    maxPoolSize = GiraphConstants.NETTY_CLIENT_THREADS.get(conf);

    maxResolveAddressAttempts = MAX_RESOLVE_ADDRESS_ATTEMPTS.get(conf);

    clientRequestIdRequestInfoMap = new MapMaker().concurrencyLevel(maxPoolSize).makeMap();

    handlerToUseExecutionGroup = NETTY_CLIENT_EXECUTION_AFTER_HANDLER.get(conf);
    useExecutionGroup = NETTY_CLIENT_USE_EXECUTION_HANDLER.get(conf);
    if (useExecutionGroup) {
        int executionThreads = NETTY_CLIENT_EXECUTION_THREADS.get(conf);
        executionGroup = new DefaultEventExecutorGroup(executionThreads,
                ThreadUtils.createThreadFactory("netty-client-exec-%d", exceptionHandler));
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Using execution handler with " + executionThreads + " threads after "
                    + handlerToUseExecutionGroup + ".");
        }
    } else {
        executionGroup = null;
    }

    workerGroup = new NioEventLoopGroup(maxPoolSize,
            ThreadUtils.createThreadFactory("netty-client-worker-%d", exceptionHandler));

    bootstrap = new Bootstrap();
    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, MAX_CONNECTION_MILLISECONDS_DEFAULT)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.SO_SNDBUF, sendBufferSize).option(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    /*if_not[HADOOP_NON_SECURE]*/
                    if (conf.authenticate()) {
                        LOG.info("Using Netty with authentication.");

                        // Our pipeline starts with just byteCounter, and then we use
                        // addLast() to incrementally add pipeline elements, so that we
                        // can name them for identification for removal or replacement
                        // after client is authenticated by server.
                        // After authentication is complete, the pipeline's SASL-specific
                        // functionality is removed, restoring the pipeline to exactly the
                        // same configuration as it would be without authentication.
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        // The following pipeline component is needed to decode the
                        // server's SASL tokens. It is replaced with a
                        // FixedLengthFrameDecoder (same as used with the
                        // non-authenticated pipeline) after authentication
                        // completes (as in non-auth pipeline below).
                        PipelineUtils.addLastWithExecutorCheck("length-field-based-frame-decoder",
                                new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4), handlerToUseExecutionGroup,
                                executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        // The following pipeline component responds to the server's SASL
                        // tokens with its own responses. Both client and server share the
                        // same Hadoop Job token, which is used to create the SASL
                        // tokens to authenticate with each other.
                        // After authentication finishes, this pipeline component
                        // is removed.
                        PipelineUtils.addLastWithExecutorCheck("sasl-client-handler",
                                new SaslClientHandler(conf), handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                    } else {
                        LOG.info("Using Netty without authentication.");
                        /*end[HADOOP_NON_SECURE]*/
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("fixed-length-frame-decoder",
                                new FixedLengthFrameDecoder(RequestServerHandler.RESPONSE_BYTES),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);

                        /*if_not[HADOOP_NON_SECURE]*/
                    }
                    /*end[HADOOP_NON_SECURE]*/
                }
            });
}
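Unlike the Dubbo and Flink clients above, Giraph pins the connect timeout to the compile-time constant MAX_CONNECTION_MILLISECONDS_DEFAULT rather than reading it from configuration.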

From source file: org.apache.hadoop.hbase.ipc.AsyncRpcClient.java

License: Apache License

/**
 * Constructor for tests.
 *
 * @param configuration      to HBase
 * @param clusterId          for the cluster
 * @param localAddress       local address to connect to
 * @param channelInitializer for custom channel handlers
 */
@VisibleForTesting
AsyncRpcClient(Configuration configuration, String clusterId, SocketAddress localAddress,
        ChannelInitializer<SocketChannel> channelInitializer) {
    super(configuration, clusterId, localAddress);

    if (LOG.isDebugEnabled()) {
        LOG.debug("Starting async Hbase RPC client");
    }

    Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass;
    this.useGlobalEventLoopGroup = conf.getBoolean(USE_GLOBAL_EVENT_LOOP_GROUP, true);
    if (useGlobalEventLoopGroup) {
        eventLoopGroupAndChannelClass = getGlobalEventLoopGroup(configuration);
    } else {
        eventLoopGroupAndChannelClass = createEventLoopGroup(configuration);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Use " + (useGlobalEventLoopGroup ? "global" : "individual") + " event loop group "
                + eventLoopGroupAndChannelClass.getFirst().getClass().getSimpleName());
    }

    this.connections = new PoolMap<>(getPoolType(configuration), getPoolSize(configuration));
    this.failedServers = new FailedServers(configuration);

    int operationTimeout = configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
            HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);

    // Configure the default bootstrap.
    this.bootstrap = new Bootstrap();
    bootstrap.group(eventLoopGroupAndChannelClass.getFirst()).channel(eventLoopGroupAndChannelClass.getSecond())
            .option(ChannelOption.TCP_NODELAY, tcpNoDelay).option(ChannelOption.SO_KEEPALIVE, tcpKeepAlive)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, operationTimeout);
    if (channelInitializer == null) {
        channelInitializer = DEFAULT_CHANNEL_INITIALIZER;
    }
    bootstrap.handler(channelInitializer);
    if (localAddress != null) {
        bootstrap.localAddress(localAddress);
    }
}
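In this client the general HBase operation timeout (HConstants.HBASE_CLIENT_OPERATION_TIMEOUT) doubles as the connect timeout, so a single setting bounds both connection establishment and the enclosing operation.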

From source file: org.apache.hadoop.hbase.ipc.NettyRpcConnection.java

License: Apache License

private void connect() {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to " + remoteId.address);
    }

    this.channel = new Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
            .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
            .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO)
            .handler(new BufferCallBeforeInitHandler()).localAddress(rpcClient.localAddr)
            .remoteAddress(remoteId.address).connect().addListener(new ChannelFutureListener() {

                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    Channel ch = future.channel();
                    if (!future.isSuccess()) {
                        failInit(ch, toIOE(future.cause()));
                        rpcClient.failedServers.addToFailedServers(remoteId.address);
                        return;
                    }
                    ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
                    if (useSasl) {
                        saslNegotiate(ch);
                    } else {
                        // send the connection header to server
                        ch.write(connectionHeaderWithLength.retainedDuplicate());
                        established(ch);
                    }
                }
            }).channel();
}
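If rpcClient.connectTO elapses before the TCP handshake completes, the future delivered to operationComplete(...) fails with a ConnectTimeoutException, so the failInit(...) branch above also covers unreachable or slow peers, and the address is recorded in failedServers like any other connect failure.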

From source file: org.apache.hive.spark.client.rpc.Rpc.java

License: Apache License

/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop, String host,
        int port, final String clientId, final String secret, final RpcDispatcher dispatcher) throws Exception {
    final RpcConfiguration rpcConf = new RpcConfiguration(config);
    int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();

    final ChannelFuture cf = new Bootstrap().group(eloop).handler(new ChannelInboundHandlerAdapter() {
    }).channel(NioSocketChannel.class).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs).connect(host, port);

    final Promise<Rpc> promise = eloop.next().newPromise();
    final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();

    // Set up a timeout to undo everything.
    final Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
            promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
        }
    };
    final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, rpcConf.getServerConnectTimeoutMs(),
            TimeUnit.MILLISECONDS);

    // The channel listener instantiates the Rpc instance when the connection is established,
    // and initiates the SASL handshake.
    cf.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture cf) throws Exception {
            if (cf.isSuccess()) {
                SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise, timeoutFuture,
                        secret, dispatcher);
                Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
                saslHandler.rpc = rpc;
                saslHandler.sendHello(cf.channel());
            } else {
                promise.setFailure(cf.cause());
            }
        }
    });

    // Handle cancellation of the promise.
    promise.addListener(new GenericFutureListener<Promise<Rpc>>() {
        @Override
        public void operationComplete(Promise<Rpc> p) {
            if (p.isCancelled()) {
                cf.cancel(true);
            }
        }
    });

    return promise;
}
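This client layers two timeouts: CONNECT_TIMEOUT_MILLIS bounds the TCP connect itself, while the scheduled timeoutTask bounds the longer window in which the whole handshake must complete (the timeoutFuture is handed to SaslClientHandler, which presumably cancels it on success).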

From source file: org.apache.jackrabbit.oak.plugins.segment.standby.client.StandbyClient.java

License: Apache License

public void run() {
    if (!isRunning()) {
        // manually stopped
        return;
    }

    Bootstrap b;
    synchronized (this.sync) {
        if (this.active) {
            return;
        }
        state = STATUS_STARTING;
        handler = new StandbyClientHandler(this.store, observer, running, readTimeoutMs, autoClean);
        group = new NioEventLoopGroup();

        b = new Bootstrap();
        b.group(group);
        b.channel(NioSocketChannel.class);
        b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, readTimeoutMs);
        b.option(ChannelOption.TCP_NODELAY, true);
        b.option(ChannelOption.SO_REUSEADDR, true);
        b.option(ChannelOption.SO_KEEPALIVE, true);

        b.handler(new ChannelInitializer<SocketChannel>() {
            @Override
            public void initChannel(SocketChannel ch) throws Exception {
                ChannelPipeline p = ch.pipeline();
                if (sslContext != null) {
                    p.addLast(sslContext.newHandler(ch.alloc()));
                }
                p.addLast("readTimeoutHandler", new ReadTimeoutHandler(readTimeoutMs, TimeUnit.MILLISECONDS));
                p.addLast(new StringEncoder(CharsetUtil.UTF_8));
                p.addLast(new SnappyFramedDecoder(true));
                p.addLast(new RecordIdDecoder(store));
                p.addLast(handler);
            }
        });
        state = STATUS_RUNNING;
        this.active = true;
    }

    try {
        long startTimestamp = System.currentTimeMillis();
        // Start the client.
        ChannelFuture f = b.connect(host, port).sync();
        // Wait until the connection is closed.
        f.channel().closeFuture().sync();
        this.failedRequests = 0;
        this.syncStartTimestamp = startTimestamp;
        this.syncEndTimestamp = System.currentTimeMillis();
        this.lastSuccessfulRequest = syncEndTimestamp / 1000;
    } catch (Exception e) {
        this.failedRequests++;
        log.error("Failed synchronizing state.", e);
    } finally {
        synchronized (this.sync) {
            this.active = false;
            shutdownNetty();
        }
    }
}
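readTimeoutMs does double duty here: it serves both as the connect timeout and as the budget for the ReadTimeoutHandler installed in the pipeline, so one setting governs how long the standby waits for the connection and for each read.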

From source file: org.apache.qpid.jms.transports.netty4.NettyTcpTransport.java

License: Apache License

private void configureNetty(Bootstrap bootstrap, TransportOptions options) {
    bootstrap.option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive());
    bootstrap.option(ChannelOption.SO_LINGER, options.getSoLinger());
    bootstrap.option(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);

    if (options.getSendBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, options.getSendBufferSize());
    }

    if (options.getReceiveBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, options.getReceiveBufferSize());
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(options.getReceiveBufferSize()));
    }

    if (options.getTrafficClass() != -1) {
        bootstrap.option(ChannelOption.IP_TOS, options.getTrafficClass());
    }
}
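The pattern here is worth noting: CONNECT_TIMEOUT_MILLIS and the other always-relevant options are set unconditionally, while buffer sizes and traffic class are guarded by a -1 sentinel that appears to mean "not configured", leaving the Netty/OS defaults in place when a value is unset.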