Example usage for io.netty.channel ChannelOption TCP_NODELAY

List of usage examples for io.netty.channel ChannelOption TCP_NODELAY

Introduction

This page collects usage examples for io.netty.channel ChannelOption TCP_NODELAY. Setting this option to true disables Nagle's algorithm, so small writes are sent immediately instead of being coalesced into fewer, larger TCP segments; it trades some bandwidth efficiency for lower latency.

Prototype

public static final ChannelOption&lt;Boolean&gt; TCP_NODELAY

Usage
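
Before the project-specific examples, here is a minimal, self-contained sketch of the common pattern: enabling TCP_NODELAY on a client Bootstrap. The host and port are placeholders rather than values taken from any project below.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public final class TcpNoDelayExample {
    public static void main(String[] args) throws Exception {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap()
                    .group(group)
                    .channel(NioSocketChannel.class)
                    // Disable Nagle's algorithm: flush small writes immediately
                    // instead of waiting to coalesce them into larger segments.
                    .option(ChannelOption.TCP_NODELAY, true)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // Protocol handlers would be added here.
                        }
                    });
            // Placeholder endpoint; substitute a real host and port.
            Channel ch = b.connect("example.com", 80).sync().channel();
            ch.close().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}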

From source file: org.apache.drill.exec.rpc.BasicClient.java

License: Apache License

public BasicClient(RpcConfig rpcMapping, ByteBufAllocator alloc, EventLoopGroup eventLoopGroup, T handshakeType,
        Class<HANDSHAKE_RESPONSE> responseClass, Parser<HANDSHAKE_RESPONSE> handshakeParser) {
    super(rpcMapping);
    this.responseClass = responseClass;
    this.handshakeType = handshakeType;
    this.handshakeParser = handshakeParser;
    final long timeoutInMillis = rpcMapping.hasTimeout()
            ? (long) (rpcMapping.getTimeout() * 1000.0 * PERCENT_TIMEOUT_BEFORE_SENDING_PING)
            : -1;
    this.pingHandler = rpcMapping.hasTimeout() ? new IdlePingHandler(timeoutInMillis) : null;

    b = new Bootstrap() //
            .group(eventLoopGroup) //
            .channel(TransportCheck.getClientSocketChannel()) //
            .option(ChannelOption.ALLOCATOR, alloc) //
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 30 * 1000).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_RCVBUF, 1 << 17) //
            .option(ChannelOption.SO_SNDBUF, 1 << 17) //
            .option(ChannelOption.TCP_NODELAY, true).handler(new ChannelInitializer<SocketChannel>() {

                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    // logger.debug("initializing client connection.");
                    connection = initRemoteConnection(ch);

                    ch.closeFuture().addListener(getCloseHandler(ch, connection));

                    final ChannelPipeline pipe = ch.pipeline();

                    pipe.addLast("protocol-decoder", getDecoder(connection.getAllocator()));
                    pipe.addLast("message-decoder", new RpcDecoder("c-" + rpcConfig.getName()));
                    pipe.addLast("protocol-encoder", new RpcEncoder("c-" + rpcConfig.getName()));
                    pipe.addLast("handshake-handler", new ClientHandshakeHandler(connection));

                    if (pingHandler != null) {
                        pipe.addLast("idle-state-handler", pingHandler);
                    }

                    pipe.addLast("message-handler", new InboundHandler(connection));
                    pipe.addLast("exception-handler", new RpcExceptionHandler<R>(connection));
                }
            }); //

    // if(TransportCheck.SUPPORTS_EPOLL){
    // b.option(EpollChannelOption.SO_REUSEPORT, true); //
    // }
}
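
Two details worth noting in this client: the send and receive buffers are both set to 1 &lt;&lt; 17 bytes (128 KiB), and when the RPC config defines a timeout, an idle ping handler is armed at a fraction of that timeout (PERCENT_TIMEOUT_BEFORE_SENDING_PING) so the connection is probed before the peer would give up on it.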

From source file: org.apache.drill.exec.rpc.BasicServer.java

License: Apache License

public BasicServer(final RpcConfig rpcMapping, ByteBufAllocator alloc, EventLoopGroup eventLoopGroup) {
    super(rpcMapping);
    this.eventLoopGroup = eventLoopGroup;

    b = new ServerBootstrap().channel(TransportCheck.getServerSocketChannel())
            .option(ChannelOption.SO_BACKLOG, 1000).option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 30 * 1000)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_RCVBUF, 1 << 17).option(ChannelOption.SO_SNDBUF, 1 << 17)
            .group(eventLoopGroup) //
            .childOption(ChannelOption.ALLOCATOR, alloc)

            // .handler(new LoggingHandler(LogLevel.INFO))

            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    //            logger.debug("Starting initialization of server connection.");
                    C connection = initRemoteConnection(ch);
                    ch.closeFuture().addListener(getCloseHandler(ch, connection));

                    final ChannelPipeline pipe = ch.pipeline();
                    pipe.addLast("protocol-decoder",
                            getDecoder(connection.getAllocator(), getOutOfMemoryHandler()));
                    pipe.addLast("message-decoder", new RpcDecoder("s-" + rpcConfig.getName()));
                    pipe.addLast("protocol-encoder", new RpcEncoder("s-" + rpcConfig.getName()));
                    pipe.addLast("handshake-handler", getHandshakeHandler(connection));

                    if (rpcMapping.hasTimeout()) {
                        pipe.addLast(TIMEOUT_HANDLER,
                                new LogggingReadTimeoutHandler(connection, rpcMapping.getTimeout()));
                    }

                    pipe.addLast("message-handler", new InboundHandler(connection));
                    pipe.addLast("exception-handler", new RpcExceptionHandler<C>(connection));

                    connect = true;
                    //            logger.debug("Server connection initialization completed.");
                }
            });

    //     if(TransportCheck.SUPPORTS_EPOLL){
    //       b.option(EpollChannelOption.SO_REUSEPORT, true); //
    //     }
}
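
Note that this server sets TCP_NODELAY (and the buffer sizes) via option(...) on the ServerBootstrap, which configures the listening channel rather than the accepted connections; TCP_NODELAY is not meaningful on a server socket, and Netty typically logs an unknown-option warning in that case. The per-connection equivalent is childOption(...), as the Dubbo and Giraph servers below use. A minimal sketch of the distinction (class and method names here are illustrative only):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public final class ChildOptionSketch {
    public static ServerBootstrap configure() {
        return new ServerBootstrap()
                .group(new NioEventLoopGroup(1), new NioEventLoopGroup())
                .channel(NioServerSocketChannel.class)
                // option(...) applies to the listening (parent) channel only.
                .option(ChannelOption.SO_BACKLOG, 1000)
                // childOption(...) applies to every accepted connection.
                .childOption(ChannelOption.TCP_NODELAY, true)
                .childOption(ChannelOption.SO_REUSEADDR, true);
    }
}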

From source file: org.apache.dubbo.qos.server.Server.java

License: Apache License

/**
 * Start the server and bind the port.
 */
public void start() throws Throwable {
    if (!started.compareAndSet(false, true)) {
        return;
    }
    boss = new NioEventLoopGroup(1, new DefaultThreadFactory("qos-boss", true));
    worker = new NioEventLoopGroup(0, new DefaultThreadFactory("qos-worker", true));
    ServerBootstrap serverBootstrap = new ServerBootstrap();
    serverBootstrap.group(boss, worker);
    serverBootstrap.channel(NioServerSocketChannel.class);
    serverBootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, true);
    serverBootstrap.childHandler(new ChannelInitializer<Channel>() {

        @Override
        protected void initChannel(Channel ch) throws Exception {
            ch.pipeline().addLast(new QosProcessHandler(welcome, acceptForeignIp));
        }
    });
    try {
        serverBootstrap.bind(port).sync();
        logger.info("qos-server bind localhost:" + port);
    } catch (Throwable throwable) {
        logger.error("qos-server can not bind localhost:" + port, throwable);
        throw throwable;
    }
}
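
Here the options are applied with childOption(...), so they reach each accepted connection. The boss group gets a single thread, while new NioEventLoopGroup(0, ...) asks Netty for its default worker count (twice the number of available processors, unless overridden); both thread factories are created as daemon threads, so the qos server never keeps the JVM alive on its own.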

From source file: org.apache.dubbo.remoting.transport.netty4.NettyClient.java

License: Apache License

/**
 * Init bootstrap
 *
 * @throws Throwable
 */
@Override
protected void doOpen() throws Throwable {
    final NettyClientHandler nettyClientHandler = new NettyClientHandler(getUrl(), this);
    bootstrap = new Bootstrap();
    bootstrap.group(nioEventLoopGroup).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            //.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getTimeout())
            .channel(NioSocketChannel.class);

    if (getConnectTimeout() < 3000) {
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000);
    } else {
        bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getConnectTimeout());
    }

    bootstrap.handler(new ChannelInitializer() {

        @Override
        protected void initChannel(Channel ch) throws Exception {
            int heartbeatInterval = UrlUtils.getHeartbeat(getUrl());
            NettyCodecAdapter adapter = new NettyCodecAdapter(getCodec(), getUrl(), NettyClient.this);
            ch.pipeline()//.addLast("logging",new LoggingHandler(LogLevel.INFO))//for debug
                    .addLast("decoder", adapter.getDecoder()).addLast("encoder", adapter.getEncoder())
                    .addLast("client-idle-handler", new IdleStateHandler(heartbeatInterval, 0, 0, MILLISECONDS))
                    .addLast("handler", nettyClientHandler);
            String socksProxyHost = ConfigUtils.getProperty(SOCKS_PROXY_HOST);
            if (socksProxyHost != null) {
                int socksProxyPort = Integer
                        .parseInt(ConfigUtils.getProperty(SOCKS_PROXY_PORT, DEFAULT_SOCKS_PROXY_PORT));
                Socks5ProxyHandler socks5ProxyHandler = new Socks5ProxyHandler(
                        new InetSocketAddress(socksProxyHost, socksProxyPort));
                ch.pipeline().addFirst(socks5ProxyHandler);
            }
        }
    });
}
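
The connect timeout is floored at 3000 ms; the if/else is equivalent to bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, Math.max(3000, getConnectTimeout())).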

From source file: org.apache.dubbo.remoting.transport.netty4.NettyServer.java

License: Apache License

/**
 * Init and start netty server
 *
 * @throws Throwable
 */
@Override
protected void doOpen() throws Throwable {
    bootstrap = new ServerBootstrap();

    bossGroup = new NioEventLoopGroup(1, new DefaultThreadFactory("NettyServerBoss", true));
    workerGroup = new NioEventLoopGroup(
            getUrl().getPositiveParameter(IO_THREADS_KEY, Constants.DEFAULT_IO_THREADS),
            new DefaultThreadFactory("NettyServerWorker", true));

    final NettyServerHandler nettyServerHandler = new NettyServerHandler(getUrl(), this);
    channels = nettyServerHandler.getChannels();

    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.TCP_NODELAY, Boolean.TRUE)
            .childOption(ChannelOption.SO_REUSEADDR, Boolean.TRUE)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childHandler(new ChannelInitializer<NioSocketChannel>() {
                @Override
                protected void initChannel(NioSocketChannel ch) throws Exception {
                    // FIXME: should we use getTimeout()?
                    int idleTimeout = UrlUtils.getIdleTimeout(getUrl());
                    NettyCodecAdapter adapter = new NettyCodecAdapter(getCodec(), getUrl(), NettyServer.this);
                    ch.pipeline()//.addLast("logging",new LoggingHandler(LogLevel.INFO))//for debug
                            .addLast("decoder", adapter.getDecoder()).addLast("encoder", adapter.getEncoder())
                            .addLast("server-idle-handler",
                                    new IdleStateHandler(0, 0, idleTimeout, MILLISECONDS))
                            .addLast("handler", nettyServerHandler);
                }
            });
    // bind
    ChannelFuture channelFuture = bootstrap.bind(getBindAddress());
    channelFuture.syncUninterruptibly();
    channel = channelFuture.channel();

}
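
The Dubbo client and server pair their idle handlers symmetrically: the client's IdleStateHandler(heartbeatInterval, 0, 0, MILLISECONDS) fires on read idleness to trigger heartbeats, while the server's IdleStateHandler(0, 0, idleTimeout, MILLISECONDS) fires on all idleness to close connections that have gone quiet.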

From source file: org.apache.flink.runtime.io.network.netty.NettyClient.java

License: Apache License

void init(final NettyProtocol protocol, NettyBufferPool nettyBufferPool) throws IOException {
    checkState(bootstrap == null, "Netty client has already been initialized.");

    long start = System.currentTimeMillis();

    bootstrap = new Bootstrap();

    // --------------------------------------------------------------------
    // Transport-specific configuration
    // --------------------------------------------------------------------

    switch (config.getTransportType()) {
    case NIO:
        initNioBootstrap();
        break;

    case EPOLL:
        initEpollBootstrap();
        break;

    case AUTO:
        if (Epoll.isAvailable()) {
            initEpollBootstrap();
            LOG.info("Transport type 'auto': using EPOLL.");
        } else {
            initNioBootstrap();
            LOG.info("Transport type 'auto': using NIO.");
        }
    }

    // --------------------------------------------------------------------
    // Configuration
    // --------------------------------------------------------------------

    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);

    // Timeout for new connections
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getClientConnectTimeoutSeconds() * 1000);

    // Pooled allocator for Netty's ByteBuf instances
    bootstrap.option(ChannelOption.ALLOCATOR, nettyBufferPool);

    // Receive and send buffer size
    int receiveAndSendBufferSize = config.getSendAndReceiveBufferSize();
    if (receiveAndSendBufferSize > 0) {
        bootstrap.option(ChannelOption.SO_SNDBUF, receiveAndSendBufferSize);
        bootstrap.option(ChannelOption.SO_RCVBUF, receiveAndSendBufferSize);
    }

    // --------------------------------------------------------------------
    // Child channel pipeline for accepted connections
    // --------------------------------------------------------------------

    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            channel.pipeline().addLast(protocol.getClientChannelHandlers());
        }
    });

    long end = System.currentTimeMillis();
    LOG.info("Successful initialization (took {} ms).", (end - start));
}
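
Note the unit conversion: CONNECT_TIMEOUT_MILLIS expects milliseconds, so the configured seconds are multiplied by 1000. SO_SNDBUF and SO_RCVBUF are only overridden when a positive size is configured; otherwise the operating-system defaults stay in effect.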

From source file: org.apache.flink.runtime.io.network.netty.NettyConnectionManager.java

License: Apache License

@Override
public void start(ChannelManager channelManager) throws IOException {
    LOG.info(String.format("Starting with %d incoming and %d outgoing connection threads.", numInThreads,
            numOutThreads));
    LOG.info(String.format("Setting low water mark to %d and high water mark to %d bytes.", lowWaterMark,
            highWaterMark));
    LOG.info(String.format("Close channels after idle for %d ms.", closeAfterIdleForMs));

    final BufferProviderBroker bufferProviderBroker = channelManager;
    final EnvelopeDispatcher envelopeDispatcher = channelManager;

    int numHeapArenas = 0;
    int numDirectArenas = numInThreads + numOutThreads;
    int pageSize = bufferSize << 1;
    int chunkSize = 16 << 20; // 16 MB

    // shift pageSize maxOrder times to get to chunkSize
    int maxOrder = (int) (Math.log(chunkSize / pageSize) / Math.log(2));

    PooledByteBufAllocator pooledByteBufAllocator = new PooledByteBufAllocator(true, numHeapArenas,
            numDirectArenas, pageSize, maxOrder);

    String msg = String.format(
            "Instantiated PooledByteBufAllocator with direct arenas: %d, heap arenas: %d, "
                    + "page size (bytes): %d, chunk size (bytes): %d.",
            numDirectArenas, numHeapArenas, pageSize, (pageSize << maxOrder));
    LOG.info(msg);

    // --------------------------------------------------------------------
    // server bootstrap (incoming connections)
    // --------------------------------------------------------------------
    in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(numInThreads)).channel(NioServerSocketChannel.class)
            .localAddress(bindAddress, bindPort).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new InboundEnvelopeDecoder(bufferProviderBroker))
                            .addLast(new InboundEnvelopeDispatcher(envelopeDispatcher));
                }
            }).option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(pageSize))
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator);

    // --------------------------------------------------------------------
    // client bootstrap (outgoing connections)
    // --------------------------------------------------------------------
    out = new Bootstrap();
    out.group(new NioEventLoopGroup(numOutThreads)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new OutboundEnvelopeEncoder());
                }
            }).option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, lowWaterMark)
            .option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, highWaterMark)
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator).option(ChannelOption.TCP_NODELAY, false)
            .option(ChannelOption.SO_KEEPALIVE, true);

    try {
        in.bind().sync();
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (LOG.isDebugEnabled()) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                Date date = new Date();

                while (true) {
                    try {
                        Thread.sleep(DEBUG_PRINT_QUEUED_ENVELOPES_EVERY_MS);

                        date.setTime(System.currentTimeMillis());

                        System.out.println(date);
                        System.out.println(getNonZeroNumQueuedEnvelopes());
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }).start();
    }
}
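
Unlike the latency-sensitive RPC clients above, this data-plane bootstrap sets TCP_NODELAY to false, leaving Nagle's algorithm on so that small envelopes can be coalesced into fuller segments (Nagle is on by OS default anyway, so the explicit false mainly documents the choice); backpressure is instead managed through the write-buffer low and high water marks.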

From source file: org.apache.flink.runtime.io.network.netty.OutboundConnectionQueueTest.java

License: Apache License

/**
 * Verifies that concurrent enqueue and close events are handled
 * correctly.
 */
private void doTestConcurrentEnqueueAndClose(final int numProducers, final int numEnvelopesPerProducer,
        final int minSleepTimeMs, final int maxSleepTimeMs) throws Exception {

    final InetAddress bindHost = InetAddress.getLocalHost();
    final int bindPort = 20000;

    // Testing concurrent enqueue and close requires real TCP channels,
    // because Netty's testing EmbeddedChannel does not implement the
    // same threading model as the NioEventLoopGroup (for example there
    // is no difference between being IN and OUTSIDE of the event loop
    // thread).

    final ServerBootstrap in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(1)).channel(NioServerSocketChannel.class).localAddress(bindHost, bindPort)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelInboundHandlerAdapter());
                }
            });

    final Bootstrap out = new Bootstrap();
    out.group(new NioEventLoopGroup(1)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new ChannelOutboundHandlerAdapter());
                }
            }).option(ChannelOption.TCP_NODELAY, false).option(ChannelOption.SO_KEEPALIVE, true);

    in.bind().sync();

    // --------------------------------------------------------------------

    // The testing pipeline looks as follows:
    // - Test Verification Handler [OUT]
    // - Test Control Handler [IN]
    // - Idle State Handler [IN/OUT] [added by OutboundConnectionQueue]
    // - Outbound queue (SUT) [IN] [added by OutboundConnectionQueue]

    channel = out.connect(bindHost, bindPort).sync().channel();

    queue = new OutboundConnectionQueue(channel, receiver, connectionManager, 0);

    controller = new TestControlHandler(true);
    verifier = new TestVerificationHandler();

    channel.pipeline().addFirst("Test Control Handler", controller);
    channel.pipeline().addFirst("Test Verification Handler", verifier);

    // --------------------------------------------------------------------

    final Random rand = new Random(RANDOM_SEED);

    // Every producer works on their local reference of the queue and only
    // updates it to the new channel when enqueue returns false, which
    // should only happen if the channel has been closed.
    final ConcurrentMap<ChannelID, OutboundConnectionQueue> producerQueues = new ConcurrentHashMap<ChannelID, OutboundConnectionQueue>();

    final ChannelID[] ids = new ChannelID[numProducers];

    for (int i = 0; i < numProducers; i++) {
        ids[i] = new ChannelID();

        producerQueues.put(ids[i], queue);
    }

    final CountDownLatch receivedAllEnvelopesLatch = verifier.waitForEnvelopes(numEnvelopesPerProducer - 1,
            ids);

    final List<Channel> closedChannels = new ArrayList<Channel>();

    // --------------------------------------------------------------------

    final Runnable closer = new Runnable() {
        @Override
        public void run() {
            while (receivedAllEnvelopesLatch.getCount() != 0) {
                try {
                    controller.fireIdle();

                    // Test two idle events arriving "closely"
                    // after each other
                    if (rand.nextBoolean()) {
                        controller.fireIdle();
                    }

                    Thread.sleep(minSleepTimeMs / 2);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };

    final Runnable[] producers = new Runnable[numProducers];

    for (int i = 0; i < numProducers; i++) {
        final int index = i;

        producers[i] = new Runnable() {
            @Override
            public void run() {
                final JobID jid = new JobID();
                final ChannelID cid = ids[index];

                for (int j = 0; j < numEnvelopesPerProducer; j++) {
                    OutboundConnectionQueue localQueue = producerQueues.get(cid);

                    try {
                        // This code path is handled by the NetworkConnectionManager
                        // in production to enqueue the envelope either to the current
                        // channel or a new one if it was closed.
                        while (!localQueue.enqueue(new Envelope(j, jid, cid))) {
                            synchronized (lock) {
                                if (localQueue == queue) {
                                    closedChannels.add(channel);

                                    channel = out.connect(bindHost, bindPort).sync().channel();

                                    queue = new OutboundConnectionQueue(channel, receiver, connectionManager,
                                            0);

                                    channel.pipeline().addFirst("Test Control Handler", controller);
                                    channel.pipeline().addFirst("Test Verification Handler", verifier);
                                }
                            }

                            producerQueues.put(cid, queue);
                            localQueue = queue;
                        }

                        int sleepTime = rand.nextInt((maxSleepTimeMs - minSleepTimeMs) + 1) + minSleepTimeMs;
                        Thread.sleep(sleepTime);
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        };
    }

    for (int i = 0; i < numProducers; i++) {
        new Thread(producers[i], "Producer " + i).start();
    }

    new Thread(closer, "Closer").start();

    // --------------------------------------------------------------------

    while (receivedAllEnvelopesLatch.getCount() != 0) {
        receivedAllEnvelopesLatch.await();
    }

    // Final close, if the last close didn't make it.
    synchronized (lock) {
        if (channel != null) {
            controller.fireIdle();
        }
    }

    verifier.waitForClose();

    // If the producers do not sleep after each envelope, the close
    // should not make it through and no channel should have been
    // added to the list of closed channels
    if (minSleepTimeMs == 0 && maxSleepTimeMs == 0) {
        Assert.assertEquals(0, closedChannels.size());
    }

    for (Channel ch : closedChannels) {
        Assert.assertFalse(ch.isOpen());
    }

    System.out.println(closedChannels.size() + " channels were closed during execution.");

    out.group().shutdownGracefully().sync();
    in.group().shutdownGracefully().sync();
}

From source file: org.apache.giraph.comm.netty.NettyClient.java

License: Apache License

/**
 * Only constructor
 *
 * @param context Context for progress
 * @param conf Configuration
 * @param myTaskInfo Current task info
 * @param exceptionHandler handler for uncaught exception. Will
 *                         terminate job.
 */
public NettyClient(Mapper<?, ?, ?, ?>.Context context, final ImmutableClassesGiraphConfiguration conf,
        TaskInfo myTaskInfo, final Thread.UncaughtExceptionHandler exceptionHandler) {
    this.context = context;
    this.myTaskInfo = myTaskInfo;
    this.channelsPerServer = GiraphConstants.CHANNELS_PER_SERVER.get(conf);
    sendBufferSize = CLIENT_SEND_BUFFER_SIZE.get(conf);
    receiveBufferSize = CLIENT_RECEIVE_BUFFER_SIZE.get(conf);

    limitNumberOfOpenRequests = conf.getBoolean(LIMIT_NUMBER_OF_OPEN_REQUESTS,
            LIMIT_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
    if (limitNumberOfOpenRequests) {
        maxNumberOfOpenRequests = conf.getInt(MAX_NUMBER_OF_OPEN_REQUESTS, MAX_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Limit number of open requests to " + maxNumberOfOpenRequests);
        }
    } else {
        maxNumberOfOpenRequests = -1;
    }

    maxRequestMilliseconds = MAX_REQUEST_MILLISECONDS.get(conf);

    maxConnectionFailures = NETTY_MAX_CONNECTION_FAILURES.get(conf);

    waitingRequestMsecs = WAITING_REQUEST_MSECS.get(conf);

    maxPoolSize = GiraphConstants.NETTY_CLIENT_THREADS.get(conf);

    maxResolveAddressAttempts = MAX_RESOLVE_ADDRESS_ATTEMPTS.get(conf);

    clientRequestIdRequestInfoMap = new MapMaker().concurrencyLevel(maxPoolSize).makeMap();

    handlerToUseExecutionGroup = NETTY_CLIENT_EXECUTION_AFTER_HANDLER.get(conf);
    useExecutionGroup = NETTY_CLIENT_USE_EXECUTION_HANDLER.get(conf);
    if (useExecutionGroup) {
        int executionThreads = NETTY_CLIENT_EXECUTION_THREADS.get(conf);
        executionGroup = new DefaultEventExecutorGroup(executionThreads,
                ThreadUtils.createThreadFactory("netty-client-exec-%d", exceptionHandler));
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Using execution handler with " + executionThreads + " threads after "
                    + handlerToUseExecutionGroup + ".");
        }
    } else {
        executionGroup = null;
    }

    workerGroup = new NioEventLoopGroup(maxPoolSize,
            ThreadUtils.createThreadFactory("netty-client-worker-%d", exceptionHandler));

    bootstrap = new Bootstrap();
    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, MAX_CONNECTION_MILLISECONDS_DEFAULT)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.SO_SNDBUF, sendBufferSize).option(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    /*if_not[HADOOP_NON_SECURE]*/
                    if (conf.authenticate()) {
                        LOG.info("Using Netty with authentication.");

                        // Our pipeline starts with just byteCounter, and then we use
                        // addLast() to incrementally add pipeline elements, so that we
                        // can name them for identification for removal or replacement
                        // after client is authenticated by server.
                        // After authentication is complete, the pipeline's SASL-specific
                        // functionality is removed, restoring the pipeline to exactly the
                        // same configuration as it would be without authentication.
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        // The following pipeline component is needed to decode the
                        // server's SASL tokens. It is replaced with a
                        // FixedLengthFrameDecoder (same as used with the
                        // non-authenticated pipeline) after authentication
                        // completes (as in non-auth pipeline below).
                        PipelineUtils.addLastWithExecutorCheck("length-field-based-frame-decoder",
                                new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4), handlerToUseExecutionGroup,
                                executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        // The following pipeline component responds to the server's SASL
                        // tokens with its own responses. Both client and server share the
                        // same Hadoop Job token, which is used to create the SASL
                        // tokens to authenticate with each other.
                        // After authentication finishes, this pipeline component
                        // is removed.
                        PipelineUtils.addLastWithExecutorCheck("sasl-client-handler",
                                new SaslClientHandler(conf), handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                    } else {
                        LOG.info("Using Netty without authentication.");
                        /*end[HADOOP_NON_SECURE]*/
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("fixed-length-frame-decoder",
                                new FixedLengthFrameDecoder(RequestServerHandler.RESPONSE_BYTES),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);

                        /*if_not[HADOOP_NON_SECURE]*/
                    }
                    /*end[HADOOP_NON_SECURE]*/
                }
            });
}
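
Both branches of this initializer build the same core pipeline (byte counters, optional compression codec, a frame decoder, RequestEncoder, ResponseClientHandler); the authenticated branch differs only in starting with a LengthFieldBasedFrameDecoder for variable-length SASL tokens and inserting a SaslClientHandler, both of which are swapped out once the handshake completes.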

From source file: org.apache.giraph.comm.netty.NettyServer.java

License: Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4,
                    receiveBufferSize, receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler", new AuthorizeServerHandler(),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debugability from the port number alone.
    // Round up the max number of workers to the next power of 10 and use
    // it as a constant to increase the port number with.
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt " + "is true, port "
                        + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to " + maxPoolSize
                + " threads on bind attempt " + bindAttempts + " with sendBufferSize = " + sendBufferSize
                + " receiveBufferSize = " + receiveBufferSize);
    }
}