Example usage for io.netty.channel.ChannelOption.ALLOCATOR

Introduction

On this page you can find example usages of io.netty.channel.ChannelOption.ALLOCATOR.

Prototype

public static final ChannelOption<ByteBufAllocator> ALLOCATOR

To view the source code for io.netty.channel.ChannelOption.ALLOCATOR, click the Source Link.
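
Before the project examples below, here is a minimal sketch of the typical pattern: ChannelOption.ALLOCATOR is set on a Bootstrap (or ServerBootstrap) so that all ByteBuf instances for the resulting channels come from the given ByteBufAllocator. This sketch is not taken from any of the projects on this page; the NIO transport and the default pooled allocator are illustrative choices.

import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

Bootstrap bootstrap = new Bootstrap();
bootstrap.group(new NioEventLoopGroup()).channel(NioSocketChannel.class)
        // ByteBufs for this channel are served by the pooled allocator
        .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
        .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
                // add protocol handlers to ch.pipeline() here
            }
        });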

Usage

From source file: org.apache.flink.runtime.io.network.netty.NettyClient.java

License: Apache License

void init(final NettyProtocol protocol, NettyBufferPool nettyBufferPool) throws IOException {
    checkState(bootstrap == null, "Netty client has already been initialized.");

    long start = System.currentTimeMillis();

    bootstrap = new Bootstrap();

    // --------------------------------------------------------------------
    // Transport-specific configuration
    // --------------------------------------------------------------------

    switch (config.getTransportType()) {
    case NIO:
        initNioBootstrap();
        break;

    case EPOLL:
        initEpollBootstrap();
        break;

    case AUTO:
        if (Epoll.isAvailable()) {
            initEpollBootstrap();
            LOG.info("Transport type 'auto': using EPOLL.");
        } else {
            initNioBootstrap();
            LOG.info("Transport type 'auto': using NIO.");
        }
    }

    // --------------------------------------------------------------------
    // Configuration
    // --------------------------------------------------------------------

    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);

    // Timeout for new connections
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getClientConnectTimeoutSeconds() * 1000);

    // Pooled allocator for Netty's ByteBuf instances
    bootstrap.option(ChannelOption.ALLOCATOR, nettyBufferPool);

    // Receive and send buffer size
    int receiveAndSendBufferSize = config.getSendAndReceiveBufferSize();
    if (receiveAndSendBufferSize > 0) {
        bootstrap.option(ChannelOption.SO_SNDBUF, receiveAndSendBufferSize);
        bootstrap.option(ChannelOption.SO_RCVBUF, receiveAndSendBufferSize);
    }

    // --------------------------------------------------------------------
    // Channel pipeline for outgoing connections
    // --------------------------------------------------------------------

    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            channel.pipeline().addLast(protocol.getClientChannelHandlers());
        }
    });

    long end = System.currentTimeMillis();
    LOG.info("Successful initialization (took {} ms).", (end - start));
}

From source file: org.apache.flink.runtime.io.network.netty.NettyConnectionManager.java

License: Apache License

@Override
public void start(ChannelManager channelManager) throws IOException {
    LOG.info(String.format("Starting with %d incoming and %d outgoing connection threads.", numInThreads,
            numOutThreads));
    LOG.info(String.format("Setting low water mark to %d and high water mark to %d bytes.", lowWaterMark,
            highWaterMark));
    LOG.info(String.format("Close channels after idle for %d ms.", closeAfterIdleForMs));

    final BufferProviderBroker bufferProviderBroker = channelManager;
    final EnvelopeDispatcher envelopeDispatcher = channelManager;

    int numHeapArenas = 0;
    int numDirectArenas = numInThreads + numOutThreads;
    int pageSize = bufferSize << 1;
    int chunkSize = 16 << 20; // 16 MB

    // shift pageSize maxOrder times to get to chunkSize
    int maxOrder = (int) (Math.log(chunkSize / pageSize) / Math.log(2));
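    // e.g. if bufferSize is 32 KiB, pageSize is 64 KiB and maxOrder = log2(16 MiB / 64 KiB) = 8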

    PooledByteBufAllocator pooledByteBufAllocator = new PooledByteBufAllocator(true, numHeapArenas,
            numDirectArenas, pageSize, maxOrder);

    String msg = String.format(
            "Instantiated PooledByteBufAllocator with direct arenas: %d, heap arenas: %d, "
                    + "page size (bytes): %d, chunk size (bytes): %d.",
            numDirectArenas, numHeapArenas, pageSize, (pageSize << maxOrder));
    LOG.info(msg);

    // --------------------------------------------------------------------
    // server bootstrap (incoming connections)
    // --------------------------------------------------------------------
    in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(numInThreads)).channel(NioServerSocketChannel.class)
            .localAddress(bindAddress, bindPort).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new InboundEnvelopeDecoder(bufferProviderBroker))
                            .addLast(new InboundEnvelopeDispatcher(envelopeDispatcher));
                }
            }).option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(pageSize))
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator);

    // --------------------------------------------------------------------
    // client bootstrap (outgoing connections)
    // --------------------------------------------------------------------
    out = new Bootstrap();
    out.group(new NioEventLoopGroup(numOutThreads)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new OutboundEnvelopeEncoder());
                }
            }).option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, lowWaterMark)
            .option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, highWaterMark)
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator).option(ChannelOption.TCP_NODELAY, false)
            .option(ChannelOption.SO_KEEPALIVE, true);

    try {
        in.bind().sync();
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (LOG.isDebugEnabled()) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                Date date = new Date();

                while (true) {
                    try {
                        Thread.sleep(DEBUG_PRINT_QUEUED_ENVELOPES_EVERY_MS);

                        date.setTime(System.currentTimeMillis());

                        System.out.println(date);
                        System.out.println(getNonZeroNumQueuedEnvelopes());
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }).start();
    }
}

From source file: org.apache.flink.runtime.io.network.netty.NettyServer.java

License: Apache License

void init(final NettyProtocol protocol, NettyBufferPool nettyBufferPool) throws IOException {
    checkState(bootstrap == null, "Netty server has already been initialized.");

    long start = System.currentTimeMillis();

    bootstrap = new ServerBootstrap();

    // --------------------------------------------------------------------
    // Transport-specific configuration
    // --------------------------------------------------------------------

    switch (config.getTransportType()) {
    case NIO:
        initNioBootstrap();
        break;

    case EPOLL:
        initEpollBootstrap();
        break;

    case AUTO:
        if (Epoll.isAvailable()) {
            initEpollBootstrap();
            LOG.info("Transport type 'auto': using EPOLL.");
        } else {
            initNioBootstrap();
            LOG.info("Transport type 'auto': using NIO.");
        }
    }

    // --------------------------------------------------------------------
    // Configuration
    // --------------------------------------------------------------------

    // Server bind address
    bootstrap.localAddress(config.getServerAddress(), config.getServerPort());

    // Pooled allocators for Netty's ByteBuf instances
    bootstrap.option(ChannelOption.ALLOCATOR, nettyBufferPool);
    bootstrap.childOption(ChannelOption.ALLOCATOR, nettyBufferPool);

    if (config.getServerConnectBacklog() > 0) {
        bootstrap.option(ChannelOption.SO_BACKLOG, config.getServerConnectBacklog());
    }

    // Receive and send buffer size
    int receiveAndSendBufferSize = config.getSendAndReceiveBufferSize();
    if (receiveAndSendBufferSize > 0) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, receiveAndSendBufferSize);
        bootstrap.childOption(ChannelOption.SO_RCVBUF, receiveAndSendBufferSize);
    }

    // Low and high water marks for flow control
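    // (Netty reports Channel.isWritable() == false once queued outbound bytes exceed the
    // high water mark, and writable again only after they drop below the low water mark)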
    bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, config.getMemorySegmentSize() + 1);
    bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 2 * config.getMemorySegmentSize());

    // --------------------------------------------------------------------
    // Child channel pipeline for accepted connections
    // --------------------------------------------------------------------

    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            channel.pipeline().addLast(protocol.getServerChannelHandlers());
        }
    });

    // --------------------------------------------------------------------
    // Start Server
    // --------------------------------------------------------------------

    bindFuture = bootstrap.bind().syncUninterruptibly();

    long end = System.currentTimeMillis();
    LOG.info("Successful initialization (took {} ms). Listening on SocketAddress {}.", (end - start),
            bindFuture.channel().localAddress().toString());
}
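
Note how the server examples set the allocator twice: on a ServerBootstrap, option(...) configures the accepting (parent) channel itself, while childOption(...) configures each accepted connection, so covering both kinds of channel requires both calls. A minimal sketch, using the default pooled allocator as a stand-in for the examples' NettyBufferPool:

ServerBootstrap serverBootstrap = new ServerBootstrap();
serverBootstrap
        // parent (accepting) channel
        .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
        // each accepted child channel
        .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);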

From source file: org.apache.flink.runtime.query.netty.KvStateClient.java

License: Apache License

/**
 * Creates a client with the specified number of event loop threads.
 *
 * @param numEventLoopThreads Number of event loop threads (minimum 1).
 */
public KvStateClient(int numEventLoopThreads, KvStateRequestStats stats) {
    Preconditions.checkArgument(numEventLoopThreads >= 1, "Non-positive number of event loop threads.");
    NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads);

    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("Flink KvStateClient Event Loop Thread %d").build();

    NioEventLoopGroup nioGroup = new NioEventLoopGroup(numEventLoopThreads, threadFactory);

    this.bootstrap = new Bootstrap().group(nioGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.ALLOCATOR, bufferPool).handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
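                            // LengthFieldBasedFrameDecoder args: maxFrameLength, lengthFieldOffset,
                            // lengthFieldLength, lengthAdjustment, initialBytesToStrip; each frame carries
                            // a 4-byte length prefix, which is stripped before later handlers see the payload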
                            // ChunkedWriteHandler respects Channel writability
                            .addLast(new ChunkedWriteHandler());
                }
            });

    this.stats = Preconditions.checkNotNull(stats, "Statistics tracker");
}

From source file: org.apache.flink.runtime.query.netty.KvStateServer.java

License: Apache License

/**
 * Creates the {@link KvStateServer}.
 *
 * <p>The server needs to be started via {@link #start()} in order to bind
 * to the configured bind address.
 *
 * @param bindAddress         Address to bind to
 * @param bindPort            Port to bind to. Pick random port if 0.
 * @param numEventLoopThreads Number of event loop threads
 * @param numQueryThreads     Number of query threads
 * @param kvStateRegistry     KvStateRegistry to query for KvState instances
 * @param stats               Statistics tracker
 */
public KvStateServer(InetAddress bindAddress, int bindPort, int numEventLoopThreads, int numQueryThreads,
        KvStateRegistry kvStateRegistry, KvStateRequestStats stats) {

    Preconditions.checkArgument(bindPort >= 0 && bindPort <= 65535,
            "Port " + bindPort + " is out of valid port range (0-65535).");

    Preconditions.checkArgument(numEventLoopThreads >= 1, "Non-positive number of event loop threads.");
    Preconditions.checkArgument(numQueryThreads >= 1, "Non-positive number of query threads.");

    Preconditions.checkNotNull(kvStateRegistry, "KvStateRegistry");
    Preconditions.checkNotNull(stats, "KvStateRequestStats");

    NettyBufferPool bufferPool = new NettyBufferPool(numEventLoopThreads);

    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("Flink KvStateServer EventLoop Thread %d").build();

    NioEventLoopGroup nioGroup = new NioEventLoopGroup(numEventLoopThreads, threadFactory);

    queryExecutor = createQueryExecutor(numQueryThreads);

    // Shared between all channels
    KvStateServerHandler serverHandler = new KvStateServerHandler(kvStateRegistry, queryExecutor, stats);

    bootstrap = new ServerBootstrap()
            // Bind address and port
            .localAddress(bindAddress, bindPort)
            // NIO server channels
            .group(nioGroup).channel(NioServerSocketChannel.class)
            // Server channel Options
            .option(ChannelOption.ALLOCATOR, bufferPool)
            // Child channel options
            .childOption(ChannelOption.ALLOCATOR, bufferPool)
            .childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, LOW_WATER_MARK)
            .childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, HIGH_WATER_MARK)
            // See initializer for pipeline details
            .childHandler(new KvStateServerChannelInitializer(serverHandler));
}

From source file: org.apache.geode.redis.GeodeRedisServer.java

License: Apache License

/**
 * Helper method to start the server listening for connections. The server is bound to the port
 * specified by {@link GeodeRedisServer#serverPort}
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void startRedisServer() throws IOException, InterruptedException {
    ThreadFactory selectorThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-SelectorThread-" + counter.incrementAndGet());
            t.setDaemon(true);
            return t;
        }

    };

    ThreadFactory workerThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GeodeRedisServer-WorkerThread-" + counter.incrementAndGet());
            return t;
        }

    };

    bossGroup = null;
    workerGroup = null;
    Class<? extends ServerChannel> socketClass = null;
    if (singleThreadPerConnection) {
        bossGroup = new OioEventLoopGroup(Integer.MAX_VALUE, selectorThreadFactory);
        workerGroup = new OioEventLoopGroup(Integer.MAX_VALUE, workerThreadFactory);
        socketClass = OioServerSocketChannel.class;
    } else {
        bossGroup = new NioEventLoopGroup(this.numSelectorThreads, selectorThreadFactory);
        workerGroup = new NioEventLoopGroup(this.numWorkerThreads, workerThreadFactory);
        socketClass = NioServerSocketChannel.class;
    }
    InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem();
    String pwd = system.getConfig().getRedisPassword();
    final byte[] pwdB = Coder.stringToBytes(pwd);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(socketClass).childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            if (logger.fineEnabled())
                logger.fine("GeodeRedisServer-Connection established with " + ch.remoteAddress());
            ChannelPipeline p = ch.pipeline();
            p.addLast(ByteToCommandDecoder.class.getSimpleName(), new ByteToCommandDecoder());
            p.addLast(ExecutionHandlerContext.class.getSimpleName(),
                    new ExecutionHandlerContext(ch, cache, regionCache, GeodeRedisServer.this, pwdB));
        }
    }).option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.SO_RCVBUF, getBufferSize())
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, GeodeRedisServer.connectTimeoutMillis)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    // Bind and start to accept incoming connections.
    ChannelFuture f = b.bind(new InetSocketAddress(getBindAddress(), serverPort)).sync();
    if (this.logger.infoEnabled()) {
        String logMessage = "GeodeRedisServer started {" + getBindAddress() + ":" + serverPort
                + "}, Selector threads: " + this.numSelectorThreads;
        if (this.singleThreadPerConnection)
            logMessage += ", One worker thread per connection";
        else
            logMessage += ", Worker threads: " + this.numWorkerThreads;
        this.logger.info(logMessage);
    }
    this.serverChannel = f.channel();
}

From source file: org.apache.giraph.comm.netty.NettyClient.java

License: Apache License

/**
 * Only constructor
 *
 * @param context Context for progress
 * @param conf Configuration
 * @param myTaskInfo Current task info
 * @param exceptionHandler handler for uncaught exception. Will
 *                         terminate job.
 */
public NettyClient(Mapper<?, ?, ?, ?>.Context context, final ImmutableClassesGiraphConfiguration conf,
        TaskInfo myTaskInfo, final Thread.UncaughtExceptionHandler exceptionHandler) {
    this.context = context;
    this.myTaskInfo = myTaskInfo;
    this.channelsPerServer = GiraphConstants.CHANNELS_PER_SERVER.get(conf);
    sendBufferSize = CLIENT_SEND_BUFFER_SIZE.get(conf);
    receiveBufferSize = CLIENT_RECEIVE_BUFFER_SIZE.get(conf);

    limitNumberOfOpenRequests = conf.getBoolean(LIMIT_NUMBER_OF_OPEN_REQUESTS,
            LIMIT_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
    if (limitNumberOfOpenRequests) {
        maxNumberOfOpenRequests = conf.getInt(MAX_NUMBER_OF_OPEN_REQUESTS, MAX_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Limit number of open requests to " + maxNumberOfOpenRequests);
        }
    } else {
        maxNumberOfOpenRequests = -1;
    }

    maxRequestMilliseconds = MAX_REQUEST_MILLISECONDS.get(conf);

    maxConnectionFailures = NETTY_MAX_CONNECTION_FAILURES.get(conf);

    waitingRequestMsecs = WAITING_REQUEST_MSECS.get(conf);

    maxPoolSize = GiraphConstants.NETTY_CLIENT_THREADS.get(conf);

    maxResolveAddressAttempts = MAX_RESOLVE_ADDRESS_ATTEMPTS.get(conf);

    clientRequestIdRequestInfoMap = new MapMaker().concurrencyLevel(maxPoolSize).makeMap();

    handlerToUseExecutionGroup = NETTY_CLIENT_EXECUTION_AFTER_HANDLER.get(conf);
    useExecutionGroup = NETTY_CLIENT_USE_EXECUTION_HANDLER.get(conf);
    if (useExecutionGroup) {
        int executionThreads = NETTY_CLIENT_EXECUTION_THREADS.get(conf);
        executionGroup = new DefaultEventExecutorGroup(executionThreads,
                ThreadUtils.createThreadFactory("netty-client-exec-%d", exceptionHandler));
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Using execution handler with " + executionThreads + " threads after "
                    + handlerToUseExecutionGroup + ".");
        }
    } else {
        executionGroup = null;
    }

    workerGroup = new NioEventLoopGroup(maxPoolSize,
            ThreadUtils.createThreadFactory("netty-client-worker-%d", exceptionHandler));

    bootstrap = new Bootstrap();
    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, MAX_CONNECTION_MILLISECONDS_DEFAULT)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.SO_SNDBUF, sendBufferSize).option(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    /*if_not[HADOOP_NON_SECURE]*/
                    if (conf.authenticate()) {
                        LOG.info("Using Netty with authentication.");

                        // Our pipeline starts with just byteCounter, and then we use
                        // addLast() to incrementally add pipeline elements, so that we
                        // can name them for identification for removal or replacement
                        // after client is authenticated by server.
                        // After authentication is complete, the pipeline's SASL-specific
                        // functionality is removed, restoring the pipeline to exactly the
                        // same configuration as it would be without authentication.
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        // The following pipeline component is needed to decode the
                        // server's SASL tokens. It is replaced with a
                        // FixedLengthFrameDecoder (same as used with the
                        // non-authenticated pipeline) after authentication
                        // completes (as in non-auth pipeline below).
                        PipelineUtils.addLastWithExecutorCheck("length-field-based-frame-decoder",
                                new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4), handlerToUseExecutionGroup,
                                executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        // The following pipeline component responds to the server's SASL
                        // tokens with its own responses. Both client and server share the
                        // same Hadoop Job token, which is used to create the SASL
                        // tokens to authenticate with each other.
                        // After authentication finishes, this pipeline component
                        // is removed.
                        PipelineUtils.addLastWithExecutorCheck("sasl-client-handler",
                                new SaslClientHandler(conf), handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                    } else {
                        LOG.info("Using Netty without authentication.");
                        /*end[HADOOP_NON_SECURE]*/
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("fixed-length-frame-decoder",
                                new FixedLengthFrameDecoder(RequestServerHandler.RESPONSE_BYTES),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);

                        /*if_not[HADOOP_NON_SECURE]*/
                    }
                    /*end[HADOOP_NON_SECURE]*/
                }
            });
}

From source file: org.apache.giraph.comm.netty.NettyServer.java

License: Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
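            // AdaptiveRecvByteBufAllocator(minimum, initial, maximum) grows or shrinks
            // receive buffer sizes dynamically within these bounds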
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4,
                    receiveBufferSize, receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler", new AuthorizeServerHandler(),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debuggability from the port number alone.
    // Round up the max number of workers to the next power of 10 and use
    // it as a constant to increase the port number with.
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt " + "is true, port "
                        + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to " + maxPoolSize
                + " threads on bind attempt " + bindAttempts + " with sendBufferSize = " + sendBufferSize
                + " receiveBufferSize = " + receiveBufferSize);
    }
}

From source file: org.apache.hadoop.hbase.ipc.NettyRpcServer.java

License: Apache License

public NettyRpcServer(final Server server, final String name, final List<BlockingServiceAndInterface> services,
        final InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler) throws IOException {
    super(server, name, services, bindAddress, conf, scheduler);
    this.bindAddress = bindAddress;
    boolean useEpoll = useEpoll(conf);
    int workerCount = conf.getInt("hbase.netty.rpc.server.worker.count",
            Runtime.getRuntime().availableProcessors() / 4);
    EventLoopGroup bossGroup = null;
    EventLoopGroup workerGroup = null;
    if (useEpoll) {
        bossGroup = new EpollEventLoopGroup(1);
        workerGroup = new EpollEventLoopGroup(workerCount);
    } else {
        bossGroup = new NioEventLoopGroup(1);
        workerGroup = new NioEventLoopGroup(workerCount);
    }
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup);
    if (useEpoll) {
        bootstrap.channel(EpollServerSocketChannel.class);
    } else {
        bootstrap.channel(NioServerSocketChannel.class);
    }
    bootstrap.childOption(ChannelOption.TCP_NODELAY, tcpNoDelay);
    bootstrap.childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive);
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.childHandler(new Initializer(maxRequestSize));

    try {
        serverChannel = bootstrap.bind(this.bindAddress).sync().channel();
        LOG.info("NettyRpcServer bind to address=" + serverChannel.localAddress()
                + ", hbase.netty.rpc.server.worker.count=" + workerCount + ", useEpoll=" + useEpoll);
        allChannels.add(serverChannel);
    } catch (InterruptedException e) {
        throw new InterruptedIOException(e.getMessage());
    }
    initReconfigurable(conf);
    this.scheduler.init(new RpcSchedulerContext(this));
}

From source file: org.apache.hyracks.http.server.HttpServer.java

License: Apache License

protected void doStart() throws InterruptedException {
    /*
     * This is a hacky way to ensure that IServlets with more specific paths are checked first.
     * For example:
     * "/path/to/resource/"
     * is checked before
     * "/path/to/"
     * which in turn is checked before
     * "/path/"
     * Note that it doesn't work for the case where multiple paths map to a single IServlet
     */
    Collections.sort(servlets, (l1, l2) -> l2.getPaths()[0].length() - l1.getPaths()[0].length());
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(RECEIVE_BUFFER_SIZE))
            .childOption(ChannelOption.AUTO_READ, Boolean.FALSE)
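            // with AUTO_READ disabled, reads must be requested explicitly via Channel.read(),
            // which gives the server control over backpressure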
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WRITE_BUFFER_WATER_MARK)
            .handler(new LoggingHandler(LogLevel.DEBUG)).childHandler(new HttpServerInitializer(this));
    channel = b.bind(port).sync().channel();
}