Example usage for io.netty.channel ChannelOption SO_SNDBUF

List of usage examples for io.netty.channel ChannelOption SO_SNDBUF

Introduction

On this page you can find example usage for io.netty.channel ChannelOption SO_SNDBUF.

Prototype

public static final ChannelOption<Integer> SO_SNDBUF
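
Before the project-specific examples under Usage, here is a minimal, self-contained sketch of the two common ways this option is applied: option(...) on a client Bootstrap, and childOption(...) on a ServerBootstrap so the value reaches each accepted connection rather than the listening socket. The class name SndBufSketch and the 64 KiB buffer size are illustrative assumptions only, not taken from any of the projects listed below.

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class SndBufSketch {

    // Client side: SO_SNDBUF is set directly on the connecting channel.
    static Bootstrap clientBootstrap(EventLoopGroup group, int sendBufferBytes) {
        return new Bootstrap()
                .group(group)
                .channel(NioSocketChannel.class)
                // Request a socket send buffer of the given size; the OS may adjust the value.
                .option(ChannelOption.SO_SNDBUF, sendBufferBytes)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        // application handlers would be added here
                    }
                });
    }

    // Server side: childOption(...) applies the buffer size to each accepted
    // child channel, not to the listening server socket itself.
    static ServerBootstrap serverBootstrap(EventLoopGroup boss, EventLoopGroup workers, int sendBufferBytes) {
        return new ServerBootstrap()
                .group(boss, workers)
                .channel(NioServerSocketChannel.class)
                .childOption(ChannelOption.SO_SNDBUF, sendBufferBytes)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        // application handlers would be added here
                    }
                });
    }

    public static void main(String[] args) {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            // 64 KiB is an arbitrary illustrative value, not a tuned recommendation.
            Bootstrap bootstrap = clientBootstrap(group, 64 * 1024);
            // bootstrap.connect(host, port) would open a connection carrying this send buffer hint.
            System.out.println("Client bootstrap prepared: " + bootstrap);
        } finally {
            group.shutdownGracefully();
        }
    }
}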


Usage

From source file: org.apache.camel.component.netty4.SingleUDPNettyServerBootstrapFactory.java

License: Apache License

protected void startServerBootstrap() throws Exception {
    // create non-shared worker pool
    EventLoopGroup wg = configuration.getWorkerGroup();
    if (wg == null) {
        // create new pool which we should shut down when stopping as it's not shared
        workerGroup = new NettyWorkerPoolBuilder().withWorkerCount(configuration.getWorkerCount())
                .withName("NettyServerTCPWorker").build();
        wg = workerGroup;
    }

    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(wg).channel(NioDatagramChannel.class);
    // We cannot set the child option here      
    bootstrap.option(ChannelOption.SO_REUSEADDR, configuration.isReuseAddress());
    bootstrap.option(ChannelOption.SO_SNDBUF, configuration.getSendBufferSize());
    bootstrap.option(ChannelOption.SO_RCVBUF, configuration.getReceiveBufferSize());
    bootstrap.option(ChannelOption.SO_BROADCAST, configuration.isBroadcast());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, configuration.getConnectTimeout());

    // TODO need to find the right setting of below option
    // only set this if user has specified
    /*
    if (configuration.getReceiveBufferSizePredictor() > 0) {
    bootstrap.setOption("receiveBufferSizePredictorFactory",
            new FixedReceiveBufferSizePredictorFactory(configuration.getReceiveBufferSizePredictor()));
    }*/

    if (configuration.getBacklog() > 0) {
        bootstrap.option(ChannelOption.SO_BACKLOG, configuration.getBacklog());
    }

    //TODO need to check the additional netty options
    /*
    if (configuration.getOptions() != null) {
    for (Map.Entry<String, Object> entry : configuration.getOptions().entrySet()) {
        connectionlessBootstrap.setOption(entry.getKey(), entry.getValue());
    }
    }*/

    LOG.debug("Created ConnectionlessBootstrap {}", bootstrap);

    // set the pipeline factory, which creates the pipeline for each newly created channels
    bootstrap.handler(pipelineFactory);

    InetSocketAddress hostAddress = new InetSocketAddress(configuration.getHost(), configuration.getPort());
    SubnetUtils multicastSubnet = new SubnetUtils(MULTICAST_SUBNET);

    if (multicastSubnet.getInfo().isInRange(configuration.getHost())) {
        ChannelFuture channelFuture = bootstrap.bind(hostAddress);
        channelFuture.awaitUninterruptibly();
        channel = channelFuture.channel();
        DatagramChannel datagramChannel = (DatagramChannel) channel;
        String networkInterface = configuration.getNetworkInterface() == null ? LOOPBACK_INTERFACE
                : configuration.getNetworkInterface();
        multicastNetworkInterface = NetworkInterface.getByName(networkInterface);
        ObjectHelper.notNull(multicastNetworkInterface,
                "No network interface found for '" + networkInterface + "'.");
        LOG.info("ConnectionlessBootstrap joining {}:{} using network interface: {}", new Object[] {
                configuration.getHost(), configuration.getPort(), multicastNetworkInterface.getName() });
        datagramChannel.joinGroup(hostAddress, multicastNetworkInterface).syncUninterruptibly();
        allChannels.add(datagramChannel);
    } else {
        LOG.info("ConnectionlessBootstrap binding to {}:{}", configuration.getHost(), configuration.getPort());
        ChannelFuture channelFuture = bootstrap.bind(hostAddress);
        channelFuture.awaitUninterruptibly();
        channel = channelFuture.channel();
        allChannels.add(channel);
    }
}

From source file: org.apache.drill.exec.rpc.BasicClient.java

License: Apache License

public BasicClient(RpcConfig rpcMapping, ByteBufAllocator alloc, EventLoopGroup eventLoopGroup, T handshakeType,
        Class<HANDSHAKE_RESPONSE> responseClass, Parser<HANDSHAKE_RESPONSE> handshakeParser) {
    super(rpcMapping);
    this.responseClass = responseClass;
    this.handshakeType = handshakeType;
    this.handshakeParser = handshakeParser;
    final long timeoutInMillis = rpcMapping.hasTimeout()
            ? (long) (rpcMapping.getTimeout() * 1000.0 * PERCENT_TIMEOUT_BEFORE_SENDING_PING)
            : -1;
    this.pingHandler = rpcMapping.hasTimeout() ? new IdlePingHandler(timeoutInMillis) : null;

    b = new Bootstrap() //
            .group(eventLoopGroup) //
            .channel(TransportCheck.getClientSocketChannel()) //
            .option(ChannelOption.ALLOCATOR, alloc) //
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 30 * 1000).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_RCVBUF, 1 << 17) //
            .option(ChannelOption.SO_SNDBUF, 1 << 17) //
            .option(ChannelOption.TCP_NODELAY, true).handler(new ChannelInitializer<SocketChannel>() {

                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    // logger.debug("initializing client connection.");
                    connection = initRemoteConnection(ch);

                    ch.closeFuture().addListener(getCloseHandler(ch, connection));

                    final ChannelPipeline pipe = ch.pipeline();

                    pipe.addLast("protocol-decoder", getDecoder(connection.getAllocator()));
                    pipe.addLast("message-decoder", new RpcDecoder("c-" + rpcConfig.getName()));
                    pipe.addLast("protocol-encoder", new RpcEncoder("c-" + rpcConfig.getName()));
                    pipe.addLast("handshake-handler", new ClientHandshakeHandler(connection));

                    if (pingHandler != null) {
                        pipe.addLast("idle-state-handler", pingHandler);
                    }

                    pipe.addLast("message-handler", new InboundHandler(connection));
                    pipe.addLast("exception-handler", new RpcExceptionHandler<R>(connection));
                }
            }); //

    // if(TransportCheck.SUPPORTS_EPOLL){
    // b.option(EpollChannelOption.SO_REUSEPORT, true); //
    // }
}

From source file: org.apache.drill.exec.rpc.BasicServer.java

License: Apache License

public BasicServer(final RpcConfig rpcMapping, ByteBufAllocator alloc, EventLoopGroup eventLoopGroup) {
    super(rpcMapping);
    this.eventLoopGroup = eventLoopGroup;

    b = new ServerBootstrap().channel(TransportCheck.getServerSocketChannel())
            .option(ChannelOption.SO_BACKLOG, 1000).option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 30 * 1000)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_RCVBUF, 1 << 17).option(ChannelOption.SO_SNDBUF, 1 << 17)
            .group(eventLoopGroup) //
            .childOption(ChannelOption.ALLOCATOR, alloc)

            // .handler(new LoggingHandler(LogLevel.INFO))

            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    //            logger.debug("Starting initialization of server connection.");
                    C connection = initRemoteConnection(ch);
                    ch.closeFuture().addListener(getCloseHandler(ch, connection));

                    final ChannelPipeline pipe = ch.pipeline();
                    pipe.addLast("protocol-decoder",
                            getDecoder(connection.getAllocator(), getOutOfMemoryHandler()));
                    pipe.addLast("message-decoder", new RpcDecoder("s-" + rpcConfig.getName()));
                    pipe.addLast("protocol-encoder", new RpcEncoder("s-" + rpcConfig.getName()));
                    pipe.addLast("handshake-handler", getHandshakeHandler(connection));

                    if (rpcMapping.hasTimeout()) {
                        pipe.addLast(TIMEOUT_HANDLER,
                                new LogggingReadTimeoutHandler(connection, rpcMapping.getTimeout()));
                    }

                    pipe.addLast("message-handler", new InboundHandler(connection));
                    pipe.addLast("exception-handler", new RpcExceptionHandler<C>(connection));

                    connect = true;
                    //            logger.debug("Server connection initialization completed.");
                }
            });

    //     if(TransportCheck.SUPPORTS_EPOLL){
    //       b.option(EpollChannelOption.SO_REUSEPORT, true); //
    //     }
}

From source file: org.apache.flink.runtime.io.network.netty.NettyClient.java

License: Apache License

void init(final NettyProtocol protocol, NettyBufferPool nettyBufferPool) throws IOException {
    checkState(bootstrap == null, "Netty client has already been initialized.");

    long start = System.currentTimeMillis();

    bootstrap = new Bootstrap();

    // --------------------------------------------------------------------
    // Transport-specific configuration
    // --------------------------------------------------------------------

    switch (config.getTransportType()) {
    case NIO:
        initNioBootstrap();
        break;

    case EPOLL:
        initEpollBootstrap();
        break;

    case AUTO:
        if (Epoll.isAvailable()) {
            initEpollBootstrap();
            LOG.info("Transport type 'auto': using EPOLL.");
        } else {
            initNioBootstrap();
            LOG.info("Transport type 'auto': using NIO.");
        }
    }

    // --------------------------------------------------------------------
    // Configuration
    // --------------------------------------------------------------------

    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);

    // Timeout for new connections
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getClientConnectTimeoutSeconds() * 1000);

    // Pooled allocator for Netty's ByteBuf instances
    bootstrap.option(ChannelOption.ALLOCATOR, nettyBufferPool);

    // Receive and send buffer size
    int receiveAndSendBufferSize = config.getSendAndReceiveBufferSize();
    if (receiveAndSendBufferSize > 0) {
        bootstrap.option(ChannelOption.SO_SNDBUF, receiveAndSendBufferSize);
        bootstrap.option(ChannelOption.SO_RCVBUF, receiveAndSendBufferSize);
    }

    // --------------------------------------------------------------------
    // Child channel pipeline for accepted connections
    // --------------------------------------------------------------------

    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            channel.pipeline().addLast(protocol.getClientChannelHandlers());
        }
    });

    long end = System.currentTimeMillis();
    LOG.info("Successful initialization (took {} ms).", (end - start));
}

From source file: org.apache.flink.runtime.io.network.netty.NettyServer.java

License: Apache License

void init(final NettyProtocol protocol, NettyBufferPool nettyBufferPool) throws IOException {
    checkState(bootstrap == null, "Netty server has already been initialized.");

    long start = System.currentTimeMillis();

    bootstrap = new ServerBootstrap();

    // --------------------------------------------------------------------
    // Transport-specific configuration
    // --------------------------------------------------------------------

    switch (config.getTransportType()) {
    case NIO:
        initNioBootstrap();
        break;

    case EPOLL:
        initEpollBootstrap();
        break;

    case AUTO:
        if (Epoll.isAvailable()) {
            initEpollBootstrap();
            LOG.info("Transport type 'auto': using EPOLL.");
        } else {
            initNioBootstrap();
            LOG.info("Transport type 'auto': using NIO.");
        }
    }

    // --------------------------------------------------------------------
    // Configuration
    // --------------------------------------------------------------------

    // Server bind address
    bootstrap.localAddress(config.getServerAddress(), config.getServerPort());

    // Pooled allocators for Netty's ByteBuf instances
    bootstrap.option(ChannelOption.ALLOCATOR, nettyBufferPool);
    bootstrap.childOption(ChannelOption.ALLOCATOR, nettyBufferPool);

    if (config.getServerConnectBacklog() > 0) {
        bootstrap.option(ChannelOption.SO_BACKLOG, config.getServerConnectBacklog());
    }

    // Receive and send buffer size
    int receiveAndSendBufferSize = config.getSendAndReceiveBufferSize();
    if (receiveAndSendBufferSize > 0) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, receiveAndSendBufferSize);
        bootstrap.childOption(ChannelOption.SO_RCVBUF, receiveAndSendBufferSize);
    }

    // Low and high water marks for flow control
    bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, config.getMemorySegmentSize() + 1);
    bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 2 * config.getMemorySegmentSize());

    // --------------------------------------------------------------------
    // Child channel pipeline for accepted connections
    // --------------------------------------------------------------------

    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            channel.pipeline().addLast(protocol.getServerChannelHandlers());
        }
    });

    // --------------------------------------------------------------------
    // Start Server
    // --------------------------------------------------------------------

    bindFuture = bootstrap.bind().syncUninterruptibly();

    long end = System.currentTimeMillis();
    LOG.info("Successful initialization (took {} ms). Listening on SocketAddress {}.", (end - start),
            bindFuture.channel().localAddress().toString());
}

From source file: org.apache.giraph.comm.netty.NettyClient.java

License: Apache License

/**
 * Only constructor
 *
 * @param context Context for progress
 * @param conf Configuration
 * @param myTaskInfo Current task info
 * @param exceptionHandler handler for uncaught exception. Will
 *                         terminate job.
 */
public NettyClient(Mapper<?, ?, ?, ?>.Context context, final ImmutableClassesGiraphConfiguration conf,
        TaskInfo myTaskInfo, final Thread.UncaughtExceptionHandler exceptionHandler) {
    this.context = context;
    this.myTaskInfo = myTaskInfo;
    this.channelsPerServer = GiraphConstants.CHANNELS_PER_SERVER.get(conf);
    sendBufferSize = CLIENT_SEND_BUFFER_SIZE.get(conf);
    receiveBufferSize = CLIENT_RECEIVE_BUFFER_SIZE.get(conf);

    limitNumberOfOpenRequests = conf.getBoolean(LIMIT_NUMBER_OF_OPEN_REQUESTS,
            LIMIT_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
    if (limitNumberOfOpenRequests) {
        maxNumberOfOpenRequests = conf.getInt(MAX_NUMBER_OF_OPEN_REQUESTS, MAX_NUMBER_OF_OPEN_REQUESTS_DEFAULT);
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Limit number of open requests to " + maxNumberOfOpenRequests);
        }
    } else {
        maxNumberOfOpenRequests = -1;
    }

    maxRequestMilliseconds = MAX_REQUEST_MILLISECONDS.get(conf);

    maxConnectionFailures = NETTY_MAX_CONNECTION_FAILURES.get(conf);

    waitingRequestMsecs = WAITING_REQUEST_MSECS.get(conf);

    maxPoolSize = GiraphConstants.NETTY_CLIENT_THREADS.get(conf);

    maxResolveAddressAttempts = MAX_RESOLVE_ADDRESS_ATTEMPTS.get(conf);

    clientRequestIdRequestInfoMap = new MapMaker().concurrencyLevel(maxPoolSize).makeMap();

    handlerToUseExecutionGroup = NETTY_CLIENT_EXECUTION_AFTER_HANDLER.get(conf);
    useExecutionGroup = NETTY_CLIENT_USE_EXECUTION_HANDLER.get(conf);
    if (useExecutionGroup) {
        int executionThreads = NETTY_CLIENT_EXECUTION_THREADS.get(conf);
        executionGroup = new DefaultEventExecutorGroup(executionThreads,
                ThreadUtils.createThreadFactory("netty-client-exec-%d", exceptionHandler));
        if (LOG.isInfoEnabled()) {
            LOG.info("NettyClient: Using execution handler with " + executionThreads + " threads after "
                    + handlerToUseExecutionGroup + ".");
        }
    } else {
        executionGroup = null;
    }

    workerGroup = new NioEventLoopGroup(maxPoolSize,
            ThreadUtils.createThreadFactory("netty-client-worker-%d", exceptionHandler));

    bootstrap = new Bootstrap();
    bootstrap.group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, MAX_CONNECTION_MILLISECONDS_DEFAULT)
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.SO_SNDBUF, sendBufferSize).option(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    /*if_not[HADOOP_NON_SECURE]*/
                    if (conf.authenticate()) {
                        LOG.info("Using Netty with authentication.");

                        // Our pipeline starts with just byteCounter, and then we use
                        // addLast() to incrementally add pipeline elements, so that we
                        // can name them for identification for removal or replacement
                        // after client is authenticated by server.
                        // After authentication is complete, the pipeline's SASL-specific
                        // functionality is removed, restoring the pipeline to exactly the
                        // same configuration as it would be without authentication.
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        // The following pipeline component is needed to decode the
                        // server's SASL tokens. It is replaced with a
                        // FixedLengthFrameDecoder (same as used with the
                        // non-authenticated pipeline) after authentication
                        // completes (as in non-auth pipeline below).
                        PipelineUtils.addLastWithExecutorCheck("length-field-based-frame-decoder",
                                new LengthFieldBasedFrameDecoder(1024, 0, 4, 0, 4), handlerToUseExecutionGroup,
                                executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        // The following pipeline component responds to the server's SASL
                        // tokens with its own responses. Both client and server share the
                        // same Hadoop Job token, which is used to create the SASL
                        // tokens to authenticate with each other.
                        // After authentication finishes, this pipeline component
                        // is removed.
                        PipelineUtils.addLastWithExecutorCheck("sasl-client-handler",
                                new SaslClientHandler(conf), handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                    } else {
                        LOG.info("Using Netty without authentication.");
                        /*end[HADOOP_NON_SECURE]*/
                        PipelineUtils.addLastWithExecutorCheck("clientInboundByteCounter", inboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                                    conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("clientOutboundByteCounter", outboundByteCounter,
                                handlerToUseExecutionGroup, executionGroup, ch);
                        if (conf.doCompression()) {
                            PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                                    conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup,
                                    executionGroup, ch);
                        }
                        PipelineUtils.addLastWithExecutorCheck("fixed-length-frame-decoder",
                                new FixedLengthFrameDecoder(RequestServerHandler.RESPONSE_BYTES),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("request-encoder", new RequestEncoder(conf),
                                handlerToUseExecutionGroup, executionGroup, ch);
                        PipelineUtils.addLastWithExecutorCheck("response-handler",
                                new ResponseClientHandler(clientRequestIdRequestInfoMap, conf),
                                handlerToUseExecutionGroup, executionGroup, ch);

                        /*if_not[HADOOP_NON_SECURE]*/
                    }
                    /*end[HADOOP_NON_SECURE]*/
                }
            });
}

From source file: org.apache.giraph.comm.netty.NettyServer.java

License: Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4,
                    receiveBufferSize, receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler", new AuthorizeServerHandler(),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debugability from the port number alone.
    // Round up the max number of workers to the next power of 10 and use
    // it as a constant to increase the port number with.
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt " + "is true, port "
                        + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to " + maxPoolSize
                + " threads on bind attempt " + bindAttempts + " with sendBufferSize = " + sendBufferSize
                + " receiveBufferSize = " + receiveBufferSize);
    }
}

From source file: org.apache.qpid.jms.transports.netty.NettyTcpTransport.java

License: Apache License

private void configureNetty(Bootstrap bootstrap, TransportOptions options) {
    bootstrap.option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive());
    bootstrap.option(ChannelOption.SO_LINGER, options.getSoLinger());
    bootstrap.option(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);

    if (options.getSendBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, options.getSendBufferSize());
    }

    if (options.getReceiveBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, options.getReceiveBufferSize());
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(options.getReceiveBufferSize()));
    }

    if (options.getTrafficClass() != -1) {
        bootstrap.option(ChannelOption.IP_TOS, options.getTrafficClass());
    }
}

From source file: org.apache.rocketmq.remoting.netty.NettyRemotingClient.java

License: Apache License

@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(//
            nettyClientConfig.getClientWorkerThreads(), //
            new ThreadFactory() {

                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyClientWorkerThread_" + this.threadIndex.incrementAndGet());
                }
            });

    Bootstrap handler = this.bootstrap.group(this.eventLoopGroupWorker).channel(NioSocketChannel.class)//
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, false)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, nettyClientConfig.getConnectTimeoutMillis())
            .option(ChannelOption.SO_SNDBUF, nettyClientConfig.getClientSocketSndBufSize())
            .option(ChannelOption.SO_RCVBUF, nettyClientConfig.getClientSocketRcvBufSize())
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(defaultEventExecutorGroup, new NettyEncoder(), new NettyDecoder(),
                            new IdleStateHandler(0, 0, nettyClientConfig.getClientChannelMaxIdleTimeSeconds()),
                            new NettyConnectManageHandler(), new NettyClientHandler());
                }
            });

    this.timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                NettyRemotingClient.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }
}

From source file: org.apache.rocketmq.remoting.netty.NettyRemotingServer.java

License: Apache License

@Override
public void start() {
    this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(nettyServerConfig.getServerWorkerThreads(),
            new ThreadFactory() {

                private AtomicInteger threadIndex = new AtomicInteger(0);

                @Override
                public Thread newThread(Runnable r) {
                    return new Thread(r, "NettyServerCodecThread_" + this.threadIndex.incrementAndGet());
                }
            });

    ServerBootstrap childHandler = this.serverBootstrap
            .group(this.eventLoopGroupBoss, this.eventLoopGroupSelector).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, 1024).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_KEEPALIVE, false).childOption(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_SNDBUF, nettyServerConfig.getServerSocketSndBufSize())
            .option(ChannelOption.SO_RCVBUF, nettyServerConfig.getServerSocketRcvBufSize())
            .localAddress(new InetSocketAddress(this.nettyServerConfig.getListenPort()))
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(defaultEventExecutorGroup, new NettyEncoder(), new NettyDecoder(),
                            new IdleStateHandler(0, 0, nettyServerConfig.getServerChannelMaxIdleTimeSeconds()),
                            new NettyConnetManageHandler(), new NettyServerHandler());
                }
            });

    if (nettyServerConfig.isServerPooledByteBufAllocatorEnable()) {
        childHandler.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }

    try {
        ChannelFuture sync = this.serverBootstrap.bind().sync();
        InetSocketAddress addr = (InetSocketAddress) sync.channel().localAddress();
        this.port = addr.getPort();
    } catch (InterruptedException e1) {
        throw new RuntimeException("this.serverBootstrap.bind().sync() InterruptedException", e1);
    }

    if (this.channelEventListener != null) {
        this.nettyEventExecuter.start();
    }

    this.timer.scheduleAtFixedRate(new TimerTask() {

        @Override
        public void run() {
            try {
                NettyRemotingServer.this.scanResponseTable();
            } catch (Exception e) {
                log.error("scanResponseTable exception", e);
            }
        }
    }, 1000 * 3, 1000);
}