Example usage for io.netty.channel ChannelOption RCVBUF_ALLOCATOR


Introduction

This page collects usage examples for io.netty.channel ChannelOption RCVBUF_ALLOCATOR, the channel option that selects the RecvByteBufAllocator used to size the buffers Netty allocates when reading from a channel.

Prototype

public static final ChannelOption<RecvByteBufAllocator> RCVBUF_ALLOCATOR

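A minimal, self-contained sketch of how this option is typically set. It is not taken from any of the projects below; the host, port, and buffer sizes are placeholder values.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class RcvbufAllocatorExample {
    public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap();
            b.group(group).channel(NioSocketChannel.class);
            // Fixed-size receive buffers: every read allocates exactly 64 KiB.
            b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(64 * 1024));
            // Alternatively, an AdaptiveRecvByteBufAllocator grows and shrinks
            // the buffer between a minimum and maximum based on recent reads:
            // b.option(ChannelOption.RCVBUF_ALLOCATOR,
            //         new AdaptiveRecvByteBufAllocator(512, 8192, 64 * 1024));
            b.handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) {
                    // add protocol handlers here
                }
            });
            b.connect("example.com", 8080).sync().channel().closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}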

Usage

From source file:net.tomp2p.connection.ChannelCreator.java

License:Apache License

/**
 * Creates a "channel" to the given address. Unlike TCP, this won't
 * send any message.
 * 
 * @param broadcast
 *            Sets this channel to be able to broadcast
 * @param channelHandlers
 *            The handlers to set
 * @param futureResponse
 *            The futureResponse
 * @return The channel future object or null if we are shut down
 */
public ChannelFuture createUDP(final boolean broadcast,
        final Map<String, Pair<EventExecutorGroup, ChannelHandler>> channelHandlers,
        FutureResponse futureResponse) {
    readUDP.lock();
    try {
        if (shutdownUDP) {
            return null;
        }
        if (!semaphoreUPD.tryAcquire()) {
            LOG.error("Tried to acquire more resources (UDP) than announced! Announced {}", maxPermitsUDP);
            throw new RuntimeException("Tried to acquire more resources (UDP) than announced!");
        }
        final Bootstrap b = new Bootstrap();
        b.group(workerGroup);
        b.channel(NioDatagramChannel.class);
        b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(ConnectionBean.UDP_LIMIT));
        if (broadcast) {
            b.option(ChannelOption.SO_BROADCAST, true);
        }
        Map<String, Pair<EventExecutorGroup, ChannelHandler>> channelHandlers2 = channelClientConfiguration
                .pipelineFilter().filter(channelHandlers, false, true);
        addHandlers(b, channelHandlers2);
        // Here we need to bind, as opposed to TCP, where we connect. If
        // we do a connect, we cannot receive broadcast messages.
        final ChannelFuture channelFuture;
        channelFuture = b.bind(new InetSocketAddress(channelClientConfiguration.senderUDP(), 0));
        recipients.add(channelFuture.channel());
        setupCloseListener(channelFuture, semaphoreUPD, futureResponse);
        return channelFuture;
    } finally {
        readUDP.unlock();
    }
}
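TomP2P sizes the allocator with a FixedRecvByteBufAllocator of ConnectionBean.UDP_LIMIT so that a whole datagram always fits into a single read buffer. As an aside (a sketch, not TomP2P code), the same allocator can also be installed on an already-created channel through its config, which is equivalent to setting the bootstrap option:

import io.netty.channel.Channel;
import io.netty.channel.FixedRecvByteBufAllocator;

final class UdpBufferSizing {
    // Equivalent to ChannelOption.RCVBUF_ALLOCATOR at bootstrap time, but
    // applied to a live channel; maxDatagramSize is a placeholder parameter.
    static void useFixedReceiveBuffers(Channel channel, int maxDatagramSize) {
        channel.config().setRecvByteBufAllocator(new FixedRecvByteBufAllocator(maxDatagramSize));
    }
}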

From source file:net.tomp2p.connection.ChannelServer.java

License:Apache License

/**
 * Starts listening on a UDP port.
 * 
 * @param listenAddresses
 *            The address to listen to
 * @param config
 *            Can create handlers to be attached to this port
 * @param broadcastFlag
 *            Whether to enable SO_BROADCAST on the channel
 * @return True if startup was successful
 */
boolean startupUDP(final InetSocketAddress listenAddresses, final ChannelServerConfiguration config,
        boolean broadcastFlag) {
    Bootstrap b = new Bootstrap();
    b.group(workerGroup);
    b.channel(NioDatagramChannel.class);
    // The broadcast option is only required because we do not listen on the broadcast address directly.
    if (broadcastFlag) {
        b.option(ChannelOption.SO_BROADCAST, true);
    }
    b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(ConnectionBean.UDP_LIMIT));

    b.handler(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(final Channel ch) throws Exception {
            ch.config().setAllocator(channelServerConfiguration.byteBufAllocator());
            for (Map.Entry<String, Pair<EventExecutorGroup, ChannelHandler>> entry : handlers(false)
                    .entrySet()) {
                if (!entry.getValue().isEmpty()) {
                    ch.pipeline().addLast(entry.getValue().element0(), entry.getKey(),
                            entry.getValue().element1());
                } else if (entry.getValue().element1() != null) {
                    ch.pipeline().addLast(entry.getKey(), entry.getValue().element1());
                }
            }
        }
    });

    ChannelFuture future = b.bind(listenAddresses);
    channelsUDP.put(listenAddresses.getAddress(), future.channel());
    return handleFuture(future);
}

From source file:net.tomp2p.connection2.ChannelCreator.java

License:Apache License

/**
 * Creates a "channel" to the given address. Unlike TCP, this won't
 * send any message.
 * @param recipient
 *            The recipient of the message
 * 
 * @param broadcast
 *            Sets this channel to be able to broadcast
 * @param channelHandlers
 *            The handlers to set
 * @return The channel future object or null if we are shut down
 */
public ChannelFuture createUDP(final SocketAddress recipient, final boolean broadcast,
        final Map<String, ChannelHandler> channelHandlers) {
    readUDP.lock();
    try {
        if (shutdownUDP) {
            return null;
        }
        if (!semaphoreUPD.tryAcquire()) {
            LOG.error("Tried to acquire more resources (UDP) than announced!");
            throw new RuntimeException("Tried to acquire more resources (UDP) than announced!");
        }
        final Bootstrap b = new Bootstrap();
        b.group(workerGroup);
        b.channel(NioDatagramChannel.class);
        b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(ConnectionBean.UDP_LIMIT));
        if (broadcast) {
            b.option(ChannelOption.SO_BROADCAST, true);
        }
        channelClientConfiguration.pipelineFilter().filter(channelHandlers, false, true);
        addHandlers(b, channelHandlers);
        // Here we need to bind, as opposed to TCP, where we connect. If
        // we do a connect, we cannot receive broadcast messages.
        final ChannelFuture channelFuture;
        if (broadcast) {
            channelFuture = b.bind(new InetSocketAddress(0));
        } else {
            channelFuture = b.connect(recipient);
        }

        setupCloseListener(channelFuture, semaphoreUPD);
        CREATED_UDP_CONNECTIONS.incrementAndGet();
        return channelFuture;
    } finally {
        readUDP.unlock();
    }
}

From source file:net.tomp2p.connection2.ChannelServer.java

License:Apache License

/**
 * Starts listening on a UDP port.
 * 
 * @param listenAddresses
 *            The address to listen to
 * @param config
 *            Can create handlers to be attached to this port
 * @return True if startup was successful
 */
boolean startupUDP(final InetSocketAddress listenAddresses, final ChannelServerConficuration config) {
    Bootstrap b = new Bootstrap();
    b.group(workerGroup);
    b.channel(NioDatagramChannel.class);
    b.option(ChannelOption.SO_BROADCAST, true);
    b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(ConnectionBean.UDP_LIMIT));

    b.handler(new ChannelInitializer<Channel>() {
        @Override
        protected void initChannel(final Channel ch) throws Exception {
            for (Map.Entry<String, ChannelHandler> entry : handlers(false).entrySet()) {
                ch.pipeline().addLast(entry.getKey(), entry.getValue());
            }
        }
    });

    ChannelFuture future = b.bind(listenAddresses);
    channelUDP = future.channel();
    return handleFuture(future);
}

From source file:org.apache.activemq.transport.amqp.client.transport.NettyTcpTransport.java

License:Apache License

private void configureNetty(Bootstrap bootstrap, NettyTransportOptions options) {
    bootstrap.option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive());
    bootstrap.option(ChannelOption.SO_LINGER, options.getSoLinger());
    bootstrap.option(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);

    if (options.getSendBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, options.getSendBufferSize());
    }

    if (options.getReceiveBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, options.getReceiveBufferSize());
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(options.getReceiveBufferSize()));
    }

    if (options.getTrafficClass() != -1) {
        bootstrap.option(ChannelOption.IP_TOS, options.getTrafficClass());
    }
}
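Note the pairing in the snippet above: SO_RCVBUF sizes the kernel's socket receive buffer, while RCVBUF_ALLOCATOR sizes the ByteBuf that Netty allocates for each read. A short sketch of that idiom in isolation (the class and parameter names are illustrative):

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelOption;
import io.netty.channel.FixedRecvByteBufAllocator;

final class ReceiveBufferConfig {
    static void apply(Bootstrap bootstrap, int receiveBufferSize) {
        // OS-level socket receive buffer.
        bootstrap.option(ChannelOption.SO_RCVBUF, receiveBufferSize);
        // Netty-level size of the ByteBuf allocated per read.
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(receiveBufferSize));
    }
}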

From source file:org.apache.activemq.transport.netty.NettyTcpTransport.java

License:Apache License

private void configureNetty(Bootstrap bootstrap, NettyTransportOptions options) {
    bootstrap.option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive());
    bootstrap.option(ChannelOption.SO_LINGER, options.getSoLinger());

    if (options.getSendBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, options.getSendBufferSize());
    }

    if (options.getReceiveBufferSize() != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, options.getReceiveBufferSize());
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR,
                new FixedRecvByteBufAllocator(options.getReceiveBufferSize()));
    }

    if (options.getTrafficClass() != -1) {
        bootstrap.option(ChannelOption.IP_TOS, options.getTrafficClass());
    }
}

From source file:org.apache.bookkeeper.proto.BookieNettyServer.java

License:Apache License

private void listenOn(InetSocketAddress address, BookieSocketAddress bookieAddress)
        throws InterruptedException {
    if (!conf.isDisableServerSocketBind()) {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.option(ChannelOption.ALLOCATOR, allocator);
        bootstrap.childOption(ChannelOption.ALLOCATOR, allocator);
        bootstrap.group(eventLoopGroup, eventLoopGroup);
        bootstrap.childOption(ChannelOption.TCP_NODELAY, conf.getServerTcpNoDelay());
        bootstrap.childOption(ChannelOption.SO_LINGER, conf.getServerSockLinger());
        bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(conf.getRecvByteBufAllocatorSizeMin(),
                        conf.getRecvByteBufAllocatorSizeInitial(), conf.getRecvByteBufAllocatorSizeMax()));
        bootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(
                conf.getServerWriteBufferLowWaterMark(), conf.getServerWriteBufferHighWaterMark()));

        if (eventLoopGroup instanceof EpollEventLoopGroup) {
            bootstrap.channel(EpollServerSocketChannel.class);
        } else {
            bootstrap.channel(NioServerSocketChannel.class);
        }

        bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                synchronized (suspensionLock) {
                    while (suspended) {
                        suspensionLock.wait();
                    }
                }

                BookieSideConnectionPeerContextHandler contextHandler = new BookieSideConnectionPeerContextHandler();
                ChannelPipeline pipeline = ch.pipeline();

                // For ByteBufList, skip the usual LengthFieldPrepender and let the encoder itself add it
                pipeline.addLast("bytebufList", ByteBufList.ENCODER_WITH_SIZE);

                pipeline.addLast("lengthbaseddecoder",
                        new LengthFieldBasedFrameDecoder(maxFrameSize, 0, 4, 0, 4));
                pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));

                pipeline.addLast("bookieProtoDecoder", new BookieProtoEncoding.RequestDecoder(registry));
                pipeline.addLast("bookieProtoEncoder", new BookieProtoEncoding.ResponseEncoder(registry));
                pipeline.addLast("bookieAuthHandler", new AuthHandler.ServerSideHandler(
                        contextHandler.getConnectionPeer(), authProviderFactory));

                ChannelInboundHandler requestHandler = isRunning.get()
                        ? new BookieRequestHandler(conf, requestProcessor, allChannels)
                        : new RejectRequestHandler();
                pipeline.addLast("bookieRequestHandler", requestHandler);

                pipeline.addLast("contextHandler", contextHandler);
            }
        });

        // Bind and start to accept incoming connections
        Channel listen = bootstrap.bind(address.getAddress(), address.getPort()).sync().channel();
        if (listen.localAddress() instanceof InetSocketAddress) {
            if (conf.getBookiePort() == 0) {
                conf.setBookiePort(((InetSocketAddress) listen.localAddress()).getPort());
            }
        }
    }

    if (conf.isEnableLocalTransport()) {
        ServerBootstrap jvmBootstrap = new ServerBootstrap();
        jvmBootstrap.childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
        jvmBootstrap.group(jvmEventLoopGroup, jvmEventLoopGroup);
        jvmBootstrap.childOption(ChannelOption.TCP_NODELAY, conf.getServerTcpNoDelay());
        jvmBootstrap.childOption(ChannelOption.SO_KEEPALIVE, conf.getServerSockKeepalive());
        jvmBootstrap.childOption(ChannelOption.SO_LINGER, conf.getServerSockLinger());
        jvmBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(conf.getRecvByteBufAllocatorSizeMin(),
                        conf.getRecvByteBufAllocatorSizeInitial(), conf.getRecvByteBufAllocatorSizeMax()));
        jvmBootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK, new WriteBufferWaterMark(
                conf.getServerWriteBufferLowWaterMark(), conf.getServerWriteBufferHighWaterMark()));

        if (jvmEventLoopGroup instanceof DefaultEventLoopGroup) {
            jvmBootstrap.channel(LocalServerChannel.class);
        } else if (jvmEventLoopGroup instanceof EpollEventLoopGroup) {
            jvmBootstrap.channel(EpollServerSocketChannel.class);
        } else {
            jvmBootstrap.channel(NioServerSocketChannel.class);
        }

        jvmBootstrap.childHandler(new ChannelInitializer<LocalChannel>() {
            @Override
            protected void initChannel(LocalChannel ch) throws Exception {
                synchronized (suspensionLock) {
                    while (suspended) {
                        suspensionLock.wait();
                    }
                }

                BookieSideConnectionPeerContextHandler contextHandler = new BookieSideConnectionPeerContextHandler();
                ChannelPipeline pipeline = ch.pipeline();

                pipeline.addLast("lengthbaseddecoder",
                        new LengthFieldBasedFrameDecoder(maxFrameSize, 0, 4, 0, 4));
                pipeline.addLast("lengthprepender", new LengthFieldPrepender(4));

                pipeline.addLast("bookieProtoDecoder", new BookieProtoEncoding.RequestDecoder(registry));
                pipeline.addLast("bookieProtoEncoder", new BookieProtoEncoding.ResponseEncoder(registry));
                pipeline.addLast("bookieAuthHandler", new AuthHandler.ServerSideHandler(
                        contextHandler.getConnectionPeer(), authProviderFactory));

                ChannelInboundHandler requestHandler = isRunning.get()
                        ? new BookieRequestHandler(conf, requestProcessor, allChannels)
                        : new RejectRequestHandler();
                pipeline.addLast("bookieRequestHandler", requestHandler);

                pipeline.addLast("contextHandler", contextHandler);
            }
        });

        // Use the same address 'name' so that clients discovering bookies via ZK can still find the local bookie.
        jvmBootstrap.bind(bookieAddress.getLocalAddress()).sync();
        LocalBookiesRegistry.registerLocalBookieAddress(bookieAddress);
    }
}
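The BookKeeper server above uses the adaptive variant: AdaptiveRecvByteBufAllocator(min, initial, max) starts reads at the initial size and resizes between the two bounds based on how much data recent reads returned. A minimal sketch (the three sizes are illustrative, not BookKeeper's configured defaults):

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.ChannelOption;

final class AdaptiveReceiveConfig {
    static void apply(ServerBootstrap bootstrap) {
        // Child channels start with 8 KiB read buffers and adapt between
        // 512 bytes and 64 KiB depending on observed read sizes.
        bootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR,
                new AdaptiveRecvByteBufAllocator(512, 8192, 64 * 1024));
    }
}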

From source file:org.apache.flink.runtime.io.network.netty.NettyConnectionManager.java

License:Apache License

@Override
public void start(ChannelManager channelManager) throws IOException {
    LOG.info(String.format("Starting with %d incoming and %d outgoing connection threads.", numInThreads,
            numOutThreads));
    LOG.info(String.format("Setting low water mark to %d and high water mark to %d bytes.", lowWaterMark,
            highWaterMark));
    LOG.info(String.format("Close channels after idle for %d ms.", closeAfterIdleForMs));

    final BufferProviderBroker bufferProviderBroker = channelManager;
    final EnvelopeDispatcher envelopeDispatcher = channelManager;

    int numHeapArenas = 0;
    int numDirectArenas = numInThreads + numOutThreads;
    int pageSize = bufferSize << 1;
    int chunkSize = 16 << 20; // 16 MB

    // shift pageSize maxOrder times to get to chunkSize
    int maxOrder = (int) (Math.log(chunkSize / pageSize) / Math.log(2));

    PooledByteBufAllocator pooledByteBufAllocator = new PooledByteBufAllocator(true, numHeapArenas,
            numDirectArenas, pageSize, maxOrder);

    String msg = String.format(
            "Instantiated PooledByteBufAllocator with direct arenas: %d, heap arenas: %d, "
                    + "page size (bytes): %d, chunk size (bytes): %d.",
            numDirectArenas, numHeapArenas, pageSize, (pageSize << maxOrder));
    LOG.info(msg);

    // --------------------------------------------------------------------
    // server bootstrap (incoming connections)
    // --------------------------------------------------------------------
    in = new ServerBootstrap();
    in.group(new NioEventLoopGroup(numInThreads)).channel(NioServerSocketChannel.class)
            .localAddress(bindAddress, bindPort).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new InboundEnvelopeDecoder(bufferProviderBroker))
                            .addLast(new InboundEnvelopeDispatcher(envelopeDispatcher));
                }
            }).option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(pageSize))
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator);

    // --------------------------------------------------------------------
    // client bootstrap (outgoing connections)
    // --------------------------------------------------------------------
    out = new Bootstrap();
    out.group(new NioEventLoopGroup(numOutThreads)).channel(NioSocketChannel.class)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel channel) throws Exception {
                    channel.pipeline().addLast(new OutboundEnvelopeEncoder());
                }
            }).option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, lowWaterMark)
            .option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, highWaterMark)
            .option(ChannelOption.ALLOCATOR, pooledByteBufAllocator).option(ChannelOption.TCP_NODELAY, false)
            .option(ChannelOption.SO_KEEPALIVE, true);

    try {
        in.bind().sync();
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    if (LOG.isDebugEnabled()) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                Date date = new Date();

                while (true) {
                    try {
                        Thread.sleep(DEBUG_PRINT_QUEUED_ENVELOPES_EVERY_MS);

                        date.setTime(System.currentTimeMillis());

                        System.out.println(date);
                        System.out.println(getNonZeroNumQueuedEnvelopes());
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            }
        }).start();
    }
}

From source file:org.apache.giraph.comm.netty.NettyServer.java

License:Apache License

/**
 * Start the server with the appropriate port
 */
public void start() {
    bootstrap = new ServerBootstrap();
    bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, tcpBacklog)
            .option(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, sendBufferSize)
            .childOption(ChannelOption.SO_RCVBUF, receiveBufferSize)
            .childOption(ChannelOption.ALLOCATOR, conf.getNettyAllocator())
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new AdaptiveRecvByteBufAllocator(receiveBufferSize / 4,
                    receiveBufferSize, receiveBufferSize));

    /**
     * Pipeline setup: depends on whether configured to use authentication
     * or not.
     */
    bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        protected void initChannel(SocketChannel ch) throws Exception {
            /*if_not[HADOOP_NON_SECURE]*/
            if (conf.authenticate()) {
                LOG.info("start: Will use Netty pipeline with "
                        + "authentication and authorization of clients.");
                // After a client authenticates, the two authentication-specific
                // pipeline components SaslServerHandler and ResponseEncoder are
                // removed, leaving the pipeline the same as in the non-authenticated
                // configuration except for the presence of the Authorize component.
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("saslServerHandler",
                        saslServerHandlerFactory.newHandler(conf), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck("authorizeServerHandler", new AuthorizeServerHandler(),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                // Removed after authentication completes:
                PipelineUtils.addLastWithExecutorCheck("responseEncoder", new ResponseEncoder(),
                        handlerToUseExecutionGroup, executionGroup, ch);
            } else {
                LOG.info("start: Using Netty without authentication.");
                /*end[HADOOP_NON_SECURE]*/
                // Store all connected channels in order to ensure that we can close
                // them on stop(), or else stop() may hang waiting for the
                // connections to close on their own
                ch.pipeline().addLast("connectedChannels", new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelActive(ChannelHandlerContext ctx) throws Exception {
                        accepted.add(ctx.channel());
                        ctx.fireChannelActive();
                    }
                });
                PipelineUtils.addLastWithExecutorCheck("serverInboundByteCounter", inByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionDecoder",
                            conf.getNettyCompressionDecoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("serverOutboundByteCounter", outByteCounter,
                        handlerToUseExecutionGroup, executionGroup, ch);
                if (conf.doCompression()) {
                    PipelineUtils.addLastWithExecutorCheck("compressionEncoder",
                            conf.getNettyCompressionEncoder(), handlerToUseExecutionGroup, executionGroup, ch);
                }
                PipelineUtils.addLastWithExecutorCheck("requestFrameDecoder",
                        new LengthFieldBasedFrameDecoder(1024 * 1024 * 1024, 0, 4, 0, 4),
                        handlerToUseExecutionGroup, executionGroup, ch);
                PipelineUtils.addLastWithExecutorCheck("requestDecoder",
                        new RequestDecoder(conf, inByteCounter), handlerToUseExecutionGroup, executionGroup,
                        ch);
                PipelineUtils.addLastWithExecutorCheck(
                        "requestServerHandler", requestServerHandlerFactory.newHandler(workerRequestReservedMap,
                                conf, myTaskInfo, exceptionHandler),
                        handlerToUseExecutionGroup, executionGroup, ch);
                /*if_not[HADOOP_NON_SECURE]*/
            }
            /*end[HADOOP_NON_SECURE]*/
        }
    });

    int taskId = conf.getTaskPartition();
    int numTasks = conf.getInt("mapred.map.tasks", 1);
    // Number of workers + 1 for master
    int numServers = conf.getInt(GiraphConstants.MAX_WORKERS, numTasks) + 1;
    int portIncrementConstant = (int) Math.pow(10, Math.ceil(Math.log10(numServers)));
    int bindPort = GiraphConstants.IPC_INITIAL_PORT.get(conf) + taskId;
    int bindAttempts = 0;
    final int maxIpcPortBindAttempts = MAX_IPC_PORT_BIND_ATTEMPTS.get(conf);
    final boolean failFirstPortBindingAttempt = GiraphConstants.FAIL_FIRST_IPC_PORT_BIND_ATTEMPT.get(conf);

    // Simple handling of port collisions on the same machine while
    // preserving debuggability from the port number alone.
    // Round up the max number of workers to the next power of 10 and use
    // it as a constant to increase the port number with.
    while (bindAttempts < maxIpcPortBindAttempts) {
        this.myAddress = new InetSocketAddress(localHostname, bindPort);
        if (failFirstPortBindingAttempt && bindAttempts == 0) {
            if (LOG.isInfoEnabled()) {
                LOG.info("start: Intentionally fail first "
                        + "binding attempt as giraph.failFirstIpcPortBindAttempt " + "is true, port "
                        + bindPort);
            }
            ++bindAttempts;
            bindPort += portIncrementConstant;
            continue;
        }

        try {
            ChannelFuture f = bootstrap.bind(myAddress).sync();
            accepted.add(f.channel());
            break;
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
            // CHECKSTYLE: stop IllegalCatchCheck
        } catch (Exception e) {
            // CHECKSTYLE: resume IllegalCatchCheck
            LOG.warn("start: Likely failed to bind on attempt " + bindAttempts + " to port " + bindPort,
                    e.getCause());
            ++bindAttempts;
            bindPort += portIncrementConstant;
        }
    }
    if (bindAttempts == maxIpcPortBindAttempts || myAddress == null) {
        throw new IllegalStateException(
                "start: Failed to start NettyServer with " + bindAttempts + " attempts");
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("start: Started server " + "communication server: " + myAddress + " with up to " + maxPoolSize
                + " threads on bind attempt " + bindAttempts + " with sendBufferSize = " + sendBufferSize
                + " receiveBufferSize = " + receiveBufferSize);
    }
}

From source file:org.apache.hyracks.http.server.HttpServer.java

License:Apache License

protected void doStart() throws InterruptedException {
    /*
     * This is a hacky way to ensure that IServlets with more specific paths are checked first.
     * For example:
     * "/path/to/resource/"
     * is checked before
     * "/path/to/"
     * which in turn is checked before
     * "/path/"
     * Note that it doesn't work for the case where multiple paths map to a single IServlet
     */
    Collections.sort(servlets, (l1, l2) -> l2.getPaths()[0].length() - l1.getPaths()[0].length());
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(RECEIVE_BUFFER_SIZE))
            .childOption(ChannelOption.AUTO_READ, Boolean.FALSE)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WRITE_BUFFER_WATER_MARK)
            .handler(new LoggingHandler(LogLevel.DEBUG)).childHandler(new HttpServerInitializer(this));
    channel = b.bind(port).sync().channel();
}