Example usage for io.netty.channel ChannelOption ALLOCATOR

Introduction

This page collects usage examples for io.netty.channel.ChannelOption.ALLOCATOR, the option that selects the ByteBufAllocator a channel uses to allocate its buffers.

Prototype

public static final ChannelOption<ByteBufAllocator> ALLOCATOR

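As a quick orientation before the full examples below, here is a minimal, self-contained sketch of setting the option on a client Bootstrap, assuming Netty 4.x; the class name, host, and port are placeholders:

import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.NioSocketChannel;
import io.netty.channel.socket.SocketChannel;

public class AllocatorExample {
    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap().group(group).channel(NioSocketChannel.class)
                    // Every ByteBuf this channel allocates comes from the pooled allocator.
                    .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .handler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // add protocol handlers to ch.pipeline() here
                        }
                    });
            b.connect("localhost", 8080).sync().channel().closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}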

Usage

From source file: org.proton.plug.test.minimalserver.MinimalServer.java

License: Apache License

public synchronized void start(String host, int port, final boolean sasl) throws Exception {
    this.host = host;
    this.port = port;
    this.sasl = sasl;

    if (channelClazz != null) {
        // Already started
        return;
    }

    int threadsToUse = Runtime.getRuntime().availableProcessors() * 3;
    channelClazz = NioServerSocketChannel.class;
    eventLoopGroup = new NioEventLoopGroup(threadsToUse, new SimpleServerThreadFactory("simple-server", true,
            Thread.currentThread().getContextClassLoader()));

    bootstrap = new ServerBootstrap();
    bootstrap.group(eventLoopGroup);
    bootstrap.channel(channelClazz);

    ChannelInitializer<Channel> factory = new ChannelInitializer<Channel>() {
        @Override
        public void initChannel(Channel channel) throws Exception {
            ChannelPipeline pipeline = channel.pipeline();
            pipeline.addLast("amqp-handler", new ProtocolDecoder());
        }
    };
    bootstrap.childHandler(factory);

    bootstrap.option(ChannelOption.SO_REUSEADDR, true).childOption(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true).
            //       childOption(ChannelOption.AUTO_READ, false).
            childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    channelGroup = new DefaultChannelGroup("activemq-accepted-channels", GlobalEventExecutor.INSTANCE);

    serverChannelGroup = new DefaultChannelGroup("activemq-acceptor-channels", GlobalEventExecutor.INSTANCE);

    SocketAddress address = new InetSocketAddress(host, port);
    Channel serverChannel = bootstrap.bind(address).syncUninterruptibly().channel();
    serverChannelGroup.add(serverChannel);

}
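Note that the ALLOCATOR option is applied above through childOption, so it takes effect on the accepted child channels rather than on the server socket itself. PooledByteBufAllocator.DEFAULT is a shared, JVM-wide instance, so every channel configured this way draws from the same buffer pool.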

From source file: org.ratpackframework.bootstrap.internal.NettyRatpackService.java

License: Apache License

@Override
protected void startUp() throws Exception {
    ServerBootstrap bootstrap = new ServerBootstrap();
    group = new NioEventLoopGroup(MultithreadEventLoopGroup.DEFAULT_EVENT_LOOP_THREADS,
            new DefaultThreadFactory("ratpack-group", Thread.MAX_PRIORITY));

    bootstrap.group(group).channel(NioServerSocketChannel.class).childHandler(channelInitializer);

    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_REUSEADDR, true);
    bootstrap.option(ChannelOption.SO_BACKLOG, 1024);

    channel = bootstrap.bind(requestedAddress).sync().channel();
    boundAddress = (InetSocketAddress) channel.localAddress();

    if (logger.isLoggable(Level.INFO)) {
        logger.info(String.format("Ratpack started for http://%s:%s", getBindHost(), getBindPort()));
    }
}

From source file: org.ratpackframework.server.internal.NettyRatpackService.java

License: Apache License

@Override
protected void startUp() throws Exception {
    ServerBootstrap bootstrap = new ServerBootstrap();
    group = new NioEventLoopGroup(launchConfig.getMainThreads(),
            new DefaultThreadFactory("ratpack-group", Thread.MAX_PRIORITY));

    bootstrap.group(group).channel(NioServerSocketChannel.class).childHandler(channelInitializer)
            .childOption(ChannelOption.ALLOCATOR, launchConfig.getBufferAllocator())
            .childOption(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_BACKLOG, 1024)
            .option(ChannelOption.ALLOCATOR, launchConfig.getBufferAllocator());

    try {
        channel = bootstrap.bind(buildSocketAddress()).sync().channel();
    } catch (Exception e) {
        partialShutdown();
        throw e;
    }
    boundAddress = (InetSocketAddress) channel.localAddress();

    if (logger.isLoggable(Level.INFO)) {
        logger.info(String.format("Ratpack started for http://%s:%s", getBindHost(), getBindPort()));
    }
}

From source file: org.restcomm.imscf.common.lwcomm.service.impl.LwCommListener.java

License: Open Source License

void start() {
    // TODO: handle AS-resolved pools
    int receiveTransportThreads = config.getReceiveTransportPoolConfig().getMaxThreads();
    int receiveWorkerThreads = config.getReceiveWorkerPoolConfig().getMaxThreads();

    // Netty 4.0 does not handle parallel UDP servers well.
    // See: https://github.com/netty/netty/issues/1706
    // We differentiate two listener modes:
    //
    // a) NIO
    // ------
    // In this case a simple NioEventLoopGroup is used, given
    // "receiveTransportThreads" number of threads. The user listener is called
    // on a separate executor with "receiveWorkerThreads" number of threads.
    // This does not work well with Netty 4.0 but is still implemented here
    // in case it is fixed in a future Netty version (the problem is that,
    // regardless of the nThreads parameter passed to NioEventLoopGroup, only
    // one thread is used for incoming packet processing...).
    //
    // b) EPOLL
    // --------
    // The solution offered in the link above:
    // 1) Use the epoll transport (Linux only)
    // 2) Turn on SO_REUSEPORT option
    // 3) Create multiple datagram channels bound to the same port
    // According to http://stackoverflow.com/questions/3261965/so-reuseport-on-linux,
    // SO_REUSEPORT only works on Linux with kernel 3.9+ or RHEL 6.5+; if epoll
    // is not available, we fall back to NIO mode.

    LwCommServiceImpl.LOGGER.info(
            "Starting LwCommListener. Receive transport threads: {}, receive worker threads: {}",
            receiveTransportThreads, receiveWorkerThreads);
    Configuration.ListenerMode listenerMode = config.getListenerMode();
    LwCommServiceImpl.LOGGER.info("Listener mode configured is {}", config.getListenerMode());
    if (listenerMode == Configuration.ListenerMode.EPOLL && !Epoll.isAvailable()) {
        LwCommServiceImpl.LOGGER
                .warn("Listener mode EPOLL is configured but is not available. Falling back to NIO mode.");
        listenerMode = Configuration.ListenerMode.NIO;
    }
    Bootstrap b = new Bootstrap();

    b.group(receiveTransportGroup);
    if (receiveTransportGroup instanceof EpollEventLoopGroup) {
        b.channel(EpollDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        b.option(EpollChannelOption.SO_REUSEPORT, true);
    } else {
        b.channel(NioDatagramChannel.class);
        b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    }
    channels = new HashSet<Channel>();
    b.handler(new ChannelInitializer<DatagramChannel>() {
        @Override
        protected void initChannel(DatagramChannel channel) throws Exception {
            LwCommServiceImpl.LOGGER.info("Initializing channel: '{}'", channel);
            channels.add(channel);
            channel.pipeline().addLast(channelHandler);
        }
    });
    // TODO FIXME: hardcoded 256K limit for receive buffer!
    b.option(ChannelOption.SO_RCVBUF, 256 * 1024);
    b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(10240));

    InetAddress host = null;
    int port = config.getLocalNode().getPort();
    try {
        host = InetAddress.getByName(config.getLocalNode().getHost());
        ChannelFuture future;
        if (listenerMode == ListenerMode.NIO) {
            future = b.bind(host, port).sync();
            if (!future.isSuccess()) {
                LwCommServiceImpl.LOGGER.error("Error while binding socket to {}:{}", host, port);
            } else {
                LwCommServiceImpl.LOGGER.info("Binding socket to {}:{} - SUCCESS", host, port);
            }
        } else {
            for (int i = 0; i < receiveTransportThreads; i++) {
                future = b.bind(host, port).sync();
                if (!future.isSuccess()) {
                    LwCommServiceImpl.LOGGER.error("Error while binding {} of {} socket to {}:{}", i + 1,
                            receiveTransportThreads, host, port);
                } else {
                    LwCommServiceImpl.LOGGER.info("Successfully bound socket {} of {} to {}:{} - ", i + 1,
                            receiveTransportThreads, host, port, future.channel());
                }
            }
        }
    } catch (Exception e) {
        LwCommServiceImpl.LOGGER.error("Error while binding socket or getting local node address.", e);
    }
}
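The epoll branch above binds several datagram channels to one port via SO_REUSEPORT, as described in the comments. Here is a condensed sketch of just that pattern, assuming Linux with Netty's native epoll transport on the classpath; the port and the no-op handler are placeholders:

import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.epoll.EpollChannelOption;
import io.netty.channel.epoll.EpollDatagramChannel;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.socket.DatagramChannel;
import io.netty.channel.socket.DatagramPacket;

public class ReusePortUdpSketch {
    public static void main(String[] args) throws Exception {
        int threads = Runtime.getRuntime().availableProcessors();
        Bootstrap b = new Bootstrap().group(new EpollEventLoopGroup(threads))
                .channel(EpollDatagramChannel.class)
                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                // SO_REUSEPORT lets several channels bind the same UDP port, so the
                // kernel spreads incoming datagrams across them.
                .option(EpollChannelOption.SO_REUSEPORT, true)
                // ChannelInitializer is sharable, so one Bootstrap can bind repeatedly.
                .handler(new ChannelInitializer<DatagramChannel>() {
                    @Override
                    protected void initChannel(DatagramChannel ch) {
                        ch.pipeline().addLast(new SimpleChannelInboundHandler<DatagramPacket>() {
                            @Override
                            protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) {
                                // process the datagram here
                            }
                        });
                    }
                });
        // One bind per event loop thread, all on the same port.
        for (int i = 0; i < threads; i++) {
            b.bind(9999).sync();
        }
    }
}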

From source file: org.restnext.server.Server.java

License: Apache License

/**
 * Starts the server.
 */
public void start() {
    loadAndPrintBanner();
    try {
        InetSocketAddress bindAddress = serverInitializer.getBindAddress();

        ServerBootstrap serverBootstrap = Epoll.isAvailable() ? newEpoolServerBootstrap()
                : newNioServerBootstrap();

        ChannelFuture channelFuture = serverBootstrap
                //.handler(new LoggingHandler(LogLevel.INFO))
                .childHandler(serverInitializer)
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT).bind(bindAddress).sync();

        LOGGER.info("Application is running at - {}://{}",
                serverInitializer.isSslConfigured() ? "https" : "http", bindAddress);

        channelFuture.channel().closeFuture().sync();

    } catch (Exception e) {
        throw new ServerException("Could not start the server", e);
    } finally {
        stop();
    }
}

From source file: org.scache.network.client.TransportClientFactory.java

License: Apache License

/** Create a completely new {@link TransportClient} to the remote address. */
private TransportClient createClient(InetSocketAddress address) throws IOException {
    logger.debug("Creating new connection to " + address);

    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(workerGroup).channel(socketChannelClass)
            // Disable Nagle's Algorithm since we don't want packets to wait
            .option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs())
            .option(ChannelOption.ALLOCATOR, pooledAllocator);

    final AtomicReference<TransportClient> clientRef = new AtomicReference<>();
    final AtomicReference<Channel> channelRef = new AtomicReference<>();

    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) {
            TransportChannelHandler clientHandler = context.initializePipeline(ch);
            clientRef.set(clientHandler.getClient());
            channelRef.set(ch);
        }
    });

    // Connect to the remote server
    long preConnect = System.nanoTime();
    ChannelFuture cf = bootstrap.connect(address);
    if (!cf.awaitUninterruptibly(conf.connectionTimeoutMs())) {
        throw new IOException(
                String.format("Connecting to %s timed out (%s ms)", address, conf.connectionTimeoutMs()));
    } else if (cf.cause() != null) {
        throw new IOException(String.format("Failed to connect to %s", address), cf.cause());
    }

    TransportClient client = clientRef.get();
    Channel channel = channelRef.get();
    assert client != null : "Channel future completed successfully with null client";

    // Execute any client bootstraps synchronously before marking the Client as successful.
    long preBootstrap = System.nanoTime();
    logger.debug("Connection to {} successful, running bootstraps...", address);
    try {
        for (TransportClientBootstrap clientBootstrap : clientBootstraps) {
            clientBootstrap.doBootstrap(client, channel);
        }
    } catch (Exception e) { // catch non-RuntimeExceptions too as bootstrap may be written in Scala
        long bootstrapTimeMs = (System.nanoTime() - preBootstrap) / 1000000;
        logger.error("Exception while bootstrapping client after " + bootstrapTimeMs + " ms", e);
        client.close();
        throw Throwables.propagate(e);
    }
    long postBootstrap = System.nanoTime();

    logger.info("Successfully created connection to {} after {} ms ({} ms spent in bootstraps)", address,
            (postBootstrap - preConnect) / 1000000, (postBootstrap - preBootstrap) / 1000000);

    return client;
}
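Note the two-step failure check on the ChannelFuture above: awaitUninterruptibly bounds the wait with the configured timeout, while cf.cause() distinguishes a connect that failed outright from one that merely timed out.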

From source file: org.springframework.web.reactive.function.client.WebClientDataBufferAllocatingTests.java

License: Apache License

private ReactorClientHttpConnector initConnector() {
    if (bufferFactory instanceof NettyDataBufferFactory) {
        ByteBufAllocator allocator = ((NettyDataBufferFactory) bufferFactory).getByteBufAllocator();
        return new ReactorClientHttpConnector(this.factory, httpClient -> httpClient
                .tcpConfiguration(tcpClient -> tcpClient.option(ChannelOption.ALLOCATOR, allocator)));
    } else {
        return new ReactorClientHttpConnector();
    }
}

From source file: org.starnub.starnubserver.servers.starbound.TCPProxyServer.java

License: Open Source License

public void start() {
    ServerBootstrap starNubInbound_TCP_Socket = new ServerBootstrap();
    serverChannel = starNubInbound_TCP_Socket.group(connectionBossGroup, connectionWorkerGroup)
            .channel(channelClass).option(ChannelOption.SO_BACKLOG, serverBacklog)
            .childOption(ChannelOption.TCP_NODELAY, noDelay).childOption(ChannelOption.ALLOCATOR, socketBuffer)
            .childHandler(new TCPProxyServerInitializer(starboundAddress, starboundPort)).bind(starnubPort)
            .channel();
}

From source file: org.starnub.starnubserver.servers.starbound.TCPProxyServerPacketDecoder.java

License: Open Source License

private boolean openServerConnection(ChannelHandlerContext ctx) {
    new StarNubEvent("StarNub_Socket_Connection_Attempt_Server", ctx);
    Bootstrap starNubMainOutboundSocket = new Bootstrap();
    starNubMainOutboundSocket.group(ctx.channel().eventLoop()).channel(ctx.channel().getClass())
            .option(ChannelOption.TCP_NODELAY, StarboundServer.getInstance().getTcpProxyServer().isNoDelay())
            .option(ChannelOption.ALLOCATOR,
                    StarboundServer.getInstance().getTcpProxyServer().getSocketBuffer())
            .handler(new TCPProxyServerPacketDecoder(Packet.Direction.TO_STARBOUND_SERVER, ctx));
    ChannelFuture f = starNubMainOutboundSocket.connect(starboundAddress, starboundPort);
    destinationCTX = f.channel().pipeline().firstContext();
    if (destinationCTX != null) {
        new StarNubEvent("StarNub_Socket_Connection_Success_Server", ctx);
        StarNubProxyConnection.ConnectionProcessingType connectionProcessingType = null;
        if ((boolean) StarNub.getConfiguration().getNestedValue("advanced_settings", "packet_decoding")) {
            connectionProcessingType = StarNubProxyConnection.ConnectionProcessingType.PLAYER;
        } else {
            connectionProcessingType = StarNubProxyConnection.ConnectionProcessingType.PLAYER_NO_DECODING;
        }
        starNubProxyConnection = new StarNubProxyConnection(connectionProcessingType, ctx, destinationCTX);
        return true;
    } else {
        new StarNubEvent("StarNub_Socket_Connection_Failure_Cannot_Connect_Starbound", ctx);
        return false;
    }
}

From source file: org.starnub.utilities.connectivity.client.TCPClient.java

License: Open Source License

public ChannelFuture connect(String ipAddress, int port, ChannelInitializer<SocketChannel> channelInitializer) {
    Bootstrap b = new Bootstrap().group(workerGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .option(ChannelOption.SO_KEEPALIVE, true).handler(channelInitializer);
    return b.connect(ipAddress, port);
}