Example usage for io.netty.buffer PooledByteBufAllocator DEFAULT

Introduction

This page collects example usages of io.netty.buffer.PooledByteBufAllocator.DEFAULT from open-source projects.

Prototype

public static final PooledByteBufAllocator DEFAULT
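
A minimal standalone sketch of using the shared pooled allocator directly (the buffer size and contents are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;

import java.nio.charset.StandardCharsets;

public final class PooledAllocatorSketch {
    public static void main(String[] args) {
        // DEFAULT is a shared, process-wide pooled allocator.
        ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(256);
        try {
            buf.writeBytes("hello".getBytes(StandardCharsets.UTF_8));
            System.out.println("readable bytes: " + buf.readableBytes());
        } finally {
            // Pooled buffers are reference-counted; releasing returns the
            // memory to the pool instead of leaving it to the garbage collector.
            buf.release();
        }
    }
}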


Usage

From source file: org.lanternpowered.pingy.Pingy.java

License: MIT License

/**
 * Starts the pingy server.
 *
 * @throws IOException
 */
public void start() throws IOException {
    boolean epoll = false;

    if (this.properties.isUseEpollWhenAvailable()) {
        if (Epoll.isAvailable()) {
            debugInfo("Epoll is available");
            epoll = true;
        } else {
            debugWarn(
                    "Epoll is unavailable (the following exception merely explains why it is unavailable; "
                            + "functionality is not affected)");
            //noinspection ThrowableResultOfMethodCallIgnored
            debug(() -> Epoll.unavailabilityCause().printStackTrace());
        }
    }

    final ServerBootstrap bootstrap = new ServerBootstrap();
    final EventLoopGroup group = epoll ? new EpollEventLoopGroup() : new NioEventLoopGroup();

    final ChannelFuture future = bootstrap.group(group)
            .channel(epoll ? EpollServerSocketChannel.class : NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new ReadTimeoutHandler(20))
                            .addLast(new PingyLegacyHandler(properties)).addLast(new PingyFramingHandler())
                            .addLast(new PingyHandler(properties));
                }
            }).childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.TCP_NODELAY, true).childOption(ChannelOption.SO_KEEPALIVE, true)
            .bind(getBindAddress(this.properties.getIp(), this.properties.getPort()));
    final Channel channel = future.awaitUninterruptibly().channel();
    if (!channel.isActive()) {
        final Throwable cause = future.cause();
        if (cause instanceof BindException) {
            throw (BindException) cause;
        }
        throw new RuntimeException("Failed to bind to address", cause);
    }
    info("Successfully bound to: " + channel.localAddress());
}
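
The same allocator option applies on the client side, where Bootstrap has no childOption() and the allocator is set with option() directly. A minimal sketch (host, port, and the empty initializer are illustrative):

EventLoopGroup group = new NioEventLoopGroup();
Bootstrap bootstrap = new Bootstrap()
        .group(group)
        .channel(NioSocketChannel.class)
        .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
        .option(ChannelOption.TCP_NODELAY, true)
        .handler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) {
                // application handlers would be added here
            }
        });
// Connect asynchronously; awaitUninterruptibly() mirrors the server example above.
ChannelFuture future = bootstrap.connect("127.0.0.1", 25565).awaitUninterruptibly();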

From source file: org.lanternpowered.server.network.NetworkManager.java

License: MIT License

@Override
protected ChannelFuture init0(SocketAddress address, boolean epoll) {
    this.bootstrap = new ServerBootstrap();
    // Take advantage of fast-thread-local threads; these are
    // also provided by the default thread factory.
    final ThreadFactory threadFactory = ThreadHelper
            .newFastThreadLocalThreadFactory(() -> "netty-" + threadCounter.getAndIncrement());
    this.bossGroup = createEventLoopGroup(epoll, threadFactory);
    this.workerGroup = createEventLoopGroup(epoll, threadFactory);
    this.socketAddress = address;
    return this.bootstrap.group(this.bossGroup, this.workerGroup).channel(getServerSocketChannelClass(epoll))
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    final ChannelPipeline pipeline = ch.pipeline();
                    final NetworkSession networkSession = new NetworkSession(ch, server, NetworkManager.this);
                    final CodecContext codecContext = new SimpleCodecContext(
                            new LanternByteBufferAllocator(ch.alloc()), ch, networkSession);
                    pipeline.addLast(new ReadTimeoutHandler(NetworkSession.READ_TIMEOUT_SECONDS))
                            .addLast(NetworkSession.LEGACY_PING, new LegacyProtocolHandler(networkSession))
                            .addLast(NetworkSession.ENCRYPTION, NoopHandler.INSTANCE)
                            .addLast(NetworkSession.FRAMING, new MessageFramingHandler())
                            .addLast(NetworkSession.COMPRESSION, NoopHandler.INSTANCE)
                            .addLast(NetworkSession.CODECS, new MessageCodecHandler(codecContext))
                            .addLast(NetworkSession.PROCESSOR, new MessageProcessorHandler(codecContext))
                            .addLast(NetworkSession.HANDLER, networkSession);
                }
            }).childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.TCP_NODELAY, true).childOption(ChannelOption.SO_KEEPALIVE, true)
            .bind(address);
}
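
Because ChannelOption.ALLOCATOR is set per child channel, ch.alloc() in the initializer above resolves to PooledByteBufAllocator.DEFAULT. Handlers can reach the same allocator through their context; a sketch (the handler itself is hypothetical):

public class GreetingHandler extends ChannelInboundHandlerAdapter {
    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // ctx.alloc() returns the channel's configured allocator, i.e. the
        // pooled allocator installed via ChannelOption.ALLOCATOR above.
        ByteBuf greeting = ctx.alloc().buffer();
        greeting.writeBytes("ready\n".getBytes(StandardCharsets.US_ASCII));
        ctx.writeAndFlush(greeting); // Netty releases the buffer once it is written
    }
}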

From source file: org.neo4j.bolt.transport.NettyServer.java

License: Open Source License

@Override
public void start() throws Throwable {
    // The boss thread accepts new incoming connections and chooses a worker thread to be responsible for the
    // IO of the new connection. We expect new connections to be (comparatively) rare, so we allocate a single
    // thread for this.
    // TODO: In fact, dedicating a whole thread to sit and spin in #select for new connections may be a waste of
    // time, we could have the same event loop groups for both handling new connections and for handling events
    // on existing connections
    bossGroup = new NioEventLoopGroup(1, tf);

    // These threads handle live channels. Each thread has a set of channels it is responsible for, and it will
    // continuously run a #select() loop to react to new events on these channels.
    selectorGroup = new NioEventLoopGroup(NUM_SELECTOR_THREADS, tf);

    // Bootstrap the various ports and protocols we want to handle

    for (ProtocolInitializer initializer : bootstrappers) {
        try {
            new ServerBootstrap().option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .group(bossGroup, selectorGroup).channel(NioServerSocketChannel.class)
                    .childHandler(initializer.channelInitializer())
                    .bind(initializer.address().getHost(), initializer.address().getPort()).sync();
        } catch (Throwable e) {
            // We catch Throwable here because Netty uses clever tricks to have method signatures that look like they do not
            // throw checked exceptions, but they actually do. The compiler won't let us catch them explicitly because in theory
            // they shouldn't be possible, so we have to catch Throwable and do our own checks to grab them.

            // In any case, we do all this just in order to throw a more helpful bind exception, which comes right now:
            if (e instanceof BindException) {
                throw new PortBindException(initializer.address(), (BindException) e);
            }
            throw e;
        }
    }
}

From source file: org.neo4j.bolt.transport.SocketTransport.java

License: Open Source License

@Override
public ChannelInitializer<SocketChannel> channelInitializer() {
    return new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            ch.config().setAllocator(PooledByteBufAllocator.DEFAULT);
            ch.pipeline().addLast(new TransportSelectionHandler(sslCtx, logging, protocolVersions));
        }
    };
}
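
Setting the allocator from inside initChannel, as above, is a per-channel equivalent of the childOption(...) style used by the other examples on this page; the two forms below should behave the same (the bootstrap variable is illustrative):

// Per accepted channel, inside initChannel(...):
ch.config().setAllocator(PooledByteBufAllocator.DEFAULT);

// Equivalent bootstrap-level setting applied to every accepted channel:
serverBootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);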

From source file: org.ogcs.netty.impl.TcpProtocolServer.java

License: Apache License

@Override
public ServerBootstrap createBootstrap() {
    bootstrap = new ServerBootstrap();
    if (isEpollAvailable) {
        this.parentGroup = new EpollEventLoopGroup();
        this.childGroup = new EpollEventLoopGroup();
        bootstrap.channel(EpollServerSocketChannel.class);
    } else {
        this.parentGroup = new NioEventLoopGroup();
        this.childGroup = new NioEventLoopGroup();
        bootstrap.channel(NioServerSocketChannel.class);
    }
    bootstrap.group(parentGroup(), childGroup());
    bootstrap.childHandler(newChannelInitializer());

    bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    //        bootstrap.option(ChannelOption.SO_REUSEADDR, true);
    //        bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    return bootstrap;
}
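
Note the distinction this example draws: on a ServerBootstrap, option(...) configures the single parent channel that accepts connections, while childOption(...) configures every accepted child channel. Setting the pooled allocator on both, as done here, covers both sides.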

From source file: org.onlab.netty.NettyMessaging.java

License: Apache License

private void startAcceptingConnections() throws InterruptedException {
    ServerBootstrap b = new ServerBootstrap();
    b.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
    b.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
    b.option(ChannelOption.SO_RCVBUF, 1048576);
    b.option(ChannelOption.TCP_NODELAY, true);
    b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup);
    b.channel(serverChannelClass);
    if (enableNettyTLS) {
        b.childHandler(new SSLServerCommunicationChannelInitializer());
    } else {
        b.childHandler(new OnosCommunicationChannelInitializer());
    }
    b.option(ChannelOption.SO_BACKLOG, 128);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    b.bind(localEp.port()).sync().addListener(future -> {
        if (future.isSuccess()) {
            log.info("{} accepting incoming connections on port {}", localEp.host(), localEp.port());
        } else {
            log.warn("{} failed to bind to port {}", localEp.host(), localEp.port(), future.cause());
        }
    });
}
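
A side note on the watermark options above: Netty 4.1 deprecates the separate high/low water mark options in favor of a single value. A sketch of the newer form, assuming Netty 4.1 or later:

// Netty 4.1+: one option carries both watermarks (low, high).
b.option(ChannelOption.WRITE_BUFFER_WATER_MARK,
        new WriteBufferWaterMark(8 * 1024, 32 * 1024));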

From source file: org.onlab.netty.NettyMessagingService.java

License: Apache License

private void startAcceptingConnections() throws InterruptedException {
    ServerBootstrap b = new ServerBootstrap();
    b.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
    b.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
    b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup).channel(serverChannelClass)
            .childHandler(new OnosCommunicationChannelInitializer()).option(ChannelOption.SO_BACKLOG, 128)
            .childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    b.bind(localEp.port()).sync();
}

From source file: org.onosproject.lisp.ctl.impl.LispControllerBootstrap.java

License: Apache License

/**
 * Configures bootstrap options to tune the communication performance.
 *
 * @param bootstrap LISP server bootstrap
 */
private void configBootstrapOptions(Bootstrap bootstrap) {
    bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
}
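
Unlike the ServerBootstrap examples elsewhere on this page, this controller configures a plain Bootstrap, which creates a single channel; option(...) therefore applies to that channel directly, and Bootstrap has no childOption(...) at all.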

From source file: org.onosproject.ovsdb.controller.impl.Controller.java

License: Apache License

/**
 * Accepts incoming connections.
 */
private void startAcceptingConnections() throws InterruptedException {
    ServerBootstrap b = new ServerBootstrap();

    b.group(bossGroup, workerGroup).channel(serverChannelClass)
            .childHandler(new OnosCommunicationChannelInitializer());
    b.option(ChannelOption.SO_BACKLOG, 128);
    b.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
    b.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
    b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);
    b.bind(ovsdbPort).sync();
}

From source file: org.onosproject.store.cluster.messaging.impl.NettyMessagingManager.java

License: Apache License

private void startAcceptingConnections() throws InterruptedException {
    ServerBootstrap b = new ServerBootstrap();
    b.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
    b.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
    b.option(ChannelOption.SO_RCVBUF, 1048576);
    b.option(ChannelOption.TCP_NODELAY, true);
    b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup);
    b.channel(serverChannelClass);
    if (enableNettyTls) {
        b.childHandler(new SslServerCommunicationChannelInitializer());
    } else {
        b.childHandler(new OnosCommunicationChannelInitializer());
    }
    b.option(ChannelOption.SO_BACKLOG, 128);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    b.bind(localEp.port()).sync().addListener(future -> {
        if (future.isSuccess()) {
            log.info("{} accepting incoming connections on port {}", localEp.host(), localEp.port());
        } else {
            log.warn("{} failed to bind to port {}", localEp.host(), localEp.port(), future.cause());
        }
    });
}
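
A closing note that applies to all of the examples above: pooled buffers are reference-counted, and a leaked buffer keeps its memory from ever returning to the pool. While testing, Netty's leak detector can be raised from its default sampling level; a sketch:

import io.netty.util.ResourceLeakDetector;

// PARANOID tracks every allocation; useful in tests, too slow for production.
ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);

// The same can be set with a JVM flag at startup:
// -Dio.netty.leakDetection.level=paranoid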