Example usage for io.netty.channel ChannelOption ALLOCATOR

List of usage examples for io.netty.channel ChannelOption ALLOCATOR

Introduction

On this page you can find usage examples for ChannelOption.ALLOCATOR from the io.netty.channel package.

Prototype

public static final ChannelOption<ByteBufAllocator> ALLOCATOR

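A minimal sketch of typical usage (assuming Netty 4.x; the AllocatorOptionSketch class, the port, and the choice of PooledByteBufAllocator.DEFAULT are illustrative, not taken from the examples below). On a ServerBootstrap, option(...) configures the server (parent) channel, while childOption(...) configures every accepted connection; handlers can then reach the configured allocator through ctx.alloc() or ch.alloc().

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class AllocatorOptionSketch {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                    // allocator for the server (parent) channel itself
                    .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    // allocator for every accepted (child) channel
                    .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // handlers added here obtain buffers via ch.alloc(),
                            // which now returns the configured allocator
                        }
                    });
            b.bind(8080).sync().channel().closeFuture().sync(); // port is arbitrary
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}

The examples below show the same idea in real projects: pooled vs. unpooled allocators, direct vs. heap buffers, and parent vs. child channel configuration.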

Usage

From source file: org.hornetq.tests.integration.transports.netty.NettyConnectorWithHTTPUpgradeTest.java

License: Apache License

private void startWebServer(int port) throws InterruptedException {
    bossGroup = new NioEventLoopGroup();
    workerGroup = new NioEventLoopGroup();
    ServerBootstrap b = new ServerBootstrap();
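    // every accepted (child) channel will allocate buffers via HornetQ's PartialPooledByteBufAllocator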
    b.childOption(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);
    b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    // create a HTTP server
                    ChannelPipeline p = ch.pipeline();
                    p.addLast("decoder", new HttpRequestDecoder());
                    p.addLast("encoder", new HttpResponseEncoder());
                    p.addLast("http-upgrade-handler", new SimpleChannelInboundHandler<Object>() {
                        // handle HTTP GET + Upgrade with a handshake specific to HornetQ remoting.
                        @Override
                        protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
                            if (msg instanceof HttpRequest) {
                                HttpRequest request = (HttpRequest) msg;

                                for (Map.Entry<String, String> entry : request.headers()) {
                                    System.out.println(entry);
                                }
                                String upgrade = request.headers().get(UPGRADE);
                                String secretKey = request.headers().get(SEC_HORNETQ_REMOTING_KEY);

                                FullHttpResponse response = new DefaultFullHttpResponse(HTTP_1_1,
                                        SWITCHING_PROTOCOLS);
                                response.headers().set(UPGRADE, upgrade);
                                response.headers().set(SEC_HORNETQ_REMOTING_ACCEPT,
                                        createExpectedResponse(MAGIC_NUMBER, secretKey));
                                ctx.writeAndFlush(response);

                                // when the handshake is successful, the HTTP handlers are removed
                                ctx.pipeline().remove("decoder");
                                ctx.pipeline().remove("encoder");
                                ctx.pipeline().remove(this);

                                System.out.println("HTTP handshake sent, transferring channel");
                                // transfer the control of the channel to the Netty Acceptor
                                NettyAcceptor acceptor = (NettyAcceptor) server.getRemotingService()
                                        .getAcceptor(acceptorName);
                                acceptor.transfer(ctx.channel());
                                // at this point, the HTTP upgrade process is over and the Netty acceptor behaves like a regular one.
                            }
                        }
                    });
                }

                @Override
                public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
                    ctx.flush();
                }
            });
    b.bind(port).sync();
}

From source file: org.jupiter.transport.netty.NettyAcceptor.java

License: Apache License

protected void setOptions() {
    JConfig parent = configGroup().parent(); // parent options
    JConfig child = configGroup().child(); // child options

    setIoRatio(parent.getOption(JOption.IO_RATIO), child.getOption(JOption.IO_RATIO));

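    // Choose the allocator once from config: pooled vs. unpooled, direct vs. heap buffers.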
    boolean direct = child.getOption(JOption.PREFER_DIRECT);
    if (child.getOption(JOption.USE_POOLED_ALLOCATOR)) {
        if (direct) {
            allocator = new PooledByteBufAllocator(PlatformDependent.directBufferPreferred());
        } else {
            allocator = new PooledByteBufAllocator(false);
        }
    } else {
        if (direct) {
            allocator = new UnpooledByteBufAllocator(PlatformDependent.directBufferPreferred());
        } else {
            allocator = new UnpooledByteBufAllocator(false);
        }
    }
    bootstrap.childOption(ChannelOption.ALLOCATOR, allocator).childOption(ChannelOption.MESSAGE_SIZE_ESTIMATOR,
            JMessageSizeEstimator.DEFAULT);
}

From source file: org.jupiter.transport.netty.NettyConnector.java

License: Apache License

protected void setOptions() {
    JConfig child = config();

    setIoRatio(child.getOption(JOption.IO_RATIO));

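    // Same allocator selection as in NettyAcceptor: pooled vs. unpooled, direct vs. heap buffers.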
    boolean direct = child.getOption(JOption.PREFER_DIRECT);
    if (child.getOption(JOption.USE_POOLED_ALLOCATOR)) {
        if (direct) {
            allocator = new PooledByteBufAllocator(PlatformDependent.directBufferPreferred());
        } else {
            allocator = new PooledByteBufAllocator(false);
        }
    } else {
        if (direct) {
            allocator = new UnpooledByteBufAllocator(PlatformDependent.directBufferPreferred());
        } else {
            allocator = new UnpooledByteBufAllocator(false);
        }
    }
    bootstrap.option(ChannelOption.ALLOCATOR, allocator).option(ChannelOption.MESSAGE_SIZE_ESTIMATOR,
            JMessageSizeEstimator.DEFAULT);
}

From source file: org.lanternpowered.pingy.Pingy.java

License: MIT License

/**
 * Starts the pingy server.
 *
 * @throws IOException if the server fails to bind to the configured address
 */
public void start() throws IOException {
    boolean epoll = false;

    if (this.properties.isUseEpollWhenAvailable()) {
        if (Epoll.isAvailable()) {
            debugInfo("Epoll is available");
            epoll = true;
        } else {
            debugWarn("Epoll is unavailable (the stack trace below only explains why; "
                    + "functionality is not affected):");
            //noinspection ThrowableResultOfMethodCallIgnored
            debug(() -> Epoll.unavailabilityCause().printStackTrace());
        }
    }

    final ServerBootstrap bootstrap = new ServerBootstrap();
    final EventLoopGroup group = epoll ? new EpollEventLoopGroup() : new NioEventLoopGroup();

    final ChannelFuture future = bootstrap.group(group)
            .channel(epoll ? EpollServerSocketChannel.class : NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new ReadTimeoutHandler(20))
                            .addLast(new PingyLegacyHandler(properties)).addLast(new PingyFramingHandler())
                            .addLast(new PingyHandler(properties));
                }
            }).childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.TCP_NODELAY, true).childOption(ChannelOption.SO_KEEPALIVE, true)
            .bind(getBindAddress(this.properties.getIp(), this.properties.getPort()));
    final Channel channel = future.awaitUninterruptibly().channel();
    if (!channel.isActive()) {
        final Throwable cause = future.cause();
        if (cause instanceof BindException) {
            throw (BindException) cause;
        }
        throw new RuntimeException("Failed to bind to address", cause);
    }
    info("Successfully bound to: " + channel.localAddress());
}

From source file: org.lanternpowered.server.network.NetworkManager.java

License: MIT License

@Override
protected ChannelFuture init0(SocketAddress address, boolean epoll) {
    this.bootstrap = new ServerBootstrap();
    // Take advantage of the fast thread local threads,
    // this is also provided by the default thread factory
    final ThreadFactory threadFactory = ThreadHelper
            .newFastThreadLocalThreadFactory(() -> "netty-" + threadCounter.getAndIncrement());
    this.bossGroup = createEventLoopGroup(epoll, threadFactory);
    this.workerGroup = createEventLoopGroup(epoll, threadFactory);
    this.socketAddress = address;
    return this.bootstrap.group(this.bossGroup, this.workerGroup).channel(getServerSocketChannelClass(epoll))
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    final ChannelPipeline pipeline = ch.pipeline();
                    final NetworkSession networkSession = new NetworkSession(ch, server, NetworkManager.this);
                    final CodecContext codecContext = new SimpleCodecContext(
                            new LanternByteBufferAllocator(ch.alloc()), ch, networkSession);
                    pipeline.addLast(new ReadTimeoutHandler(NetworkSession.READ_TIMEOUT_SECONDS))
                            .addLast(NetworkSession.LEGACY_PING, new LegacyProtocolHandler(networkSession))
                            .addLast(NetworkSession.ENCRYPTION, NoopHandler.INSTANCE)
                            .addLast(NetworkSession.FRAMING, new MessageFramingHandler())
                            .addLast(NetworkSession.COMPRESSION, NoopHandler.INSTANCE)
                            .addLast(NetworkSession.CODECS, new MessageCodecHandler(codecContext))
                            .addLast(NetworkSession.PROCESSOR, new MessageProcessorHandler(codecContext))
                            .addLast(NetworkSession.HANDLER, networkSession);
                }
            }).childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.TCP_NODELAY, true).childOption(ChannelOption.SO_KEEPALIVE, true)
            .bind(address);
}

From source file: org.neo4j.bolt.transport.NettyServer.java

License: Open Source License

@Override
public void start() throws Throwable {
    // The boss thread accepts new incoming connections and chooses a worker thread to be responsible for the
    // IO of the new connection. We expect new connections to be (comparatively) rare, so we allocate a single
    // thread for this.
    // TODO: In fact, dedicating a whole thread to sit and spin in #select for new connections may be a waste of
    // time, we could have the same event loop groups for both handling new connections and for handling events
    // on existing connections
    bossGroup = new NioEventLoopGroup(1, tf);

    // These threads handle live channels. Each thread has a set of channels it is responsible for, and it will
    // continuously run a #select() loop to react to new events on these channels.
    selectorGroup = new NioEventLoopGroup(NUM_SELECTOR_THREADS, tf);

    // Bootstrap the various ports and protocols we want to handle

    for (ProtocolInitializer initializer : bootstrappers) {
        try {
            new ServerBootstrap().option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .group(bossGroup, selectorGroup).channel(NioServerSocketChannel.class)
                    .childHandler(initializer.channelInitializer())
                    .bind(initializer.address().getHost(), initializer.address().getPort()).sync();
        } catch (Throwable e) {
            // We catch throwable here because netty uses clever tricks to have method signatures that look like they do not
            // throw checked exceptions, but they actually do. The compiler won't let us catch them explicitly because in theory
            // they shouldn't be possible, so we have to catch Throwable and do our own checks to grab them

            // All of this just so we can throw a more helpful bind exception, which happens next.
            if (e instanceof BindException) {
                throw new PortBindException(initializer.address(), (BindException) e);
            }
            throw e;
        }
    }
}

From source file: org.ogcs.netty.impl.TcpProtocolServer.java

License: Apache License

@Override
public ServerBootstrap createBootstrap() {
    bootstrap = new ServerBootstrap();
    if (isEpollAvailable) {
        this.parentGroup = new EpollEventLoopGroup();
        this.childGroup = new EpollEventLoopGroup();
        bootstrap.channel(EpollServerSocketChannel.class);
    } else {
        this.parentGroup = new NioEventLoopGroup();
        this.childGroup = new NioEventLoopGroup();
        bootstrap.channel(NioServerSocketChannel.class);
    }
    bootstrap.group(parentGroup(), childGroup());
    bootstrap.childHandler(newChannelInitializer());

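    // Pooled allocator for both the server channel (option) and accepted child channels (childOption).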
    bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    //        bootstrap.option(ChannelOption.SO_REUSEADDR, true);
    //        bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    return bootstrap;
}

From source file: org.onlab.netty.NettyMessaging.java

License: Apache License

private void startAcceptingConnections() throws InterruptedException {
    ServerBootstrap b = new ServerBootstrap();
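    // Outbound buffer bounds: the channel reports unwritable once more than 32 KiB is
    // queued, and writable again when the backlog drains below 8 KiB.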
    b.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
    b.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
    b.option(ChannelOption.SO_RCVBUF, 1048576);
    b.option(ChannelOption.TCP_NODELAY, true);
    b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup);
    b.channel(serverChannelClass);
    if (enableNettyTLS) {
        b.childHandler(new SSLServerCommunicationChannelInitializer());
    } else {
        b.childHandler(new OnosCommunicationChannelInitializer());
    }
    b.option(ChannelOption.SO_BACKLOG, 128);
    b.childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    b.bind(localEp.port()).sync().addListener(future -> {
        if (future.isSuccess()) {
            log.info("{} accepting incoming connections on port {}", localEp.host(), localEp.port());
        } else {
            log.warn("{} failed to bind to port {}", localEp.host(), localEp.port(), future.cause());
        }
    });
}

From source file: org.onlab.netty.NettyMessagingService.java

License: Apache License

private void startAcceptingConnections() throws InterruptedException {
    ServerBootstrap b = new ServerBootstrap();
    b.option(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 32 * 1024);
    b.option(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 8 * 1024);
    b.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    b.group(serverGroup, clientGroup).channel(serverChannelClass)
            .childHandler(new OnosCommunicationChannelInitializer()).option(ChannelOption.SO_BACKLOG, 128)
            .childOption(ChannelOption.SO_KEEPALIVE, true);

    // Bind and start to accept incoming connections.
    b.bind(localEp.port()).sync();
}

From source file: org.onosproject.lisp.ctl.impl.LispControllerBootstrap.java

License: Apache License

/**
 * Configures bootstrap options to tune the communication performance.
 *
 * @param bootstrap LISP server bootstrap
 */
private void configBootstrapOptions(Bootstrap bootstrap) {
    bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
}