Example usage for io.netty.buffer PooledByteBufAllocator PooledByteBufAllocator

Introduction

This page lists usage examples for the io.netty.buffer PooledByteBufAllocator constructor, collected from open-source projects.

Prototype

@SuppressWarnings("deprecation")
public PooledByteBufAllocator(boolean preferDirect)
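
A minimal, self-contained sketch of this constructor in isolation (not taken from any of the projects below): the boolean argument selects whether the allocator prefers direct (off-heap) buffers, and buffers obtained from it must be released back to the pool. The class name AllocatorDemo is illustrative.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import java.nio.charset.StandardCharsets;

public class AllocatorDemo {
    public static void main(String[] args) {
        // true: prefer direct (off-heap) buffers; false: prefer heap buffers
        PooledByteBufAllocator allocator = new PooledByteBufAllocator(true);
        ByteBuf buf = allocator.buffer(256); // pooled buffer, 256-byte initial capacity
        try {
            buf.writeBytes("hello".getBytes(StandardCharsets.UTF_8));
            System.out.println("readable bytes: " + buf.readableBytes());
        } finally {
            buf.release(); // return the buffer to the pool
        }
    }
}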


Usage

From source file:com.aerofs.baseline.http.HttpServer.java

License:Apache License

public HttpServer(String serverIdentifier, HttpConfiguration http, Timer timer,
        ApplicationHandler applicationHandler) {
    PooledByteBufAllocator allocator = new PooledByteBufAllocator(http.isDirectMemoryBacked());

    this.serverIdentifier = serverIdentifier;
    this.host = http.getHost();
    this.port = http.getPort();
    this.requestProcessingExecutor = Executors.newFixedThreadPool(http.getNumRequestProcessingThreads(),
            Threads.newNamedThreadFactory(serverIdentifier + "-requests-%d"));
    this.bossEventLoopGroup = new NioEventLoopGroup(com.aerofs.baseline.http.Constants.DEFAULT_NUM_BOSS_THREADS,
            Threads.newNamedThreadFactory(serverIdentifier + "-nio-boss-%d"));
    this.workEventLoopGroup = new NioEventLoopGroup(http.getNumNetworkThreads(),
            Threads.newNamedThreadFactory(serverIdentifier + "-nio-work-%d"));
    this.bootstrap = new ServerBootstrap();
    this.bootstrap.group(bossEventLoopGroup, workEventLoopGroup).channel(NioServerSocketChannel.class)
            .childHandler(new AcceptedChannelInitializer(http, applicationHandler,
                    URI.create(String.format("http://%s:%s/", host, port)), requestProcessingExecutor, timer))
            .option(ALLOCATOR, allocator).option(SO_BACKLOG, http.getMaxAcceptQueueSize())
            .childOption(AUTO_READ, false).childOption(ALLOCATOR, allocator);
}

From source file:com.ancun.netty.httpserver.HttpServer.java

License:Apache License

private void setBootstrapOptions(ServerBootstrap bootstrap) {
    bootstrap.option(ChannelOption.SO_KEEPALIVE, useKeepAlive());
    bootstrap.option(ChannelOption.SO_BACKLOG, 1024);
    bootstrap.option(ChannelOption.TCP_NODELAY, useTcpNoDelay());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, serverSettings.isKeepAlive());
    bootstrap.option(ChannelOption.SO_REUSEADDR, shouldReuseAddress());
    bootstrap.option(ChannelOption.SO_LINGER, getSoLinger());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, getConnectTimeoutMillis());
    bootstrap.option(ChannelOption.SO_RCVBUF, getReceiveBufferSize());
    bootstrap.option(ChannelOption.MAX_MESSAGES_PER_READ, Integer.MAX_VALUE);

    bootstrap.childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
    bootstrap.childOption(ChannelOption.MAX_MESSAGES_PER_READ, Integer.MAX_VALUE);
    bootstrap.childOption(ChannelOption.SO_RCVBUF, getReceiveBufferSize());
    bootstrap.childOption(ChannelOption.SO_REUSEADDR, shouldReuseAddress());
}

From source file:com.github.lburgazzoli.quickfixj.transport.netty.NettySocketInitiator.java

License:Apache License

@Override
public void connect() {
    try {
        m_boot = new Bootstrap();
        m_boot.group(new NioEventLoopGroup());
        m_boot.channel(NioSocketChannel.class);
        m_boot.option(ChannelOption.SO_KEEPALIVE, true);
        m_boot.option(ChannelOption.TCP_NODELAY, true);
        m_boot.option(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
        m_boot.handler(new NettyChannelInitializer(this, getHelper(), FIXSessionType.INITIATOR));

        getHelper().getSession().getSessionID();
        String host = getHelper().getSettings().getString("SocketConnectHost");
        int port = getHelper().getSettings().getInt("SocketConnectPort");

        m_boot.remoteAddress(new InetSocketAddress(host, port));

        if (!isRunning()) {
            setRunning(true);
            doConnect();
        }
    } catch (Exception e) {
        LOGGER.warn("Exception", e);
        setRunning(false);
    }
}

From source file:com.github.thinker0.mesos.MesosHealthCheckerServer.java

License:Apache License

/**
 * Initializes the server, socket, and channel.
 *
 * @param loopGroup          The event loop group.
 * @param serverChannelClass The socket channel class.
 * @throws InterruptedException on interruption.
 */
private void start(final EventLoopGroup loopGroup, final Class<? extends ServerChannel> serverChannelClass)
        throws InterruptedException {

    try {
        final InetSocketAddress inet = new InetSocketAddress(port);

        final ServerBootstrap b = new ServerBootstrap();
        b.option(ChannelOption.SO_BACKLOG, 1024);
        b.option(ChannelOption.SO_REUSEADDR, true);
        b.option(ChannelOption.SO_LINGER, 0);
        b.group(loopGroup).channel(serverChannelClass).childHandler(new WebServerInitializer());
        b.childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true));
        b.childOption(ChannelOption.SO_REUSEADDR, true);

        // final Channel ch = b.bind(inet).sync().channel();
        // ch.closeFuture().sync();
        b.bind(inet);
        logger.info("Listening for Admin on {}", inet);
    } catch (Throwable t) {
        logger.warn(t.getMessage(), t);
    } finally {
        // loopGroup.shutdownGracefully().sync();
    }
}

From source file:com.mastfrog.scamper.NettyBootstrapModule.java

License:Open Source License

@Override
protected void configure() {
    bind(DataEncoding.class).toInstance(encoding);
    switch (encoding) {
    case BSON:
        BsonFactory bsonFactory = new BsonFactory();
        bind(BsonFactory.class).toInstance(bsonFactory);
        ObjectMapper mapper = new ObjectMapper(bsonFactory);
        for (com.fasterxml.jackson.databind.Module m : jacksonModules) {
            mapper.registerModule(m);
        }
        bind(ObjectMapper.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_CODEC)).toInstance(mapper);
        bind(Codec.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_CODEC)).to(CodecImpl.class);
        break;
    case JSON:
        ObjectMapper mapper2 = new ObjectMapper();
        for (com.fasterxml.jackson.databind.Module m : jacksonModules) {
            mapper2.registerModule(m);
        }
        bind(ObjectMapper.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_CODEC)).toInstance(mapper2);
        bind(Codec.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_CODEC)).to(CodecImpl.class);
        break;
    case JAVA_SERIALIZATION:
        bind(Codec.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_CODEC)).to(SerializationCodec.class);
        break;
    default:
        throw new AssertionError(encoding);
    }
    bind(ChannelHandlerAdapter.class).annotatedWith(Names.named("dispatcher")).to(adap);
    bind(ChannelHandlerAdapter.class).annotatedWith(Names.named("processor")).to(InboundMessageDecoder.class);
    bind(EventLoopGroup.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_BOSS_THREADS))
            .toInstance(new NioEventLoopGroup(bossThreads));
    bind(EventLoopGroup.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_WORKER_THREADS))
            .toInstance(workerThreads == -1 ? new NioEventLoopGroup() : new NioEventLoopGroup(workerThreads));
    bind(ByteBufAllocator.class).annotatedWith(Names.named(GUICE_BINDING_SCAMPER_CODEC))
            .toInstance(new PooledByteBufAllocator(true));
    bind(ShutdownHandler.class).asEagerSingleton();
}

From source file:darks.grid.network.GridMessageClient.java

License:Apache License

@Override
public boolean initialize() {
    super.initialize();
    try {
        log.info("Initialize message client.");
        NetworkConfig config = GridRuntime.config().getNetworkConfig();
        int workerNum = config.getClientWorkerThreadNumber();
        workerGroup = new NioEventLoopGroup(workerNum, ThreadUtils.getThreadFactory());
        bootstrap = new Bootstrap();
        bootstrap.group(workerGroup).channel(NioSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, config.isTcpNodelay())
                .option(ChannelOption.SO_KEEPALIVE, config.isTcpKeepAlive())
                .option(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true))
                //                   .option(ChannelOption.SO_TIMEOUT, config.getRecvTimeout())
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeout())
                .option(ChannelOption.SO_SNDBUF, config.getTcpSendBufferSize())
                .option(ChannelOption.SO_RCVBUF, config.getTcpRecvBufferSize());
        bootstrap.handler(newChannelHandler());
        return true;
    } catch (Exception e) {
        log.error(e.getMessage(), e);
        return false;
    }
}

From source file:darks.grid.network.GridMessageServer.java

License:Apache License

@Override
public boolean initialize() {
    try {
        NetworkConfig config = GridRuntime.config().getNetworkConfig();
        int bossNum = Runtime.getRuntime().availableProcessors() * config.getServerBossThreadDelta();
        int workerNum = config.getServerWorkerThreadNumber();
        bossGroup = new NioEventLoopGroup(bossNum);
        workerGroup = new NioEventLoopGroup(workerNum);
        super.initialize();
        bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, config.isTcpNodelay())
                .option(ChannelOption.SO_KEEPALIVE, config.isTcpKeepAlive())
                .option(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true))
                //            .option(ChannelOption.SO_TIMEOUT, config.getRecvTimeout())
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeout())
                .option(ChannelOption.SO_REUSEADDR, config.isTcpReuseAddr())
                //            .option(ChannelOption.SO_BACKLOG, config.getTcpBacklog())
                .option(ChannelOption.SO_SNDBUF, config.getTcpSendBufferSize())
                .option(ChannelOption.SO_RCVBUF, config.getTcpRecvBufferSize())
                .childOption(ChannelOption.TCP_NODELAY, config.isTcpNodelay())
                .childOption(ChannelOption.SO_KEEPALIVE, config.isTcpKeepAlive())
                .childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true))
                .childOption(ChannelOption.SO_TIMEOUT, config.getRecvTimeout())
                .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.getConnectTimeout())
                .childOption(ChannelOption.SO_REUSEADDR, config.isTcpReuseAddr())
                .childOption(ChannelOption.SO_BACKLOG, config.getTcpBacklog())
                .childOption(ChannelOption.SO_SNDBUF, config.getTcpSendBufferSize())
                .childOption(ChannelOption.SO_RCVBUF, config.getTcpRecvBufferSize());
        bootstrap.childHandler(newChannelHandler());
        return true;
    } catch (Exception e) {
        log.error(e.getMessage(), e);
        return false;
    }
}

From source file:github.com.cp149.netty.server.NettyappenderServer.java

License:Apache License

public void run() throws Exception {

    try {
        bootstrap = new ServerBootstrap();
        final EventExecutorGroup executor = new DefaultEventExecutorGroup(4);

        bootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .childOption(ChannelOption.SO_KEEPALIVE, true).childOption(ChannelOption.TCP_NODELAY, true)
                .childOption(ChannelOption.SO_RCVBUF, 65535).childOption(ChannelOption.SO_SNDBUF, 2048)
                .childOption(ChannelOption.SO_REUSEADDR, true) //reuse address
                .childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(false)) // false: prefer heap buffers
                .childHandler(new ChannelInitializer<SocketChannel>() {

                    @Override
                    public void initChannel(SocketChannel ch) throws Exception {
                        //                     ch.pipeline().addLast( new MarshallingEncoder(MarshallUtil.createProvider()));
                        //                     ch.pipeline().addLast(new CompatibleObjectDecoder());
                        //                     ch.pipeline().addLast(new ObjectEncoder(),
                        //                                  new ObjectDecoder(ClassResolvers.cacheDisabled(null)));
                        ch.pipeline().addLast(new MarshallingDecoder(MarshallUtil.createUnProvider()));

                        ch.pipeline().addLast(executor, new NettyappenderServerHandler());
                        //
                    }
                });

        bootstrap.bind(port).sync();

    } finally {

    }
    // bootstrap = new ServerBootstrap(new
    // NioServerSocketChannelFactory(Executors.newFixedThreadPool(4),
    // Executors.newFixedThreadPool(4)));
    // final ExecutionHandler executionHandler = new ExecutionHandler(new
    // OrderedMemoryAwareThreadPoolExecutor(4, 1024 * 1024 * 300, 1024 *
    // 1024 * 300 * 2));
    // bootstrap.setOption("tcpNoDelay", true);
    // bootstrap.setOption("keepAlive", true);
    // // bootstrap.setOption("writeBufferHighWaterMark", 100 * 64 * 1024);
    // // bootstrap.setOption("sendBufferSize", 1048576);
    // bootstrap.setOption("receiveBufferSize", 1048576*10 );
    //
    // // Set up the pipeline factory.
    // bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
    // public ChannelPipeline getPipeline() throws Exception {
    // return Channels.pipeline(executionHandler, new
    // MarshallingDecoder(createProvider(createMarshallerFactory(),
    // createMarshallingConfig())),
    // new NettyappenderServerHandler());
    // }
    // });

    // // Bind and start to accept incoming connections.
    // bootstrap.bind(new InetSocketAddress(port));
    //       LoggerFactory.getLogger(this.getClass()).info("start server at" +
    //             port);
}

From source file:io.urmia.api.Main.java

License:Open Source License

public static void main(String[] args) throws Exception {

    boolean autoRegister = ArgumentParseUtil.isAutoRegister(args);
    String zkURL = ArgumentParseUtil.getZooKeeperURL(args);

    log.info("starting with zk at: {}, auto register: {}", zkURL, autoRegister);

    ns = new ZkNamingServiceImpl(zkURL, AZ);

    Optional<ServiceInstance<NodeType>> meOpt = ns.whoAmI(NodeType.MDS, autoRegister);

    if (!meOpt.isPresent()) {
        System.err.println("unable to find my instance. use auto register or cli-admin to add my node");
        System.exit(1);
        return;
    }

    Runtime.getRuntime().addShutdownHook(new ShutdownHook());

    uuid = new RandomUuidImpl();

    //Properties properties = parseArguments(args);
    EventLoopGroup bossGroup = new NioEventLoopGroup(/*1*/);
    EventLoopGroup workerGroup = new NioEventLoopGroup();

    try {
        me = meOpt.get();

        log.info("my service instance: {}", me);

        BoneCPConfig boneCPConfig = getBoneCPConfig(ns);

        ns.register(me);

        int port = me.getPort();

        JdbcPool pool = new JdbcPool.BoneCPJdbcPool(boneCPConfig);

        MetadataRepository repository = new PsqlMetadataRepositoryImpl(pool);

        mds = new DefaultMetadataServiceImpl(repository);

        // http://normanmaurer.me/presentations/2014-facebook-eng-netty/slides.html#14.0
        ServerBootstrap b = new ServerBootstrap();

        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .childOption(ChannelOption.AUTO_READ, false)
                .childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(true))
                .childHandler(new HttpUploadServerInitializer());

        Channel ch = b.bind(port).sync().channel();
        log.info("object metadata API server (MDS) at port: {}", port);

        ch.closeFuture().sync();
    } finally {
        ns.deregister(me);
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:net.NettyEngine4.ServerServiceImpl.java

License:Apache License

/**
 * Run the Netty server.
 *
 * NioEventLoopGroup is used for NIO Selector based Channels. Two
 * NioEventLoopGroups are used: the first handles the accept of new
 * connections and the second serves their I/O.
 *
 * option() is for the NioServerSocketChannel that accepts incoming
 * connections; childOption() is for the Channels accepted by the parent
 * ServerChannel, which is NioServerSocketChannel in this case.
 *
 * ServerBootstrap sets up the parent channel; the parent channel accepts
 * incoming connections, and each accepted connection is handled as a
 * child channel.
 */
@Override
public void run() throws Exception {
    NioEventLoopGroup EventLoopGroupLister = new NioEventLoopGroup(0x1,
            new PriorityThreadFactory("@+main_reactor+@", Thread.NORM_PRIORITY));
    NioEventLoopGroup IOEventLoopGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors() + 1,
            new PriorityThreadFactory("@+sub_reactor+@", Thread.NORM_PRIORITY));
    ServerBootstrap serverBootstrap = new ServerBootstrap();
    try {
        serverBootstrap.group(EventLoopGroupLister, IOEventLoopGroup).channel(NioServerSocketChannel.class)
                .childOption(ChannelOption.TCP_NODELAY, true).childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.SO_REUSEADDR, true) // reuse address
                .childOption(ChannelOption.ALLOCATOR, new PooledByteBufAllocator(false)) // false: prefer heap buffers
                .childOption(ChannelOption.SO_RCVBUF, 1048576).childOption(ChannelOption.SO_SNDBUF, 1048576)
                .childHandler(new ServerChannelInitializer()); // serves requests for each accepted Channel
        // Bind and start to accept incoming connections.
        ChannelFuture channelFuture = serverBootstrap
                .bind(new InetSocketAddress(Config.DEFAULT_VALUE.SERVER_VALUE.gameserverPort)).sync();
        if (LOGGER.isDebugEnabled())
            LOGGER.debug("server??:" + Config.DEFAULT_VALUE.SERVER_VALUE.gameserverPort);
        // Wait until the server socket is closed.
        // In this server, this does not happen, but you can do that to gracefully
        // shut down your server.
        channelFuture.channel().closeFuture().sync();
    } finally {
        // Shut down all event loops to terminate all threads.
        EventLoopGroupLister.shutdownGracefully();
        IOEventLoopGroup.shutdownGracefully();
    }
}
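
The server examples above share a common pattern: create one pooled allocator and install it with ChannelOption.ALLOCATOR, using option() for the accepting parent channel and childOption() for each accepted connection. Below is a minimal, self-contained sketch of that pattern under illustrative assumptions; the class name PooledAllocatorServer and port 8080 are not from any of the projects above.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class PooledAllocatorServer {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup boss = new NioEventLoopGroup(1);   // accepts connections
        EventLoopGroup workers = new NioEventLoopGroup(); // serves their I/O
        try {
            PooledByteBufAllocator allocator = new PooledByteBufAllocator(true);
            ServerBootstrap b = new ServerBootstrap();
            b.group(boss, workers)
                    .channel(NioServerSocketChannel.class)
                    // option(): applies to the parent (accepting) channel
                    .option(ChannelOption.SO_BACKLOG, 128)
                    // childOption(): applies to each accepted child channel
                    .childOption(ChannelOption.ALLOCATOR, allocator)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // application handlers would be added to ch.pipeline() here
                        }
                    });
            b.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            workers.shutdownGracefully();
        }
    }
}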