Example usage for io.netty.channel.ChannelOption.ALLOCATOR

Introduction

This page collects usage examples for io.netty.channel.ChannelOption.ALLOCATOR, the channel option that sets the ByteBufAllocator a channel uses to allocate its buffers.

Prototype

ChannelOption<ByteBufAllocator> ALLOCATOR

The constant is declared in io.netty.channel.ChannelOption.

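Before the examples, here is a minimal sketch of the two common ways to apply the option, assuming Netty 4 and the pooled allocator. The class name AllocatorOptionSketch and the empty initializers are placeholders, not code from any of the projects below: a client Bootstrap sets ALLOCATOR with option(), while a ServerBootstrap usually sets it with childOption() so that accepted child channels pick it up.

import io.netty.bootstrap.Bootstrap;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class AllocatorOptionSketch {

    public static void main(String[] args) {
        final NioEventLoopGroup group = new NioEventLoopGroup();

        // Client side: option() applies the allocator to the connecting channel itself.
        final Bootstrap client = new Bootstrap().group(group).channel(NioSocketChannel.class)
                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(final SocketChannel ch) {
                        // add protocol handlers here
                    }
                });

        // Server side: childOption() applies the allocator to every accepted child channel.
        final ServerBootstrap server = new ServerBootstrap().group(group).channel(NioServerSocketChannel.class)
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(final SocketChannel ch) {
                        // add protocol handlers here
                    }
                });

        // The bootstraps are now ready to connect()/bind(); shut down the group when done.
        group.shutdownGracefully();
    }
}

Several of the examples below make the allocator switchable, for instance choosing between PooledByteBufAllocator.DEFAULT and UnpooledByteBufAllocator.DEFAULT behind a USE_POOLED_BUFFERS flag, or using a heap-preferring new PooledByteBufAllocator() for the UDP servers, which is a common way to compare pooled and unpooled buffer behaviour.
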
Usage

From source file: com.relayrides.pushy.apns.ApnsConnection.java

License: Open Source License

/**
 * Asynchronously connects to the APNs gateway in this connection's environment. The outcome of the connection
 * attempt is reported via this connection's listener.
 *
 * @see ApnsConnectionListener#handleConnectionSuccess(ApnsConnection)
 * @see ApnsConnectionListener#handleConnectionFailure(ApnsConnection, Throwable)
 */
@SuppressWarnings("deprecation")
public synchronized void connect() {

    final ApnsConnection<T> apnsConnection = this;

    if (this.connectFuture != null) {
        throw new IllegalStateException(String.format("%s already started a connection attempt.", this.name));
    }

    final Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(this.eventLoopGroup);
    bootstrap.channel(NioSocketChannel.class);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
    bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    // TODO Remove this when Netty 5 is available
    bootstrap.option(ChannelOption.AUTO_CLOSE, false);

    bootstrap.handler(new ChannelInitializer<SocketChannel>() {

        @Override
        protected void initChannel(final SocketChannel channel) {
            final ChannelPipeline pipeline = channel.pipeline();

            final SSLEngine sslEngine = apnsConnection.sslContext.createSSLEngine();
            sslEngine.setUseClientMode(true);

            pipeline.addLast("ssl", new SslHandler(sslEngine));
            pipeline.addLast("decoder", new RejectedNotificationDecoder());
            pipeline.addLast("encoder", new ApnsPushNotificationEncoder());
            pipeline.addLast("handler", new ApnsConnectionHandler(apnsConnection));
        }
    });

    log.debug("{} beginning connection process.", apnsConnection.name);
    this.connectFuture = bootstrap.connect(this.environment.getApnsGatewayHost(),
            this.environment.getApnsGatewayPort());
    this.connectFuture.addListener(new GenericFutureListener<ChannelFuture>() {

        public void operationComplete(final ChannelFuture connectFuture) {
            if (connectFuture.isSuccess()) {
                log.debug("{} connected; waiting for TLS handshake.", apnsConnection.name);

                final SslHandler sslHandler = connectFuture.channel().pipeline().get(SslHandler.class);

                try {
                    sslHandler.handshakeFuture().addListener(new GenericFutureListener<Future<Channel>>() {

                        public void operationComplete(final Future<Channel> handshakeFuture) {
                            if (handshakeFuture.isSuccess()) {
                                log.debug("{} successfully completed TLS handshake.", apnsConnection.name);

                                apnsConnection.handshakeCompleted = true;
                                apnsConnection.listener.handleConnectionSuccess(apnsConnection);
                            } else {
                                log.debug("{} failed to complete TLS handshake with APNs gateway.",
                                        apnsConnection.name, handshakeFuture.cause());

                                connectFuture.channel().close();
                                apnsConnection.listener.handleConnectionFailure(apnsConnection,
                                        handshakeFuture.cause());
                            }
                        }
                    });
                } catch (NullPointerException e) {
                    log.warn("{} failed to get SSL handler and could not wait for a TLS handshake.",
                            apnsConnection.name);

                    connectFuture.channel().close();
                    apnsConnection.listener.handleConnectionFailure(apnsConnection, e);
                }
            } else {
                log.debug("{} failed to connect to APNs gateway.", apnsConnection.name, connectFuture.cause());

                apnsConnection.listener.handleConnectionFailure(apnsConnection, connectFuture.cause());
            }
        }
    });
}

From source file: com.Server.java

License: Apache License

/**
 * This is the passive mode server.
 * @param host server IP address
 * @param port passive port number
 * @param mode transfer mode
 * @param fileName name of the file to transfer
 */
public Server(String host, int port, int mode, String fileName) {
    InetSocketAddress inSocketAddress = new InetSocketAddress(host, port);
    try {
        ServerBootstrap bootStrap = new ServerBootstrap();
        bootStrap.group(bossGroup, workerGroup);
        bootStrap.channel(NioServerSocketChannel.class);
        bootStrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 1);
        bootStrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 1);
        bootStrap.childHandler(new MyChannelInitializer(this, mode, fileName));
        bootStrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        bootStrap.bind(inSocketAddress);
        System.out.println("Server started");
    } catch (Exception eg) {
        eg.printStackTrace();
        stop();
    }
}

From source file: com.spotify.netty4.handler.codec.zmtp.benchmarks.CustomReqRepBenchmark.java

License: Apache License

public static void main(final String... args) throws InterruptedException {
    final ProgressMeter meter = new ProgressMeter("requests", true);

    // Codecs
    final ZMTPCodec serverCodec = ZMTPCodec.builder().socketType(ROUTER).encoder(ReplyEncoder.class)
            .decoder(RequestDecoder.class).build();

    final ZMTPCodec clientCodec = ZMTPCodec.builder().socketType(DEALER).encoder(RequestEncoder.class)
            .decoder(ReplyDecoder.class).build();

    // Server
    final Executor serverExecutor = new ForkJoinPool(1, ForkJoinPool.defaultForkJoinWorkerThreadFactory,
            UNCAUGHT_EXCEPTION_HANDLER, true);
    final ServerBootstrap serverBootstrap = new ServerBootstrap()
            .group(new NioEventLoopGroup(1), new NioEventLoopGroup()).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.MESSAGE_SIZE_ESTIMATOR, ByteBufSizeEstimator.INSTANCE)
            .childHandler(new ChannelInitializer<NioSocketChannel>() {
                @Override
                protected void initChannel(final NioSocketChannel ch) throws Exception {
                    ch.pipeline().addLast(serverCodec);
                    ch.pipeline().addLast(new ServerRequestTracker());
                    ch.pipeline().addLast(new ServerHandler(serverExecutor));
                }
            });
    final Channel server = serverBootstrap.bind(ANY_PORT).awaitUninterruptibly().channel();

    // Client
    final Executor clientExecutor = new ForkJoinPool(1, ForkJoinPool.defaultForkJoinWorkerThreadFactory,
            UNCAUGHT_EXCEPTION_HANDLER, true);
    final SocketAddress address = server.localAddress();
    final Bootstrap clientBootstrap = new Bootstrap().group(new NioEventLoopGroup())
            .channel(NioSocketChannel.class).option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, ByteBufSizeEstimator.INSTANCE)
            .handler(new ChannelInitializer<NioSocketChannel>() {
                @Override
                protected void initChannel(final NioSocketChannel ch) throws Exception {
                    ch.pipeline().addLast(clientCodec);
                    ch.pipeline().addLast(new ClientRequestTracker());
                    ch.pipeline().addLast(new ClientHandler(meter, clientExecutor));
                }
            });
    final Channel client = clientBootstrap.connect(address).awaitUninterruptibly().channel();

    // Run until client is closed
    client.closeFuture().await();
}

From source file: com.spotify.netty4.handler.codec.zmtp.benchmarks.ReqRepBenchmark.java

License: Apache License

public static void main(final String... args) throws InterruptedException {
    final ProgressMeter meter = new ProgressMeter("requests");

    // Server
    final ServerBootstrap serverBootstrap = new ServerBootstrap()
            .group(new NioEventLoopGroup(1), new NioEventLoopGroup()).channel(NioServerSocketChannel.class)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childHandler(new ChannelInitializer<NioSocketChannel>() {
                @Override
                protected void initChannel(final NioSocketChannel ch) throws Exception {
                    ch.pipeline().addLast(ZMTPCodec.builder().socketType(ROUTER).build());
                    ch.pipeline().addLast(new ServerHandler());
                }
            });
    final Channel server = serverBootstrap.bind(ANY_PORT).awaitUninterruptibly().channel();

    // Client
    final SocketAddress address = server.localAddress();
    final Bootstrap clientBootstrap = new Bootstrap().group(new NioEventLoopGroup())
            .channel(NioSocketChannel.class).option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .handler(new ChannelInitializer<NioSocketChannel>() {
                @Override
                protected void initChannel(final NioSocketChannel ch) throws Exception {
                    ch.pipeline().addLast(ZMTPCodec.builder().socketType(DEALER).build());
                    ch.pipeline().addLast(new ClientHandler(meter));
                }
            });
    final Channel client = clientBootstrap.connect(address).awaitUninterruptibly().channel();

    // Run until client is closed
    client.closeFuture().await();
}

From source file: com.squareup.okhttp.benchmarks.NettyHttpClient.java

License: Apache License

@Override
public void prepare(final Benchmark benchmark) {
    this.concurrencyLevel = benchmark.concurrencyLevel;
    this.targetBacklog = benchmark.targetBacklog;

    ChannelInitializer<SocketChannel> channelInitializer = new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel channel) throws Exception {
            ChannelPipeline pipeline = channel.pipeline();

            if (benchmark.tls) {
                SSLContext sslContext = SslContextBuilder.localhost();
                SSLEngine engine = sslContext.createSSLEngine();
                engine.setUseClientMode(true);
                pipeline.addLast("ssl", new SslHandler(engine));
            }

            pipeline.addLast("codec", new HttpClientCodec());
            pipeline.addLast("inflater", new HttpContentDecompressor());
            pipeline.addLast("handler", new HttpChannel(channel));
        }
    };

    bootstrap = new Bootstrap();
    bootstrap.group(new NioEventLoopGroup(concurrencyLevel))
            .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT).channel(NioSocketChannel.class)
            .handler(channelInitializer);
}

From source file: com.streamsets.pipeline.lib.udp.UDPConsumingServer.java

License: Apache License

@Override
protected Bootstrap bootstrap(boolean enableEpoll) {
    if (enableEpoll) {
        // Direct buffers required for Epoll
        enableDirectBuffers();
        EventLoopGroup group = new EpollEventLoopGroup(numThreads);
        groups.add(group);
        return new Bootstrap().group(group).channel(EpollDatagramChannel.class).handler(handler)
                .option(EpollChannelOption.SO_REUSEADDR, true).option(EpollChannelOption.SO_REUSEPORT, true)
                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    } else {
        disableDirectBuffers();
        EventLoopGroup group = new NioEventLoopGroup(numThreads);
        groups.add(group);
        return new Bootstrap().group(group).channel(NioDatagramChannel.class).handler(handler)
                .option(ChannelOption.SO_REUSEADDR, true)
                .option(ChannelOption.ALLOCATOR, new PooledByteBufAllocator()); // use on-heap buffers
    }
}

From source file: com.streamsets.pipeline.stage.origin.udp.UDPConsumingServer.java

License: Apache License

public void listen() throws Exception {
    group = new NioEventLoopGroup();
    for (SocketAddress address : addresses) {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioDatagramChannel.class).handler(new UDPConsumingServerHandler(udpConsumer))
                .option(ChannelOption.SO_REUSEADDR, true)
                .option(ChannelOption.ALLOCATOR, new PooledByteBufAllocator()); // use on-heap buffers
        LOG.info("Starting server on address {}", address);
        ChannelFuture channelFuture = b.bind(address).sync();
        channelFutures.add(channelFuture);
    }
}

From source file: com.tc.websocket.server.DominoWebSocketServer.java

License: Apache License

@Override
public synchronized void start() {

    if (this.isOn())
        return;

    try {
        try {

            ServerBootstrap boot = new ServerBootstrap();

            if (cfg.isNativeTransport()) {
                boot.channel(EpollServerSocketChannel.class);
            } else {
                boot.channel(NioServerSocketChannel.class);
            }

            boot.group(bossGroup, workerGroup).option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK,
                            new WriteBufferWaterMark(8 * 1024, 32 * 1024))
                    .childOption(ChannelOption.SO_SNDBUF, cfg.getSendBuffer())
                    .childOption(ChannelOption.SO_RCVBUF, cfg.getReceiveBuffer())
                    .childOption(ChannelOption.TCP_NODELAY, true).childHandler(init);

            //bind to the main port
            boot.bind(cfg.getPort()).sync();

            //bind to the redirect port (e.g. 80 will redirect to 443)
            for (Integer port : cfg.getRedirectPorts()) {
                ChannelFuture f = boot.bind(port);
                f.sync();
            }

            this.on.set(true);

            String version = BundleUtils.getVersion(Activator.bundle);
            String name = BundleUtils.getName(Activator.bundle);
            cfg.print(name + " ready and listening on " + cfg.getPort() + " running version " + version);

        } finally {

        }
    } catch (Exception e) {
        LOG.log(Level.SEVERE, null, e);
    }
}

From source file: com.tesora.dve.db.mysql.MysqlConnection.java

License: Open Source License

@Override
public void connect(String url, final String userid, final String password, final long clientCapabilities)
        throws PEException {
    PEUrl peUrl = PEUrl.fromUrlString(url);

    if (!"mysql".equalsIgnoreCase(peUrl.getSubProtocol()))
        throw new PEException(MysqlConnection.class.getSimpleName()
                + " does not support the sub protocol of url \"" + url + "\"");

    InetSocketAddress serverAddress = new InetSocketAddress(peUrl.getHost(), peUrl.getPort());
    final MyBackendDecoder.CharsetDecodeHelper charsetHelper = new CharsetDecodeHelper();
    mysqlBootstrap = new Bootstrap();
    mysqlBootstrap // .group(inboundChannel.eventLoop())
            .channel(NioSocketChannel.class).group(connectionEventGroup)
            .option(ChannelOption.ALLOCATOR,
                    USE_POOLED_BUFFERS ? PooledByteBufAllocator.DEFAULT : UnpooledByteBufAllocator.DEFAULT)
            .handler(new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(Channel ch) throws Exception {
                    authHandler = new MysqlClientAuthenticationHandler(new UserCredentials(userid, password),
                            clientCapabilities, NativeCharSetCatalogImpl.getDefaultCharSetCatalog(DBType.MYSQL),
                            targetCharset);

                    if (PACKET_LOGGER)
                        ch.pipeline().addLast(new LoggingHandler(LogLevel.INFO));

                    ch.pipeline().addLast(authHandler)
                            .addLast(MyBackendDecoder.class.getSimpleName(),
                                    new MyBackendDecoder(site.getName(), charsetHelper))
                            .addLast(StreamValve.class.getSimpleName(), new StreamValve())
                            .addLast(MysqlCommandSenderHandler.class.getSimpleName(),
                                    new MysqlCommandSenderHandler(site));
                }
            });

    pendingConnection = mysqlBootstrap.connect(serverAddress);

    //      System.out.println("Create connection: Allocated " + totalConnections.incrementAndGet() + ", active " + activeConnections.incrementAndGet());
    channel = pendingConnection.channel();
    physicalID = UUID.randomUUID();

    //TODO: this was moved from execute to connect, which avoids blocking on the execute to be netty friendly, but causes lag on checkout.  Should make this event driven like everything else. -sgossard
    syncToServerConnect();
    authHandler.assertAuthenticated();
    //      channel.closeFuture().addListener(new GenericFutureListener<Future<Void>>() {
    //         @Override
    //         public void operationComplete(Future<Void> future) throws Exception {
    //            System.out.println(channel + " is closed");
    //         }
    //      });
}

From source file: com.tesora.dve.db.mysql.portal.MySqlPortal.java

License: Open Source License

public MySqlPortal(Properties props) throws PEException {
    // This is the port the Portal is going to listen on -
    // default to Mysql's port
    int port = Singletons.require(HostService.class).getPortalPort(props);
    Singletons.replace(MySqlPortalService.class, this);

    InternalLoggerFactory.setDefaultFactory(new Log4JLoggerFactory());

    int max_concurrent = KnownVariables.MAX_CONCURRENT.getValue(null).intValue();

    //TODO: parse/plan is on this pool, which is probably ok, especially with blocking calls to catalog.  Check for responses that can be done by backend netty threads and avoid two context shifts.

    clientExecutorService = new PEThreadPoolExecutor(max_concurrent, max_concurrent, 30L, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), //The thread count limits concurrency here.  Using a bounded queue here would block netty threads (very bad), so this pool could be overrun by 'bad' clients that pipeline. -sgossard
            new PEDefaultThreadFactory("msp-client"));
    clientExecutorService.allowCoreThreadTimeOut(true);

    bossGroup = new NioEventLoopGroup(1, new PEDefaultThreadFactory("msp-boss"));

    //fixes the number of Netty NIO threads to the number of available CPUs.
    workerGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors(),
            new PEDefaultThreadFactory("netty-worker"));

    ServerBootstrap b = new ServerBootstrap();
    try {
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {

                    @Override
                    protected void initChannel(SocketChannel ch) throws Exception {
                        if (PACKET_LOGGER)
                            ch.pipeline().addFirst(new LoggingHandler(LogLevel.INFO));
                        ch.pipeline()
                                .addLast(MSPProtocolDecoder.class.getSimpleName(),
                                        new MSPProtocolDecoder(
                                                MSPProtocolDecoder.MyDecoderState.READ_CLIENT_AUTH))
                                .addLast(new MSPAuthenticateHandlerV10())
                                .addLast(MSPCommandHandler.class.getSimpleName(),
                                        new MSPCommandHandler(clientExecutorService))
                                .addLast(ConnectionHandlerAdapter.getInstance());
                    }
                })

                .childOption(ChannelOption.ALLOCATOR,
                        USE_POOLED_BUFFERS ? PooledByteBufAllocator.DEFAULT : UnpooledByteBufAllocator.DEFAULT)
                .childOption(ChannelOption.TCP_NODELAY, true).childOption(ChannelOption.SO_KEEPALIVE, true)
                .bind(port).sync();

        logger.info("DVE Server bound to port " + port);

    } catch (Exception e) {
        throw new PEException("Failed to bind DVE server to port " + port + " - " + e.getMessage(), e);
    }
}