Example usage for io.netty.channel.nio NioEventLoopGroup NioEventLoopGroup

Introduction

On this page you can find example usages of the io.netty.channel.nio NioEventLoopGroup constructor.

Prototype

public NioEventLoopGroup(int nThreads, ThreadFactory threadFactory)
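
As a quick orientation before the collected examples, here is a minimal, self-contained sketch of the constructor in use (the class name and thread-pool name are illustrative, not taken from any of the projects below): create the group with a thread count and a naming ThreadFactory, run work on it, then release its threads with a graceful shutdown.

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.DefaultThreadFactory;

public class NioEventLoopGroupSketch {
    public static void main(String[] args) {
        // A thread count of 0 lets Netty pick its default (2 * available processors).
        NioEventLoopGroup group = new NioEventLoopGroup(0, new DefaultThreadFactory("example-nio"));
        try {
            // Run a task on one of the group's event loop threads and wait for it to finish.
            group.submit(() -> System.out.println("Running on " + Thread.currentThread().getName()))
                    .syncUninterruptibly();
        } finally {
            // Release the event loop threads once the group is no longer needed.
            group.shutdownGracefully().syncUninterruptibly();
        }
    }
}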

Usage

From source file:com.eucalyptus.cassandra.common.CassandraPersistence.java

License:Open Source License

private static Session buildSession(final List<ServiceConfiguration> configurations, final String keyspace) {
    final Cluster cluster = Cluster.builder()
            .addContactPointsWithPorts(configurations.stream().map(ServiceConfiguration::getSocketAddress)
                    .collect(Collectors.toList()))
            //.withLoadBalancingPolicy(  ) //TODO topology aware policy?
            .withNettyOptions(new NettyOptions() {
                @Override
                public EventLoopGroup eventLoopGroup(final ThreadFactory threadFactory) {
                    return new NioEventLoopGroup(0, threadFactory);
                }
            }).withReconnectionPolicy(new ExponentialReconnectionPolicy(1_000L, 60_000L))
            .withRetryPolicy(DefaultRetryPolicy.INSTANCE)
            //.withSSL( new NettySSLOptions( ) ) //TODO use ssl
            .withThreadingOptions(new ThreadingOptions() {
                @Override
                public ThreadFactory createThreadFactory(final String clusterName, final String executorName) {
                    return super.createThreadFactory("cassandra-client", executorName);
                }
            }).withoutJMXReporting().build();
    return cluster.connect(keyspace);
}

From source file:com.eucalyptus.ws.WebServices.java

License:Open Source License

private static EventLoopGroup clientEventLoopGroup() {
    if (clientEventLoopGroup != null) {
        return clientEventLoopGroup;
    } else
        try (final LockResource resourceLock = LockResource.lock(clientResourceLock)) {
            if (clientEventLoopGroup != null) {
                return clientEventLoopGroup;
            } else {
                final NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup(
                        StackConfiguration.CLIENT_POOL_MAX_THREADS,
                        Threads.threadFactory("web-services-client-pool-%d"));
                OrderedShutdown.registerPostShutdownHook(() -> {
                    LOG.info("Client shutdown requested");
                    try {
                        final Future<?> terminationFuture = clientEventLoopGroup.shutdownGracefully(0, 5,
                                TimeUnit.SECONDS);
                        terminationFuture.await(10, TimeUnit.SECONDS);
                        if (terminationFuture.isDone()) {
                            LOG.info("Client shutdown complete");
                        } else {
                            LOG.warn("Client shutdown timed out");
                        }
                    } catch (final InterruptedException e) {
                        LOG.info("Client shutdown interrupted");
                    }
                });
                return clientEventLoopGroup = eventLoopGroup;
            }
        }
}

From source file:com.gemstone.gemfire.redis.GemFireRedisServer.java

License:Apache License

/**
 * Helper method to start the server listening for connections. The
 * server is bound to the port specified by {@link GemFireRedisServer#serverPort}
 *
 * @throws IOException
 * @throws InterruptedException
 */
private void startRedisServer() throws IOException, InterruptedException {
    ThreadFactory selectorThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GemFireRedisServer-SelectorThread-" + counter.incrementAndGet());
            t.setDaemon(true);
            return t;
        }

    };

    ThreadFactory workerThreadFactory = new ThreadFactory() {
        private final AtomicInteger counter = new AtomicInteger();

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("GemFireRedisServer-WorkerThread-" + counter.incrementAndGet());
            return t;
        }

    };

    bossGroup = null;
    workerGroup = null;
    Class<? extends ServerChannel> socketClass = null;
    if (singleThreadPerConnection) {
        bossGroup = new OioEventLoopGroup(Integer.MAX_VALUE, selectorThreadFactory);
        workerGroup = new OioEventLoopGroup(Integer.MAX_VALUE, workerThreadFactory);
        socketClass = OioServerSocketChannel.class;
    } else {
        bossGroup = new NioEventLoopGroup(this.numSelectorThreads, selectorThreadFactory);
        workerGroup = new NioEventLoopGroup(this.numWorkerThreads, workerThreadFactory);
        socketClass = NioServerSocketChannel.class;
    }
    InternalDistributedSystem system = (InternalDistributedSystem) cache.getDistributedSystem();
    String pwd = system.getConfig().getRedisPassword();
    final byte[] pwdB = Coder.stringToBytes(pwd);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup).channel(socketClass).childHandler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            if (logger.fineEnabled())
                logger.fine("GemFireRedisServer-Connection established with " + ch.remoteAddress());
            ChannelPipeline p = ch.pipeline();
            p.addLast(ByteToCommandDecoder.class.getSimpleName(), new ByteToCommandDecoder());
            p.addLast(ExecutionHandlerContext.class.getSimpleName(),
                    new ExecutionHandlerContext(ch, cache, regionCache, GemFireRedisServer.this, pwdB));
        }
    }).option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.SO_RCVBUF, getBufferSize())
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, GemFireRedisServer.connectTimeoutMillis)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

    // Bind and start to accept incoming connections.
    ChannelFuture f = b.bind(new InetSocketAddress(getBindAddress(), serverPort)).sync();
    if (this.logger.infoEnabled()) {
        String logMessage = "GemFireRedisServer started {" + getBindAddress() + ":" + serverPort
                + "}, Selector threads: " + this.numSelectorThreads;
        if (this.singleThreadPerConnection)
            logMessage += ", One worker thread per connection";
        else
            logMessage += ", Worker threads: " + this.numWorkerThreads;
        this.logger.info(logMessage);
    }
    this.serverChannel = f.channel();
}

From source file:com.github.ibole.microservice.rpc.core.RpcSharedThreadPools.java

License:Apache License

/**
 * <p>init.</p>
 */
protected void init() {
    batchThreadPool = Executors.newCachedThreadPool(createThreadFactory(BATCH_POOL_THREAD_NAME));
    elg = new NioEventLoopGroup(0, createThreadFactory(RPC_EVENTLOOP_GROUP_NAME));
}

From source file:com.github.milenkovicm.kafka.KafkaProducer.java

License:Apache License

public KafkaProducer(String hostname, int port, String topicName, ProducerProperties properties) {
    this(hostname, port, topicName, properties,
            new NioEventLoopGroup(properties.get(ProducerProperties.NETTY_THREAD_COUNT),
                    new DefaultThreadFactory("producer-" + topicName, Thread.MAX_PRIORITY)));
}

From source file:com.github.rmannibucau.featuredmock.http.DefaultFeaturedHttpServer.java

License:Apache License

@Override
public FeaturedHttpServer start() {
    workerGroup = new NioEventLoopGroup(threads, new FeaturedThreadFactory());

    try {
        final ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.option(ChannelOption.SO_REUSEADDR, true).option(ChannelOption.SO_SNDBUF, 1024)
                .option(ChannelOption.TCP_NODELAY, true).group(workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new FeaturedChannelInitializer(mappers, engine)).bind(host, port)
                .addListener(new ChannelFutureListener() {
                    @Override
                    public void operationComplete(final ChannelFuture future) throws Exception {
                        if (!future.isSuccess()) {
                            LOGGER.severe("Can't start HTTP server");
                        } else {
                            LOGGER.info(String.format("Server started on http://%s:%s", host, port));
                        }
                    }
                }).sync();
    } catch (final InterruptedException e) {
        LOGGER.log(Level.SEVERE, e.getMessage(), e);
    }

    return this;
}

From source file:com.github.sinsinpub.pero.frontend.NettySocksServer.java

License:Apache License

public void run() throws InterruptedException {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1, ThreadFactoryRepository.BOSS_GORUP);
    EventLoopGroup workerGroup = new NioEventLoopGroup(getMaxWorkerThreads(),
            ThreadFactoryRepository.WORKER_GROUP);
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
                .handler(new LoggingHandler(LogLevel.INFO)).childHandler(getSocksServerInitializer());
        ChannelFuture cf = b.bind(getPort()).sync();
        logger.info(
                String.format("Proxy server %s %s started.", ApplicationVersion.DEFAULT.getApplicationName(),
                        ApplicationVersion.DEFAULT.getApplicationVersion()));
        cf.channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}

From source file:com.github.sparkfy.network.util.NettyUtils.java

License:Apache License

/** Creates a Netty EventLoopGroup based on the IOMode. */
public static EventLoopGroup createEventLoop(IOMode mode, int numThreads, String threadPrefix) {
    ThreadFactory threadFactory = createThreadFactory(threadPrefix);

    switch (mode) {
    case NIO:
        return new NioEventLoopGroup(numThreads, threadFactory);
    case EPOLL:
        return new EpollEventLoopGroup(numThreads, threadFactory);
    default:
        throw new IllegalArgumentException("Unknown io mode: " + mode);
    }
}

From source file:com.google.cloud.bigtable.grpc.BigtableSessionSharedThreadPools.java

License:Open Source License

protected void init() {
    batchThreadPool = Executors.newCachedThreadPool(createThreadFactory(BATCH_POOL_THREAD_NAME));
    elg = new NioEventLoopGroup(0, createThreadFactory(GRPC_EVENTLOOP_GROUP_NAME));
    retryExecutor = Executors.newScheduledThreadPool(RETRY_THREAD_COUNT,
            createThreadFactory(RETRY_THREADPOOL_NAME));
}

From source file:com.google.cloud.bigtable.hbase.BigtableOptionsFactory.java

License:Open Source License

public static BigtableOptions fromConfiguration(Configuration configuration) throws IOException {
    BigtableOptions.Builder optionsBuilder = new BigtableOptions.Builder();

    String projectId = configuration.get(PROJECT_ID_KEY);
    Preconditions.checkArgument(!Strings.isNullOrEmpty(projectId),
            String.format("Project ID must be supplied via %s", PROJECT_ID_KEY));
    optionsBuilder.setProjectId(projectId);
    LOG.debug("Project ID %s", projectId);

    String zone = configuration.get(ZONE_KEY);
    Preconditions.checkArgument(!Strings.isNullOrEmpty(zone),
            String.format("Zone must be supplied via %s", ZONE_KEY));
    optionsBuilder.setZone(zone);
    LOG.debug("Zone %s", zone);

    String cluster = configuration.get(CLUSTER_KEY);
    Preconditions.checkArgument(!Strings.isNullOrEmpty(cluster),
            String.format("Cluster must be supplied via %s", CLUSTER_KEY));
    optionsBuilder.setCluster(cluster);
    LOG.debug("Cluster %s", cluster);

    String overrideIp = configuration.get(IP_OVERRIDE_KEY);
    InetAddress overrideIpAddress = null;
    if (!Strings.isNullOrEmpty(overrideIp)) {
        LOG.debug("Using override IP address %s", overrideIp);
        overrideIpAddress = InetAddress.getByName(overrideIp);
    }

    String host = configuration.get(BIGTABLE_HOST_KEY, BIGTABLE_HOST_DEFAULT);
    Preconditions.checkArgument(!Strings.isNullOrEmpty(host),
            String.format("API endpoint host must be supplied via %s", BIGTABLE_HOST_KEY));
    if (overrideIpAddress == null) {
        LOG.debug("Data endpoint host %s", host);
        optionsBuilder.setHost(InetAddress.getByName(host));
    } else {
        LOG.debug("Data endpoint host %s. Using override IP address.", host);
        optionsBuilder.setHost(InetAddress.getByAddress(host, overrideIpAddress.getAddress()));
    }

    String adminHost = configuration.get(BIGTABLE_ADMIN_HOST_KEY, BIGTABLE_ADMIN_HOST_DEFAULT);
    if (Strings.isNullOrEmpty(adminHost)) {
        LOG.debug("Admin endpoint host not configured, assuming we should use data endpoint.");
        adminHost = host;
    }

    String clusterAdminHost = configuration.get(BIGTABLE_CLUSTER_ADMIN_HOST_KEY,
            BIGTABLE_CLUSTER_ADMIN_HOST_DEFAULT);
    if (Strings.isNullOrEmpty(clusterAdminHost)) {
        // Most environments don't need cluster admin.
        LOG.debug("Cluster Admin endpoint host not configured.");
    } else {
        optionsBuilder.setClusterAdminHost(InetAddress.getByName(clusterAdminHost));
    }

    if (overrideIpAddress == null) {
        LOG.debug("Admin endpoint host %s", host);
        optionsBuilder.setAdminHost(InetAddress.getByName(adminHost));
    } else {
        LOG.debug("Admin endpoint host %s. Using override IP address.", host);
        optionsBuilder.setAdminHost(InetAddress.getByAddress(adminHost, overrideIpAddress.getAddress()));
    }

    int port = configuration.getInt(BIGTABLE_PORT_KEY, DEFAULT_BIGTABLE_PORT);
    optionsBuilder.setPort(port);

    try {
        if (configuration.getBoolean(BIGTABE_USE_SERVICE_ACCOUNTS_KEY, BIGTABLE_USE_SERVICE_ACCOUNTS_DEFAULT)) {
            LOG.debug("Using service accounts");

            String serviceAccountJson = System.getenv().get(SERVICE_ACCOUNT_JSON_ENV_VARIABLE);
            String serviceAccountEmail = configuration.get(BIGTABLE_SERVICE_ACCOUNT_EMAIL_KEY);
            if (!Strings.isNullOrEmpty(serviceAccountJson)) {
                LOG.debug("Using JSON file: %s", serviceAccountJson);
                optionsBuilder.setCredential(CredentialFactory.getApplicationDefaultCredential());
            } else if (!Strings.isNullOrEmpty(serviceAccountEmail)) {
                LOG.debug("Service account %s specified.", serviceAccountEmail);
                String keyfileLocation = configuration.get(BIGTABLE_SERVICE_ACCOUNT_P12_KEYFILE_LOCATION_KEY);
                Preconditions.checkState(!Strings.isNullOrEmpty(keyfileLocation),
                        "Key file location must be specified when setting service account email");
                LOG.debug("Using p12 keyfile: %s", keyfileLocation);
                optionsBuilder.setCredential(CredentialFactory
                        .getCredentialFromPrivateKeyServiceAccount(serviceAccountEmail, keyfileLocation));
            } else {
                optionsBuilder.setCredential(CredentialFactory.getCredentialFromMetadataServiceAccount());
            }
        } else if (configuration.getBoolean(BIGTABLE_NULL_CREDENTIAL_ENABLE_KEY,
                BIGTABLE_NULL_CREDENTIAL_ENABLE_DEFAULT)) {
            optionsBuilder.setCredential(null); // Intended for testing purposes only.
            LOG.info("Enabling the use of null credentials. This should not be used in production.");
        } else {
            throw new IllegalStateException("Either service account or null credentials must be enabled");
        }
    } catch (GeneralSecurityException gse) {
        throw new IOException("Failed to acquire credential.", gse);
    }

    ThreadFactory threadFactory = new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat(GRPC_EVENTLOOP_GROUP_NAME + "-%d").build();
    EventLoopGroup elg = new NioEventLoopGroup(0, threadFactory);
    optionsBuilder.setCustomEventLoopGroup(elg);

    ScheduledExecutorService retryExecutor = Executors.newScheduledThreadPool(RETRY_THREAD_COUNT,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat(RETRY_THREADPOOL_NAME + "-%d").build());
    optionsBuilder.setRpcRetryExecutorService(retryExecutor);

    // Set up aggregate performance and call error rate logging:
    if (!Strings.isNullOrEmpty(configuration.get(CALL_REPORT_DIRECTORY_KEY))) {
        String reportDirectory = configuration.get(CALL_REPORT_DIRECTORY_KEY);
        Path reportDirectoryPath = FileSystems.getDefault().getPath(reportDirectory);
        if (Files.exists(reportDirectoryPath)) {
            Preconditions.checkState(Files.isDirectory(reportDirectoryPath),
                    "Report path %s must be a directory");
        } else {
            Files.createDirectories(reportDirectoryPath);
        }
        String callStatusReport = reportDirectoryPath.resolve("call_status.txt").toAbsolutePath().toString();
        String callTimingReport = reportDirectoryPath.resolve("call_timing.txt").toAbsolutePath().toString();
        LOG.debug("Logging call status aggregates to %s", callStatusReport);
        LOG.debug("Logging call timing aggregates to %s", callTimingReport);
        optionsBuilder.setCallStatusReportPath(callStatusReport);
        optionsBuilder.setCallTimingReportPath(callTimingReport);
    }

    boolean enableRetries = configuration.getBoolean(ENABLE_GRPC_RETRIES_KEY, ENABLE_GRPC_RETRIES_DEFAULT);
    LOG.debug("gRPC retries enabled: %s", enableRetries);
    optionsBuilder.setRetriesEnabled(enableRetries);

    boolean retryOnDeadlineExceeded = configuration.getBoolean(ENABLE_GRPC_RETRY_DEADLINEEXCEEDED_KEY,
            ENABLE_GRPC_RETRY_DEADLINEEXCEEDED_DEFAULT);
    LOG.debug("gRPC retry on deadline exceeded enabled: %s", retryOnDeadlineExceeded);
    optionsBuilder.setRetryOnDeadlineExceeded(retryOnDeadlineExceeded);

    int channelCount = configuration.getInt(BIGTABLE_CHANNEL_COUNT_KEY, BIGTABLE_CHANNEL_COUNT_DEFAULT);
    optionsBuilder.setChannelCount(channelCount);

    long channelTimeout = configuration.getLong(BIGTABLE_CHANNEL_TIMEOUT_MS_KEY,
            BIGTABLE_CHANNEL_TIMEOUT_MS_DEFAULT);
    optionsBuilder.setChannelTimeoutMs(channelTimeout);

    return optionsBuilder.build();
}