Example usage for java.util.concurrent CountDownLatch await

Introduction

On this page you can find example usages of java.util.concurrent.CountDownLatch.await().

Prototype

public void await() throws InterruptedException 

Document

Causes the current thread to wait until the latch has counted down to zero, unless the thread is interrupted.
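
A minimal, self-contained sketch of the pattern (not taken from any of the source files below; the class name AwaitSketch and the sleep duration are illustrative only): a worker thread counts the latch down when it finishes, and the main thread blocks in await() until the count reaches zero.

import java.util.concurrent.CountDownLatch;

public class AwaitSketch {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);

        new Thread(() -> {
            try {
                Thread.sleep(500); // simulate some work before releasing the waiter
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } finally {
                latch.countDown(); // once the count reaches zero, await() returns
            }
        }).start();

        latch.await(); // blocks until the latch reaches zero, or throws if interrupted
        System.out.println("Worker finished, main thread resumes");
    }
}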

Usage

From source file:com.weibo.motan.demo.client.DemoRpcClient.java

public static void main(String[] args) throws Exception {
    final DescriptiveStatistics stats = new SynchronizedDescriptiveStatistics();

    int threads = Integer.parseInt(args[0]);

    DubboBenchmark.BenchmarkMessage msg = prepareArgs();
    final byte[] msgBytes = msg.toByteArray();

    int n = 1000000;
    final CountDownLatch latch = new CountDownLatch(n);

    ExecutorService es = Executors.newFixedThreadPool(threads);

    final AtomicInteger trans = new AtomicInteger(0);
    final AtomicInteger transOK = new AtomicInteger(0);

    ApplicationContext ctx = new ClassPathXmlApplicationContext(
            new String[] { "classpath:motan_demo_client.xml" });

    MotanDemoService service = (MotanDemoService) ctx.getBean("motanDemoReferer");

    long start = System.currentTimeMillis();
    for (int i = 0; i < n; i++) {
        es.submit(() -> {
            try {

                long t = System.currentTimeMillis();
                DubboBenchmark.BenchmarkMessage m = testSay(service, msgBytes);
                t = System.currentTimeMillis() - t;
                stats.addValue(t);

                trans.incrementAndGet();

                if (m != null && m.getField1().equals("OK")) {
                    transOK.incrementAndGet();
                }

            } finally {
                latch.countDown();
            }
        });
    }

    // Block until every submitted task has called countDown()
    latch.await();
    es.shutdown();

    start = System.currentTimeMillis() - start;

    System.out.printf("sent     requests    : %d\n", n);
    System.out.printf("received requests    : %d\n", trans.get());
    System.out.printf("received requests_OK : %d\n", transOK.get());
    System.out.printf("throughput  (TPS)    : %d\n", n * 1000 / start);

    System.out.printf("mean: %f\n", stats.getMean());
    System.out.printf("median: %f\n", stats.getPercentile(50));
    System.out.printf("max: %f\n", stats.getMax());
    System.out.printf("min: %f\n", stats.getMin());

    System.out.printf("99P: %f\n", stats.getPercentile(90));

}

From source file:com.frostwire.search.tests.KATSearchTest.java

public static void main(String[] args) throws InterruptedException {
    SearchEngine KAT = new SearchEngine(1, "KAT", SearchEnginesSettings.KAT_SEARCH_ENABLED, "kat.cr") {
        @Override
        public SearchPerformer getPerformer(long token, String keywords) {
            return new KATSearchPerformer(getDomainName(), token, keywords, 10000);
        }
    };

    final CountDownLatch latch = new CountDownLatch(1);
    final SearchPerformer performer;
    performer = KAT.getPerformer(1, "public domain");

    Action1<List<? extends SearchResult>> onNextAction = new Action1<List<? extends SearchResult>>() {
        @Override
        public void call(List<? extends SearchResult> searchResults) {
            System.out.println("doOnNext!");
            if (searchResults instanceof List) {
                try {
                    if (!testOnSearchResults((List<KATSearchResult>) searchResults)) {
                        System.out.println("Test failed.");
                    } else {
                        System.out.println("Test passed.");
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
                latch.countDown();
            }
        }
    };

    final Observable<List<? extends SearchResult>> observable = performer.observable();
    observable.forEach(onNextAction);
    performer.perform();
    System.out.println("performer.perform()\nWaiting...");
    latch.await();

    //System.out.println("Bye bye");

    /**
     byte[] readAllBytes = Files.readAllBytes(Paths.get("/Users/gubatron/tmp/eztv4.html"));
     String fileStr = new String(readAllBytes,"utf-8");
            
     //Pattern pattern = Pattern.compile(REGEX);
     Pattern pattern = Pattern.compile(HTML_REGEX);
            
     Matcher matcher = pattern.matcher(fileStr);
            
     int found = 0;
     while (matcher.find()) {
     found++;
     System.out.println("\nfound " + found);
     System.out.println("displayname: " + matcher.group("displayname"));
     System.out.println("infohash: " + matcher.group("infohash"));
     System.out.println("torrenturl: " + matcher.group("torrenturl"));
     System.out.println("creationtime: " + matcher.group("creationtime"));
     System.out.println("filesize: " + matcher.group("filesize"));
     System.out.println("===");
     }
     //System.out.println("-done-");
     */
}

From source file:com.github.ambry.store.DiskReformatter.java

public static void main(String[] args) throws Exception {
    VerifiableProperties properties = ToolUtils.getVerifiableProperties(args);
    DiskReformatterConfig config = new DiskReformatterConfig(properties);
    StoreConfig storeConfig = new StoreConfig(properties);
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
    ServerConfig serverConfig = new ServerConfig(properties);
    ClusterAgentsFactory clusterAgentsFactory = Utils.getObj(clusterMapConfig.clusterMapClusterAgentsFactory,
            clusterMapConfig, config.hardwareLayoutFilePath, config.partitionLayoutFilePath);
    try (ClusterMap clusterMap = clusterAgentsFactory.getClusterMap()) {
        StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(
                serverConfig.serverStoreKeyConverterFactory, properties, clusterMap.getMetricRegistry());
        StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
        DataNodeId dataNodeId = clusterMap.getDataNodeId(config.datanodeHostname, config.datanodePort);
        if (dataNodeId == null) {
            throw new IllegalArgumentException("Did not find node in clustermap with hostname:port - "
                    + config.datanodeHostname + ":" + config.datanodePort);
        }
        DiskReformatter reformatter = new DiskReformatter(dataNodeId, Collections.EMPTY_LIST,
                config.fetchSizeInBytes, storeConfig, storeKeyFactory, clusterMap, SystemTime.getInstance(),
                storeKeyConverterFactory.getStoreKeyConverter());
        AtomicInteger exitStatus = new AtomicInteger(0);
        CountDownLatch latch = new CountDownLatch(config.diskMountPaths.length);
        for (int i = 0; i < config.diskMountPaths.length; i++) {
            int finalI = i;
            Runnable runnable = () -> {
                try {
                    reformatter.reformat(config.diskMountPaths[finalI], new File(config.scratchPaths[finalI]));
                    latch.countDown();
                } catch (Exception e) {
                    throw new IllegalStateException(e);
                }
            };
            Thread thread = Utils.newThread(config.diskMountPaths[finalI] + "-reformatter", runnable, true);
            thread.setUncaughtExceptionHandler((t, e) -> {
                exitStatus.set(1);
                logger.error("Reformatting {} failed", config.diskMountPaths[finalI], e);
                latch.countDown();
            });
            thread.start();
        }
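        // Wait for every disk's reformat thread to count the latch down (on success or via the uncaught-exception handler)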
        latch.await();
        System.exit(exitStatus.get());
    }
}

From source file:com.yulore.demo.NHttpClient.java

public static void main(String[] args) throws Exception {
    // Create HTTP protocol processing chain
    HttpProcessor httpproc = HttpProcessorBuilder.create()
            // Use standard client-side protocol interceptors
            .add(new RequestContent()).add(new RequestTargetHost()).add(new RequestConnControl())
            .add(new RequestUserAgent("Test/1.1")).add(new RequestExpectContinue(true)).build();
    // Create client-side HTTP protocol handler
    HttpAsyncRequestExecutor protocolHandler = new HttpAsyncRequestExecutor();
    // Create client-side I/O event dispatch
    final IOEventDispatch ioEventDispatch = new DefaultHttpClientIODispatch(protocolHandler,
            ConnectionConfig.DEFAULT);
    // Create client-side I/O reactor
    final ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor();
    // Create HTTP connection pool
    BasicNIOConnPool pool = new BasicNIOConnPool(ioReactor, ConnectionConfig.DEFAULT);
    // Limit total number of connections to just two
    pool.setDefaultMaxPerRoute(2);
    pool.setMaxTotal(2);
    // Run the I/O reactor in a separate thread
    Thread t = new Thread(new Runnable() {

        public void run() {
            try {
                // Ready to go!
                ioReactor.execute(ioEventDispatch);
            } catch (InterruptedIOException ex) {
                System.err.println("Interrupted");
            } catch (IOException e) {
                System.err.println("I/O error: " + e.getMessage());
            }
            System.out.println("Shutdown");
        }

    });
    // Start the client thread
    t.start();
    // Create HTTP requester
    HttpAsyncRequester requester = new HttpAsyncRequester(httpproc);
    // Execute HTTP GETs to the following hosts
    HttpHost[] targets = new HttpHost[] { new HttpHost("www.apache.org", 80, "http"),
            new HttpHost("www.verisign.com", 443, "https"), new HttpHost("www.google.com", 80, "http") };
    final CountDownLatch latch = new CountDownLatch(targets.length);
    for (final HttpHost target : targets) {
        BasicHttpRequest request = new BasicHttpRequest("GET", "/");
        HttpCoreContext coreContext = HttpCoreContext.create();
        requester.execute(new BasicAsyncRequestProducer(target, request), new BasicAsyncResponseConsumer(),
                pool, coreContext,
                // Handle HTTP response from a callback
                new FutureCallback<HttpResponse>() {

                    public void completed(final HttpResponse response) {
                        latch.countDown();
                        System.out.println(target + "->" + response.getStatusLine());
                    }

                    public void failed(final Exception ex) {
                        latch.countDown();
                        System.out.println(target + "->" + ex);
                    }

                    public void cancelled() {
                        latch.countDown();
                        System.out.println(target + " cancelled");
                    }

                });
    }
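    // Wait until completed/failed/cancelled has fired for every target request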
    latch.await();
    System.out.println("Shutting down I/O reactor");
    ioReactor.shutdown();
    System.out.println("Done");

}

From source file:framework.httpclient.nio.NHttpClient.java

public static void main(String[] args) throws Exception {
    // Create HTTP protocol processing chain
    HttpProcessor httpproc = HttpProcessorBuilder.create()
            // Use standard client-side protocol interceptors
            .add(new RequestContent()).add(new RequestTargetHost()).add(new RequestConnControl())
            .add(new RequestUserAgent("LinkedHashSetVsTreeSet/1.1")).add(new RequestExpectContinue(true))
            .build();

    // Create client-side HTTP protocol handler
    HttpAsyncRequestExecutor protocolHandler = new HttpAsyncRequestExecutor();

    // Create client-side I/O event dispatch
    final IOEventDispatch ioEventDispatch = new DefaultHttpClientIODispatch(protocolHandler,
            ConnectionConfig.DEFAULT);

    // Create client-side I/O reactor
    final ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor();

    // Create HTTP connection pool
    BasicNIOConnPool pool = new BasicNIOConnPool(ioReactor, ConnectionConfig.DEFAULT);

    // Limit total number of connections to just two
    pool.setDefaultMaxPerRoute(2);
    pool.setMaxTotal(2);

    // Run the I/O reactor in a separate thread
    Thread t = new Thread(new Runnable() {

        public void run() {
            try {
                // Ready to go!
                ioReactor.execute(ioEventDispatch);
            } catch (InterruptedIOException ex) {
                System.err.println("Interrupted");
            } catch (IOException e) {
                System.err.println("I/O error: " + e.getMessage());
            }
            System.out.println("Shutdown");
        }

    });
    // Start the client thread
    t.start();

    // Create HTTP requester
    HttpAsyncRequester requester = new HttpAsyncRequester(httpproc);

    // Execute HTTP GETs to the following hosts
    HttpHost[] targets = new HttpHost[] { new HttpHost("www.baidu.org", -1, "https"),
            //            new HttpHost("www.zhihu.com", -1, "https"),
            new HttpHost("www.bilibili.com", -1, "https") };

    final CountDownLatch latch = new CountDownLatch(targets.length);

    for (final HttpHost target : targets) {
        BasicHttpRequest request = new BasicHttpRequest("GET", "/");
        HttpCoreContext coreContext = HttpCoreContext.create();
        requester.execute(new BasicAsyncRequestProducer(target, request), new BasicAsyncResponseConsumer(),
                pool, coreContext,
                // Handle HTTP response from a callback
                new FutureCallback<HttpResponse>() {

                    public void completed(final HttpResponse response) {
                        latch.countDown();
                        System.out.println(target + "->" + response.getStatusLine());
                    }

                    public void failed(final Exception ex) {
                        latch.countDown();
                        System.err.println(target + "->" + ex);
                        ex.printStackTrace();
                    }

                    public void cancelled() {
                        latch.countDown();
                        System.out.println(target + " cancelled");
                    }

                });
    }
    latch.await();
    System.out.println("Shutting down I/O reactor");
    ioReactor.shutdown();
    System.out.println("Done");
}

From source file:com.twitter.distributedlog.basic.MultiReader.java

public static void main(String[] args) throws Exception {
    if (2 != args.length) {
        System.out.println(HELP);
        return;
    }

    String dlUriStr = args[0];
    final String streamList = args[1];

    URI uri = URI.create(dlUriStr);
    DistributedLogConfiguration conf = new DistributedLogConfiguration();
    DistributedLogNamespace namespace = DistributedLogNamespaceBuilder.newBuilder().conf(conf).uri(uri).build();

    String[] streamNameList = StringUtils.split(streamList, ',');
    DistributedLogManager[] managers = new DistributedLogManager[streamNameList.length];

    for (int i = 0; i < managers.length; i++) {
        String streamName = streamNameList[i];
        // open the dlm
        System.out.println("Opening log stream " + streamName);
        managers[i] = namespace.openLog(streamName);
    }

    final CountDownLatch keepAliveLatch = new CountDownLatch(1);

    for (DistributedLogManager dlm : managers) {
        final DistributedLogManager manager = dlm;
        dlm.getLastLogRecordAsync().addEventListener(new FutureEventListener<LogRecordWithDLSN>() {
            @Override
            public void onFailure(Throwable cause) {
                if (cause instanceof LogNotFoundException) {
                    System.err.println(
                            "Log stream " + manager.getStreamName() + " is not found. Please create it first.");
                    keepAliveLatch.countDown();
                } else if (cause instanceof LogEmptyException) {
                    System.err.println("Log stream " + manager.getStreamName() + " is empty.");
                    readLoop(manager, DLSN.InitialDLSN, keepAliveLatch);
                } else {
                    System.err.println("Encountered exception on process stream " + manager.getStreamName());
                    keepAliveLatch.countDown();
                }
            }

            @Override
            public void onSuccess(LogRecordWithDLSN record) {
                readLoop(manager, record.getDlsn(), keepAliveLatch);
            }
        });
    }
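    // Keep the process alive until one of the listeners or read loops counts the latch down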
    keepAliveLatch.await();
    for (DistributedLogManager dlm : managers) {
        dlm.close();
    }
    namespace.close();
}

From source file:org.eclipse.ecf.provider.filetransfer.httpcore.NHttpClient.java

public static void main(String[] args) throws Exception {
    HttpParams params = new BasicHttpParams();
    params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000)
            .setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 10000)
            .setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024)
            .setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false)
            .setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true)
            .setParameter(CoreProtocolPNames.USER_AGENT, "HttpComponents/1.1");

    final ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor(2, params);

    BasicHttpProcessor httpproc = new BasicHttpProcessor();
    httpproc.addInterceptor(new RequestContent());
    httpproc.addInterceptor(new RequestTargetHost());
    httpproc.addInterceptor(new RequestConnControl());
    httpproc.addInterceptor(new RequestUserAgent());
    httpproc.addInterceptor(new RequestExpectContinue());

    // We are going to use this object to synchronize between the 
    // I/O event and main threads
    //CountDownLatch requestCount = new CountDownLatch(3);
    CountDownLatch requestCount = new CountDownLatch(1);

    BufferingHttpClientHandler handler = new MyBufferingHttpClientHandler(httpproc,
            new MyHttpRequestExecutionHandler(requestCount), new DefaultConnectionReuseStrategy(), params);

    handler.setEventListener(new EventLogger());

    final IOEventDispatch ioEventDispatch = new DefaultClientIOEventDispatch(handler, params);

    Thread t = new Thread(new Runnable() {

        public void run() {
            try {
                ioReactor.execute(ioEventDispatch);
            } catch (InterruptedIOException ex) {
                System.err.println("Interrupted");
            } catch (IOException e) {
                System.err.println("I/O error: " + e.getMessage());
            }
            System.out.println("Shutdown");
        }

    });
    t.start();

    SessionRequest[] reqs = new SessionRequest[1];
    reqs[0] = ioReactor.connect(new InetSocketAddress("ftp.osuosl.org", 80), null,
            new HttpHost("ftp.osuosl.org"), new MySessionRequestCallback(requestCount));
    // Block until all connections signal
    // completion of the request execution
    requestCount.await();

    System.out.println("Shutting down I/O reactor");

    ioReactor.shutdown();

    System.out.println("Done");
}

From source file:com.xoom.rabbit.test.Main.java

public static void main(String[] args) throws InterruptedException {
    if (args.length != 9) {
        System.out.println(
                "usage: java -jar target/rabbit-tester-0.1-SNAPSHOT-standalone.jar [consumer_threads] [number_of_messages] [amqp_host] [amqp_port] [produce] [consume] [message size in bytes] [username] [password]");
        return;
    }
    final long startTime = System.currentTimeMillis();
    int consumerThreads = Integer.parseInt(args[0]);
    final int messages = Integer.parseInt(args[1]);
    String host = args[2];
    int port = Integer.parseInt(args[3]);
    boolean produce = Boolean.parseBoolean(args[4]);
    boolean consume = Boolean.parseBoolean(args[5]);
    final int messageSize = Integer.parseInt(args[6]);
    String username = args[7];
    String password = args[8];

    if (produce) {
        System.out.println("Sending " + messages + " messages to " + host + ":" + port);
    }
    if (consume) {
        System.out.println("Consuming " + messages + " messages from " + host + ":" + port);
    }
    if (!produce && !consume) {
        System.out.println("Not producing or consuming any messages.");
    }

    CachingConnectionFactory connectionFactory = new CachingConnectionFactory(host, port);
    connectionFactory.setUsername(username);
    connectionFactory.setPassword(password);
    connectionFactory.setChannelCacheSize(consumerThreads + 1);

    RabbitAdmin amqpAdmin = new RabbitAdmin(connectionFactory);

    DirectExchange exchange = new DirectExchange(EXCHANGE_NAME, true, false);
    Queue queue = new Queue(QUEUE_NAME);
    amqpAdmin.declareExchange(exchange);
    amqpAdmin.declareQueue(queue);
    amqpAdmin.declareBinding(BindingBuilder.bind(queue).to(exchange).with(ROUTING_KEY));

    final AmqpTemplate amqpTemplate = new RabbitTemplate(connectionFactory);

    final CountDownLatch producerLatch = new CountDownLatch(messages);
    final CountDownLatch consumerLatch = new CountDownLatch(messages);

    SimpleMessageListenerContainer listenerContainer = null;

    if (consume) {
        listenerContainer = new SimpleMessageListenerContainer();
        listenerContainer.setConnectionFactory(connectionFactory);
        listenerContainer.setQueueNames(QUEUE_NAME);
        listenerContainer.setConcurrentConsumers(consumerThreads);
        listenerContainer.setMessageListener(new MessageListener() {
            @Override
            public void onMessage(Message message) {
                if (consumerLatch.getCount() == 1) {
                    System.out.println("Finished consuming " + messages + " messages in "
                            + (System.currentTimeMillis() - startTime) + "ms");
                }
                consumerLatch.countDown();
            }
        });
        listenerContainer.start();
    }

    if (produce) {
        while (producerLatch.getCount() > 0) {
            try {
                byte[] message = new byte[messageSize];
                RND.nextBytes(message);
                amqpTemplate.send(EXCHANGE_NAME, ROUTING_KEY, new Message(message, new MessageProperties()));
                producerLatch.countDown();
            } catch (Exception e) {
                System.out.println("Failed to send message " + (messages - producerLatch.getCount())
                        + " will retry forever.");
            }
        }
    }

    if (consume) {
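        // Block until the listener has consumed all expected messages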
        consumerLatch.await();
        listenerContainer.shutdown();
    }

    connectionFactory.destroy();
}

From source file:NHttpClient.java

public static void main(String[] args) throws Exception {
    HttpParams params = new BasicHttpParams();
    params.setIntParameter(CoreConnectionPNames.SO_TIMEOUT, 5000)
            .setIntParameter(CoreConnectionPNames.CONNECTION_TIMEOUT, 10000)
            .setIntParameter(CoreConnectionPNames.SOCKET_BUFFER_SIZE, 8 * 1024)
            .setBooleanParameter(CoreConnectionPNames.STALE_CONNECTION_CHECK, false)
            .setBooleanParameter(CoreConnectionPNames.TCP_NODELAY, true)
            .setParameter(CoreProtocolPNames.USER_AGENT, "HttpComponents/1.1");

    final ConnectingIOReactor ioReactor = new DefaultConnectingIOReactor(2, params);

    BasicHttpProcessor httpproc = new BasicHttpProcessor();
    httpproc.addInterceptor(new RequestContent());
    httpproc.addInterceptor(new RequestTargetHost());
    httpproc.addInterceptor(new RequestConnControl());
    httpproc.addInterceptor(new RequestUserAgent());
    httpproc.addInterceptor(new RequestExpectContinue());

    // We are going to use this object to synchronize between the 
    // I/O event and main threads
    CountDownLatch requestCount = new CountDownLatch(3);

    BufferingHttpClientHandler handler = new BufferingHttpClientHandler(httpproc,
            new MyHttpRequestExecutionHandler(requestCount), new DefaultConnectionReuseStrategy(), params);

    handler.setEventListener(new EventLogger());

    final IOEventDispatch ioEventDispatch = new DefaultClientIOEventDispatch(handler, params);

    Thread t = new Thread(new Runnable() {

        public void run() {
            try {
                ioReactor.execute(ioEventDispatch);
            } catch (InterruptedIOException ex) {
                System.err.println("Interrupted");
            } catch (IOException e) {
                System.err.println("I/O error: " + e.getMessage());
            }
            System.out.println("Shutdown");
        }

    });
    t.start();

    SessionRequest[] reqs = new SessionRequest[3];
    reqs[0] = ioReactor.connect(new InetSocketAddress("www.yahoo.com", 80), null, new HttpHost("www.yahoo.com"),
            new MySessionRequestCallback(requestCount));
    reqs[1] = ioReactor.connect(new InetSocketAddress("www.google.com", 80), null,
            new HttpHost("www.google.ch"), new MySessionRequestCallback(requestCount));
    reqs[2] = ioReactor.connect(new InetSocketAddress("www.apache.org", 80), null,
            new HttpHost("www.apache.org"), new MySessionRequestCallback(requestCount));

    // Block until all connections signal
    // completion of the request execution
    requestCount.await();

    System.out.println("Shutting down I/O reactor");

    ioReactor.shutdown();

    System.out.println("Done");
}

From source file:co.paralleluniverse.photon.Photon.java

public static void main(final String[] args) throws InterruptedException, IOException {

    final Options options = new Options();
    options.addOption("rate", true, "Requests per second (default " + rateDefault + ")");
    options.addOption("duration", true,
            "Minimum test duration in seconds: will wait for <duration> * <rate> requests to terminate or, if progress check enabled, no progress after <duration> (default "
                    + durationDefault + ")");
    options.addOption("maxconnections", true,
            "Maximum number of open connections (default " + maxConnectionsDefault + ")");
    options.addOption("timeout", true,
            "Connection and read timeout in millis (default " + timeoutDefault + ")");
    options.addOption("print", true,
            "Print cycle in millis, 0 to disable intermediate statistics (default " + printCycleDefault + ")");
    options.addOption("check", true,
            "Progress check cycle in millis, 0 to disable progress check (default " + checkCycleDefault + ")");
    options.addOption("stats", false, "Print full statistics when finish (default false)");
    options.addOption("minmax", false, "Print min/mean/stddev/max stats when finish (default false)");
    options.addOption("name", true, "Test name to print in the statistics (default '" + testNameDefault + "')");
    options.addOption("help", false, "Print help");

    try {
        final CommandLine cmd = new BasicParser().parse(options, args);
        final String[] ar = cmd.getArgs();
        if (cmd.hasOption("help") || ar.length != 1)
            printUsageAndExit(options);

        final String url = ar[0];

        final int timeout = Integer.parseInt(cmd.getOptionValue("timeout", timeoutDefault));
        final int maxConnections = Integer
                .parseInt(cmd.getOptionValue("maxconnections", maxConnectionsDefault));
        final int duration = Integer.parseInt(cmd.getOptionValue("duration", durationDefault));
        final int printCycle = Integer.parseInt(cmd.getOptionValue("print", printCycleDefault));
        final int checkCycle = Integer.parseInt(cmd.getOptionValue("check", checkCycleDefault));
        final String testName = cmd.getOptionValue("name", testNameDefault);
        final int rate = Integer.parseInt(cmd.getOptionValue("rate", rateDefault));

        final MetricRegistry metrics = new MetricRegistry();
        final Meter requestMeter = metrics.meter("request");
        final Meter responseMeter = metrics.meter("response");
        final Meter errorsMeter = metrics.meter("errors");
        final Logger log = LoggerFactory.getLogger(Photon.class);
        final ConcurrentHashMap<String, AtomicInteger> errors = new ConcurrentHashMap<>();
        final HttpGet request = new HttpGet(url);
        final StripedTimeSeries<Long> sts = new StripedTimeSeries<>(30000, false);
        final StripedHistogram sh = new StripedHistogram(60000, 5);

        log.info("name: " + testName + " url:" + url + " rate:" + rate + " duration:" + duration
                + " maxconnections:" + maxConnections + ", " + "timeout:" + timeout);
        final DefaultConnectingIOReactor ioreactor = new DefaultConnectingIOReactor(IOReactorConfig.custom()
                .setConnectTimeout(timeout).setIoThreadCount(10).setSoTimeout(timeout).build());

        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            final List<ExceptionEvent> events = ioreactor.getAuditLog();
            if (events != null)
                events.stream().filter(event -> event != null).forEach(event -> {
                    System.err.println(
                            "Apache Async HTTP Client I/O Reactor Error Time: " + event.getTimestamp());
                    //noinspection ThrowableResultOfMethodCallIgnored
                    if (event.getCause() != null)
                        //noinspection ThrowableResultOfMethodCallIgnored
                        event.getCause().printStackTrace();
                });
            if (cmd.hasOption("stats"))
                printFinishStatistics(errorsMeter, sts, sh, testName);
            if (!errors.keySet().isEmpty())
                errors.entrySet().stream()
                        .forEach(p -> log.info(testName + " " + p.getKey() + " " + p.getValue() + "ms"));
            System.out.println(
                    testName + " responseTime(90%): " + sh.getHistogramData().getValueAtPercentile(90) + "ms");
            if (cmd.hasOption("minmax")) {
                final HistogramData hd = sh.getHistogramData();
                System.out.format("%s %8s%8s%8s%8s\n", testName, "min", "mean", "sd", "max");
                System.out.format("%s %8d%8.2f%8.2f%8d\n", testName, hd.getMinValue(), hd.getMean(),
                        hd.getStdDeviation(), hd.getMaxValue());
            }
        }));

        final PoolingNHttpClientConnectionManager mngr = new PoolingNHttpClientConnectionManager(ioreactor);
        mngr.setDefaultMaxPerRoute(maxConnections);
        mngr.setMaxTotal(maxConnections);
        final CloseableHttpAsyncClient ahc = HttpAsyncClientBuilder.create().setConnectionManager(mngr)
                .setDefaultRequestConfig(RequestConfig.custom().setLocalAddress(null).build()).build();
        try (final CloseableHttpClient client = new FiberHttpClient(ahc)) {
            final int num = duration * rate;

            final CountDownLatch cdl = new CountDownLatch(num);
            final Semaphore sem = new Semaphore(maxConnections);
            final RateLimiter rl = RateLimiter.create(rate);

            spawnStatisticsThread(printCycle, cdl, log, requestMeter, responseMeter, errorsMeter, testName);

            for (int i = 0; i < num; i++) {
                rl.acquire();
                if (sem.availablePermits() == 0)
                    log.debug("Maximum connections count reached, waiting...");
                sem.acquireUninterruptibly();

                new Fiber<Void>(() -> {
                    requestMeter.mark();
                    final long start = System.nanoTime();
                    try {
                        try (final CloseableHttpResponse ignored = client.execute(request)) {
                            responseMeter.mark();
                        } catch (final Throwable t) {
                            markError(errorsMeter, errors, t);
                        }
                    } catch (final Throwable t) {
                        markError(errorsMeter, errors, t);
                    } finally {
                        final long now = System.nanoTime();
                        final long millis = TimeUnit.NANOSECONDS.toMillis(now - start);
                        sts.record(start, millis);
                        sh.recordValue(millis);
                        sem.release();
                        cdl.countDown();
                    }
                }).start();
            }
            spawnProgressCheckThread(log, duration, checkCycle, cdl);
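            // Wait until every spawned fiber has finished (successfully or not) and counted the latch down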
            cdl.await();
        }
    } catch (final ParseException ex) {
        System.err.println("Parsing failed.  Reason: " + ex.getMessage());
    }
}