Example usage for java.util.concurrent.CountDownLatch CountDownLatch(int)

Introduction

On this page you can find example usage for the java.util.concurrent.CountDownLatch constructor CountDownLatch(int).

Prototype

public CountDownLatch(int count) 

Document

Constructs a CountDownLatch initialized with the given count.
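
Before the real-world examples, a minimal, self-contained sketch (not taken from any of the source files below) shows the constructor's contract: a latch created with a count of three blocks await() until countDown() has been called three times.

import java.util.concurrent.CountDownLatch;

public class CountDownLatchSketch {

    public static void main(String[] args) throws InterruptedException {
        int workers = 3;
        // The constructor sets the initial count; await() blocks until it reaches zero
        final CountDownLatch latch = new CountDownLatch(workers);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(new Runnable() {
                public void run() {
                    System.out.println("worker " + id + " finished");
                    latch.countDown(); // decrement once per finished worker
                }
            }).start();
        }

        latch.await(); // returns only after all three workers have counted down
        System.out.println("all workers done");
    }
}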

Usage

From source file:com.appdynamics.monitors.azure.statsCollector.AzureServiceBusStatsCollector.java

public Map<String, String> collectQueueStats(final Azure azure, final String namespaceName,
        Set<String> queueNames, Set<String> queueStats, int queueThreads) throws TaskExecutionException {

    final Map<String, String> valueMap = createValueMap(azure, namespaceName, QUEUES, queueStats);

    ListeningExecutorService queueService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(queueThreads));
    final Map<String, String> queueMetricMap = new ConcurrentHashMap<String, String>();
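    // Callbacks write to this map from executor threads; the latch gets one count per queue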
    final CountDownLatch countDownLatch = new CountDownLatch(queueNames.size());

    try {
        for (final String queueName : queueNames) {
            valueMap.put("ResourceName", queueName);
            try {
                ListenableFuture<Map<String, String>> getQueueNames = queueService
                        .submit(new Callable<Map<String, String>>() {
                            public Map<String, String> call() throws IOException {
                                return getStatsFromAzure(azure, namespaceName, valueMap, queueName, QUEUES);
                            }
                        });

                Futures.addCallback(getQueueNames, new FutureCallback<Map<String, String>>() {
                    public void onSuccess(Map<String, String> queueStats) {
                        queueMetricMap.putAll(queueStats);
                        countDownLatch.countDown();
                    }

                    public void onFailure(Throwable thrown) {
                        countDownLatch.countDown();
                        logger.error("Unable to get stats for queue [" + queueName + "] in namespace ["
                                + namespaceName + "]", thrown);
                    }
                });

            } catch (Exception e) {
                logger.error("Error getting stats for queue [" + namespaceName + "/" + queueName + "]", e);
                throw new TaskExecutionException(
                        "Error getting stats for queue [" + namespaceName + "/" + queueName + "]", e);
            }
        }
    } finally {
        queueService.shutdown();
    }
    try {
        countDownLatch.await();
    } catch (InterruptedException e) {
        logger.error("Unable to wait till getting the queue stats", e);
    }
    return queueMetricMap;
}

From source file:com.vmware.photon.controller.api.client.resource.DeploymentApiTest.java

@Test
public void testListAllAsync() throws Exception {
    Deployment deployment = getNewDeployment();

    ResourceList<Deployment> deploymentResourceList = new ResourceList<>(Arrays.asList(deployment));

    ObjectMapper mapper = new ObjectMapper();
    String serializedResponse = mapper.writeValueAsString(deploymentResourceList);

    setupMocks(serializedResponse, HttpStatus.SC_OK);

    DeploymentApi deploymentApi = new DeploymentApi(restClient);

    final CountDownLatch latch = new CountDownLatch(1);
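    // Released by either callback, so the test can block until the async call completes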

    deploymentApi.listAllAsync(new FutureCallback<ResourceList<Deployment>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Deployment> result) {
            assertEquals(result.getItems(), deploymentResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:io.undertow.server.handlers.RequestLimitingHandlerTestCase.java

@Test
public void testRateLimitingHandlerQueueFull() throws ExecutionException, InterruptedException {
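    // Release any threads still waiting on the old latch, then start this test with a fresh one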
    latch.countDown();
    latch = new CountDownLatch(1);
    ExecutorService executor = Executors.newFixedThreadPool(N_THREADS * 2);
    try {
        final List<Future<?>> futures = new ArrayList<>();
        for (int i = 0; i < N_THREADS * 2; ++i) {
            futures.add(executor.submit(new Callable<String>() {
                @Override
                public String call() {
                    TestHttpClient client = new TestHttpClient();
                    try {
                        HttpGet get = new HttpGet(DefaultServer.getDefaultServerURL());
                        HttpResponse result = client.execute(get);
                        if (result.getStatusLine().getStatusCode() == 513) {
                            return "513";
                        }
                        Assert.assertEquals(StatusCodes.OK, result.getStatusLine().getStatusCode());
                        return HttpClientUtils.readResponse(result);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    } finally {
                        client.getConnectionManager().shutdown();
                    }
                }
            }));
        }
        Thread.sleep(300);
        latch.countDown();
        for (Future<?> future : futures) {
            String res = (String) future.get();
            Assert.assertTrue(res, res.equals("1") || res.equals("2") || res.equals("513"));
        }
        futures.clear();
        for (int i = 0; i < 2; ++i) {
            futures.add(executor.submit(new Callable<String>() {
                @Override
                public String call() {
                    TestHttpClient client = new TestHttpClient();
                    try {
                        HttpGet get = new HttpGet(DefaultServer.getDefaultServerURL());
                        HttpResponse result = client.execute(get);
                        Assert.assertEquals(StatusCodes.OK, result.getStatusLine().getStatusCode());
                        return HttpClientUtils.readResponse(result);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    } finally {
                        client.getConnectionManager().shutdown();
                    }
                }
            }));
        }

        for (Future<?> future : futures) {
            String res = (String) future.get();
            Assert.assertTrue(res, res.equals("1") || res.equals("2"));
        }

    } finally {
        executor.shutdown();
    }

}

From source file:com.microsoft.office.core.ContactsAsyncTestCase.java

private void readAndCheck() throws Exception {
    // reread a contact
    final CountDownLatch cdl = new CountDownLatch(1);
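    // Single-count latch released by whichever callback runs; awaited at the end of the method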
    Futures.addCallback(Me.getContacts().getAsync(contact.getId()), new FutureCallback<IContact>() {
        @Override
        public void onFailure(Throwable t) {
            reportError(t);
            cdl.countDown();
        }

        @Override
        public void onSuccess(IContact result) {
            contact = result;
            try {
                Class<?> cls = contact.getClass();
                Class<?>[] emptyParametersArray = new Class<?>[0];
                for (ODataProperty property : sourceContact.getProperties()) {
                    try {
                        Method getter = cls.getMethod("get" + property.getName(), emptyParametersArray);
                        assertEquals(property.getPrimitiveValue().toValue(), getter.invoke(contact));
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            } catch (Throwable t) {
                reportError(t);
            }

            cdl.countDown();
        }
    });
    cdl.await();
}

From source file:org.cleverbus.core.common.asynch.queue.MessagePollExecutorTest.java

@Test
public void testGetNextMessage_moreThreads() throws InterruptedException {
    // prepare threads
    int threads = 5;
    final CountDownLatch latch = new CountDownLatch(threads);
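    // One count per worker; each counts down in a finally block, so the latch is released even if run() throws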
    Runnable task = new Runnable() {

        @Override
        public void run() {
            try {
                messagePollExecutor.run();
            } finally {
                latch.countDown();
            }
        }
    };

    mock.expectedMessageCount(3);

    // start processing and waits for result
    for (int i = 0; i < threads; i++) {
        new Thread(task).start();
    }

    latch.await();

    mock.assertIsSatisfied();

    // verify messages
    Message msg = findMessage("1234_4567");
    assertThat(msg, notNullValue());
    assertThat(msg.getState(), is(MsgStateEnum.PROCESSING));

    msg = findMessage("1234_4567_8");
    assertThat(msg, notNullValue());
    assertThat(msg.getState(), is(MsgStateEnum.PROCESSING));

    msg = findMessage("1234_4567_9");
    assertThat(msg, notNullValue());
    assertThat(msg.getState(), is(MsgStateEnum.PROCESSING));
}

From source file:com.mozilla.fhr.consumer.FHRConsumer.java

@Override
public void poll() {
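    // One latch count per stream; the wait loop below treats any countdown as a dead worker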
    final CountDownLatch latch = new CountDownLatch(streams.size());
    workers = new ArrayList<Future<Void>>(streams.size());
    for (final KafkaStream<Message> stream : streams) {
        workers.add(executor.submit(new FHRConsumerWorker(stream, latch)));
    }

    // Wait for all tasks to complete; in the normal case they run
    // indefinitely unless killed
    try {
        while (true) {
            latch.await(10, TimeUnit.SECONDS);
            if (latch.getCount() != streams.size()) {
                // we have a dead thread and should exit
                break;
            }
        }
    } catch (InterruptedException e) {
        LOG.info("Interrupted during polling", e);
    }

    // Spit out errors if there were any
    for (Future<Void> worker : workers) {
        try {
            if (worker.isDone() && !worker.isCancelled()) {
                worker.get(1, TimeUnit.SECONDS);
            }
        } catch (InterruptedException e) {
            LOG.error("Thread was interrupted:", e);
        } catch (ExecutionException e) {
            LOG.error("Exception occured in thread:", e);
        } catch (TimeoutException e) {
            LOG.error("Timed out waiting for thread result:", e);
        } catch (CancellationException e) {
            LOG.error("Thread has been canceled: ", e);
        }
    }
}

From source file:au.org.ala.layers.stats.ObjectsStatsGenerator.java

private static void updateArea(String fid) {

    try {
        Connection conn = getConnection();
        String sql = "SELECT pid from objects where area_km is null and st_geometrytype(the_geom) <> 'Point'";
        if (fid != null) {
            sql = sql + " and fid = '" + fid + "'";
        }

        sql = sql + " limit 200000;";

        System.out.println("loading area_km ...");
        Statement s1 = conn.createStatement();
        ResultSet rs1 = s1.executeQuery(sql);

        LinkedBlockingQueue<String> data = new LinkedBlockingQueue<String>();
        while (rs1.next()) {
            data.put(rs1.getString("pid"));
        }

        CountDownLatch cdl = new CountDownLatch(data.size());
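        // One count per pid in the queue; the AreaThreads count down as they work through it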

        AreaThread[] threads = new AreaThread[CONCURRENT_THREADS];
        for (int j = 0; j < CONCURRENT_THREADS; j++) {
            threads[j] = new AreaThread(data, cdl, getConnection().createStatement());
            threads[j].start();
        }

        cdl.await();

        for (int j = 0; j < CONCURRENT_THREADS; j++) {
            threads[j].s.close();
            threads[j].interrupt();
        }
        rs1.close();
        s1.close();
        conn.close();
        return;
    } catch (Exception e) {
        logger.error(e.getMessage(), e);
    }
    return;
}

From source file:com.vmware.photon.controller.api.client.resource.DeploymentRestApiTest.java

@Test
public void testListAllAsync() throws Exception {
    Deployment deployment = getNewDeployment();

    ResourceList<Deployment> deploymentResourceList = new ResourceList<>(Arrays.asList(deployment));

    ObjectMapper mapper = new ObjectMapper();
    String serializedResponse = mapper.writeValueAsString(deploymentResourceList);

    setupMocks(serializedResponse, HttpStatus.SC_OK);

    DeploymentApi deploymentApi = new DeploymentRestApi(restClient);

    final CountDownLatch latch = new CountDownLatch(1);
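    // Released by either callback, so the test can block until the async call completes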

    deploymentApi.listAllAsync(new FutureCallback<ResourceList<Deployment>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Deployment> result) {
            assertEquals(result.getItems(), deploymentResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:com.datatorrent.contrib.kafka.KafkaInputOperatorTest.java

/**
 * Test AbstractKafkaSinglePortInputOperator (i.e. an input adapter for
 * Kafka, aka consumer). This module receives data from an outside test
 * generator through the Kafka message bus and feeds that data into the
 * Malhar streaming platform.
 *
 * [Generate messages and send them to the Kafka message bus] ==> [Receive
 * the messages through the Kafka input adapter (i.e. consumer) and send them
 * using the emitTuples() interface on the output port during the onMessage call]
 *
 *
 * @throws Exception
 */
public void testKafkaInputOperator(int sleepTime, final int totalCount, KafkaConsumer consumer, boolean isValid,
        boolean idempotent) throws Exception {
    // initialize the latch for this test
    latch = new CountDownLatch(1);

    // Start producer
    KafkaTestProducer p = new KafkaTestProducer(TEST_TOPIC);
    p.setSendCount(totalCount);
    new Thread(p).start();

    // Create DAG for testing.
    LocalMode lma = LocalMode.newInstance();
    DAG dag = lma.getDAG();

    // Create KafkaSinglePortStringInputOperator
    KafkaSinglePortStringInputOperator node = dag.addOperator("Kafka message consumer",
            KafkaSinglePortStringInputOperator.class);
    if (isSuicide) {
        // make some extreme assumptions to make it fail if checkpointing wrong offsets
        dag.setAttribute(Context.DAGContext.CHECKPOINT_WINDOW_COUNT, 1);
        dag.setAttribute(Context.OperatorContext.STORAGE_AGENT,
                new FSStorageAgent("target/ck", new Configuration()));
        node.setMaxTuplesPerWindow(500);
    }

    if (idempotent) {
        node.setWindowDataManager(new FSWindowDataManager());
    }
    consumer.setTopic(TEST_TOPIC);

    node.setConsumer(consumer);

    consumer.setCacheSize(5000);

    if (isValid) {
        node.setZookeeper("localhost:" + KafkaOperatorTestBase.TEST_ZOOKEEPER_PORT[0]);
    }

    // Create Test tuple collector
    CollectorModule<String> collector = dag.addOperator("TestMessageCollector", new CollectorModule<String>());

    // Connect ports
    dag.addStream("Kafka message", node.outputPort, collector.inputPort).setLocality(Locality.CONTAINER_LOCAL);

    // Create local cluster
    final LocalMode.Controller lc = lma.getController();
    lc.setHeartbeatMonitoringEnabled(false);

    lc.runAsync();

    // Wait up to 30s for the consumer to finish consuming all the messages
    Assert.assertTrue("TIMEOUT: 30s ", latch.await(30000, TimeUnit.MILLISECONDS));

    // Check results
    Assert.assertTrue("Expected count >= " + totalCount + "; Actual count " + tupleCount.intValue(),
            totalCount <= tupleCount.intValue());
    logger.debug(String.format("Number of emitted tuples: %d", tupleCount.intValue()));

    p.close();
    lc.shutdown();
}