Example usage for java.util.concurrent CountDownLatch countDown

List of usage examples for java.util.concurrent CountDownLatch countDown

Introduction

On this page you can find example usages of java.util.concurrent CountDownLatch countDown.

Prototype

public void countDown() 

Document

Decrements the count of the latch, releasing all waiting threads if the count reaches zero.
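
As a quick illustration of that contract, here is a minimal, self-contained sketch (class and variable names are hypothetical): worker threads call countDown() as they finish, and the main thread's await() returns once the count reaches zero.

import java.util.concurrent.CountDownLatch;

public class CountDownLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        final int workers = 3;
        final CountDownLatch latch = new CountDownLatch(workers);
        for (int i = 0; i < workers; i++) {
            new Thread(new Runnable() {
                public void run() {
                    // ... do some work ...
                    latch.countDown(); // signal that this worker is finished
                }
            }).start();
        }
        latch.await(); // blocks until the count reaches zero
        System.out.println("All workers finished");
    }
}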

Usage

From source file:com.yozio.android.YozioHelperTest.java

public void testInitializeExperimentsAsync() throws Throwable {
    JSONObject configs = null;
    try {
        configs = new JSONObject().put("key", "123");
        fakeApiService.setExperimentConfigs(configs);
    } catch (JSONException e) {
        fail();
    }

    final CountDownLatch signal = new CountDownLatch(1);
    runTestOnUiThread(new Runnable() {
        public void run() {
            helper.initializeExperimentsAsync(new InitializeExperimentsCallback() {
                public void onComplete() {
                    assertEquals(123, helper.intForKey("key", 111));
                    signal.countDown();
                }
            });
        }
    });
    signal.await(10, TimeUnit.SECONDS);
}
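
Note that this test discards the boolean returned by await(long, TimeUnit), so a timed-out wait would not by itself fail the test. Wrapping the call in an assertion, as some of the later examples on this page do, makes a timeout an explicit failure, for example:

    assertTrue("callback was not invoked within 10 seconds", signal.await(10, TimeUnit.SECONDS));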

From source file:com.dangdang.ddframe.job.internal.job.dataflow.AbstractDataFlowElasticJob.java

@SuppressWarnings("unchecked")
private void processDataForThroughput(final JobExecutionMultipleShardingContext shardingContext,
        final List<T> data) {
    int threadCount = getJobFacade().getConcurrentDataProcessThreadCount();
    if (threadCount <= 1 || data.size() <= threadCount) {
        processDataWithStatistics((C) shardingContext, data);
        return;
    }
    List<List<T>> splitData = Lists.partition(data, data.size() / threadCount);
    final CountDownLatch latch = new CountDownLatch(splitData.size());
    for (final List<T> each : splitData) {
        executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    processDataWithStatistics((C) shardingContext, each);
                } finally {
                    latch.countDown();
                }
            }
        });
    }
    latchAwait(latch);
}

From source file:com.twitter.hbc.httpclient.BasicClientTest.java

@Test
public void testIOExceptionDuringProcessing() throws Exception {
    ClientBase clientBase = new ClientBase("name", mockClient, new HttpHosts("http://hi"),
            new RawEndpoint("/endpoint", HttpConstants.HTTP_GET), mockAuth, mockProcessor,
            mockReconnectionManager, mockRateTracker);
    BasicClient client = new BasicClient(clientBase, executorService);
    final CountDownLatch latch = new CountDownLatch(1);
    when(mockStatusLine.getStatusCode()).thenReturn(200);

    doNothing().when(mockProcessor).setup(any(InputStream.class));
    doThrow(new IOException()).doThrow(new IOException()).doThrow(new IOException()).doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            latch.countDown();
            return null;
        }
    }).when(mockProcessor).process();

    client.connect();
    latch.await();
    assertFalse(clientBase.isDone());
    verify(mockProcessor, times(4)).setup(any(InputStream.class));
    // throw 3 exceptions, 4th one keeps going
    verify(mockProcessor, atLeast(4)).process();

    client.stop();
    verify(mockConnectionManager, atLeastOnce()).shutdown();
    assertTrue(client.isDone());
    assertEquals(EventType.STOPPED_BY_USER, clientBase.getExitEvent().getEventType());
}

From source file:com.vmware.photon.controller.api.client.resource.DisksApiTest.java

@Test
public void testGetTasksForDisksAsync() throws IOException, InterruptedException {
    Task task1 = new Task();
    task1.setId("task1");

    Task task2 = new Task();
    task2.setId("task2");

    final ResourceList<Task> taskResourceList = new ResourceList<>(Arrays.asList(task1, task2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(taskResourceList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    DisksApi disksApi = new DisksApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    disksApi.getTasksForDiskAsync("persistentDisk", new FutureCallback<ResourceList<Task>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Task> result) {
            assertEquals(result.getItems(), taskResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:com.vmware.photon.controller.api.client.resource.DisksRestApiTest.java

@Test
public void testGetTasksForDisksAsync() throws IOException, InterruptedException {
    Task task1 = new Task();
    task1.setId("task1");

    Task task2 = new Task();
    task2.setId("task2");

    final ResourceList<Task> taskResourceList = new ResourceList<>(Arrays.asList(task1, task2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(taskResourceList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    DisksApi disksApi = new DisksRestApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    disksApi.getTasksForDiskAsync("persistentDisk", new FutureCallback<ResourceList<Task>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Task> result) {
            assertEquals(result.getItems(), taskResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}

From source file:org.springframework.cloud.stream.config.MessageChannelConfigurerTests.java

@Test
public void testMessageConverterConfigurer() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    MessageHandler messageHandler = new MessageHandler() {
        @Override
        public void handleMessage(Message<?> message) throws MessagingException {
            assertThat(message.getPayload()).isInstanceOf(Tuple.class);
            assertThat(((Tuple) message.getPayload()).getFieldNames().get(0)).isEqualTo("message");
            assertThat(((Tuple) message.getPayload()).getValue(0)).isEqualTo("Hi");
            latch.countDown();
        }
    };
    testSink.input().subscribe(messageHandler);
    testSink.input().send(MessageBuilder.withPayload("{\"message\":\"Hi\"}").build());
    assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
    testSink.input().unsubscribe(messageHandler);
}

From source file:com.fusesource.forge.jmstest.frontend.CommandLineClientTest.java

@Test
public void testRunWithAbsoluteConfigLocation() throws InterruptedException {
    File benchmarkDir = JmsTesterUtils.getResourceAsFile("/simple");
    assertTrue(benchmarkDir.isDirectory());

    final CountDownLatch testPassedLatch = new CountDownLatch(1);
    SimpleTestExecutionContainer container = new SimpleTestExecutionContainer() {
        @Override
        protected BenchmarkCommandHandler createTestHandler() {
            return new DefaultCommandHandler() {
                public boolean handleCommand(BenchmarkCommand command) {
                    if (command.getCommandType() == CommandTypes.SUBMIT_BENCHMARK) {
                        testPassedLatch.countDown();
                        return true;
                    }
                    return false;
                }
            };
        }
    };
    container.start();

    // exercise unit
    commandLineClient.run(new String[] { "-command", "submit:" + benchmarkDir.getAbsolutePath() });

    assertTrue("CommandLineClient did not send a SUBMIT_BENCHMARK command",
            testPassedLatch.await(1, TimeUnit.SECONDS));

    container.stop();
}

From source file:com.fusesource.forge.jmstest.frontend.CommandLineClientTest.java

@Test
public void testRunWithRelativeConfigLocation() throws InterruptedException {
    File benchmarkDir = JmsTesterUtils.getResourceAsFile("/simple");
    assertTrue(benchmarkDir.isDirectory());

    final CountDownLatch testPassedLatch = new CountDownLatch(1);
    SimpleTestExecutionContainer container = new SimpleTestExecutionContainer() {
        @Override
        protected BenchmarkCommandHandler createTestHandler() {
            return new DefaultCommandHandler() {
                public boolean handleCommand(BenchmarkCommand command) {
                    if (command.getCommandType() == CommandTypes.SUBMIT_BENCHMARK) {
                        testPassedLatch.countDown();
                        return true;
                    }
                    return false;
                }
            };
        }
    };
    container.start();

    // exercise unit; note the relative path
    commandLineClient.run(new String[] { "-command", "submit:src/test/resources/simple" });

    assertTrue("CommandLineClient did not send a SUBMIT_BENCHMARK command",
            testPassedLatch.await(1, TimeUnit.SECONDS));
    container.stop();

}

From source file:example.springdata.mongodb.people.ReactivePersonRepositoryIntegrationTest.java

/**
 * This sample performs a count, inserts data and performs a count again using reactive operator chaining.
 */
@Test
public void shouldInsertAndCountData() throws Exception {

    CountDownLatch countDownLatch = new CountDownLatch(1);

    repository.count() //
            .doOnNext(System.out::println) //
            .thenMany(repository.saveAll(Flux.just(new Person("Hank", "Schrader", 43), //
                    new Person("Mike", "Ehrmantraut", 62)))) //
            .last() //
            .flatMap(v -> repository.count()) //
            .doOnNext(System.out::println) //
            .doOnSuccess(it -> countDownLatch.countDown()) //
            .doOnError(throwable -> countDownLatch.countDown()) //
            .subscribe();

    countDownLatch.await();
}

From source file:com.dangdang.ddframe.job.api.type.dataflow.executor.DataflowJobExecutor.java

private void processDataForSequence(final ShardingContext shardingContext,
        final Map<Integer, List<Object>> data) {
    final CountDownLatch latch = new CountDownLatch(data.size());
    for (final Map.Entry<Integer, List<Object>> each : data.entrySet()) {
        getExecutorService().submit(new Runnable() {

            @Override
            public void run() {
                try {
                    processData(shardingContext.getShardingContext(each.getKey()), each.getValue());
                } finally {
                    latch.countDown();
                }
            }
        });
    }
    latchAwait(latch);
}