Usage examples for java.util.concurrent.CountDownLatch.countDown()
public void countDown()
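countDown() decrements the latch's count; once the count reaches zero, all threads blocked in await() are released. Before the project-specific examples below, here is a minimal, self-contained sketch of the basic pattern (not taken from any of the listed projects): a worker thread signals completion via countDown() while the calling thread waits with a timeout.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class CountDownLatchSketch {

    public static void main(String[] args) throws InterruptedException {
        // A latch with count 1: await() returns as soon as countDown() has been called once.
        final CountDownLatch done = new CountDownLatch(1);

        Thread worker = new Thread(() -> {
            // ... perform some work here ...
            done.countDown(); // signal that the work is finished
        });
        worker.start();

        // Block for up to 10 seconds; await returns true if the latch reached zero in time.
        if (done.await(10, TimeUnit.SECONDS)) {
            System.out.println("worker finished");
        } else {
            System.out.println("timed out waiting for worker");
        }
    }
}

The examples below follow the same shape: the latch is counted down from a callback or background thread, and the calling thread (often a test) blocks on await(), usually with a timeout, until the asynchronous work has happened.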
From source file:com.sixt.service.framework.kafka.messaging.KafkaFailoverIntegrationTest.java
@Ignore
@Test
public void consumerSubscribesToNonExistingTopic() throws InterruptedException {
    ServiceProperties serviceProperties = fillServiceProperties();
    Topic cruft = new Topic("krufty");
    CountDownLatch latch = new CountDownLatch(1);
    Consumer consumer = consumerFactoryWithHandler(serviceProperties, SayHelloToCmd.class,
            new MessageHandler<SayHelloToCmd>() {
                @Override
                public void onMessage(Message<SayHelloToCmd> message, OrangeContext context) {
                    latch.countDown();
                }
            }).consumerForTopic(cruft, new DiscardFailedMessages());

    Producer producer = new ProducerFactory(serviceProperties).createProducer();
    String key = RandomStringUtils.randomAscii(5);
    SayHelloToCmd payload = SayHelloToCmd.newBuilder().setName(key).build();
    Message request = Messages.requestFor(cruft, cruft, key, payload, new OrangeContext());
    producer.send(request);

    assertTrue(latch.await(1, TimeUnit.MINUTES));

    producer.shutdown();
    consumer.shutdown();

    // Results:
    // 1.) WITH topic auto creation i.e. KAFKA_AUTO_CREATE_TOPICS_ENABLE = true
    //     All ok, needs to discover coordinator etc.
    // 2.) NO topic auto creation i.e. KAFKA_AUTO_CREATE_TOPICS_ENABLE = false
    // 2017-04-12 18:27:16,701 [pool-9-thread-1] INFO c.s.s.f.kafka.messaging.Consumer - Consumer in group kruftmeister-com.sixt.service.unknown subscribed to topic kruftmeister
    // 2017-04-12 18:27:16,852 [pool-9-thread-1] WARN o.apache.kafka.clients.NetworkClient - Error while fetching metadata with correlation id 1 : {kruftmeister=UNKNOWN_TOPIC_OR_PARTITION}
    // 2017-04-12 18:27:18,876 [pool-9-thread-1] WARN o.apache.kafka.clients.NetworkClient - Error while fetching metadata with correlation id 40 : {kruftmeister=UNKNOWN_TOPIC_OR_PARTITION}
    // 2017-04-12 18:27:18,889 [pool-9-thread-1] INFO o.a.k.c.c.i.AbstractCoordinator - Discovered coordinator 172.19.0.3:9092 (id: 2147482646 rack: null) for group kruftmeister-com.sixt.service.unknown.
    // 2017-04-12 18:27:18,892 [pool-9-thread-1] INFO o.a.k.c.c.i.ConsumerCoordinator - Revoking previously assigned partitions [] for group kruftmeister-com.sixt.service.unknown
    // 2017-04-12 18:27:18,894 [pool-9-thread-1] DEBUG c.s.s.f.kafka.messaging.Consumer - ConsumerRebalanceListener.onPartitionsRevoked on []
    // 2017-04-12 18:27:18,917 [pool-9-thread-1] INFO o.a.k.c.c.i.AbstractCoordinator - (Re-)joining group kruftmeister-com.sixt.service.unknown
    // 2017-04-12 18:27:18,937 [pool-9-thread-1] INFO o.a.k.c.c.i.AbstractCoordinator - Marking the coordinator 172.19.0.3:9092 (id: 2147482646 rack: null) dead for group kruftmeister-com.sixt.service.unknown
    // 2017-04-12 18:27:19,041 [pool-9-thread-1] INFO o.a.k.c.c.i.AbstractCoordinator - Discovered coordinator 172.19.0.3:9092 (id: 2147482646 rack: null) for group kruftmeister-com.sixt.service.unknown.
    // 2017-04-12 18:27:19,041 [pool-9-thread-1] INFO o.a.k.c.c.i.AbstractCoordinator - (Re-)joining group kruftmeister-com.sixt.service.unknown
    // 2017-04-12 18:27:19,135 [pool-9-thread-1] INFO o.a.k.c.c.i.AbstractCoordinator - Successfully joined group kruftmeister-com.sixt.service.unknown with generation 1
    // 2017-04-12 18:27:19,135 [pool-9-thread-1] INFO o.a.k.c.c.i.ConsumerCoordinator - Setting newly assigned partitions [] for group kruftmeister-com.sixt.service.unknown
    // 2017-04-12 18:27:19,135 [pool-9-thread-1] DEBUG c.s.s.f.kafka.messaging.Consumer - ConsumerRebalanceListener.onPartitionsAssigned on []
    // -> assigned to a topic with no partitions?
}
From source file:io.fabric8.kubernetes.client.dsl.internal.RollingUpdater.java
/**
 * Since k8s v1.4.x, rc/rs deletes are asynchronous.
 * Lets wait until the resource is actually deleted in the server
 */
private void waitUntilDeleted(final String namespace, final String name) {
    final CountDownLatch countDownLatch = new CountDownLatch(1);

    final Runnable waitTillDeletedPoller = new Runnable() {
        public void run() {
            try {
                T res = resources().inNamespace(namespace).withName(name).get();
                if (res == null) {
                    countDownLatch.countDown();
                }
            } catch (KubernetesClientException e) {
                if (e.getCode() == 404) {
                    countDownLatch.countDown();
                }
            }
        }
    };

    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    ScheduledFuture poller = executor.scheduleWithFixedDelay(waitTillDeletedPoller, 0, 5, TimeUnit.SECONDS);
    ScheduledFuture logger = executor.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            LOG.debug("Found resource {}/{} not yet deleted on server, so waiting...", namespace, name);
        }
    }, 0, loggingIntervalMillis, TimeUnit.MILLISECONDS);

    try {
        countDownLatch.await(DEFAULT_SERVER_GC_WAIT_TIMEOUT, TimeUnit.MILLISECONDS);
        executor.shutdown();
    } catch (InterruptedException e) {
        poller.cancel(true);
        logger.cancel(true);
        executor.shutdown();
        LOG.warn("Still found deleted resource {} in namespace: {} after waiting for {} seconds so giving up",
                name, namespace, TimeUnit.MILLISECONDS.toSeconds(DEFAULT_SERVER_GC_WAIT_TIMEOUT));
    }
}
From source file:info.archinnov.achilles.it.TestAsyncDSLSimpleEntity.java
@Test
public void should_dsl_delete_async() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    final Date date = buildDateKey();
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final CountDownLatch latch = new CountDownLatch(1);
    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevel(ASYNC_LOGGER_STRING, "%msg - [%thread]%n");

    //When
    manager.dsl().delete().consistencyList().simpleMap().fromBaseTable().where().id_Eq(id).date_Eq(date)
            .withResultSetAsyncListener(rs -> {
                LOGGER.info(CALLED);
                latch.countDown();
                return rs;
            }).withTracing().executeAsync();

    //Then
    latch.await();
    logAsserter.assertContains("Called - ");
}
From source file:de.jackwhite20.japs.client.cache.impl.PubSubCacheImpl.java
@Override
public Future<Boolean> has(String key) {
    if (key == null || key.isEmpty()) {
        throw new IllegalArgumentException("key cannot be null or empty");
    }

    return executorService.submit(() -> {
        int id = CALLBACK_COUNTER.getAndIncrement();

        AtomicBoolean has = new AtomicBoolean(false);
        CountDownLatch countDownLatch = new CountDownLatch(1);

        callbacks.put(id, new Consumer<JSONObject>() {
            @Override
            public void accept(JSONObject jsonObject) {
                has.set(jsonObject.getBoolean("has"));
                countDownLatch.countDown();
            }
        });

        JSONObject jsonObject = new JSONObject().put("op", OpCode.OP_CACHE_HAS.getCode()).put("key", key)
                .put("id", id);
        write(jsonObject);

        countDownLatch.await();

        return has.get();
    });
}
From source file:org.red5.server.service.ShutdownServer.java
private void shutdownOrderly() {
    // shutdown internal listener
    shutdown.compareAndSet(false, true);
    // shutdown the plug-in launcher
    try {
        log.debug("Attempting to shutdown plugin registry");
        PluginRegistry.shutdown();
    } catch (Exception e) {
        log.warn("Exception shutting down plugin registry", e);
    }
    // shutdown the context loader
    if (contextLoader != null) {
        log.debug("Attempting to shutdown context loader");
        contextLoader.shutdown();
        contextLoader = null;
    }
    // shutdown the jee server
    LoaderBase jeeServer = applicationContext.getBean(LoaderBase.class);
    if (jeeServer != null) {
        // destroy is a DisposibleBean method not LoaderBase
        // jeeServer.destroy();
        jeeServer = null;
    }
    // attempt to kill the contexts
    final CountDownLatch latch = new CountDownLatch(3);
    new Thread(new Runnable() {
        public void run() {
            try {
                log.debug("Attempting to close core context");
                ((ConfigurableApplicationContext) coreContext).close();
                latch.countDown();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }).start();
    new Thread(new Runnable() {
        public void run() {
            try {
                log.debug("Attempting to close common context");
                ((ConfigurableApplicationContext) commonContext).close();
                latch.countDown();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }).start();
    new Thread(new Runnable() {
        public void run() {
            try {
                log.debug("Attempting to close app context");
                ((ConfigurableApplicationContext) applicationContext).close();
                latch.countDown();
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }).start();
    try {
        if (latch.await(shutdownDelay, TimeUnit.SECONDS)) {
            log.info("Application contexts are closed");
        } else {
            log.info("One or more contexts didn't close in the allotted time");
        }
    } catch (InterruptedException e) {
        log.error("Exception attempting to close app contexts", e);
    }
    // exit
    System.exit(0);
}
From source file:com.vmware.photon.controller.api.client.resource.ImagesApiTest.java
@Test
public void testGetAllImagesAsync() throws IOException, InterruptedException {
    Image image1 = new Image();
    image1.setId("image1");

    Image image2 = new Image();
    image2.setId("image2");

    final ResourceList<Image> imageResourceList = new ResourceList<>(Arrays.asList(image1, image2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(imageResourceList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    ImagesApi imagesApi = new ImagesApi(this.restClient);

    final CountDownLatch latch = new CountDownLatch(1);

    imagesApi.getImagesAsync(new FutureCallback<ResourceList<Image>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Image> result) {
            assertEquals(result.getItems(), imageResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}
From source file:com.vmware.photon.controller.api.client.resource.ImagesRestApiTest.java
@Test
public void testGetAllImagesAsync() throws IOException, InterruptedException {
    Image image1 = new Image();
    image1.setId("image1");

    Image image2 = new Image();
    image2.setId("image2");

    final ResourceList<Image> imageResourceList = new ResourceList<>(Arrays.asList(image1, image2));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(imageResourceList);

    setupMocks(serializedTask, HttpStatus.SC_OK);

    ImagesApi imagesApi = new ImagesRestApi(this.restClient);

    final CountDownLatch latch = new CountDownLatch(1);

    imagesApi.getImagesAsync(new FutureCallback<ResourceList<Image>>() {
        @Override
        public void onSuccess(@Nullable ResourceList<Image> result) {
            assertEquals(result.getItems(), imageResourceList.getItems());
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}
From source file:fi.jumi.core.stdout.OutputCapturerTest.java
/**
 * {@link Throwable#printStackTrace} synchronizes on {@code System.err}, but it can still interleave with something
 * that is printed to {@code System.out}. We can fix that by synchronizing all printing on {@code System.err}, but
 * only in one direction; the output from {@code Throwable.printStackTrace(System.out)} may still interleave with
 * printing to {@code System.err}.
 */
@Test
public void printing_a_stack_trace_to_stderr_and_normally_to_stdout_concurrently() throws Exception {
    CountDownLatch isPrintingToOut = new CountDownLatch(1);
    CountDownLatch hasPrintedStackTrace = new CountDownLatch(1);
    Exception exception = new Exception("dummy exception");
    CombinedOutput combinedOutput = new CombinedOutput();
    capturer.captureTo(combinedOutput);

    runConcurrently(() -> {
        await(isPrintingToOut);
        exception.printStackTrace(capturer.err());
        hasPrintedStackTrace.countDown();
    }, () -> {
        while (hasPrintedStackTrace.getCount() > 0) {
            capturer.out().println("*garbage*");
            isPrintingToOut.countDown();
        }
    });

    assertThat(combinedOutput.toString(), containsString(Throwables.getStackTraceAsString(exception)));
}
From source file:info.archinnov.achilles.it.TestAsyncCRUDSimpleEntity.java
@Test
public void should_find_by_id_async() throws Exception {
    //Given
    final long id = RandomUtils.nextLong(0, Long.MAX_VALUE);
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));
    final Date date = buildDateKey();

    final CountDownLatch latch = new CountDownLatch(1);
    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevel(ASYNC_LOGGER_STRING, "%msg - [%thread]%n");

    //When
    final CompletableFuture<Tuple2<SimpleEntity, ExecutionInfo>> tuple2 = manager.crud().findById(id, date)
            .withResultSetAsyncListener(rs -> {
                LOGGER.info(CALLED);
                latch.countDown();
                return rs;
            }).getAsyncWithStats();

    //Then
    latch.await();
    final SimpleEntity actual = tuple2.get()._1();
    final ExecutionInfo executionInfo = tuple2.get()._2();

    assertThat(actual).isNotNull();
    assertThat(actual.getConsistencyList()).containsExactly(ConsistencyLevel.QUORUM, ConsistencyLevel.LOCAL_ONE);
    assertThat(executionInfo.getQueriedHost().isUp()).isTrue();

    logAsserter.assertContains("Called - [achilles-default-executor");
}
From source file:de.taimos.httputils.Tester1.java
@Test
public void testGetAsync() throws InterruptedException {
    final CountDownLatch cdl = new CountDownLatch(1);
    WS.url("http://www.heise.de").getAsync(new HTTPResponseCallback() {

        @Override
        public void response(HttpResponse response) {
            Assert.assertEquals(WS.getStatus(response), 200);
            Assert.assertTrue(WS.isStatusOK(response));
            final String body = WS.getResponseAsString(response);
            Assert.assertNotNull(body);
            Assert.assertFalse(body.isEmpty());
            cdl.countDown();
        }

        @Override
        public void fail(Exception e) {
            System.out.println(e);
        }
    });
    Assert.assertTrue(cdl.await(10, TimeUnit.SECONDS));
}