List of usage examples for java.util.concurrent.CountDownLatch.getCount()
public long getCount()
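Before the per-project examples, a minimal self-contained sketch (class and variable names are illustrative, not taken from any of the source files below) of how countDown(), await() and getCount() usually relate: getCount() reports how many countDown() calls are still outstanding and is mostly useful for assertions, progress reporting and debugging.

import java.util.concurrent.CountDownLatch;

public class GetCountExample {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(3);
        System.out.println("initial count: " + latch.getCount()); // 3

        for (int i = 0; i < 3; i++) {
            final int worker = i;
            new Thread(() -> {
                System.out.println("worker " + worker + " done");
                latch.countDown(); // decrements the count; getCount() reflects it immediately
            }).start();
        }

        latch.await(); // blocks until the count reaches zero
        System.out.println("remaining count: " + latch.getCount()); // 0
    }
}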
From source file:org.mskcc.shenkers.view.IntervalViewNGTest.java
public void testRangeSetIntervalView() throws InterruptedException {
    System.out.println("testIntervalView");
    CountDownLatch l = new CountDownLatch(1);
    System.out.println("before");
    Platform.runLater(() -> {
        System.out.println("running");
        double[][] intervals = { { .1, .2 } };
        // Range r = null;
        RangeSet<Double> rs = TreeRangeSet.create();
        rs.add(Range.closed(.1, .2));
        rs.add(Range.closed(.2, .3));
        rs.add(Range.closed(.32, .35));
        rs.add(Range.closed(.6, .8));
        RangeSetIntervalView p = new RangeSetIntervalView(0, 100);
        p.setData(Arrays.asList(new Pair(10, 20), new Pair(20, 30), new Pair(32, 35), new Pair(60, 80)));
        // p.prefTileHeightProperty().bind(p.heightProperty());
        Stage stage = new Stage();
        stage.setOnHidden(e -> {
            l.countDown();
            System.out.println("count " + l.getCount());
        });
        Scene scene = new Scene(p, 300, 300, Color.GRAY);
        stage.setTitle("SimpleIntervalView");
        stage.setScene(scene);
        stage.show();
    });
    System.out.println("after");
    l.await();
    Thread.sleep(1000);
}
From source file:org.smartfrog.services.anubis.PartitionTest.java
/**
 * Test that a partition can form two asymmetric partitions, with one
 * stabilizing, and then reform the original partition.
 */
public void testAsymmetricPartition() throws Exception {
    int minorPartitionSize = configs.length / 2;
    BitView A = new BitView();
    BitView B = new BitView();
    BitView All = new BitView();
    CountDownLatch latchA = new CountDownLatch(minorPartitionSize);
    List<TestNode> partitionA = new ArrayList<TestNode>();
    CountDownLatch latchB = new CountDownLatch(minorPartitionSize);
    List<TestNode> partitionB = new ArrayList<TestNode>();
    int i = 0;
    for (TestNode member : partition) {
        All.add(member.getIdentity());
        if (i++ % 2 == 0) {
            partitionA.add(member);
            member.latch = latchA;
            member.cardinality = minorPartitionSize;
            A.add(member.getIdentity());
        } else {
            partitionB.add(member);
            member.latch = latchB;
            member.cardinality = minorPartitionSize;
            B.add(member.getIdentity());
        }
    }
    log.info("asymmetric partitioning: " + A);
    controller.asymPartition(A);
    log.info("Awaiting stability of minor partition A");
    assertTrue("Partition A did not stabilize", latchA.await(60, TimeUnit.SECONDS));
    // The other partition should still be unstable.
    assertEquals(configs.length / 2, latchB.getCount());
    for (TestNode member : partitionA) {
        assertEquals(A, member.getPartition());
    }
    // reform
    CountDownLatch latch = new CountDownLatch(configs.length);
    for (TestNode node : partition) {
        node.latch = latch;
        node.cardinality = configs.length;
    }
    controller.clearPartitions();
    log.info("Awaiting stability of reformed major partition");
    assertTrue("Partition did not reform", latch.await(60, TimeUnit.SECONDS));
    for (TestNode member : partition) {
        assertEquals(All, member.getPartition());
    }
}
From source file:org.atmosphere.cpr.PoolableBroadcasterFactoryTest.java
@Test
public void concurrentAccessLookupTest() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1000);
    final AtomicInteger created = new AtomicInteger();
    factory.poolableProvider(new UnboundedApachePoolableProvider());
    factory.addBroadcasterListener(new BroadcasterListenerAdapter() {
        @Override
        public void onPostCreate(Broadcaster b) {
            created.incrementAndGet();
        }

        @Override
        public void onComplete(Broadcaster b) {
        }

        @Override
        public void onPreDestroy(Broadcaster b) {
        }
    });
    final ConcurrentLinkedQueue<Broadcaster> c = new ConcurrentLinkedQueue<Broadcaster>();
    ExecutorService r = Executors.newCachedThreadPool();
    final String me = new String("me");
    for (int i = 0; i < 1000; i++) {
        r.submit(new Runnable() {
            @Override
            public void run() {
                c.add(factory.get(me));
                latch.countDown();
            }
        });
    }
    try {
        assertTrue(latch.await(20, TimeUnit.SECONDS));
        assertEquals(latch.getCount(), 0);
        assertEquals(c.size(), 1000);
        assertEquals(created.get(), 1000);
        for (Broadcaster b : c) {
            b.destroy();
        }
        assertNotNull(factory.lookup("name" + UUID.randomUUID().toString(), true).broadcast("test"));
        assertEquals(factory.poolableProvider().poolSize(), 1000);
    } finally {
        factory.destroy();
        r.shutdownNow();
    }
}
From source file:com.ottogroup.bi.spqr.pipeline.component.queue.chronicle.DefaultStreamingMessageQueueTest.java
/**
 * Inserts a configurable number of messages into a {@link Chronicle} and measures the
 * duration it takes to read the content from it using the {@link DefaultStreamingMessageQueue} implementation
 */
// @Test
public void testNext_performanceTest() throws Exception {
    Properties props = new Properties();
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_DELETE_ON_EXIT, "true");
    props.put(DefaultStreamingMessageQueue.CFG_CHRONICLE_QUEUE_PATH, System.getProperty("java.io.tmpdir"));
    final DefaultStreamingMessageQueue inbox = new DefaultStreamingMessageQueue();
    inbox.setId("testNext_performanceTest");
    inbox.initialize(props);
    final StreamingMessageQueueProducer producer = inbox.getProducer();
    final StreamingMessageQueueConsumer consumer = inbox.getConsumer();
    final CountDownLatch latch = new CountDownLatch(numberOfMessagesPerfTest);
    ExecutorService svc = Executors.newCachedThreadPool();

    Future<Integer> producerDurationFuture = svc.submit(new Callable<Integer>() {
        public Integer call() {
            StreamingDataMessage object = new StreamingDataMessage(new byte[] { 01, 2, 3, 4, 5, 6, 7, 9 },
                    System.currentTimeMillis());
            long s1 = System.nanoTime();
            for (int i = 0; i < numberOfMessagesPerfTest; i++) {
                producer.insert(object);
            }
            long s2 = System.nanoTime();
            return (int) (s2 - s1);
        }
    });

    Future<Integer> durationFuture = svc.submit(new Callable<Integer>() {
        public Integer call() {
            StreamingDataMessage msg = null;
            long start = System.nanoTime();
            while (true) {
                msg = consumer.next();
                if (msg != null) {
                    latch.countDown();
                    if (latch.getCount() == 0)
                        break;
                } else {
                    LockSupport.parkNanos(1);
                }
            }
            long end = System.nanoTime();
            return (int) (end - start);
        }
    });

    try {
        Assert.assertTrue("Failed to receive expected number of messages", latch.await(10, TimeUnit.SECONDS));
    } catch (InterruptedException e) {
        Assert.fail("Failed to receive expected number of messages");
    }

    int producerDuration = producerDurationFuture.get();
    int duration = durationFuture.get();

    double messagesPerNano = ((double) numberOfMessagesPerfTest / (double) duration);
    double messagesPerNanoRounded = (double) Math.round(messagesPerNano * 10000) / 10000;
    double messagesPerMilli = messagesPerNano * 1000000;
    messagesPerMilli = (double) Math.round(messagesPerMilli * 100) / 100;
    long messagesPerSecondTmps = Math.round(messagesPerNano * 1000000 * 1000);
    double messagesPerSecond = (double) Math.round(messagesPerSecondTmps);
    double nanosPerMessage = ((double) duration / (double) numberOfMessagesPerfTest);
    nanosPerMessage = (double) Math.round(nanosPerMessage * 100) / 100;

    logger.info("message count: " + numberOfMessagesPerfTest);
    logger.info("message producing: " + producerDuration + "ns, "
            + TimeUnit.NANOSECONDS.toMillis(producerDuration) + "ms, "
            + TimeUnit.NANOSECONDS.toSeconds(producerDuration) + "s");
    logger.info("message consumption: " + duration + "ns, " + TimeUnit.NANOSECONDS.toMillis(duration) + "ms, "
            + TimeUnit.NANOSECONDS.toSeconds(duration) + "s");
    logger.info("message throughput: " + messagesPerNanoRounded + " msgs/ns, " + messagesPerMilli
            + " msgs/ms, " + messagesPerSecond + " msgs/s");

    svc.shutdownNow();
}
From source file:org.ocelotds.integration.AbstractOcelotTest.java
/**
 * Test reception of 0 msg triggered by call Runnable
 *
 * @param wssession
 * @param topic
 * @param trigger
 */
protected void testWait0MessageToTopic(Session wssession, String topic, Runnable trigger) {
    try {
        long t0 = System.currentTimeMillis();
        CountDownLatch lock = new CountDownLatch(1);
        CountDownMessageHandler messageHandler = new CountDownMessageHandler(topic, lock);
        wssession.addMessageHandler(messageHandler);
        trigger.run();
        boolean await = lock.await(TIMEOUT, TimeUnit.MILLISECONDS);
        long t1 = System.currentTimeMillis();
        assertThat(await).as("Timeout. waiting %d ms. Remain %d/%d msgs", t1 - t0, lock.getCount(), 1)
                .isFalse();
        wssession.removeMessageHandler(messageHandler);
    } catch (IllegalStateException | InterruptedException ex) {
        fail(ex.getMessage());
    }
}
From source file:org.ocelotds.integration.AbstractOcelotTest.java
/**
 * Test reception of X msg triggered by call Runnable
 *
 * @param wssession
 * @param nbMsg
 * @param topic
 * @param trigger
 */
protected void testWaitXMessageToTopic(Session wssession, int nbMsg, String topic, Runnable trigger) {
    try {
        long t0 = System.currentTimeMillis();
        CountDownLatch lock = new CountDownLatch(nbMsg);
        CountDownMessageHandler messageHandler = new CountDownMessageHandler(topic, lock);
        wssession.addMessageHandler(messageHandler);
        trigger.run();
        boolean await = lock.await(TIMEOUT * nbMsg, TimeUnit.MILLISECONDS);
        long t1 = System.currentTimeMillis();
        assertThat(await).as("Timeout. waiting %d ms. Remain %d/%d msgs", t1 - t0, lock.getCount(), nbMsg)
                .isTrue();
        wssession.removeMessageHandler(messageHandler);
    } catch (IllegalStateException | InterruptedException ex) {
        fail(ex.getMessage());
    }
}
From source file:com.netflix.curator.framework.imps.TestWithCluster.java
@Test
public void testReadOnly() throws Exception {
    System.setProperty("readonlymode.enabled", "true");
    try {
        Timing timing = new Timing();
        CuratorFramework client = null;
        TestingCluster cluster = new TestingCluster(2);
        try {
            cluster.start();
            client = CuratorFrameworkFactory.builder().connectString(cluster.getConnectString())
                    .canBeReadOnly(true).connectionTimeoutMs(timing.connection())
                    .sessionTimeoutMs(timing.session()).retryPolicy(new ExponentialBackoffRetry(100, 3))
                    .build();
            client.start();
            client.create().forPath("/test");

            final CountDownLatch readOnlyLatch = new CountDownLatch(1);
            final CountDownLatch reconnectedLatch = new CountDownLatch(1);
            ConnectionStateListener listener = new ConnectionStateListener() {
                @Override
                public void stateChanged(CuratorFramework client, ConnectionState newState) {
                    if (newState == ConnectionState.READ_ONLY) {
                        readOnlyLatch.countDown();
                    } else if (newState == ConnectionState.RECONNECTED) {
                        reconnectedLatch.countDown();
                    }
                }
            };
            client.getConnectionStateListenable().addListener(listener);

            InstanceSpec ourInstance = cluster
                    .findConnectionInstance(client.getZookeeperClient().getZooKeeper());
            Iterator<InstanceSpec> iterator = cluster.getInstances().iterator();
            InstanceSpec killInstance = iterator.next();
            if (killInstance.equals(ourInstance)) {
                killInstance = iterator.next(); // kill the instance we're not connected to
            }
            cluster.killServer(killInstance);

            Assert.assertEquals(reconnectedLatch.getCount(), 1);
            Assert.assertTrue(timing.awaitLatch(readOnlyLatch));
            Assert.assertEquals(reconnectedLatch.getCount(), 1);

            cluster.restartServer(killInstance);
            Assert.assertTrue(timing.awaitLatch(reconnectedLatch));
        } finally {
            IOUtils.closeQuietly(client);
            IOUtils.closeQuietly(cluster);
        }
    } finally {
        System.clearProperty("readonlymode.enabled");
    }
}
From source file:com.googlecode.sardine.AuthenticationTest.java
@Test
public void testBasicPreemptiveAuth() throws Exception {
    final DefaultHttpClient client = new DefaultHttpClient();
    final CountDownLatch count = new CountDownLatch(1);
    client.setCredentialsProvider(new BasicCredentialsProvider() {
        @Override
        public Credentials getCredentials(AuthScope authscope) {
            // Set flag that credentials have been used indicating preemptive authentication
            count.countDown();
            return new Credentials() {
                public Principal getUserPrincipal() {
                    return new BasicUserPrincipal("anonymous");
                }

                public String getPassword() {
                    return "invalid";
                }
            };
        }
    });
    SardineImpl sardine = new SardineImpl(client);
    URI url = URI.create("http://sudo.ch/dav/basic/");
    // Send basic authentication header in initial request
    sardine.enablePreemptiveAuthentication(url.getHost());
    try {
        sardine.list(url.toString());
        fail("Expected authorization failure");
    } catch (SardineException e) {
        // Expect Authorization Failed
        assertEquals(401, e.getStatusCode());
        // Make sure credentials have been queried
        assertEquals("No preemptive authentication attempt", 0, count.getCount());
    }
}
From source file:com.github.sardine.AuthenticationTest.java
@Test
public void testBasicPreemptiveAuth() throws Exception {
    final HttpClientBuilder client = HttpClientBuilder.create();
    final CountDownLatch count = new CountDownLatch(1);
    client.setDefaultCredentialsProvider(new BasicCredentialsProvider() {
        @Override
        public Credentials getCredentials(AuthScope authscope) {
            // Set flag that credentials have been used indicating preemptive authentication
            count.countDown();
            return new Credentials() {
                public Principal getUserPrincipal() {
                    return new BasicUserPrincipal("anonymous");
                }

                public String getPassword() {
                    return "invalid";
                }
            };
        }
    });
    SardineImpl sardine = new SardineImpl(client);
    URI url = URI.create("http://sudo.ch/dav/basic/");
    // Send basic authentication header in initial request
    sardine.enablePreemptiveAuthentication(url.getHost());
    try {
        sardine.list(url.toString());
        fail("Expected authorization failure");
    } catch (SardineException e) {
        // Expect Authorization Failed
        assertEquals(401, e.getStatusCode());
        // Make sure credentials have been queried
        assertEquals("No preemptive authentication attempt", 0, count.getCount());
    }
}
From source file:org.springframework.cloud.stream.binder.rabbit.RabbitBinderTests.java
@Test
public void testAutoBindDLQPartionedConsumerFirst() throws Exception {
    RabbitTestBinder binder = getBinder();
    ExtendedConsumerProperties<RabbitConsumerProperties> properties = createConsumerProperties();
    properties.getExtension().setPrefix("bindertest.");
    properties.getExtension().setAutoBindDlq(true);
    properties.setMaxAttempts(1); // disable retry
    properties.setPartitioned(true);
    properties.setInstanceIndex(0);
    DirectChannel input0 = createBindableChannel("input", createConsumerBindingProperties(properties));
    input0.setBeanName("test.input0DLQ");
    Binding<MessageChannel> input0Binding = binder.bindConsumer("partDLQ.0", "dlqPartGrp", input0, properties);
    Binding<MessageChannel> defaultConsumerBinding1 = binder.bindConsumer("partDLQ.0", "default",
            new QueueChannel(), properties);
    properties.setInstanceIndex(1);
    DirectChannel input1 = createBindableChannel("input1", createConsumerBindingProperties(properties));
    input1.setBeanName("test.input1DLQ");
    Binding<MessageChannel> input1Binding = binder.bindConsumer("partDLQ.0", "dlqPartGrp", input1, properties);
    Binding<MessageChannel> defaultConsumerBinding2 = binder.bindConsumer("partDLQ.0", "default",
            new QueueChannel(), properties);

    ExtendedProducerProperties<RabbitProducerProperties> producerProperties = createProducerProperties();
    producerProperties.getExtension().setPrefix("bindertest.");
    producerProperties.getExtension().setAutoBindDlq(true);
    producerProperties.setPartitionKeyExtractorClass(PartitionTestSupport.class);
    producerProperties.setPartitionSelectorClass(PartitionTestSupport.class);
    producerProperties.setPartitionCount(2);
    BindingProperties bindingProperties = createProducerBindingProperties(producerProperties);
    DirectChannel output = createBindableChannel("output", bindingProperties);
    output.setBeanName("test.output");
    Binding<MessageChannel> outputBinding = binder.bindProducer("partDLQ.0", output, producerProperties);

    final CountDownLatch latch0 = new CountDownLatch(1);
    input0.subscribe(new MessageHandler() {
        @Override
        public void handleMessage(Message<?> message) throws MessagingException {
            if (latch0.getCount() <= 0) {
                throw new RuntimeException("dlq");
            }
            latch0.countDown();
        }
    });

    final CountDownLatch latch1 = new CountDownLatch(1);
    input1.subscribe(new MessageHandler() {
        @Override
        public void handleMessage(Message<?> message) throws MessagingException {
            if (latch1.getCount() <= 0) {
                throw new RuntimeException("dlq");
            }
            latch1.countDown();
        }
    });

    output.send(new GenericMessage<>(1));
    assertThat(latch1.await(10, TimeUnit.SECONDS)).isTrue();

    output.send(new GenericMessage<>(0));
    assertThat(latch0.await(10, TimeUnit.SECONDS)).isTrue();

    output.send(new GenericMessage<>(1));

    RabbitTemplate template = new RabbitTemplate(this.rabbitAvailableRule.getResource());
    template.setReceiveTimeout(10000);

    String streamDLQName = "bindertest.partDLQ.0.dlqPartGrp.dlq";

    org.springframework.amqp.core.Message received = template.receive(streamDLQName);
    assertThat(received).isNotNull();
    assertThat(received.getMessageProperties().getReceivedRoutingKey())
            .isEqualTo("bindertest.partDLQ.0.dlqPartGrp-1");
    assertThat(received.getMessageProperties().getHeaders()).doesNotContainKey(BinderHeaders.PARTITION_HEADER);

    output.send(new GenericMessage<>(0));
    received = template.receive(streamDLQName);
    assertThat(received).isNotNull();
    assertThat(received.getMessageProperties().getReceivedRoutingKey())
            .isEqualTo("bindertest.partDLQ.0.dlqPartGrp-0");
    assertThat(received.getMessageProperties().getHeaders()).doesNotContainKey(BinderHeaders.PARTITION_HEADER);

    input0Binding.unbind();
    input1Binding.unbind();
    defaultConsumerBinding1.unbind();
    defaultConsumerBinding2.unbind();
    outputBinding.unbind();
}