List of usage examples for the java.util.concurrent.CountDownLatch constructor CountDownLatch(int count)
public CountDownLatch(int count)
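Before the project-specific examples below, here is a minimal, self-contained sketch of the typical pattern: the count passed to the constructor is the number of countDown() calls that must occur before await() returns. The class name, worker count, and print statements are illustrative only and are not taken from any of the projects listed here.

import java.util.concurrent.CountDownLatch;

public class CountDownLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        final int workers = 3;
        // Latch opens after 'workers' calls to countDown().
        final CountDownLatch done = new CountDownLatch(workers);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    System.out.println("worker " + id + " finished");
                } finally {
                    done.countDown(); // always decrement, even if the work failed
                }
            }).start();
        }

        done.await(); // block until the count reaches zero
        System.out.println("all workers done");
    }
}

The examples that follow show the same construct-countDown-await pattern used for test synchronization, thread start barriers, and shutdown coordination.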
From source file:com.microsoft.office.core.MessagesAsyncTestCase.java
@Test(timeout = 60000)
public void deleteTest() throws Exception {
    // create message first
    prepareMessage();
    counter = new CountDownLatch(1);
    Futures.addCallback(Me.flushAsync(), new FutureCallback<Void>() {
        @Override
        public void onFailure(Throwable t) {
            reportError(t);
            counter.countDown();
        }

        @Override
        public void onSuccess(Void result) {
            try {
                // then remove
                deleteAndCheck();
            } catch (Throwable t) {
                reportError(t);
            }
            counter.countDown();
        }
    });
    counter.await();
}
From source file:com.manpowergroup.cn.icloud.util.Case1.java
private void p0(final DataSource dataSource, String name, int threadCount) throws Exception {
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch endLatch = new CountDownLatch(threadCount);
    for (int i = 0; i < threadCount; ++i) {
        Thread thread = new Thread() {
            public void run() {
                try {
                    startLatch.await();
                    for (int i = 0; i < LOOP_COUNT; ++i) {
                        Connection conn = dataSource.getConnection();
                        conn.close();
                    }
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
                endLatch.countDown();
            }
        };
        thread.start();
    }
    long startMillis = System.currentTimeMillis();
    long startYGC = TestUtil.getYoungGC();
    long startFullGC = TestUtil.getFullGC();
    startLatch.countDown();
    endLatch.await();
    long millis = System.currentTimeMillis() - startMillis;
    long ygc = TestUtil.getYoungGC() - startYGC;
    long fullGC = TestUtil.getFullGC() - startFullGC;
    System.out.println("thread " + threadCount + " " + name + " millis : "
            + NumberFormat.getInstance().format(millis) + ", YGC " + ygc + " FGC " + fullGC);
}
From source file:com.vmware.photon.controller.api.client.resource.ClusterApiTest.java
@Test
public void testDeleteAsync() throws IOException, InterruptedException {
    final Task responseTask = new Task();
    responseTask.setId("12345");
    responseTask.setState("QUEUED");
    responseTask.setQueuedTime(Date.from(Instant.now()));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(responseTask);

    setupMocks(serializedTask, HttpStatus.SC_CREATED);

    ClusterApi clusterApi = new ClusterApi(restClient);

    final CountDownLatch latch = new CountDownLatch(1);
    clusterApi.deleteAsync("foo", new FutureCallback<Task>() {
        @Override
        public void onSuccess(Task result) {
            assertEquals(result, responseTask);
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}
From source file:com.bt.aloha.fitnesse.OutboundCallFixture.java
public String joinDialogsOneAndTwoWithAutoTerminateDialogs() {
    latch = new CountDownLatch(1);
    callIds.add(callBean.joinCallLegs(firstDialogId, secondDialogId, AutoTerminateAction.True));
    latch.countDown();
    return "OK";
}
From source file:com.vmware.photon.controller.api.client.resource.ClusterRestApiTest.java
@Test
public void testDeleteAsync() throws IOException, InterruptedException {
    final Task responseTask = new Task();
    responseTask.setId("12345");
    responseTask.setState("QUEUED");
    responseTask.setQueuedTime(Date.from(Instant.now()));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(responseTask);

    setupMocks(serializedTask, HttpStatus.SC_CREATED);

    ClusterApi clusterApi = new ClusterRestApi(restClient);

    final CountDownLatch latch = new CountDownLatch(1);
    clusterApi.deleteAsync("foo", new FutureCallback<Task>() {
        @Override
        public void onSuccess(Task result) {
            assertEquals(result, responseTask);
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}
From source file:com.microsoft.office.core.EventsAsyncTestCase.java
private void deleteAndCheck() throws Exception {
    removeEvent();
    final CountDownLatch cdl = new CountDownLatch(1);
    Futures.addCallback(Me.getEvents().getAsync(event.getId()), new FutureCallback<IEvent>() {
        @Override
        public void onFailure(Throwable t) {
            reportError(t);
            cdl.countDown();
        }

        @Override
        public void onSuccess(IEvent result) {
            try {
                assertNull(result);
            } catch (Throwable t) {
                reportError(t);
            }
            cdl.countDown();
        }
    });
    cdl.await();
}
From source file:org.smartfrog.services.anubis.PartitionTest.java
/**
 * Test that a partition can form two stable sub partitions and then reform
 * the original partition.
 */
public void testSymmetricPartition() throws Exception {
    int minorPartitionSize = configs.length / 2;
    BitView fullPartition = new BitView();
    BitView A = new BitView();
    BitView B = new BitView();
    CountDownLatch latchA = new CountDownLatch(minorPartitionSize);
    List<TestNode> partitionA = new ArrayList<TestNode>();
    CountDownLatch latchB = new CountDownLatch(minorPartitionSize);
    List<TestNode> partitionB = new ArrayList<TestNode>();
    int i = 0;
    for (TestNode member : partition) {
        if (i++ % 2 == 0) {
            partitionA.add(member);
            member.latch = latchA;
            member.cardinality = minorPartitionSize;
            A.add(member.getIdentity());
        } else {
            partitionB.add(member);
            member.latch = latchB;
            member.cardinality = minorPartitionSize;
            B.add(member.getIdentity());
        }
        fullPartition.add(member.getIdentity());
    }
    log.info("symmetric partitioning: " + A);
    controller.symPartition(A);
    log.info("Awaiting stability of minor partition A");
    assertTrue("Partition A did not stabilize", latchA.await(60, TimeUnit.SECONDS));
    log.info("Awaiting stability of minor partition B");
    assertTrue("Partition B did not stabilize", latchB.await(60, TimeUnit.SECONDS));
    for (TestNode member : partitionA) {
        assertEquals(A, member.getPartition());
    }
    for (TestNode member : partitionB) {
        assertEquals(B, member.getPartition());
    }
    // reform
    CountDownLatch latch = new CountDownLatch(configs.length);
    for (TestNode node : partition) {
        node.latch = latch;
        node.cardinality = configs.length;
    }
    controller.clearPartitions();
    log.info("Awaiting stability of reformed major partition");
    assertTrue("Partition did not reform", latch.await(60, TimeUnit.SECONDS));
    for (TestNode member : partition) {
        assertEquals(fullPartition, member.getPartition());
    }
}
From source file:com.netflix.spectator.nflx.ChronosGcEventListener.java
private void sendToChronos(final byte[] json, final boolean blocking) {
    final URI uri = URI.create(CHRONOS_URI.get());
    final CountDownLatch latch = new CountDownLatch(1);
    final long start = System.nanoTime();
    rxHttp.postJson(uri, json).subscribe(new Action1<HttpClientResponse<ByteBuf>>() {
        @Override
        public void call(HttpClientResponse<ByteBuf> response) {
            final int code = response.getStatus().code();
            if (code != 200) {
                logger.warn("failed to send GC event to chronos (status={})", code);
            }
            final long latency = System.nanoTime() - start;
            final Id timerId = requestCount.withTag("status", "" + code);
            registry.timer(timerId).record(latency, TimeUnit.NANOSECONDS);
        }
    }, new Action1<Throwable>() {
        @Override
        public void call(Throwable t) {
            logger.warn("failed to send GC event to chronos", t);
            final String status = t.getClass().getSimpleName();
            final long latency = System.nanoTime() - start;
            final Id timerId = requestCount.withTag("status", status);
            registry.timer(timerId).record(latency, TimeUnit.NANOSECONDS);
            latch.countDown();
        }
    }, new Action0() {
        @Override
        public void call() {
            latch.countDown();
        }
    });

    // Used for unit tests so we can reliably detect completion
    if (blocking) {
        try {
            latch.await();
        } catch (InterruptedException e) {
            // Ignore
        }
    }
}
From source file:ai.grakn.engine.tasks.manager.multiqueue.Scheduler.java
public Scheduler(TaskStateStorage storage, ZookeeperConnection connection) {
    this.storage = storage;

    if (OPENED.compareAndSet(false, true)) {
        // Kafka listener
        consumer = kafkaConsumer(SCHEDULERS_GROUP);

        // Configure callback for a Kafka rebalance
        consumer.subscribe(singletonList(NEW_TASKS_TOPIC),
                rebalanceListener(consumer, new ExternalOffsetStorage(connection)));

        // Kafka writer
        producer = kafkaProducer();

        waitToClose = new CountDownLatch(1);

        ThreadFactory namedThreadFactory = new ThreadFactoryBuilder().setNameFormat("scheduler-pool-%d")
                .build();
        schedulingService = Executors.newScheduledThreadPool(SCHEDULER_THREADS, namedThreadFactory);

        LOG.debug("Scheduler started");
    } else {
        LOG.error("Scheduler already opened!");
    }
}