List of usage examples for java.util.concurrent.CountDownLatch.countDown()
public void countDown()
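countDown() decrements the latch's count; when the count reaches zero, all threads blocked in await() are released, and calling countDown() on a latch whose count is already zero has no effect. Before the real-world examples below, here is a minimal, self-contained sketch of the typical pattern (class, thread, and variable names are illustrative only, not taken from the examples that follow):

import java.util.concurrent.CountDownLatch;

public class CountDownExample {

    public static void main(String[] args) throws InterruptedException {
        final int workers = 3;
        final CountDownLatch done = new CountDownLatch(workers);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    System.out.println("worker " + id + " finished");
                } finally {
                    done.countDown(); // count down even if the task fails
                }
            }).start();
        }

        done.await(); // blocks until countDown() has been called 'workers' times
        System.out.println("all workers done");
    }
}

Counting down in a finally block (or in an error callback) is the recurring theme in the examples below: it keeps the awaiting thread from hanging when a task throws.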
From source file:example.springdata.mongodb.people.RxJava2PersonRepositoryIntegrationTest.java
/**
 * This sample performs a count, inserts data and performs a count again using reactive operator chaining.
 */
@Test
public void shouldInsertAndCountData() throws Exception {

    CountDownLatch countDownLatch = new CountDownLatch(1);

    Flowable<Person> people = Flowable.just(new Person("Hank", "Schrader", 43), //
            new Person("Mike", "Ehrmantraut", 62));

    repository.count() //
            .doOnSuccess(System.out::println) //
            .toFlowable() //
            .switchMap(count -> repository.saveAll(people)) //
            .lastElement() //
            .toSingle() //
            .flatMap(v -> repository.count()) //
            .doOnSuccess(System.out::println) //
            .doAfterTerminate(countDownLatch::countDown) //
            .doOnError(throwable -> countDownLatch.countDown()) //
            .subscribe();

    countDownLatch.await();
}
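Note that the latch is counted down from both doAfterTerminate and doOnError, so countDownLatch.await() returns whether the reactive chain completes normally or fails.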
From source file:com.fusesource.forge.jmstest.executor.BenchmarkJMSProducerWrapper.java
private void runProducers(long rate, long duration) {
    BigDecimal bd = new BigDecimal(1000000).divide(new BigDecimal(rate), BigDecimal.ROUND_HALF_DOWN);
    long delayInMicroSeconds;
    try {
        delayInMicroSeconds = bd.longValueExact();
    } catch (ArithmeticException e) {
        delayInMicroSeconds = bd.longValue();
        log().warn("Publish rate cannot be expressed as a precise microsecond value, rounding to nearest value "
                + "[actualDelay: " + delayInMicroSeconds + "]");
    }
    int producersNeeded = (int) (rate / getPartConfig().getMaxConsumerRatePerThread());
    if (producersNeeded == 0) {
        producersNeeded++;
    }
    log.debug("Running " + producersNeeded + " producers for " + duration + "s");
    producers = new ArrayList<BenchmarkProducer>(producersNeeded);
    sendingDelay = delayInMicroSeconds * producersNeeded;
    executor = new ScheduledThreadPoolExecutor(producersNeeded);
    for (int i = 0; i < producersNeeded; i++) {
        try {
            BenchmarkProducer producer = new BenchmarkProducer(this);
            producer.start();
            producer.setMessageCounter(getProbe());
            producers.add(producer);
        } catch (Exception e) {
            throw new BenchmarkConfigurationException("Unable to create BenchmarkProducer instance", e);
        }
    }
    for (BenchmarkProducer producer : producers) {
        // TODO should really hold onto these and monitor for failures until the
        // executor is shutdown
        executor.scheduleAtFixedRate(new MessageSender(producer), 0, sendingDelay, sendingDelayUnit);
    }
    final CountDownLatch latch = new CountDownLatch(1);
    new ScheduledThreadPoolExecutor(1).schedule(new Runnable() {
        public void run() {
            try {
                log.debug("Shutting down producers.");
                executor.shutdown();
                for (BenchmarkProducer producer : producers) {
                    try {
                        producer.release();
                    } catch (Exception e) {
                        log().error("Error releasing producer.");
                    }
                }
                latch.countDown();
            } catch (Exception e) {
            }
        }
    }, duration, TimeUnit.SECONDS);
    try {
        latch.await();
    } catch (InterruptedException ie) {
        log().warn("Producer run has been interrupted ...");
    }
}
From source file:jp.realglobe.util.uploader.DelayedWatcherTest.java
/**
 * Tests that when several files are created while the detection callback is blocked,
 * only the latest one is reported afterwards.
 *
 * @throws Exception on error
 */
@Test
public void testLatestOnly() throws Exception {
    final long delay = 1_000L;
    final CountDownLatch stopper = new CountDownLatch(1);
    final DelayedWatcher watcher = new DelayedWatcher(this.directory, delay, true, path -> {
        stopper.await();
        this.detected.offer(path);
    });
    this.executor.submit(watcher);
    Thread.sleep(1_000L);

    Files.createFile(this.directory.resolve("0"));
    Thread.sleep(1_000L + delay); // the event for file "0" fires, but the callback blocks on the stopper

    final int n = 10;
    for (int i = 1; i < n; i++) {
        Files.createFile(this.directory.resolve("" + i));
    }
    Thread.sleep(1_000L); // files "1" to "9" are created while the callback is still blocked

    stopper.countDown(); // release the callback that is processing file "0"

    Assert.assertEquals(this.directory.resolve("0"), this.detected.poll(1_000L, TimeUnit.MILLISECONDS));
    Assert.assertEquals(this.directory.resolve("9"),
            this.detected.poll(1_000L + delay, TimeUnit.MILLISECONDS));
    Assert.assertNull(this.detected.poll(1_000L, TimeUnit.MILLISECONDS));
}
From source file:org.wisdom.framework.vertx.FileUploadTest.java
@Test
public void testFileUploadOfSmallFiles() throws InterruptedException, IOException {
    // Prepare the configuration
    ApplicationConfiguration configuration = mock(ApplicationConfiguration.class);
    when(configuration.getIntegerWithDefault(eq("vertx.http.port"), anyInt())).thenReturn(0);
    when(configuration.getIntegerWithDefault(eq("vertx.https.port"), anyInt())).thenReturn(-1);
    when(configuration.getIntegerWithDefault("vertx.acceptBacklog", -1)).thenReturn(-1);
    when(configuration.getIntegerWithDefault("vertx.receiveBufferSize", -1)).thenReturn(-1);
    when(configuration.getIntegerWithDefault("vertx.sendBufferSize", -1)).thenReturn(-1);
    when(configuration.getIntegerWithDefault("request.body.max.size", 100 * 1024)).thenReturn(100 * 1024);
    when(configuration.getLongWithDefault("http.upload.disk.threshold", DiskFileUpload.MINSIZE))
            .thenReturn(DiskFileUpload.MINSIZE);
    when(configuration.getLongWithDefault("http.upload.max", -1l)).thenReturn(-1l);
    when(configuration.getStringArray("wisdom.websocket.subprotocols")).thenReturn(new String[0]);
    when(configuration.getStringArray("vertx.websocket-subprotocols")).thenReturn(new String[0]);

    // Prepare the router with a controller
    Controller controller = new DefaultController() {
        @SuppressWarnings("unused")
        public Result index() throws IOException {
            FileItem item = context().file("upload");
            if (!item.isInMemory()) {
                return badRequest("In memory expected");
            }
            if (!item.name().equals("my-file.dat")) {
                return badRequest("broken name");
            }
            if (item.size() != 2048) {
                return badRequest("broken file");
            }
            if (!context().form().get("comment").get(0).equals("my description")) {
                return badRequest("broken form");
            }
            final File file = item.toFile();
            if (!file.exists() && file.length() != 2048) {
                return badRequest("broken in memory to file handling");
            }
            return ok(item.stream()).as(MimeTypes.BINARY);
        }
    };
    Router router = mock(Router.class);
    Route route = new RouteBuilder().route(HttpMethod.POST).on("/").to(controller, "index");
    when(router.getRouteFor(anyString(), anyString(), any(Request.class))).thenReturn(route);

    ContentEngine contentEngine = getMockContentEngine();

    // Configure the server.
    server = new WisdomVertxServer();
    server.accessor = new ServiceAccessor(null, configuration, router, contentEngine, executor, null,
            Collections.<ExceptionMapper>emptyList());
    server.configuration = configuration;
    server.vertx = vertx;
    server.start();

    VertxHttpServerTest.waitForStart(server);

    // Now start a bunch of clients
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(NUMBER_OF_CLIENTS);

    int port = server.httpPort();
    for (int i = 1; i < NUMBER_OF_CLIENTS + 1; ++i) { // create and start threads
        clients.execute(new Client(startSignal, doneSignal, port, i, 2048));
    }

    startSignal.countDown(); // let all threads proceed
    if (!doneSignal.await(60, TimeUnit.SECONDS)) { // wait for all to finish
        Assert.fail("testFileUploadOfSmallFiles - Client not served in time");
    }

    assertThat(failure).isEmpty();
    assertThat(success).hasSize(NUMBER_OF_CLIENTS);
}
From source file:org.martus.client.swingui.PureFxMainWindow.java
public void runInUiThreadAndWait(final Runnable toRun) throws InterruptedException, InvocationTargetException {
    if (Platform.isFxApplicationThread()) {
        toRun.run();
        return;
    }

    final CountDownLatch doneLatch = new CountDownLatch(1);
    Platform.runLater(() -> {
        try {
            toRun.run();
        } finally {
            doneLatch.countDown();
        }
    });

    doneLatch.await();
}
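Note the countDown() call in a finally block: the calling thread is released from doneLatch.await() even if the runnable submitted to Platform.runLater throws.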
From source file:net.spy.memcached.couch.TestingClient.java
public HttpFuture<String> asyncHttpPut(String uri, String document) throws UnsupportedEncodingException {
    final CountDownLatch couchLatch = new CountDownLatch(1);
    final HttpFuture<String> crv = new HttpFuture<String>(couchLatch, operationTimeout);
    HttpRequest request = new BasicHttpEntityEnclosingRequest("PUT", uri, HttpVersion.HTTP_1_1);
    StringEntity entity = new StringEntity(document);
    ((BasicHttpEntityEnclosingRequest) request).setEntity(entity);

    HttpOperationImpl op = new TestOperationImpl(request, new TestCallback() {
        private String json;

        @Override
        public void receivedStatus(OperationStatus status) {
            crv.set(json, status);
        }

        @Override
        public void complete() {
            couchLatch.countDown();
        }

        @Override
        public void getData(String response) {
            json = response;
        }
    });
    crv.setOperation(op);
    addOp(op);
    return crv;
}
From source file:com.dangdang.ddframe.job.internal.job.dataflow.AbstractDataFlowElasticJob.java
private Map<Integer, List<T>> fetchDataForSequence(final JobExecutionMultipleShardingContext shardingContext) {
    List<Integer> items = shardingContext.getShardingItems();
    final Map<Integer, List<T>> result = new ConcurrentHashMap<>(items.size());
    final CountDownLatch latch = new CountDownLatch(items.size());
    for (final int each : items) {
        executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    @SuppressWarnings("unchecked")
                    List<T> data = fetchData((C) shardingContext.createJobExecutionSingleShardingContext(each));
                    if (null != data && !data.isEmpty()) {
                        result.put(each, data);
                    }
                } finally {
                    latch.countDown();
                }
            }
        });
    }
    latchAwait(latch);
    log.trace("Elastic job: fetch data size: {}.", result.size());
    return result;
}
From source file:com.bt.aloha.util.ConcurrentUpdateManagerTest.java
@Test
public void testConcurrentUpdateConflictAwawreGetsCalled() throws Exception {
    // setup
    final CountDownLatch firstWriterRead = new CountDownLatch(1);
    final CountDownLatch secondWriterWrote = new CountDownLatch(1);
    final AtomicInteger failuresCounter = new AtomicInteger();

    ConcurrentUpdateBlock concurrentUpdateBlock = new ConflictAwareConcurrentUpdateBlock() {
        public void execute() {
            DialogInfo di = dialogCollection.get(dialogId);
            log.debug("First writer read");
            firstWriterRead.countDown();
            log.debug("Waiting for second writer to write");
            try {
                secondWriterWrote.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
            dialogCollection.replace(di);
            log.debug("First writer replaced");
        }

        public String getResourceId() {
            return dialogId;
        }

        public void onConcurrentUpdateConflict() {
            failuresCounter.incrementAndGet();
        }
    };

    Runnable competingWriter = new Runnable() {
        public void run() {
            log.debug("Waiting for first writer to read");
            try {
                firstWriterRead.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
            DialogInfo di = dialogCollection.get(dialogId);
            dialogCollection.replace(di);
            log.debug("Second writer replaced");
            secondWriterWrote.countDown();
        }
    };

    // act
    new Thread(competingWriter).start();
    concurrentUpdateManager.executeConcurrentUpdate(concurrentUpdateBlock);

    // assert
    assertEquals(1, failuresCounter.get());
}
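Two single-count latches force a deterministic interleaving here: the update block reads, signals firstWriterRead, and waits, while the competing writer replaces the dialog and then signals secondWriterWrote, so the block's own replace always runs against stale data and onConcurrentUpdateConflict is invoked exactly once.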
From source file:com.vmware.photon.controller.api.client.resource.ClusterApiTest.java
@Test
public void testGetVmsAsyncForPagination() throws IOException, InterruptedException {
    Vm vm1 = new Vm();
    vm1.setId("vm1");

    Vm vm2 = new Vm();
    vm2.setId("vm2");

    Vm vm3 = new Vm();
    vm3.setId("vm3");

    String nextPageLink = "nextPageLink";

    final ResourceList<Vm> vmList = new ResourceList<>(Arrays.asList(vm1, vm2), nextPageLink, null);
    final ResourceList<Vm> vmListNextPage = new ResourceList<>(Arrays.asList(vm3));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(vmList);
    String serializedTaskNextPage = mapper.writeValueAsString(vmListNextPage);

    setupMocksForPagination(serializedTask, serializedTaskNextPage, nextPageLink, HttpStatus.SC_OK);

    ClusterApi clusterApi = new ClusterApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    clusterApi.getVmsInClusterAsync("foo", new FutureCallback<ResourceList<Vm>>() {
        @Override
        public void onSuccess(ResourceList<Vm> result) {
            assertEquals(result.getItems().size(),
                    vmList.getItems().size() + vmListNextPage.getItems().size());
            assertTrue(result.getItems().containsAll(vmList.getItems()));
            assertTrue(result.getItems().containsAll(vmListNextPage.getItems()));
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}
From source file:com.vmware.photon.controller.api.client.resource.ClusterRestApiTest.java
@Test
public void testGetVmsAsyncForPagination() throws IOException, InterruptedException {
    Vm vm1 = new Vm();
    vm1.setId("vm1");

    Vm vm2 = new Vm();
    vm2.setId("vm2");

    Vm vm3 = new Vm();
    vm3.setId("vm3");

    String nextPageLink = "nextPageLink";

    final ResourceList<Vm> vmList = new ResourceList<>(Arrays.asList(vm1, vm2), nextPageLink, null);
    final ResourceList<Vm> vmListNextPage = new ResourceList<>(Arrays.asList(vm3));

    ObjectMapper mapper = new ObjectMapper();
    String serializedTask = mapper.writeValueAsString(vmList);
    String serializedTaskNextPage = mapper.writeValueAsString(vmListNextPage);

    setupMocksForPagination(serializedTask, serializedTaskNextPage, nextPageLink, HttpStatus.SC_OK);

    ClusterApi clusterApi = new ClusterRestApi(restClient);
    final CountDownLatch latch = new CountDownLatch(1);

    clusterApi.getVmsInClusterAsync("foo", new FutureCallback<ResourceList<Vm>>() {
        @Override
        public void onSuccess(ResourceList<Vm> result) {
            assertEquals(result.getItems().size(),
                    vmList.getItems().size() + vmListNextPage.getItems().size());
            assertTrue(result.getItems().containsAll(vmList.getItems()));
            assertTrue(result.getItems().containsAll(vmListNextPage.getItems()));
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            fail(t.toString());
            latch.countDown();
        }
    });

    assertThat(latch.await(COUNTDOWNLATCH_AWAIT_TIMEOUT, TimeUnit.SECONDS), is(true));
}