List of usage examples for java.util.concurrent.CountDownLatch
public CountDownLatch(int count)
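Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed source files) of the typical pattern behind this constructor: a latch created with one count per worker, counted down by each worker when it finishes, and awaited by the coordinating thread.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class CountDownLatchSketch {
    public static void main(String[] args) throws InterruptedException {
        final int workers = 3;
        // One count per worker; await() blocks until the count reaches zero.
        final CountDownLatch doneSignal = new CountDownLatch(workers);
        ExecutorService executor = Executors.newFixedThreadPool(workers);

        for (int i = 0; i < workers; i++) {
            final int id = i;
            executor.execute(() -> {
                try {
                    System.out.println("worker " + id + " finished");
                } finally {
                    doneSignal.countDown(); // always count down, even on failure
                }
            });
        }

        doneSignal.await();          // block until all workers have counted down
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.SECONDS);
        System.out.println("all workers done");
    }
}

The examples that follow apply the same idea in real projects: some size the latch by the number of tasks and call a blocking await(), others use a timed await(long, TimeUnit) or pair a one-count "start signal" latch with a "done signal" latch.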
From source file:com.google.api.ads.adwords.awreporting.processors.onfile.ReportProcessorOnFile.java
private <R extends Report> void processFiles(String mccAccountId, Class<R> reportBeanClass,
        Collection<File> localFiles, ReportDefinitionDateRangeType dateRangeType, String dateStart,
        String dateEnd) {

    final CountDownLatch latch = new CountDownLatch(localFiles.size());
    ExecutorService executorService = Executors.newFixedThreadPool(numberOfReportProcessors);

    // Processing Report Local Files
    LOGGER.info(" Processing reports...");

    Stopwatch stopwatch = Stopwatch.createStarted();

    for (File file : localFiles) {
        LOGGER.trace(".");
        try {
            // We need to create a csvToBean and mappingStrategy for each thread
            ModifiedCsvToBean<R> csvToBean = new ModifiedCsvToBean<R>();
            MappingStrategy<R> mappingStrategy = new AnnotationBasedMappingStrategy<R>(reportBeanClass);

            LOGGER.debug("Parsing file: " + file.getAbsolutePath());
            RunnableProcessorOnFile<R> runnableProcesor = new RunnableProcessorOnFile<R>(file, csvToBean,
                    mappingStrategy, dateRangeType, dateStart, dateEnd, mccAccountId, persister,
                    reportRowsSetSize);
            runnableProcesor.setLatch(latch);
            executorService.execute(runnableProcesor);

        } catch (Exception e) {
            LOGGER.error("Ignoring file (Error when processing): " + file.getAbsolutePath());
            e.printStackTrace();
        }
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
        e.printStackTrace();
    }

    executorService.shutdown();
    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in "
            + (stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000) + " seconds ***\n");
}
From source file:gobblin.tunnel.TunnelTest.java
@Test
public void mustHandleMultipleConnections() throws Exception {
    mockExample();
    Tunnel tunnel = Tunnel.build("example.org", 80, "localhost", PORT);
    int clients = 5;

    final CountDownLatch startSignal = new CountDownLatch(1);
    final CountDownLatch doneSignal = new CountDownLatch(clients);

    ExecutorService executor = Executors.newFixedThreadPool(clients);
    try {
        final int tunnelPort = tunnel.getPort();
        List<Future<String>> results = new ArrayList<Future<String>>();
        for (int i = 0; i < clients; i++) {
            Future<String> result = executor.submit(new Callable<String>() {
                @Override
                public String call() throws Exception {
                    startSignal.await();
                    try {
                        return fetchContent(tunnelPort);
                    } finally {
                        doneSignal.countDown();
                    }
                }
            });
            results.add(result);
        }

        startSignal.countDown();
        doneSignal.await();

        for (Future<String> result : results) {
            assertNotNull(result.get());
        }
    } finally {
        tunnel.close();
    }
}
From source file:io.openvidu.server.recording.service.SingleStreamRecordingService.java
@Override
public Recording startRecording(Session session, RecordingProperties properties) throws OpenViduException {

    PropertiesRecordingId updatePropertiesAndRecordingId = this
            .setFinalRecordingNameAndGetFreeRecordingId(session, properties);
    properties = updatePropertiesAndRecordingId.properties;
    String recordingId = updatePropertiesAndRecordingId.recordingId;

    log.info("Starting individual ({}) recording {} of session {}",
            properties.hasVideo() ? (properties.hasAudio() ? "video+audio" : "video-only") : "audioOnly",
            recordingId, session.getSessionId());

    Recording recording = new Recording(session.getSessionId(), recordingId, properties);
    this.recordingManager.startingRecordings.put(recording.getId(), recording);

    recorders.put(session.getSessionId(), new ConcurrentHashMap<String, RecorderEndpointWrapper>());

    final int activePublishers = session.getActivePublishers();
    final CountDownLatch recordingStartedCountdown = new CountDownLatch(activePublishers);

    for (Participant p : session.getParticipants()) {
        if (p.isStreaming()) {
            MediaProfileSpecType profile = null;
            try {
                profile = generateMediaProfile(properties, p);
            } catch (OpenViduException e) {
                log.error(
                        "Cannot start single stream recorder for stream {} in session {}: {}. Skipping to next stream being published",
                        p.getPublisherStreamId(), session.getSessionId(), e.getMessage());
                recordingStartedCountdown.countDown();
                continue;
            }
            this.startRecorderEndpointForPublisherEndpoint(session, recordingId, profile, p,
                    recordingStartedCountdown);
        }
    }

    try {
        if (!recordingStartedCountdown.await(5, TimeUnit.SECONDS)) {
            log.error("Error waiting for some recorder endpoint to start in session {}",
                    session.getSessionId());
            throw this.failStartRecording(session, recording, "Couldn't initialize some RecorderEndpoint");
        }
    } catch (InterruptedException e) {
        recording.setStatus(io.openvidu.java.client.Recording.Status.failed);
        log.error("Exception while waiting for state change", e);
    }

    this.generateRecordingMetadataFile(recording);
    this.updateRecordingManagerCollections(session, recording);
    this.sendRecordingStartedNotification(session, recording);

    return recording;
}
From source file:org.wisdom.framework.vertx.ResponseEncodingTest.java
@Test
public void testEncodingOfResponse() throws InterruptedException, IOException {
    Router router = prepareServer();
    byte[] content = generate(1000).getBytes();

    // Prepare the router with a controller
    Controller controller = new DefaultController() {
        @SuppressWarnings("unused")
        public Result index() {
            return ok(content, false);
        }
    };
    final Route route1 = new RouteBuilder().route(HttpMethod.GET).on("/").to(controller, "index");
    configureRouter(router, route1);

    server.start();
    waitForStart(server);

    // Now start bunch of clients
    int num = NUMBER_OF_CLIENTS;
    CountDownLatch startSignal = new CountDownLatch(1);
    CountDownLatch doneSignal = new CountDownLatch(num);

    int port = server.httpPort();
    for (int i = 0; i < num; ++i) { // create and start threads
        executor.submit(new Client(startSignal, doneSignal, port, i, content, "gzip"));
    }

    startSignal.countDown(); // let all threads proceed
    assertThat(doneSignal.await(60, TimeUnit.SECONDS)).isTrue(); // wait for all to finish

    assertThat(failure).isEmpty();
    assertThat(success).hasSize(num);
}
From source file:interactivespaces.activity.component.ActivityComponentContextTest.java
/**
 * Test that there is a processing handler that never exits and the wait has
 * to exit.
 */
@Test
public void testWaitMultithreadedProcessingHandlers() throws Exception {
    final CountDownLatch latch = new CountDownLatch(2);

    Runnable runnable = new Runnable() {
        @Override
        public void run() {
            context.enterHandler();
            latch.countDown();

            InteractiveSpacesUtilities.delay(1000);
            context.exitHandler();
        }
    };

    executor.execute(runnable);
    executor.execute(runnable);

    // Make sure they have both entered before starting the wait.
    Assert.assertTrue(latch.await(500, TimeUnit.MILLISECONDS));

    Assert.assertTrue(context.waitOnNoProcessingHandlings(500, 4000));
}
From source file:info.archinnov.achilles.it.TestAsyncDSLSimpleEntity.java
@Test
public void should_dsl_select_slice_async() throws Exception {
    //Given
    final Map<String, Object> values = new HashMap<>();
    final long id = RandomUtils.nextLong(0L, Long.MAX_VALUE);
    values.put("id", id);

    SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z");
    dateFormat.setTimeZone(TimeZone.getTimeZone("GMT"));
    final Date date1 = dateFormat.parse("2015-10-01 00:00:00 GMT");
    final Date date9 = dateFormat.parse("2015-10-09 00:00:00 GMT");

    values.put("date1", "'2015-10-01 00:00:00+0000'");
    values.put("date2", "'2015-10-02 00:00:00+0000'");
    values.put("date3", "'2015-10-03 00:00:00+0000'");
    values.put("date4", "'2015-10-04 00:00:00+0000'");
    values.put("date5", "'2015-10-05 00:00:00+0000'");
    values.put("date6", "'2015-10-06 00:00:00+0000'");
    values.put("date7", "'2015-10-07 00:00:00+0000'");
    values.put("date8", "'2015-10-08 00:00:00+0000'");
    values.put("date9", "'2015-10-09 00:00:00+0000'");

    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_many_rows.cql", values);

    final CountDownLatch latch = new CountDownLatch(1);
    final CassandraLogAsserter logAsserter = new CassandraLogAsserter();
    logAsserter.prepareLogLevel(ASYNC_LOGGER_STRING, "%msg - [%thread]%n");

    //When
    final CompletableFuture<List<SimpleEntity>> future = manager.dsl().select().consistencyList().simpleSet()
            .simpleMap().value().simpleMap().fromBaseTable().where().id_Eq(id).date_Gte_And_Lt(date1, date9)
            .withResultSetAsyncListener(rs -> {
                LOGGER.info(CALLED);
                latch.countDown();
                return rs;
            }).withTracing().getListAsync();

    //Then
    latch.await();
    assertThat(future.get()).hasSize(8);
    logAsserter.assertContains("Called - [achilles-default-executor");
}
From source file:com.twotoasters.android.hoottestapplication.test.HootTest.java
public void testGetWithQueryParams() {
    final CountDownLatch latch = new CountDownLatch(1);

    Map<String, String> params = new LinkedHashMap<String, String>();
    params.put("this", "that");
    params.put("here", "there");

    final HootDeserializer<GetWithParams> deserializer = new TestHootDeserializer<GetWithParams>(
            GetWithParams.class);

    final HootRequest request = mHootRestClient.createRequest().get().setResource("params")
            .setQueryParameters(params).setDeserializer(deserializer)
            .bindListener(new TestHootListener(latch, true));

    assertNotNull(request);

    executeTest(request, latch);

    assertTrue(request.getResult() != null && request.getResult().isSuccess()
            && request.getResult().getDeserializedResult() != null);
    assertTrue(deserializer.getDeserializedResult().thisString.equals("that")
            && deserializer.getDeserializedResult().hereString.equals("there"));
}
From source file:io.pravega.controller.server.ControllerServiceStarter.java
public ControllerServiceStarter(ControllerServiceConfig serviceConfig, StoreClient storeClient) {
    this.serviceConfig = serviceConfig;
    this.storeClient = storeClient;
    this.objectId = "ControllerServiceStarter";
    this.controllerReadyLatch = new CountDownLatch(1);
}
From source file:com.cisco.oss.foundation.monitoring.service.TestMultiService.java
@Ignore
@Test
public void testHistogram() {
    final int tpsTime = 10;
    final Histogram tpsHistogram = new Histogram(new SlidingTimeWindowReservoir(tpsTime, TimeUnit.SECONDS));

    ExecutorService threadPool = Executors.newFixedThreadPool(35);

    int numOfServices = 600000;
    final CountDownLatch latch = new CountDownLatch(numOfServices);

    for (int i = 0; i < numOfServices; i++) {
        final int index = i;
        threadPool.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    if (index >= 10000 & index < 15000) {
                        Thread.sleep(50);
                    } else if (index % 100 == 0) {
                        Thread.sleep(1000);
                        tpsHistogram.update(index);
                    } else if (index % 1000 == 0) {
                        Thread.sleep(15000);
                        tpsHistogram.update(index);
                    } else if (index % 2 == 0) {
                        Thread.sleep(10);
                        tpsHistogram.update(index);
                    } else {
                        Thread.sleep(50);
                        tpsHistogram.update(index);
                    }
                } catch (InterruptedException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }

                if (index % 1000 == 0) {
                    int total = tpsHistogram.getSnapshot().getValues().length;
                    System.out.println("index: " + index + ", count: " + tpsHistogram.getCount()
                            + ", snapshot count: " + total);
                    int tps = total / tpsTime;
                    System.out.println("TPS: " + tps);
                }

                latch.countDown();
            }
        });
    }

    try {
        latch.await();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
From source file:com.bt.aloha.fitnesse.OutboundCallFixture.java
public String joinDialogsOneAndTwo() {
    latch = new CountDownLatch(1);
    String id = callBean.joinCallLegs(firstDialogId, secondDialogId);
    log.info("call ID: " + id);
    callIds.add(id);
    latch.countDown();
    return "OK";
}