List of usage examples for java.util.concurrent.CountDownLatch.await()
public void await() throws InterruptedException
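Causes the current thread to wait until the latch has counted down to zero, unless the thread is interrupted. Before the project examples below, here is a minimal self-contained sketch of the basic pattern (the class name and printed messages are illustrative, not taken from any of the listed sources):

import java.util.concurrent.CountDownLatch;

public class AwaitExample {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        // The worker thread signals completion by counting the latch down.
        new Thread(() -> {
            System.out.println("Worker finished");
            latch.countDown();
        }).start();
        latch.await(); // blocks until the count reaches zero
        System.out.println("Main thread resumed");
    }
}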
From source file:ufo.remote.calls.benchmark.client.caller.vertx.VertxClusterTester.java
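Blocks a @PostConstruct initializer until the asynchronous Vert.x cluster startup callback has run.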
@PostConstruct
public void init() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final VertxOptions options = new VertxOptions();
    final Config conf = new Config();
    Vertx.clusteredVertx(options.setClusterHost("localhost").setClusterPort(0).setClustered(true)
            .setClusterManager(new HazelcastClusterManager(conf)), ar -> {
        if (ar.failed()) {
            logger.error("Error starting Vertx cluster", ar.cause());
        }
        logger.info("Vertx cluster node started");
        vertx = ar.result();
        logger.info("Initialising vertx verticles...");
        latch.countDown();
    });
    latch.await();
}
From source file:com.twitter.common.zookeeper.GroupTest.java
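Uses await() to make the test block until a ZooKeeper session expiry triggers the onLoseMembership callback.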
@Test
public void testSessionExpirationTriggersOnLoseMembership() throws Exception {
    final CountDownLatch lostMembership = new CountDownLatch(1);
    Command onLoseMembership = new Command() {
        @Override
        public void execute() throws RuntimeException {
            lostMembership.countDown();
        }
    };
    assertEmptyMembershipObserved();
    Membership membership = group.join(onLoseMembership);
    assertMembershipObserved(membership.getMemberId());
    expireSession(zkClient);
    lostMembership.await(); // Will hang this test if the onLoseMembership event is not propagated.
}
From source file:com.github.oscerd.component.cassandra.embedded.CassandraBaseTest.java
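Waits for an embedded Cassandra instance (Farsandra) to report readiness on its output stream before creating the schema and inserting test data.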
@Override
public void doPostSetup() {
    String id = RandomStringUtils.random(12, "0123456789abcdefghijklmnopqrstuvwxyz");
    fs = new Farsandra();
    fs.withVersion("2.0.3");
    fs.withCleanInstanceOnStart(true);
    fs.withInstanceName("target" + File.separator + id);
    fs.withCreateConfigurationFiles(true);
    fs.withHost("localhost");
    fs.withSeeds(Arrays.asList("localhost"));
    final CountDownLatch started = new CountDownLatch(1);
    fs.getManager().addOutLineHandler(new LineHandler() {
        @Override
        public void handleLine(String line) {
            if (line.contains("Listening for thrift clients...")) {
                started.countDown();
            }
        }
    });
    fs.getManager().addProcessHandler(new ProcessHandler() {
        @Override
        public void handleTermination(int exitValue) {
            started.countDown();
        }
    });
    fs.start();
    try {
        started.await();
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
    Session session = cluster.connect();
    session.execute("CREATE KEYSPACE IF NOT EXISTS simplex WITH replication "
            + "= {'class':'SimpleStrategy', 'replication_factor':3};");
    session.execute("CREATE TABLE IF NOT EXISTS simplex.songs (" + "id int PRIMARY KEY," + "title text,"
            + "album text," + "artist text," + "tags set<text>," + "data blob" + ");");
    session.execute("CREATE INDEX IF NOT EXISTS album_idx ON simplex.songs(album);");
    session.execute("CREATE INDEX IF NOT EXISTS title_idx ON simplex.songs(title);");
    PreparedStatement statement = session.prepare("INSERT INTO simplex.songs "
            + "(id, title, album, artist, tags) " + "VALUES (?, ?, ?, ?, ?);");
    BoundStatement boundStatement = new BoundStatement(statement);
    List<Song> songList = new ArrayList<Song>();
    prepareStartingData(songList);
    for (Song song : songList) {
        session.execute(boundStatement.bind(song.getId(), song.getTitle(), song.getAlbum(),
                song.getArtist(), song.getTags()));
    }
    session.close();
    cluster.close();
    try {
        Thread.sleep(5 * 1000);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
From source file:com.microsoft.office.integration.test.EventsAsyncTestCase.java
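Turns an asynchronous flush into a synchronous check by counting the latch down from both the success and failure callbacks.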
private void createAndCheck() throws Exception {
    prepareEvent();
    final CountDownLatch cdl = new CountDownLatch(1);
    Futures.addCallback(Me.flushAsync(), new FutureCallback<Void>() {
        public void onFailure(Throwable t) {
            reportError(t);
            cdl.countDown();
        }

        public void onSuccess(Void result) {
            try {
                assertTrue(StringUtils.isNotEmpty(EventsAsyncTestCase.this.event.getId()));
            } catch (Throwable t) {
                reportError(t);
            }
            cdl.countDown();
        }
    });
    cdl.await();
}
From source file:com.bt.aloha.dialog.DialogConcurrentUpdateBlockTest.java
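Coordinates two competing writers with a pair of latches so that the concurrent-update conflict happens deterministically.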
@Test
public void testSequenceNumberReleasedOnFailedForcedUpdate() {
    // setup
    final CountDownLatch firstWriterRead = new CountDownLatch(1);
    final CountDownLatch secondWriterWrote = new CountDownLatch(1);
    DialogConcurrentUpdateBlock dialogConcurrentUpdateBlock = new DialogConcurrentUpdateBlock(dialogBeanHelper) {
        public void execute() {
            DialogInfo dialogInfo = dialogCollection.get("id");
            firstWriterRead.countDown();
            forceSequenceNumber(dialogInfo.getId(), 4L, Request.INVITE);
            log.debug("Waiting for second writer to write");
            try {
                secondWriterWrote.await();
            } catch (InterruptedException e) {
                throw new RuntimeException(e.getMessage(), e);
            }
            dialogCollection.replace(dialogInfo);
        }

        public String getResourceId() {
            return "id";
        }
    };
    // act
    new Thread(new CompetingWriter(firstWriterRead, secondWriterWrote)).start();
    concurrentUpdateManager.executeConcurrentUpdate(dialogConcurrentUpdateBlock);
    // assert
    assertEquals(3, dialogCollection.get("id").getSequenceNumber());
    assertEquals(1, releasedSequenceNumbers.size());
    assertEquals(4, releasedSequenceNumbers.get(0));
    assertEquals(4, lastSequenceNumber);
}
From source file:org.cleverbus.core.common.asynch.queue.MessagePollExecutorTest.java
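Starts several poller threads and uses await() to hold the assertions back until every thread has finished.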
@Test
public void testGetNextMessage_moreThreads() throws InterruptedException {
    // prepare threads
    int threads = 5;
    final CountDownLatch latch = new CountDownLatch(threads);
    Runnable task = new Runnable() {
        @Override
        public void run() {
            try {
                messagePollExecutor.run();
            } finally {
                latch.countDown();
            }
        }
    };
    mock.expectedMessageCount(3);
    // start processing and wait for the result
    for (int i = 0; i < threads; i++) {
        new Thread(task).start();
    }
    latch.await();
    mock.assertIsSatisfied();
    // verify messages
    Message msg = findMessage("1234_4567");
    assertThat(msg, notNullValue());
    assertThat(msg.getState(), is(MsgStateEnum.PROCESSING));
    msg = findMessage("1234_4567_8");
    assertThat(msg, notNullValue());
    assertThat(msg.getState(), is(MsgStateEnum.PROCESSING));
    msg = findMessage("1234_4567_9");
    assertThat(msg, notNullValue());
    assertThat(msg.getState(), is(MsgStateEnum.PROCESSING));
}
From source file:com.microsoft.office.core.EventsAsyncTestCase.java
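As in the create test above, await() blocks until the asynchronous delete has been verified in either callback.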
private void deleteAndCheck() throws Exception {
    removeEvent();
    final CountDownLatch cdl = new CountDownLatch(1);
    Futures.addCallback(Me.getEvents().getAsync(event.getId()), new FutureCallback<IEvent>() {
        @Override
        public void onFailure(Throwable t) {
            reportError(t);
            cdl.countDown();
        }

        @Override
        public void onSuccess(IEvent result) {
            try {
                assertNull(result);
            } catch (Throwable t) {
                reportError(t);
            }
            cdl.countDown();
        }
    });
    cdl.await();
}
From source file:com.linkedin.pinot.integration.tests.OfflineClusterIntegrationTest.java
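A Helix spectator counts uploaded segments down on a latch; await() parks the test setup until all segments are online.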
@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);

    // Start the cluster
    startCluster();

    // Unpack the Avro files
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);

    createTable();

    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);

    // Create segments from Avro data
    buildSegmentsFromAvro(avroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);

    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);

    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);

    // Set up a Helix spectator to count the uploaded segments and open the latch once all SEGMENT_COUNT segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);

    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, new FileInputStream(file), file.length());
    }

    // Wait for all segments to be online
    latch.await();

    TOTAL_DOCS = 115545;
    long timeInTwoMinutes = System.currentTimeMillis() + 2 * 60 * 1000L;
    long numDocs;
    while ((numDocs = getCurrentServingNumDocs()) < TOTAL_DOCS) {
        System.out.println("Current number of documents: " + numDocs);
        if (System.currentTimeMillis() < timeInTwoMinutes) {
            Thread.sleep(1000);
        } else {
            Assert.fail("Segments were not completely loaded within two minutes");
        }
    }
}
From source file:ufo.remote.calls.benchmark.server.vertx.VertxServiceImpl.java
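Same clustered-Vert.x startup pattern as the first example, additionally deploying a web server verticle before releasing the latch.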
@PostConstruct
public void init() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final VertxOptions options = new VertxOptions();
    final Config conf = new Config();
    Vertx.clusteredVertx(options.setClusterHost("localhost").setClusterPort(0).setClustered(true)
            .setClusterManager(new HazelcastClusterManager(conf)), ar -> {
        if (ar.failed()) {
            logger.error("Error starting Vertx cluster", ar.cause());
        }
        logger.info("Vertx cluster node started");
        vertx = ar.result();
        logger.info("Initialising vertx verticles...");
        vertx.deployVerticle(webServerVerticle);
        latch.countDown();
    });
    latch.await();
}
From source file:hydrograph.server.execution.tracking.client.main.HydrographMain.java
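The returned task runs a job and, when execution tracking is enabled, calls await() so the thread stays alive until tracking completes, even on the failure path.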
/**
 * @param latch
 * @param jobId
 * @param argsFinalList
 * @param execution
 * @param isExecutionTracking
 * @return
 */
private FutureTask executeGraph(final CountDownLatch latch, final String jobId, final String[] argsFinalList,
        final HydrographService execution, final boolean isExecutionTracking) {
    logger.trace("Creating executor thread");
    return new FutureTask(new Runnable() {
        public void run() {
            try {
                logger.debug("Executing the job from execute graph");
                execution.executeGraph(argsFinalList);
                if (isExecutionTracking) {
                    latch.await();
                }
            } catch (Exception e) {
                logger.error("JOB FAILED :", e);
                if (isExecutionTracking) {
                    try {
                        latch.await();
                    } catch (InterruptedException e1) {
                        logger.error("job fail :", e1);
                    }
                }
                throw new RuntimeException(e);
            }
        }
    }, null);
}