List of usage examples for java.util.concurrent.ConcurrentLinkedQueue
public ConcurrentLinkedQueue()
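The no-argument constructor creates an empty, unbounded, thread-safe FIFO queue. Before the collected examples below, here is a minimal self-contained sketch of the constructor used in isolation (class and variable names are illustrative, not taken from any of the source files):

import java.util.concurrent.ConcurrentLinkedQueue;

public class BasicUsage {
    public static void main(String[] args) {
        // The no-arg constructor creates an empty, unbounded queue;
        // offer() and add() are equivalent since the queue never rejects elements.
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        queue.offer("first");
        queue.add("second");

        String head = queue.peek();  // "first" -- inspect the head without removing it
        String taken = queue.poll(); // "first" -- remove the head, or null if empty

        System.out.println(head + " / " + taken + " / remaining=" + queue.size());
        // Caveat: size() traverses the whole queue (O(n)) and is only a moving
        // estimate under concurrent updates; prefer isEmpty() for emptiness checks.
    }
}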
From source file:org.restcomm.app.qoslib.Services.Events.EventUploader.java
/**
 * Loads event requests from storage and adds them to the queue.
 */
protected void loadEventsQueue() {
    ConcurrentLinkedQueue<EventDataEnvelope> eventQueue = owner.getEventManager().getEventQueue();
    if (eventQueue == null) {
        eventQueue = new ConcurrentLinkedQueue<EventDataEnvelope>();
        owner.getEventManager().setEventQueue(eventQueue);
    } else {
        return;
    }
    Gson gson = new Gson();
    SharedPreferences secureSettings = MainService.getSecurePreferences(owner);
    if (secureSettings.contains(PreferenceKeys.Miscellaneous.EVENTS_QUEUE)) {
        try {
            String strQueue = secureSettings.getString(PreferenceKeys.Miscellaneous.EVENTS_QUEUE, "");
            if (strQueue.length() < 100) {
                return;
            }
            JSONArray jsonqueue = new JSONArray(strQueue);
            for (int i = 0; i < jsonqueue.length(); i++) {
                JSONObject jsonRequest = jsonqueue.getJSONObject(i);
                EventDataEnvelope request = gson.fromJson(jsonRequest.toString(), EventDataEnvelope.class);
                eventQueue.add(request);
            }
            // Remove the oldest events until the queue holds at most 300 entries.
            while (eventQueue.size() > 300) {
                eventQueue.poll();
            }
        } catch (JSONException e) {
            LoggerUtil.logToFile(LoggerUtil.Level.ERROR, TAG, "loadEventsQueue",
                    "JSONException loading events from storage", e);
        } catch (Exception e) {
            LoggerUtil.logToFile(LoggerUtil.Level.ERROR, TAG, "loadEventsQueue",
                    "Exception loading events from storage", e);
        }
    }
}
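The trimming loop above is a common way to keep an unbounded ConcurrentLinkedQueue near a soft cap: FIFO order means poll() always discards the oldest entry. A minimal sketch of just that idiom, with illustrative names (note that size() is O(n) for this class, so the check is best kept off hot paths):

import java.util.concurrent.ConcurrentLinkedQueue;

class EventBuffer {
    private static final int MAX_EVENTS = 300;
    private final ConcurrentLinkedQueue<String> events = new ConcurrentLinkedQueue<>();

    void addEvent(String event) {
        events.add(event);
        // Drop the oldest events once the soft cap is exceeded.
        while (events.size() > MAX_EVENTS) {
            events.poll();
        }
    }
}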
From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java
@Override
@SuppressWarnings("unchecked")
public ConcurrentLinkedQueue<IMessage> removeIncomedQueue(String dstVertID) {
    ConcurrentLinkedQueue<IMessage> incomedQueue = null;
    // Get the hash bucket index.
    int hashCode = dstVertID.hashCode();
    int hashIndex = hashCode % this.hashBucketNumber; // bucket index
    hashIndex = (hashIndex < 0 ? hashIndex + this.hashBucketNumber : hashIndex);
    BucketMeta meta = this.incomedQueues.get(hashIndex);
    // The bucket is on disk: load it into memory under the per-bucket file lock.
    if (meta.onDiskFlag) {
        this.incomedFileLocks[hashIndex].lock();
        try {
            loadBucket(this.incomedQueues, hashIndex, "incomed");
        } catch (IOException e) {
            LOG.info("==> bucket-" + hashIndex + ", VertexID = " + dstVertID);
            LOG.info("size = " + meta.queueMap.get(dstVertID).size());
            throw new RuntimeException("==> bucket-" + hashIndex + ", VertexID = " + dstVertID, e);
        } finally {
            this.incomedFileLocks[hashIndex].unlock();
        }
    }
    meta = this.incomedQueues.get(hashIndex);
    this.currentBucket = hashIndex;
    incomedQueue = meta.queueMap.remove(dstVertID);
    if (incomedQueue == null) {
        incomedQueue = new ConcurrentLinkedQueue<IMessage>();
    }
    int removedCount = incomedQueue.size();
    long removedLength = removedCount * this.sizeOfMessage;
    // Update the metadata to account for the removed queue.
    meta.count = meta.count - removedCount;
    meta.countInMemory = meta.countInMemory - removedCount;
    meta.length = meta.length - removedLength;
    meta.lengthInMemory = meta.lengthInMemory - removedLength;
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem - removedLength;
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem - removedCount;
    this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
            - (sizeOfRef * 2 + (dstVertID.length() * sizeOfChar) + sizeOfEmptyMessageQueue);
    return incomedQueue;
}
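The remove-or-empty pattern above (detach a per-key queue so the caller can drain it privately, and hand back an empty queue instead of null) is worth isolating. A minimal sketch of that idiom, assuming a plain ConcurrentHashMap in place of the bucketed disk-backed store (all names illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

class MessageStore<M> {
    private final Map<String, ConcurrentLinkedQueue<M>> queueMap = new ConcurrentHashMap<>();

    // Atomically detach the queue for a vertex; never returns null.
    ConcurrentLinkedQueue<M> removeQueue(String vertexId) {
        ConcurrentLinkedQueue<M> q = queueMap.remove(vertexId);
        return (q != null) ? q : new ConcurrentLinkedQueue<>();
    }

    // Create the per-key queue lazily and atomically on first message.
    void put(String vertexId, M message) {
        queueMap.computeIfAbsent(vertexId, k -> new ConcurrentLinkedQueue<>()).add(message);
    }
}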
From source file:com.linkedin.pinot.tools.perf.QueryRunner.java
/**
 * Use multiple threads to run queries at an increasing target QPS.
 * <p>Use a concurrent linked queue to buffer the queries to be sent. Use the main thread to insert queries into the
 * queue at the target QPS, and start <code>numThreads</code> worker threads to fetch queries from the queue and send
 * them.
 * <p>We start with the start QPS, and keep adding delta QPS to the start QPS during the test.
 * <p>The main thread is responsible for collecting and logging the statistic information periodically.
 * <p>Queries are picked sequentially from the query file.
 * <p>Query runner will stop when all queries in the query file have been executed the configured number of times.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numTimesToRunQueries number of times to run all queries in the query file, 0 means infinite times.
 * @param numThreads number of threads sending queries.
 * @param startQPS start QPS.
 * @param deltaQPS delta QPS.
 * @param reportIntervalMs report interval in milliseconds.
 * @param numIntervalsToReportAndClearStatistics number of report intervals to report detailed statistics and clear
 *          them, 0 means never.
 * @param numIntervalsToIncreaseQPS number of intervals to increase QPS.
 * @throws Exception
 */
public static void increasingQPSQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        int numTimesToRunQueries, int numThreads, double startQPS, double deltaQPS, int reportIntervalMs,
        int numIntervalsToReportAndClearStatistics, int numIntervalsToIncreaseQPS) throws Exception {
    List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }
    PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    ConcurrentLinkedQueue<String> queryQueue = new ConcurrentLinkedQueue<>();
    AtomicInteger numQueriesExecuted = new AtomicInteger(0);
    AtomicLong totalBrokerTime = new AtomicLong(0L);
    AtomicLong totalClientTime = new AtomicLong(0L);
    List<Statistics> statisticsList = Collections.singletonList(new Statistics(CLIENT_TIME_STATISTICS));

    ExecutorService executorService = Executors.newFixedThreadPool(numThreads);
    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Worker(driver, queryQueue, numQueriesExecuted, totalBrokerTime,
                totalClientTime, statisticsList));
    }
    executorService.shutdown();

    long startTime = System.currentTimeMillis();
    long reportStartTime = startTime;
    int numReportIntervals = 0;
    int numTimesExecuted = 0;
    double currentQPS = startQPS;
    int queryIntervalMs = (int) (MILLIS_PER_SECOND / currentQPS);
    while (numTimesToRunQueries == 0 || numTimesExecuted < numTimesToRunQueries) {
        if (executorService.isTerminated()) {
            LOGGER.error("All threads got exception and already dead.");
            return;
        }
        for (String query : queries) {
            queryQueue.add(query);
            Thread.sleep(queryIntervalMs);
            long currentTime = System.currentTimeMillis();
            if (currentTime - reportStartTime >= reportIntervalMs) {
                long timePassed = currentTime - startTime;
                reportStartTime = currentTime;
                numReportIntervals++;
                if (numReportIntervals == numIntervalsToIncreaseQPS) {
                    // Try to find the next interval.
                    double newQPS = currentQPS + deltaQPS;
                    int newQueryIntervalMs;
                    // Skip target QPS values that map to the same interval as the previous one.
                    while ((newQueryIntervalMs = (int) (MILLIS_PER_SECOND / newQPS)) == queryIntervalMs) {
                        newQPS += deltaQPS;
                    }
                    if (newQueryIntervalMs == 0) {
                        LOGGER.warn("Due to sleep granularity of millisecond, cannot further increase QPS.");
                    } else {
                        // Found the next interval.
                        LOGGER.info("--------------------------------------------------------------------------------");
                        LOGGER.info("REPORT FOR TARGET QPS: {}", currentQPS);
                        int numQueriesExecutedInt = numQueriesExecuted.get();
                        LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                        + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                                currentQPS, timePassed, numQueriesExecutedInt,
                                numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                                totalBrokerTime.get() / (double) numQueriesExecutedInt,
                                totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                        numReportIntervals = 0;
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                        currentQPS = newQPS;
                        queryIntervalMs = newQueryIntervalMs;
                        LOGGER.info("Increase target QPS to: {}, the following statistics are for the new target QPS.",
                                currentQPS);
                    }
                } else {
                    int numQueriesExecutedInt = numQueriesExecuted.get();
                    LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                                    + "Average Broker Time: {}ms, Average Client Time: {}ms, Queries Queued: {}.",
                            currentQPS, timePassed, numQueriesExecutedInt,
                            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
                            totalBrokerTime.get() / (double) numQueriesExecutedInt,
                            totalClientTime.get() / (double) numQueriesExecutedInt, queryQueue.size());
                    if ((numIntervalsToReportAndClearStatistics != 0)
                            && (numReportIntervals % numIntervalsToReportAndClearStatistics == 0)) {
                        startTime = currentTime;
                        reportAndClearStatistics(numQueriesExecuted, totalBrokerTime, totalClientTime,
                                statisticsList);
                    }
                }
            }
        }
        numTimesExecuted++;
    }

    // Wait for all queued queries to be executed.
    while (queryQueue.size() != 0) {
        Thread.sleep(1);
    }
    executorService.shutdownNow();
    while (!executorService.isTerminated()) {
        Thread.sleep(1);
    }

    long timePassed = System.currentTimeMillis() - startTime;
    int numQueriesExecutedInt = numQueriesExecuted.get();
    LOGGER.info("--------------------------------------------------------------------------------");
    LOGGER.info("FINAL REPORT:");
    LOGGER.info("Current Target QPS: {}, Time Passed: {}ms, Queries Executed: {}, Average QPS: {}, "
                    + "Average Broker Time: {}ms, Average Client Time: {}ms.",
            currentQPS, timePassed, numQueriesExecutedInt,
            numQueriesExecutedInt / ((double) timePassed / MILLIS_PER_SECOND),
            totalBrokerTime.get() / (double) numQueriesExecutedInt,
            totalClientTime.get() / (double) numQueriesExecutedInt);
    for (Statistics statistics : statisticsList) {
        statistics.report();
    }
}
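Stripped of the reporting logic, the queue usage above is a single paced producer feeding non-blocking consumers. A minimal sketch of that shape with illustrative names, using a fixed rate instead of an increasing one:

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class PacedProducerConsumer {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<>();
        ExecutorService workers = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 4; i++) {
            workers.submit(() -> {
                while (!Thread.currentThread().isInterrupted()) {
                    String task = queue.poll(); // non-blocking: null when empty
                    if (task == null) {
                        Thread.yield();         // back off instead of busy-spinning
                    } else {
                        System.out.println("ran " + task);
                    }
                }
            });
        }

        long intervalMs = 100;                  // pace inserts at 10 tasks per second
        for (int i = 0; i < 50; i++) {
            queue.add("query-" + i);
            Thread.sleep(intervalMs);
        }
        while (!queue.isEmpty()) {              // drain before shutting workers down
            Thread.sleep(1);
        }
        workers.shutdownNow();
        workers.awaitTermination(5, TimeUnit.SECONDS);
    }
}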
From source file:com.chinamobile.bcbsp.comm.MessageQueuesForDisk.java
@Override
public ConcurrentLinkedQueue<IMessage> removeIncomingQueue(String dstVerID) {
    ConcurrentLinkedQueue<IMessage> incomingQueue = null;
    // Get the hash bucket index.
    int hashCode = dstVerID.hashCode();
    int hashIndex = hashCode % this.hashBucketNumber; // bucket index
    hashIndex = (hashIndex < 0 ? hashIndex + this.hashBucketNumber : hashIndex);
    BucketMeta meta = this.incomingQueues.get(hashIndex);
    // The bucket is on disk: load it into memory under the per-bucket file lock.
    if (meta.onDiskFlag) {
        this.incomingFileLocks[hashIndex].lock();
        try {
            loadBucket(this.incomingQueues, hashIndex, "incoming");
        } catch (IOException e) {
            throw new RuntimeException("[MessageQueuesForDisk:removeIncomingQueue]", e);
        } finally {
            this.incomingFileLocks[hashIndex].unlock();
        }
    }
    meta = this.incomingQueues.get(hashIndex);
    incomingQueue = meta.queueMap.remove(dstVerID);
    if (incomingQueue == null) {
        incomingQueue = new ConcurrentLinkedQueue<IMessage>();
    }
    int removedCount = incomingQueue.size();
    long removedLength = removedCount * this.sizeOfMessage;
    // Update the metadata to account for the removed queue.
    meta.count = meta.count - removedCount;
    meta.countInMemory = meta.countInMemory - removedCount;
    meta.length = meta.length - removedLength;
    meta.lengthInMemory = meta.lengthInMemory - removedLength;
    this.sizeOfMessagesDataInMem = this.sizeOfMessagesDataInMem - removedLength;
    this.countOfMessagesDataInMem = this.countOfMessagesDataInMem - removedCount;
    this.sizeOfHashMapsInMem = this.sizeOfHashMapsInMem
            - (sizeOfRef * 2 + (dstVerID.length() * sizeOfChar) + sizeOfEmptyMessageQueue);
    return incomingQueue;
}
From source file:com.ibm.crail.tools.CrailBenchmark.java
void getFile(String filename, int loop) throws Exception, InterruptedException {
    System.out.println("getFile, filename " + filename + ", loop " + loop);

    // Warmup: recycle a single buffer through the queue.
    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    CrailBuffer buf = fs.allocateBuffer();
    bufferQueue.add(buf);
    warmUp(filename, warmup, bufferQueue);
    fs.freeBuffer(buf);

    // Benchmark
    System.out.println("starting benchmark...");
    fs.getStatistics().reset();
    double ops = 0;
    long start = System.currentTimeMillis();
    while (ops < loop) {
        ops = ops + 1.0;
        fs.lookup(filename).get().asFile();
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start)) / 1000.0;
    double latency = 0.0;
    if (executionTime > 0) {
        latency = 1000000.0 * executionTime / ops;
    }

    System.out.println("execution time " + executionTime);
    System.out.println("ops " + ops);
    System.out.println("latency " + latency);

    fs.getStatistics().print("close");
    fs.close();
}
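The bufferQueue here acts as a free list of reusable buffers that warmUp can drain and refill. A sketch of that pooling idiom using plain NIO buffers rather than Crail's API (names illustrative):

import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentLinkedQueue;

class BufferPool {
    private final ConcurrentLinkedQueue<ByteBuffer> free = new ConcurrentLinkedQueue<>();

    // Reuse a pooled buffer if one is available, otherwise allocate a new one.
    ByteBuffer acquire(int capacity) {
        ByteBuffer buf = free.poll();
        return (buf != null) ? buf : ByteBuffer.allocateDirect(capacity);
    }

    // Reset the buffer and return it to the lock-free free list.
    void release(ByteBuffer buf) {
        buf.clear();
        free.add(buf);
    }
}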
From source file:edu.cornell.mannlib.vitro.webapp.rdfservice.impl.sparql.RDFServiceSparql.java
private List<Statement> sort(List<Statement> stmts) {
    List<Statement> output = new ArrayList<Statement>();
    int originalSize = stmts.size();
    if (originalSize == 1) {
        return stmts;
    }
    List<Statement> remaining = stmts;
    ConcurrentLinkedQueue<com.hp.hpl.jena.rdf.model.Resource> subjQueue =
            new ConcurrentLinkedQueue<com.hp.hpl.jena.rdf.model.Resource>();
    for (Statement stmt : remaining) {
        if (stmt.getSubject().isURIResource()) {
            subjQueue.add(stmt.getSubject());
            break;
        }
    }
    if (subjQueue.isEmpty()) {
        throw new RuntimeException("No named subject in statement patterns");
    }
    while (remaining.size() > 0) {
        if (subjQueue.isEmpty()) {
            subjQueue.add(remaining.get(0).getSubject());
        }
        while (!subjQueue.isEmpty()) {
            com.hp.hpl.jena.rdf.model.Resource subj = subjQueue.poll();
            List<Statement> temp = new ArrayList<Statement>();
            for (Statement stmt : remaining) {
                if (stmt.getSubject().equals(subj)) {
                    output.add(stmt);
                    if (stmt.getObject().isResource()) {
                        subjQueue.add((com.hp.hpl.jena.rdf.model.Resource) stmt.getObject());
                    }
                } else {
                    temp.add(stmt);
                }
            }
            remaining = temp;
        }
    }
    if (output.size() != originalSize) {
        throw new RuntimeException(
                "original list size was " + originalSize + " but sorted size is " + output.size());
    }
    return output;
}
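subjQueue is used here as a single-threaded FIFO worklist: poll a subject, emit its statements, enqueue newly discovered resources. A minimal sketch of the same worklist shape on a toy adjacency map (illustrative only; in single-threaded code like this an ArrayDeque would serve equally well):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

class Worklist {
    // Breadth-first visit order: poll a node, record it, enqueue its neighbors.
    static List<String> bfs(Map<String, List<String>> edges, String start) {
        List<String> order = new ArrayList<>();
        Queue<String> queue = new ConcurrentLinkedQueue<>();
        queue.add(start);
        while (!queue.isEmpty()) {
            String node = queue.poll();
            if (order.contains(node)) {
                continue; // already visited
            }
            order.add(node);
            queue.addAll(edges.getOrDefault(node, Collections.emptyList()));
        }
        return order;
    }
}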
From source file:com.ibm.crail.tools.CrailBenchmark.java
void getFileAsync(String filename, int loop, int batch) throws Exception, InterruptedException {
    System.out.println("getFileAsync, filename " + filename + ", loop " + loop + ", batch " + batch);

    // Warmup: recycle a single buffer through the queue.
    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    CrailBuffer buf = fs.allocateBuffer();
    bufferQueue.add(buf);
    warmUp(filename, warmup, bufferQueue);
    fs.freeBuffer(buf);

    // Benchmark
    System.out.println("starting benchmark...");
    fs.getStatistics().reset();
    LinkedBlockingQueue<Future<CrailNode>> fileQueue = new LinkedBlockingQueue<Future<CrailNode>>();
    long start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
        // single operation == loop
        for (int j = 0; j < batch; j++) {
            Future<CrailNode> future = fs.lookup(filename);
            fileQueue.add(future);
        }
        for (int j = 0; j < batch; j++) {
            Future<CrailNode> future = fileQueue.poll();
            future.get();
        }
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start));
    double latency = executionTime * 1000.0 / ((double) batch);

    System.out.println("execution time [ms] " + executionTime);
    System.out.println("latency [us] " + latency);

    fs.getStatistics().print("close");
}
From source file:com.ibm.crail.tools.CrailBenchmark.java
void enumerateDir(String filename, int loop) throws Exception {
    System.out.println("reading enumerate dir, path " + filename);

    // Warmup: recycle a single buffer through the queue.
    ConcurrentLinkedQueue<CrailBuffer> bufferQueue = new ConcurrentLinkedQueue<CrailBuffer>();
    CrailBuffer buf = fs.allocateBuffer();
    bufferQueue.add(buf);
    warmUp(filename, warmup, bufferQueue);
    fs.freeBuffer(buf);

    // Benchmark
    System.out.println("starting benchmark...");
    fs.getStatistics().reset();
    long start = System.currentTimeMillis();
    for (int i = 0; i < loop; i++) {
        // single operation == loop
        Iterator<String> iter = fs.lookup(filename).get().asDirectory().listEntries();
        while (iter.hasNext()) {
            iter.next();
        }
    }
    long end = System.currentTimeMillis();
    double executionTime = ((double) (end - start));
    double latency = executionTime * 1000.0 / ((double) loop);

    System.out.println("execution time [ms] " + executionTime);
    System.out.println("latency [us] " + latency);

    fs.getStatistics().print("close");
}
From source file:org.opendaylight.netvirt.elan.internal.ElanInterfaceManager.java
@Override
protected void add(InstanceIdentifier<ElanInterface> identifier, ElanInterface elanInterfaceAdded) {
    String elanInstanceName = elanInterfaceAdded.getElanInstanceName();
    String interfaceName = elanInterfaceAdded.getName();
    InterfaceInfo interfaceInfo = interfaceManager.getInterfaceInfo(interfaceName);
    if (interfaceInfo == null) {
        LOG.warn("Interface {} is removed from Interface Oper DS due to port down ", interfaceName);
        return;
    }
    ElanInstance elanInstance = ElanUtils.getElanInstanceByName(broker, elanInstanceName);
    if (elanInstance == null) {
        elanInstance = new ElanInstanceBuilder().setElanInstanceName(elanInstanceName)
                .setDescription(elanInterfaceAdded.getDescription()).build();
        // Add the ElanInstance in the Configuration data-store
        WriteTransaction tx = broker.newWriteOnlyTransaction();
        List<String> elanInterfaces = new ArrayList<>();
        elanInterfaces.add(interfaceName);
        ElanUtils.updateOperationalDataStore(broker, idManager, elanInstance, elanInterfaces, tx);
        ElanUtils.waitForTransactionToComplete(tx);
        elanInstance = ElanUtils.getElanInstanceByName(broker, elanInstanceName);
    }

    Long elanTag = elanInstance.getElanTag();
    // If the elan tag is not yet assigned, park the elan interface in the
    // unprocessed-entry map; entries in this map get processed during the ELAN update DCN.
    if (elanTag == null) {
        ConcurrentLinkedQueue<ElanInterface> elanInterfaces = unProcessedElanInterfaces.get(elanInstanceName);
        if (elanInterfaces == null) {
            elanInterfaces = new ConcurrentLinkedQueue<>();
        }
        elanInterfaces.add(elanInterfaceAdded);
        unProcessedElanInterfaces.put(elanInstanceName, elanInterfaces);
        return;
    }
    DataStoreJobCoordinator coordinator = DataStoreJobCoordinator.getInstance();
    InterfaceAddWorkerOnElan addWorker = new InterfaceAddWorkerOnElan(elanInstanceName, elanInterfaceAdded,
            interfaceInfo, elanInstance, this);
    coordinator.enqueueJob(elanInstanceName, addWorker, ElanConstants.JOB_MAX_RETRIES);
}
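Note that the get-then-put sequence on unProcessedElanInterfaces above is not atomic: two threads adding the first interface for the same ELAN instance concurrently could each create a queue, and one enqueue could be lost. A minimal sketch of the atomic version of this park-per-key idiom using computeIfAbsent (types and names are illustrative, not OpenDaylight's):

import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

class PendingWork<T> {
    private final Map<String, Queue<T>> pending = new ConcurrentHashMap<>();

    // computeIfAbsent creates the per-key queue atomically, so concurrent
    // first adds for the same key cannot lose each other's items.
    void park(String key, T item) {
        pending.computeIfAbsent(key, k -> new ConcurrentLinkedQueue<>()).add(item);
    }

    // Detach the queue for later processing; never returns null.
    Queue<T> drain(String key) {
        Queue<T> q = pending.remove(key);
        return (q != null) ? q : new ConcurrentLinkedQueue<>();
    }
}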
From source file:io.openvidu.test.e2e.OpenViduTestAppE2eTest.java
@Test
@DisplayName("Change publisher dynamically")
void changePublisherTest() throws Exception {
    Queue<Boolean> threadAssertions = new ConcurrentLinkedQueue<Boolean>();
    setupBrowser("chrome");
    log.info("Change publisher dynamically");

    WebElement oneToManyInput = user.getDriver().findElement(By.id("one2many-input"));
    oneToManyInput.clear();
    oneToManyInput.sendKeys("1");
    user.getDriver().findElement(By.id("auto-join-checkbox")).click();

    final CountDownLatch latch1 = new CountDownLatch(2);

    // First publication (audio + video [CAMERA])
    user.getEventManager().on("streamPlaying", (event) -> {
        JsonObject stream = event.get("target").getAsJsonObject().get("stream").getAsJsonObject();
        threadAssertions.add("CAMERA".equals(stream.get("typeOfVideo").getAsString()));
        threadAssertions.add(stream.get("hasAudio").getAsBoolean());
        latch1.countDown();
    });
    user.getDriver().findElement(By.id("one2many-btn")).click();

    user.getEventManager().waitUntilEventReaches("connectionCreated", 4);
    user.getEventManager().waitUntilEventReaches("accessAllowed", 1);
    user.getEventManager().waitUntilEventReaches("streamCreated", 2);
    user.getEventManager().waitUntilEventReaches("streamPlaying", 2);

    if (!latch1.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail("Waiting for 2 streamPlaying events to happen in total");
        return;
    }

    user.getEventManager().off("streamPlaying");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    int numberOfVideos = user.getDriver().findElements(By.tagName("video")).size();
    Assert.assertEquals("Expected 2 videos but found " + numberOfVideos, 2, numberOfVideos);
    Assert.assertTrue("Videos were expected to have audio and video tracks", user.getEventManager()
            .assertMediaTracks(user.getDriver().findElements(By.tagName("video")), true, true));

    final CountDownLatch latch2 = new CountDownLatch(2);

    // Second publication (only video [SCREEN])
    user.getEventManager().on("streamPlaying", (event) -> {
        JsonObject stream = event.get("target").getAsJsonObject().get("stream").getAsJsonObject();
        threadAssertions.add("SCREEN".equals(stream.get("typeOfVideo").getAsString()));
        threadAssertions.add(!stream.get("hasAudio").getAsBoolean());
        latch2.countDown();
    });
    user.getDriver().findElement(By.cssSelector("#openvidu-instance-0 .change-publisher-btn")).click();

    user.getEventManager().waitUntilEventReaches("streamDestroyed", 2);
    user.getEventManager().waitUntilEventReaches("accessAllowed", 2);
    user.getEventManager().waitUntilEventReaches("streamCreated", 4);
    user.getEventManager().waitUntilEventReaches("streamPlaying", 4);

    if (!latch2.await(5000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail("Waiting for 4 streamPlaying events to happen in total");
        return;
    }

    user.getEventManager().off("streamPlaying");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    numberOfVideos = user.getDriver().findElements(By.tagName("video")).size();
    Assert.assertEquals("Expected 2 videos but found " + numberOfVideos, 2, numberOfVideos);
    Assert.assertTrue("Videos were expected to only have video tracks", user.getEventManager()
            .assertMediaTracks(user.getDriver().findElements(By.tagName("video")), false, true));

    final CountDownLatch latch3 = new CountDownLatch(2);

    // Third publication (audio + video [CAMERA])
    user.getEventManager().on("streamPlaying", (event) -> {
        JsonObject stream = event.get("target").getAsJsonObject().get("stream").getAsJsonObject();
        threadAssertions.add("CAMERA".equals(stream.get("typeOfVideo").getAsString()));
        threadAssertions.add(stream.get("hasAudio").getAsBoolean());
        latch3.countDown();
    });
    user.getDriver().findElement(By.cssSelector("#openvidu-instance-0 .change-publisher-btn")).click();

    user.getEventManager().waitUntilEventReaches("streamDestroyed", 4);
    user.getEventManager().waitUntilEventReaches("accessAllowed", 3);
    user.getEventManager().waitUntilEventReaches("streamCreated", 6);
    user.getEventManager().waitUntilEventReaches("streamPlaying", 6);

    if (!latch3.await(8000, TimeUnit.MILLISECONDS)) {
        gracefullyLeaveParticipants(2);
        fail("Waiting for 6 streamPlaying events to happen in total");
        return;
    }

    user.getEventManager().off("streamPlaying");
    log.info("Thread assertions: {}", threadAssertions.toString());
    for (Iterator<Boolean> iter = threadAssertions.iterator(); iter.hasNext();) {
        Assert.assertTrue("Some Event property was wrong", iter.next());
        iter.remove();
    }

    numberOfVideos = user.getDriver().findElements(By.tagName("video")).size();
    Assert.assertEquals("Expected 2 videos but found " + numberOfVideos, 2, numberOfVideos);
    Assert.assertTrue("Videos were expected to have audio and video tracks", user.getEventManager()
            .assertMediaTracks(user.getDriver().findElements(By.tagName("video")), true, true));

    gracefullyLeaveParticipants(2);
}
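The pattern to note is the Queue<Boolean> threadAssertions: assertions must not be thrown from event-handler threads (a failure there would not fail the test), so each callback records its checks in a ConcurrentLinkedQueue and the test thread drains and asserts after a latch confirms the callbacks ran. A minimal standalone sketch of that pattern (illustrative, outside any test framework):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class ThreadAssertionsSketch {
    public static void main(String[] args) throws InterruptedException {
        Queue<Boolean> threadAssertions = new ConcurrentLinkedQueue<>();
        CountDownLatch latch = new CountDownLatch(2);

        // Callback threads record their checks instead of asserting directly.
        for (int i = 0; i < 2; i++) {
            new Thread(() -> {
                threadAssertions.add(Boolean.TRUE); // a real check would go here
                latch.countDown();
            }).start();
        }

        // The main thread waits for the callbacks, then drains and asserts.
        if (!latch.await(5, TimeUnit.SECONDS)) {
            throw new AssertionError("callbacks did not fire in time");
        }
        for (Boolean ok : threadAssertions) {
            if (!ok) {
                throw new AssertionError("some event property was wrong");
            }
        }
        System.out.println("all " + threadAssertions.size() + " assertions passed");
    }
}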