Example usage for java.util.concurrent CountDownLatch getCount

Introduction

On this page you can find example usages of java.util.concurrent.CountDownLatch.getCount().

Prototype

public long getCount() 

Document

Returns the current count.
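
This method is typically used for debugging and testing rather than for synchronization, since the count can change as soon as it has been read. Before the larger examples below, here is a minimal, self-contained sketch of getCount() (the class name is illustrative):

import java.util.concurrent.CountDownLatch;

public class GetCountDemo {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(3);
        System.out.println("Initial count: " + latch.getCount());               // prints 3

        latch.countDown();
        latch.countDown();
        System.out.println("After two countDown() calls: " + latch.getCount()); // prints 1

        latch.countDown();
        System.out.println("After the final countDown(): " + latch.getCount()); // prints 0
        latch.await(); // returns immediately once the count has reached zero
    }
}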

Usage

From source file: edu.brown.hstore.PartitionExecutor.java

/**
 * Execute the given tasks and then block the current thread waiting for the
 * list of dependency_ids to come back from whatever it was we were supposed
 * to do... This is the slowest way to execute a bunch of WorkFragments and
 * therefore should only be invoked for batches that need to access
 * non-local partitions.
 *
 * @param ts the transaction handle for this batch
 * @param batchSize the number of SQLStmts in the current batch
 * @param fragments the WorkFragments to dispatch
 * @param parameters the ParameterSets for the current batch
 * @return the results for the current batch
 */
public VoltTable[] dispatchWorkFragments(final LocalTransaction ts, final int batchSize,
        Collection<WorkFragment> fragments, final ParameterSet parameters[]) {
    assert (fragments.isEmpty() == false) : "Unexpected empty WorkFragment list for " + ts;

    // *********************************** DEBUG ***********************************
    if (d) {
        LOG.debug(String.format("%s - Preparing to dispatch %d messages and wait for the results", ts,
                fragments.size()));
        if (t) {
            StringBuilder sb = new StringBuilder();
            sb.append(ts + " - WorkFragments:\n");
            for (WorkFragment fragment : fragments) {
                sb.append(StringUtil.box(fragment.toString()) + "\n");
            } // FOR
            sb.append(ts + " - ParameterSets:\n");
            for (ParameterSet ps : parameters) {
                sb.append(ps + "\n");
            } // FOR
            LOG.trace(sb);
        }
    }
    // *********************************** DEBUG ***********************************

    // OPTIONAL: Check to make sure that this request is valid
    // (1) At least one of the WorkFragments needs to be executed on a remote partition
    // (2) All of the PlanFragment ids in the WorkFragments match this txn's Procedure
    if (hstore_conf.site.exec_validate_work && ts.isSysProc() == false) {
        LOG.warn(String.format("%s - Checking whether all of the WorkFragments are valid", ts));
        boolean has_remote = false;
        for (WorkFragment frag : fragments) {
            if (frag.getPartitionId() != this.partitionId) {
                has_remote = true;
            }
            for (int frag_id : frag.getFragmentIdList()) {
                PlanFragment catalog_frag = CatalogUtil.getPlanFragment(database, frag_id);
                Statement catalog_stmt = catalog_frag.getParent();
                assert (catalog_stmt != null);
                Procedure catalog_proc = catalog_stmt.getParent();
                if (catalog_proc.equals(ts.getProcedure()) == false) {
                    LOG.warn(ts.debug() + "\n" + fragments + "\n---- INVALID ----\n" + frag);
                    String msg = String.format("%s - Unexpected %s", ts, catalog_frag.fullName());
                    throw new ServerFaultException(msg, ts.getTransactionId());
                }
            }
        } // FOR
        if (has_remote == false) {
            LOG.warn(ts.debug() + "\n" + fragments);
            String msg = String.format(
                    "%s - Trying to execute all local single-partition queries using the slow-path!", ts);
            throw new ServerFaultException(msg, ts.getTransactionId());
        }
    }

    // We have to store all of the tasks in the TransactionState before we start
    // executing, otherwise there is a race condition where a task with input
    // dependencies will start running as soon as we get one response back from
    // another executor
    ts.initRound(this.partitionId, this.getNextUndoToken());
    ts.setBatchSize(batchSize);

    final boolean prefetch = ts.hasPrefetchQueries();
    final boolean predict_singlePartition = ts.isPredictSinglePartition();

    // Attach the ParameterSets to our transaction handle so that anybody on
    // this HStoreSite can access them directly without needing to deserialize
    // them from the WorkFragments
    ts.attachParameterSets(parameters);

    // Now if we have some work sent out to other partitions, we need to wait
    // until they come back. In the first part, we wait until all of our blocked
    // FragmentTaskMessages become unblocked
    LinkedBlockingDeque<Collection<WorkFragment>> queue = ts.getUnblockedWorkFragmentsQueue();

    boolean first = true;
    boolean serializedParams = false;
    CountDownLatch latch = null;
    boolean all_local = true;
    boolean is_localSite;
    boolean is_localPartition;
    int num_localPartition = 0;
    int num_localSite = 0;
    int num_remote = 0;
    int num_skipped = 0;
    int total = 0;

    // Run through this loop if:
    // (1) We have no pending errors
    // (2) This is our first time in the loop (first == true)
    // (3) We know that there are still messages being blocked
    // (4) We know that there are still unblocked messages that we need to process
    // (5) The latch for this round is still greater than zero
    while (ts.hasPendingError() == false
            && (first == true || ts.stillHasWorkFragments() || (latch != null && latch.getCount() > 0))) {
        if (t)
            LOG.trace(String.format("%s - [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first,
                    ts.stillHasWorkFragments(), queue.size(), latch));

        // If this is not the first time through the loop, then poll the
        // queue to get our list of fragments
        if (first == false) {
            all_local = true;
            is_localSite = false;
            is_localPartition = false;
            num_localPartition = 0;
            num_localSite = 0;
            num_remote = 0;
            num_skipped = 0;
            total = 0;

            if (t)
                LOG.trace(String.format("%s - Waiting for unblocked tasks on partition %d", ts,
                        this.partitionId));
            if (hstore_conf.site.txn_profiling)
                ts.profiler.startExecDtxnWork();
            try {
                fragments = queue.takeFirst(); // BLOCKING
            } catch (InterruptedException ex) {
                if (this.hstore_site.isShuttingDown() == false) {
                    LOG.error(String.format("%s - We were interrupted while waiting for blocked tasks", ts),
                            ex);
                }
                return (null);
            } finally {
                if (hstore_conf.site.txn_profiling)
                    ts.profiler.stopExecDtxnWork();
            }
        }
        assert (fragments != null);

        // If the list of fragments to unblock is empty, then we know that we
        // have dispatched all of the WorkFragments for the transaction's
        // current SQLStmt batch. That means we can just wait until all the
        // results return to us.
        if (fragments.isEmpty()) {
            if (t)
                LOG.trace(ts + " - Got an empty list of WorkFragments. Blocking until dependencies arrive");
            break;
        }

        this.tmp_localWorkFragmentList.clear();
        if (predict_singlePartition == false) {
            this.tmp_remoteFragmentList.clear();
            this.tmp_localSiteFragmentList.clear();
        }

        // -------------------------------
        // FAST PATH: Assume everything is local
        // -------------------------------
        if (predict_singlePartition) {
            for (WorkFragment ftask : fragments) {
                if (first == false || ts.addWorkFragment(ftask) == false) {
                    this.tmp_localWorkFragmentList.add(ftask);
                    total++;
                    num_localPartition++;
                }
            } // FOR

            // We have to tell the TransactionState to start the round before we
            // send off the FragmentTasks for execution, since they might start
            // executing locally!
            if (first) {
                ts.startRound(this.partitionId);
                latch = ts.getDependencyLatch();
            }

            // Execute all of our WorkFragments quickly at our local
            // ExecutionEngine
            for (WorkFragment fragment : this.tmp_localWorkFragmentList) {
                if (d)
                    LOG.debug(String.format("Got unblocked FragmentTaskMessage for %s. Executing locally...",
                            ts));
                assert (fragment.getPartitionId() == this.partitionId) : String.format(
                        "Trying to process FragmentTaskMessage for %s on partition %d but it should have been sent to partition %d [singlePartition=%s]\n%s",
                        ts, this.partitionId, fragment.getPartitionId(), predict_singlePartition, fragment);
                ParameterSet fragmentParams[] = this.getFragmentParameters(ts, fragment, parameters);
                this.processWorkFragment(ts, fragment, fragmentParams);
            } // FOR
        }
        // -------------------------------
        // SLOW PATH: Mixed local and remote messages
        // -------------------------------
        else {
            // Look at each task and figure out whether it needs to be executed
            // at a remote HStoreSite or whether we can execute it at one of our
            // local PartitionExecutors.
            for (WorkFragment fragment : fragments) {
                int partition = fragment.getPartitionId();
                is_localSite = hstore_site.isLocalPartition(partition);
                is_localPartition = (partition == this.partitionId);
                all_local = all_local && is_localPartition;
                if (first == false || ts.addWorkFragment(fragment) == false) {
                    total++;

                    // At this point we know that the WorkFragment has been
                    // registered in the LocalTransaction, so it's safe for us
                    // to look to see whether we already have a prefetched
                    // result that we need
                    if (prefetch && is_localPartition == false) {
                        boolean skip_queue = true;
                        for (int i = 0, cnt = fragment.getFragmentIdCount(); i < cnt; i++) {
                            int fragId = fragment.getFragmentId(i);
                            int paramIdx = fragment.getParamIndex(i);

                            VoltTable vt = this.queryCache.getTransactionCachedResult(ts.getTransactionId(),
                                    fragId, partition, parameters[paramIdx]);
                            if (vt != null) {
                                ts.addResult(partition, fragment.getOutputDepId(i), vt);
                            } else {
                                skip_queue = false;
                            }
                        } // FOR
                        // If we were able to get cached results for all of the
                        // fragmentIds in this WorkFragment, then there is no
                        // need for us to send the message, so we'll just skip
                        // queuing it up! How nice!
                        if (skip_queue) {
                            if (d)
                                LOG.debug(String.format(
                                        "%s - Using prefetch result for all fragments from partition %d", ts,
                                        partition));
                            num_skipped++;
                            continue;
                        }
                    }

                    // Otherwise add it to our list of WorkFragments that we
                    // want to queue up right now
                    if (is_localPartition) {
                        this.tmp_localWorkFragmentList.add(fragment);
                        num_localPartition++;
                    } else if (is_localSite) {
                        this.tmp_localSiteFragmentList.add(fragment);
                        num_localSite++;
                    } else {
                        this.tmp_remoteFragmentList.add(fragment);
                        num_remote++;
                    }
                }
            } // FOR
            assert (total == (num_remote + num_localSite + num_localPartition + num_skipped)) : String.format(
                    "Total:%d / Remote:%d / LocalSite:%d / LocalPartition:%d / Skipped:%d", total, num_remote,
                    num_localSite, num_localPartition, num_skipped);
            if (num_localPartition == 0 && num_localSite == 0 && num_remote == 0 && num_skipped == 0) {
                String msg = String.format("Deadlock! All tasks for %s are blocked waiting on input!", ts);
                throw new ServerFaultException(msg, ts.getTransactionId());
            }

            // We have to tell the TransactionState to start the round before we
            // send off the FragmentTasks for execution, since they might start
            // executing locally!
            if (first) {
                ts.startRound(this.partitionId);
                latch = ts.getDependencyLatch();
            }

            // Now request the fragments that aren't local
            // We want to push these out as soon as possible
            if (num_remote > 0) {
                // We only need to serialize the ParameterSets once
                if (serializedParams == false) {
                    if (hstore_conf.site.txn_profiling)
                        ts.profiler.startSerialization();
                    tmp_serializedParams.clear();
                    for (int i = 0; i < parameters.length; i++) {
                        if (parameters[i] == null) {
                            tmp_serializedParams.add(ByteString.EMPTY);
                        } else {
                            this.fs.clear();
                            try {
                                parameters[i].writeExternal(this.fs);
                                ByteString bs = ByteString.copyFrom(this.fs.getBBContainer().b);
                                tmp_serializedParams.add(bs);
                            } catch (Exception ex) {
                                throw new ServerFaultException(
                                        "Failed to serialize ParameterSet " + i + " for " + ts, ex);
                            }
                        }
                    } // FOR
                    if (hstore_conf.site.txn_profiling)
                        ts.profiler.stopSerialization();
                }
                if (d)
                    LOG.debug(String.format(
                            "%s - Requesting %d FragmentTaskMessages to be executed on remote partitions", ts,
                            num_remote));
                this.requestWork(ts, tmp_remoteFragmentList, tmp_serializedParams);
            }

            // Then dispatch the tasks that are needed at the same HStoreSite
            // but at a different partition than this one
            if (num_localSite > 0) {
                if (d)
                    LOG.debug(String.format("%s - Executing %d FragmentTaskMessages on local site's partitions",
                            ts, num_localSite));
                for (WorkFragment fragment : this.tmp_localSiteFragmentList) {
                    FragmentTaskMessage ftask = ts.getFragmentTaskMessage(fragment);
                    hstore_site.getPartitionExecutor(fragment.getPartitionId()).queueWork(ts, ftask);
                } // FOR
            }

            // Then execute all of the tasks that need to access the partitions
            // at this HStoreSite. We'll dispatch the remote-partition-local-site
            // fragments first because they're going to need to get queued up at
            // the other PartitionExecutors
            if (num_localPartition > 0) {
                if (d)
                    LOG.debug(String.format("%s - Executing %d FragmentTaskMessages on local partition", ts,
                            num_localPartition));
                for (WorkFragment fragment : this.tmp_localWorkFragmentList) {
                    ParameterSet fragmentParams[] = this.getFragmentParameters(ts, fragment, parameters);
                    this.processWorkFragment(ts, fragment, fragmentParams);
                } // FOR
            }
        }
        if (t)
            LOG.trace(String.format(
                    "%s - Dispatched %d WorkFragments [remoteSite=%d, localSite=%d, localPartition=%d]", ts,
                    total, num_remote, num_localSite, num_localPartition));
        first = false;
    } // WHILE
    this.fs.getBBContainer().discard();

    if (t)
        LOG.trace(String.format("%s - BREAK OUT [first=%s, stillHasWorkFragments=%s, latch=%s]", ts, first,
                ts.stillHasWorkFragments(), latch));
    // assert(ts.stillHasWorkFragments() == false) :
    // String.format("Trying to block %s before all of its WorkFragments have been dispatched!\n%s\n%s",
    // ts,
    // StringUtil.join("** ", "\n", tempDebug),
    // this.getVoltProcedure(ts.getProcedureName()).getLastBatchPlan());

    // Now that we know all of our FragmentTaskMessages have been dispatched,
    // we can wait for all of the results to come back in.
    if (latch == null)
        latch = ts.getDependencyLatch();
    if (latch.getCount() > 0) {
        if (d) {
            LOG.debug(String.format("%s - All blocked messages dispatched. Waiting for %d dependencies", ts,
                    latch.getCount()));
            if (t)
                LOG.trace(ts.toString());
        }
        if (hstore_conf.site.txn_profiling)
            ts.profiler.startExecDtxnWork();
        boolean done = false;
        // XXX this.utilityWork(latch);
        try {
            done = latch.await(hstore_conf.site.exec_response_timeout, TimeUnit.MILLISECONDS);
        } catch (InterruptedException ex) {
            if (this.hstore_site.isShuttingDown() == false) {
                LOG.error(String.format("%s - We were interrupted while waiting for results", ts), ex);
            }
            done = true;
        } catch (Throwable ex) {
            throw new ServerFaultException(String.format("Fatal error for %s while waiting for results", ts), ex);
        } finally {
            if (hstore_conf.site.txn_profiling)
                ts.profiler.stopExecDtxnWork();
        }
        if (done == false && this.isShuttingDown() == false) {
            LOG.warn(String.format("Still waiting for responses for %s after %d ms [latch=%d]\n%s", ts,
                    hstore_conf.site.exec_response_timeout, latch.getCount(), ts.debug()));
            LOG.warn("Procedure Parameters:\n" + ts.getInvocation().getParams());
            hstore_conf.site.exec_profiling = true;
            LOG.warn(hstore_site.statusSnapshot());

            String msg = "PartitionResponses for " + ts + " never arrived!";
            throw new ServerFaultException(msg, ts.getTransactionId());
        }
    }

    // IMPORTANT: Check whether the fragments failed somewhere and we got a
    // response with an error. We will rethrow this so that it pops the stack
    // all the way back to VoltProcedure.call() where we can generate a message
    // to the client
    if (ts.hasPendingError()) {
        if (d)
            LOG.warn(
                    String.format("%s was hit with a %s", ts, ts.getPendingError().getClass().getSimpleName()));
        throw ts.getPendingError();
    }

    // IMPORTANT: Don't try to check whether we got back the right number of
    // tables because the batch may have hit an error and we didn't execute
    // all of them.
    VoltTable results[] = ts.getResults();
    ts.finishRound(this.partitionId);
    if (d) {
        if (t)
            LOG.trace(ts + " is now running and looking for love in all the wrong places...");
        LOG.debug(ts + " is returning back " + results.length + " tables to VoltProcedure");
    }
    return (results);
}
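
The example above uses getCount() both as part of the loop condition (keep going while the dependency latch is still open) and to decide whether it needs to block on a timed await() at all. A minimal sketch of that wait step, with illustrative names, could look like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

/** Minimal sketch of the dispatch-then-wait step above; names are illustrative. */
public class DependencyWaitSketch {

    /** Returns true if the latch drained before the timeout, false otherwise. */
    static boolean waitForDependencies(CountDownLatch latch, long timeoutMs)
            throws InterruptedException {
        // getCount() lets us skip the blocking call entirely when every
        // expected result has already arrived.
        if (latch.getCount() == 0) {
            return true;
        }
        boolean done = latch.await(timeoutMs, TimeUnit.MILLISECONDS);
        if (!done) {
            // On a timeout, getCount() tells us how many responses are still
            // outstanding, which is what the H-Store code logs before throwing
            // its ServerFaultException.
            System.err.println("Timed out; still waiting on " + latch.getCount() + " dependencies");
        }
        return done;
    }
}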

From source file: lcmc.gui.ClusterBrowser.java

/** Process output from cluster. */
void processClusterOutput(final String output, final StringBuffer clusterStatusOutput, final Host host,
        final CountDownLatch firstTime, final boolean testOnly) {
    final ClusterStatus clStatus = clusterStatus;
    clStatusLock();
    if (clStatusCanceled || clStatus == null) {
        clStatusUnlock();
        firstTime.countDown();
        return;
    }
    if (output == null || "".equals(output)) {
        clStatus.setOnlineNode(host.getName(), "no");
        setClStatus(host, false);
        firstTime.countDown();
    } else {
        // TODO: if we get ERROR:... show it somewhere
        clusterStatusOutput.append(output);
        /* removes the string from the output. */
        int s = clusterStatusOutput.indexOf(RESET_STRING);
        while (s >= 0) {
            clusterStatusOutput.delete(s, s + RESET_STRING_LEN);
            s = clusterStatusOutput.indexOf(RESET_STRING);
        }
        if (clusterStatusOutput.length() > 12) {
            final String e = clusterStatusOutput.substring(clusterStatusOutput.length() - 12);
            if (e.trim().equals("---done---")) {
                final int i = clusterStatusOutput.lastIndexOf("---start---");
                if (i >= 0) {
                    if (clusterStatusOutput.indexOf("is stopped") >= 0) {
                        /* TODO: heartbeat's not running. */
                    } else {
                        final String status = clusterStatusOutput.substring(i);
                        clusterStatusOutput.delete(0, clusterStatusOutput.length());
                        if (CLUSTER_STATUS_ERROR.equals(status)) {
                            final boolean oldStatus = host.isClStatus();
                            clStatus.setOnlineNode(host.getName(), "no");
                            setClStatus(host, false);
                            if (oldStatus) {
                                crmGraph.repaint();
                            }
                        } else {
                            if (clStatus.parseStatus(status)) {
                                Tools.debug(this, "update cluster status: " + host.getName(), 1);
                                final ServicesInfo ssi = servicesInfo;
                                rscDefaultsInfo.setParameters(clStatus.getRscDefaultsValuePairs());
                                ssi.setGlobalConfig(clStatus);
                                ssi.setAllResources(clStatus, testOnly);
                                if (firstTime.getCount() == 1) {
                                    /* one more time so that id-refs work.*/
                                    ssi.setAllResources(clStatus, testOnly);
                                }
                                repaintTree();
                                clusterHostsInfo.updateTable(ClusterHostsInfo.MAIN_TABLE);
                            }
                            final String online = clStatus.isOnlineNode(host.getName());
                            if ("yes".equals(online)) {
                                setClStatus(host, true);
                                setClStatus();
                            } else {
                                setClStatus(host, false);
                            }
                        }
                    }
                    firstTime.countDown();
                }
            }
        }
        Tools.chomp(clusterStatusOutput);
    }
    clStatusUnlock();
}

From source file: lcmc.cluster.ui.ClusterBrowser.java

public void parseClusterOutput(final String output, final StringBuffer clusterStatusOutput, final Host host,
        final CountDownLatch firstTime, final Application.RunMode runMode) {
    final ClusterStatus clusterStatus0 = this.clusterStatus;
    clStatusLock();
    if (crmStatusCanceledByUser || clusterStatus0 == null) {
        clStatusUnlock();
        firstTime.countDown();
        return;
    }
    if (output == null || "".equals(output)) {
        clusterStatus0.setOnlineNode(host.getName(), "no");
        setCrmStatus(host, false);
        firstTime.countDown();
    } else {
        // TODO: if we get ERROR:... show it somewhere
        clusterStatusOutput.append(output);
        /* removes the string from the output. */
        int s = clusterStatusOutput.indexOf(RESET_STRING);
        while (s >= 0) {
            clusterStatusOutput.delete(s, s + RESET_STRING_LEN);
            s = clusterStatusOutput.indexOf(RESET_STRING);
        }
        if (clusterStatusOutput.length() > 12) {
            final String e = clusterStatusOutput.substring(clusterStatusOutput.length() - 12);
            if (e.trim().equals("---done---")) {
                final int i = clusterStatusOutput.lastIndexOf("---start---");
                if (i >= 0) {
                    if (clusterStatusOutput.indexOf("is stopped") >= 0) {
                        /* TODO: heartbeat's not running. */
                    } else {
                        final String status = clusterStatusOutput.substring(i);
                        clusterStatusOutput.delete(0, clusterStatusOutput.length());
                        if (CLUSTER_STATUS_ERROR.equals(status)) {
                            final boolean oldStatus = host.isCrmStatusOk();
                            clusterStatus0.setOnlineNode(host.getName(), "no");
                            setCrmStatus(host, false);
                            if (oldStatus) {
                                crmGraph.repaint();
                            }
                        } else {
                            if (clusterStatus0.parseStatus(status)) {
                                LOG.debug1("processClusterOutput: host: " + host.getName());
                                final ServicesInfo ssi = servicesInfo;
                                rscDefaultsInfo.setParameters(clusterStatus0.getRscDefaultsValuePairs());
                                ssi.setGlobalConfig(clusterStatus0);
                                resourceUpdaterProvider.get().updateAllResources(ssi, ssi.getBrowser(),
                                        clusterStatus0, runMode);
                                if (firstTime.getCount() == 1) {
                                    /* one more time so that id-refs work.*/
                                    resourceUpdaterProvider.get().updateAllResources(ssi, ssi.getBrowser(),
                                            clusterStatus0, runMode);
                                }
                                treeMenuController.repaintMenuTree();
                                clusterHostsInfo.updateTable(ClusterHostsInfo.MAIN_TABLE);
                            }
                            final String online = clusterStatus0.isOnlineNode(host.getName());
                            if ("yes".equals(online)) {
                                setCrmStatus(host, true);
                                setCrmStatus();
                            } else {
                                setCrmStatus(host, false);
                            }
                        }
                    }
                    firstTime.countDown();
                }
            }
        }
        Tools.chomp(clusterStatusOutput);
    }
    clStatusUnlock();
}
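
Both ClusterBrowser variants above use a one-shot CountDownLatch(1) named firstTime and test getCount() == 1 to detect whether the very first full status update is still being processed (and, if so, apply the resources a second time so that id-refs resolve). A minimal sketch of that pattern, with illustrative names, could look like this:

import java.util.concurrent.CountDownLatch;

/** Minimal sketch of the one-shot "firstTime" pattern above; names are illustrative. */
public class FirstTimeFlag {

    // A count of 1 makes the latch a one-shot flag: getCount() == 1 means the
    // first update has not been fully processed yet.
    private final CountDownLatch firstTime = new CountDownLatch(1);

    void onStatusUpdate(String status) {
        applyStatus(status);
        if (firstTime.getCount() == 1) {
            // First pass: apply once more so that id-refs resolve, mirroring
            // the repeated setAllResources()/updateAllResources() calls above.
            applyStatus(status);
        }
        firstTime.countDown(); // releases any thread blocked in awaitFirstUpdate()
    }

    void awaitFirstUpdate() throws InterruptedException {
        firstTime.await();
    }

    /** Hypothetical placeholder for the real status-handling code. */
    private void applyStatus(String status) {
        // no-op in this sketch
    }
}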

From source file: org.jitsi.meet.test.PSNRTest.java

/**
 * A test where we read some configuration or fall back to default values,
 * expect a conference to already be established (by SetupConference), and
 * keep checking whether this is still the case; if something is not working,
 * we fail.
 */
public void testPSNR() {
    File inputFrameDir = new File(INPUT_FRAME_DIR);
    if (!inputFrameDir.exists()) {
        // Skip the PSNR tests because we don't have any PSNR
        // resources.
        return;
    }
    // Create the output directory for captured frames.
    File outputFrameDir = new File(OUTPUT_FRAME_DIR);
    if (!outputFrameDir.exists()) {
        outputFrameDir.mkdirs();
    }

    String timeToRunInMin = System.getProperty("psnr.duration");

    // default is 1 minute
    if (timeToRunInMin == null || timeToRunInMin.length() == 0)
        timeToRunInMin = "1";

    final int minutesToRun = Integer.valueOf(timeToRunInMin);

    final CountDownLatch waitSignal = new CountDownLatch(1);

    // execute every 1 sec.
    final Timer timer = new Timer();
    timer.schedule(new TimerTask() {
        long lastRun = System.currentTimeMillis();

        int millsToRun = minutesToRun * 60 * 1000;

        CountDownLatch ownerDownloadSignal = new CountDownLatch(3);
        CountDownLatch secondPDownloadSignal = new CountDownLatch(3);

        @Override
        public void run() {
            try {
                System.err.println("Checking at " + new Date() + " / to finish: " + millsToRun + " ms.");

                if (!ConferenceFixture.isIceConnected(ConferenceFixture.getOwner())) {
                    assertAndQuit("Owner ice is not connected.");
                    return;
                }

                if (!ConferenceFixture.isInMuc(ConferenceFixture.getOwner())) {
                    assertAndQuit("Owner is not in the muc.");
                    return;
                }

                if (!ConferenceFixture.isIceConnected(ConferenceFixture.getSecondParticipant())) {
                    assertAndQuit("Second participant ice is not connected.");
                    return;
                }

                if (!ConferenceFixture.isInMuc(ConferenceFixture.getSecondParticipant())) {
                    assertAndQuit("The second participant is not in the muc.");
                    return;
                }

                long downloadOwner = ConferenceFixture.getDownloadBitrate(ConferenceFixture.getOwner());
                long downloadParticipant = ConferenceFixture
                        .getDownloadBitrate(ConferenceFixture.getSecondParticipant());

                if (downloadOwner <= 0) {
                    System.err.println("Owner no download bitrate");
                    ownerDownloadSignal.countDown();
                } else
                    ownerDownloadSignal = new CountDownLatch(3);

                if (ownerDownloadSignal.getCount() <= 0) {
                    assertAndQuit("Owner download bitrate less than 0");
                    return;
                }

                if (downloadParticipant <= 0) {
                    System.err.println("Second participant no download bitrate");
                    secondPDownloadSignal.countDown();
                } else
                    secondPDownloadSignal = new CountDownLatch(3);

                if (secondPDownloadSignal.getCount() <= 0) {
                    assertAndQuit("Second participant download rate less than 0");
                    return;
                }

                if (!ConferenceFixture.isXmppConnected(ConferenceFixture.getOwner())) {
                    assertAndQuit("Owner xmpp connection is not connected");
                    return;
                }

                if (!ConferenceFixture.isXmppConnected(ConferenceFixture.getSecondParticipant())) {
                    assertAndQuit("The second participant xmpp " + "connection is not connected");
                    return;
                }

                WebDriver driver = ConferenceFixture.getOwner();
                if (driver instanceof JavascriptExecutor) {
                    JavascriptExecutor js = ((JavascriptExecutor) driver);

                    List<WebElement> remoteThumb = driver
                            .findElements(By.xpath("//video[starts-with(@id, 'remoteVideo_')]"));

                    for (WebElement webElement : remoteThumb) {
                        //FIXME This needs to be optimized. We run this
                        // every second. It encodes an image in base64 and
                        // it transfers it over the network (that's how
                        // selenium communicates with the debugger). So this
                        // might work with a few images per second.. But this
                        // will fail miserably if we want to capture 30fps.
                        // The proper solution would be to store the images
                        // in the sandboxed HTML filesystem that modern
                        // browsers provide. And transfer them at the end
                        // of the test. We could follow the same approach
                        // if we want to grab the whole webm/vp8 stream using
                        // the Recorder API.
                        String elmId = webElement.getAttribute("id");
                        Object pngUrl = js.executeScript("var video = document.getElementById(\"" + elmId
                                + "\");" + "var canvasId = 'canvas-capture';"
                                + "var canvas = document.getElementById(canvasId);" + "if (canvas == null) {"
                                + "    canvas = document.createElement('canvas');" + "    canvas.id = canvasId;"
                                + "    document.body.appendChild(canvas);" + "}"
                                + "canvas.width = video.videoWidth;" + "canvas.height = video.videoHeight;"
                                + "var ctx = canvas.getContext('2d');" + "ctx.drawImage(video, 0, 0);"
                                + "return canvas.toDataURL(\"image/png\");");

                        // Parse the URI to get only the base64 part
                        String strBase64 = pngUrl.toString().substring("data:image/png;base64,".length());

                        // Convert it to binary
                        // Java 8 has a Base64 class.
                        byte[] data = org.apache.commons.codec.binary.Base64.decodeBase64(strBase64);

                        try (OutputStream stream = new FileOutputStream(
                                OUTPUT_FRAME_DIR + elmId + "-" + lastRun + ".png")) {
                            stream.write(data);
                        }
                    }
                }

                long currentTime = System.currentTimeMillis();
                millsToRun -= (currentTime - lastRun);
                lastRun = currentTime;

                if (millsToRun <= 0) {
                    timer.cancel();
                }
            } catch (Exception e) {
                e.printStackTrace();

                assertAndQuit("Unexpected error occurred.");
            }
        }

        /**
         * Clears what is needed and lowers the assert countdown.
         * @param msg
         */
        private void assertAndQuit(String msg) {
            System.err.println(msg);
            waitSignal.countDown();
            timer.cancel();
        }

    }, /* delay */ 1000, /* period */ 1000);

    try {
        waitSignal.await(minutesToRun, TimeUnit.MINUTES);

        if (waitSignal.getCount() == 0)
            assertTrue("A problem with the conf occurred", false);
        else {
            Runtime rt = Runtime.getRuntime();
            String[] commands = { PSNR_SCRIPT, OUTPUT_FRAME_DIR, INPUT_FRAME_DIR, RESIZED_FRAME_DIR };
            Process proc = rt.exec(commands);

            BufferedReader stdInput = new BufferedReader(new InputStreamReader(proc.getInputStream()));

            BufferedReader stdError = new BufferedReader(new InputStreamReader(proc.getErrorStream()));

            // read the output from the command
            String s = null;
            while ((s = stdInput.readLine()) != null) {
                assertTrue(s == null || Float.parseFloat(s.split(" ")[1]) > MIN_PSNR);
            }

            // read any errors from the attempted command
            while ((s = stdError.readLine()) != null) {
                System.err.println(s);
            }
        }
    } catch (Exception e) {
        assertTrue("An error occurred", false);
    }
}
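
The PSNR test above relies on the fact that a timed await() returns either because the latch reached zero or because the timeout elapsed; checking getCount() afterwards tells the two cases apart (zero means the timer task signalled a failure). A minimal sketch of that pattern, with illustrative names, could look like this:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

/** Minimal sketch of the failure-signal pattern above; names are illustrative. */
public class FailureSignalSketch {

    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch failureSignal = new CountDownLatch(1);

        // A background check releases the latch only when something goes wrong.
        Thread monitor = new Thread(() -> {
            if (checkForProblem()) {
                failureSignal.countDown(); // report the failure and release the waiter
            }
        });
        monitor.start();

        // Wait for the full run duration, or until an early failure signal arrives.
        failureSignal.await(2, TimeUnit.SECONDS);

        // getCount() distinguishes the two ways the timed await() can return:
        //   0 -> the monitor counted down, i.e. a failure was reported
        //   1 -> the timeout elapsed without any failure
        if (failureSignal.getCount() == 0) {
            System.err.println("A problem was reported before the timeout");
        } else {
            System.out.println("Ran for the full duration without problems");
        }
        monitor.join();
    }

    /** Hypothetical placeholder for an actual health check. */
    private static boolean checkForProblem() {
        return false;
    }
}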