Example usage for java.util LinkedList clone

Introduction

This page collects example usages of java.util.LinkedList.clone().

Prototype

public Object clone() 

Document

Returns a shallow copy of this LinkedList.
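
Because the copy is shallow, the clone and the original share their element references; only the list structure is duplicated. The following standalone sketch (not part of the usage list below) illustrates the difference between a structural change and an element mutation:

import java.util.LinkedList;

public class ShallowCopyDemo {
    public static void main(String[] args) {
        LinkedList<StringBuilder> original = new LinkedList<>();
        original.add(new StringBuilder("a"));

        @SuppressWarnings("unchecked")
        LinkedList<StringBuilder> copy = (LinkedList<StringBuilder>) original.clone();

        copy.add(new StringBuilder("b")); // structural change: original is unaffected
        copy.getFirst().append("!");      // element mutation: visible through both lists

        System.out.println(original); // [a!]
        System.out.println(copy);     // [a!, b]
    }
}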

Usage

From source file:Main.java

public static void main(String[] args) {

    // create a LinkedList
    LinkedList<String> list = new LinkedList<String>();

    // add some elements
    list.add("Hello");
    list.add("from java2s.com");
    list.add("10");

    // print the list
    System.out.println("LinkedList:" + list);

    // clone list into a second LinkedList; clone() returns Object,
    // so the result must be cast back to LinkedList
    @SuppressWarnings("unchecked")
    LinkedList<String> list2 = (LinkedList<String>) list.clone();

    // print list2
    System.out.println("LinkedList 2:" + list2);
}
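
Because clone() returns Object, the cast above is unchecked. When the element type is known at the call site, the copy constructor is a type-safe alternative that likewise produces a shallow copy:

    LinkedList<String> copy = new LinkedList<>(list); // same elements, no cast, no warning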

From source file:jp.co.ctc_g.jse.vid.ViewId.java

@SuppressWarnings("unchecked")
private static Iterable<ViewId> container(ViewIdStore store) {

    Iterable<ViewId> iterable = null;
    synchronized (store.semaphore()) {
        LinkedList<ViewId> ids = store.find(false);
        iterable = ids != null ? Collections.unmodifiableList((LinkedList<ViewId>) ids.clone())
                : Collections.<ViewId>emptyList();
    }
    return iterable;
}
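
Cloning while holding the lock yields a stable snapshot, so the unmodifiable view handed to callers cannot be affected by later changes to the stored list. A minimal sketch of the same snapshot-under-lock pattern, with hypothetical names:

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;

class Registry {
    private final Object lock = new Object();
    private final LinkedList<String> entries = new LinkedList<>();

    @SuppressWarnings("unchecked")
    List<String> snapshot() {
        synchronized (lock) {
            // clone under the lock; callers may iterate the copy lock-free
            return Collections.unmodifiableList((LinkedList<String>) entries.clone());
        }
    }
}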

From source file:ru.apertum.journal.redactor.TarTableModel.java

public void setTars(LinkedList<Integer> tars1) {
    this.tars = tars1 == null ? null : (LinkedList<Integer>) tars1.clone();
    if (tars == null) {
        tars = new LinkedList<>();
        for (int i = 0; i < 256; i++) {
            tars.add(i);
        }
    }
    for (int i = tars.size(); i < 256; i++) {
        tars.add(i);
    }
}
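
Since Integer elements are immutable, the shallow copy made by clone() isolates the model completely: later changes to the caller's list cannot leak in. A short sketch of that defensive-copy effect:

    LinkedList<Integer> caller = new LinkedList<>();
    caller.add(1);

    @SuppressWarnings("unchecked")
    LinkedList<Integer> kept = (LinkedList<Integer>) caller.clone();

    caller.add(2);
    System.out.println(kept); // [1] -- the defensive copy is unaffected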

From source file:se.backede.jeconomix.forms.report.SingleTransactionReport.java

public void filter() {

    companyComboBox.setEnabled(false);
    yearComboBox.setEnabled(false);
    monthComboBox.setEnabled(false);

    LinkedList<TransactionDto> allTransactions = new LinkedList<TransactionDto>(reports.getTransctions());

    CompanyDto company = (CompanyDto) companyComboBox.getSelectedItem();

    String yearString = (String) yearComboBox.getSelectedItem();
    Integer year = 0;
    if (yearString != null) {
        if (!yearString.equals(ALL_YEARS)) {
            year = Integer.parseInt(yearString);
        }
    }

    String monthString = (String) monthComboBox.getSelectedItem();
    Month month = null;
    if (monthString != null) {
        if (!monthString.equals(ALL_MONTHS)) {
            month = Month.valueOf(monthString);
        }
    }

    LinkedList<TransactionDto> filteredCompanies = new LinkedList<>(reports.getTransctions());

    if (company != null) {
        if (!company.getName().equals(ALL_COMPANIES)) {
            filteredCompanies = allTransactions.stream().filter(line -> line.getCompany().equals(company))
                    .collect(Collectors.toCollection(LinkedList::new));
        }
    }

    LinkedList<TransactionDto> filteredByYear = (LinkedList<TransactionDto>) filteredCompanies.clone();
    if (yearString != null) {
        if (!yearString.equals(ALL_YEARS)) {
            for (TransactionDto filteredTransaction : filteredCompanies) {
                if (!Objects.equals(filteredTransaction.getBudgetYear(), year)) {
                    filteredByYear.remove(filteredTransaction);
                }
            }
        }
    }

    LinkedList<TransactionDto> filteredByMonth = (LinkedList<TransactionDto>) filteredByYear.clone();
    if (monthString != null) {
        if (!monthString.equals(ALL_MONTHS)) {
            for (TransactionDto filteredTransaction : filteredByYear) {
                if (filteredTransaction.getBudgetMonth() != month) {
                    filteredByMonth.remove(filteredTransaction);
                }
            }
        }
    }

    DefaultTableCellRenderer rightRenderer = new DefaultTableCellRenderer();

    rightRenderer.setHorizontalAlignment(JLabel.RIGHT);

    TransactionCompanyModel transModel = new TransactionCompanyModel(new HashSet<>(filteredByMonth));

    transactionTable.setModel(transModel);

    transactionTable.getColumnModel().getColumn(2).setCellRenderer(rightRenderer);

    transactionSumLabel.setText(transModel.getSum().toString().concat(" Kr"));
    categoryNameLabel.setText(reports.getCategory());

    companyComboBox.setEnabled(true);
    yearComboBox.setEnabled(true);
    monthComboBox.setEnabled(true);

}
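
filter() clones each intermediate list before pruning it, because removing elements from the list being iterated would throw ConcurrentModificationException. Note that remove(Object) on a LinkedList is a linear scan, so the clone-then-remove loops are quadratic in the worst case; since Java 8, removeIf does the same pruning in a single pass. A sketch of the year filter using the same names as above:

    LinkedList<TransactionDto> filteredByYear = new LinkedList<>(filteredCompanies);
    filteredByYear.removeIf(t -> !Objects.equals(t.getBudgetYear(), year));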

From source file:com.act.lcms.v2.fullindex.Builder.java

protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume.  If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer.  Don't write more than this.
     *   position: The next index in the buffer to read or write a byte.  Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written.  Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it.  (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = capacity.  Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0.  This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0.  Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read.  Always call flip() before rewind().  Can rewind many times to re-read
     *              the buffer repeatedly.
     *   remaining: How many bytes do we have left to read?  Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this.  It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading.  Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.clear(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations.  The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break).  Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach.
     */

    /* Because we'll eventually use the window indices to map a mz range to a list of triples that fall within that
     * range, verify that all of the indices are unique.  If they're not, we'll end up overwriting the data in and
     * corrupting the structure of the index. */
    ensureUniqueMZWindowIndices(windows);

    // For every mz window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional.  Specifically,
         * mzWindows.get(i).getIndex() != i, but mzWindowTripleBuffers[i] belongs to mzWindows.get(i).  We'll map windows
         * indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take.  Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);
    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process).  The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well.  As soon as we've passed out of the range of one of our windows, we discard it.  It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint.  Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists.  Windows flow: tbd -> working -> done (nowhere).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!
        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple.  Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue.  Edge cases!
            }
            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our window set
             * that matched with the m/z of our current mzIntensity.  However, since we're now also recording the links
             * between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on regardless of whether we have
             * any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value.  Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue.  Note that since we're only
             * storing the *index* of the triple, these buffers are going to consume less space than they would if we
             * stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped after reading, so we should be good to rewind (to be safe) and write here.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();

        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow to triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}
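
Here tbdQueueTemplate is built once and cloned at the top of every spectrum iteration, so the sweep can destructively pop() from a fresh queue while the template stays intact. A standalone sketch of that reusable-template pattern:

import java.util.LinkedList;

public class TemplateCloneDemo {
    public static void main(String[] args) {
        // Build the template once...
        LinkedList<String> template = new LinkedList<>();
        template.add("w1");
        template.add("w2");

        for (int pass = 0; pass < 3; pass++) {
            // ...then take a fresh, independently consumable copy per pass.
            @SuppressWarnings("unchecked")
            LinkedList<String> queue = (LinkedList<String>) template.clone();
            while (!queue.isEmpty()) {
                System.out.println("pass " + pass + ": " + queue.pop());
            }
        }
        System.out.println(template); // still [w1, w2] after all passes
    }
}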

From source file:edu.cornell.med.icb.clustering.QTClusterer.java

/**
 * Groups instances into clusters. Returns the indices of the instances
 * that belong to a cluster as an int array in the list result.
 *
 * @param calculator       The
 *                         {@link edu.cornell.med.icb.clustering.SimilarityDistanceCalculator}
 *                         that should be used when clustering
 * @param qualityThreshold The QT clustering algorithm quality threshold (d)
 * @return The list of clusters.
 */
public List<int[]> cluster(final SimilarityDistanceCalculator calculator, final double qualityThreshold) {
    final ProgressLogger clusterProgressLogger = new ProgressLogger(LOGGER, logInterval, "instances clustered");
    clusterProgressLogger.displayFreeMemory = true;
    clusterProgressLogger.expectedUpdates = instanceCount;
    clusterProgressLogger.start("Starting to cluster " + instanceCount + " instances using "
            + parallelTeam.getThreadCount() + " threads.");

    // reset cluster results
    clusterCount = 0;
    // instanceList is the set "G" to cluster
    final LinkedList<Integer> instanceList = new LinkedList<Integer>();
    for (int i = 0; i < instanceCount; i++) {
        clusters[i].clear();

        // set each node in the instance list to its
        // original position in the source data array
        instanceList.add(i);
    }

    final double ignoreDistance = calculator.getIgnoreDistance();

    // eliminate any instances that will never cluster with anything else
    final IntList singletonClusters = identifySingletonClusters(calculator, qualityThreshold, instanceList,
            clusterProgressLogger);

    final ProgressLogger innerLoopProgressLogger = new ProgressLogger(LOGGER, logInterval,
            "inner loop iterations");
    innerLoopProgressLogger.displayFreeMemory = false;

    final ProgressLogger outerLoopProgressLogger = new ProgressLogger(LOGGER, logInterval,
            "outer loop iterations");
    outerLoopProgressLogger.displayFreeMemory = true;

    try {
        // loop over instances until they have all been added to a cluster
        while (!instanceList.isEmpty()) {
            // cluster remaining instances to find the maximum cardinality
            for (int i = 0; i < instanceList.size(); i++) {
                candidateClusters[i].clear();
            }

            if (logOuterLoopProgress) {
                outerLoopProgressLogger.expectedUpdates = instanceList.size();
                outerLoopProgressLogger.start("Entering outer loop for " + instanceList.size() + " iterations");
            }

            // for each i in G (instance list)
            // find instance j such that distance i,j minimum
            parallelTeam.execute(new ParallelRegion() { // NOPMD

                @Override
                public void run() throws Exception { // NOPMD
                    // each thread will populate a different portion of the "candidateCluster"
                    // array so we shouldn't need to worry about concurrent access
                    execute(0, instanceList.size() - 1, new IntegerForLoop() {
                        @Override
                        public void run(final int first, final int last) {
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("first = " + first + ", last = " + last);
                            }
                            for (int i = first; i <= last; i++) {
                                @SuppressWarnings("unchecked")
                                final LinkedList<Integer> notClustered = (LinkedList<Integer>) instanceList
                                        .clone();

                                // add the first instance to the next candidate cluster
                                final IntArrayList candidateCluster = candidateClusters[i];
                                candidateCluster.add(notClustered.remove(i));

                                if (logInnerLoopProgress) {
                                    innerLoopProgressLogger.expectedUpdates = notClustered.size();
                                    innerLoopProgressLogger.start(
                                            "Entering inner loop for " + notClustered.size() + " iterations");
                                }

                                // cluster the remaining instances to find the maximum
                                // cardinality find instance j such that distance i,j minimum
                                boolean done = false;
                                while (!done && !notClustered.isEmpty()) {
                                    // find the node that has minimum distance between the
                                    // current cluster and the instances that have not yet
                                    // been clustered.
                                    double minDistance = Double.POSITIVE_INFINITY;
                                    int minDistanceInstanceIndex = 0;
                                    int instanceIndex = 0;
                                    for (final int instance : notClustered) {
                                        double newDistance = ignoreDistance;

                                        final int[] cluster = candidateCluster.elements();
                                        for (int instanceInCluster = 0; instanceInCluster < candidateCluster
                                                .size(); instanceInCluster++) {
                                            final double a = calculator.distance(cluster[instanceInCluster],
                                                    instance);
                                            // if the distance of the instance will force the candidate cluster
                                            // to be larger than the cutoff value, we can stop here
                                            // because we know that this candidate cluster will be too large
                                            if (a >= minDistance) {
                                                newDistance = ignoreDistance;
                                                break;
                                            }
                                            final double b = newDistance;

                                            // This code is inlined from java.lang.Math.max(a, b)
                                            if (a != a) { // a is NaN
                                                newDistance = a;
                                            } else if (a == 0.0d && b == 0.0d
                                                    && Double.doubleToLongBits(a) == negativeZeroDoubleBits) {
                                                newDistance = b;
                                            } else if (a >= b) {
                                                newDistance = a;
                                            } else {
                                                newDistance = b;
                                            }
                                        }

                                        if (newDistance != ignoreDistance && newDistance < minDistance) {
                                            minDistance = newDistance;
                                            minDistanceInstanceIndex = instanceIndex;
                                        }
                                        instanceIndex++;
                                    }
                                    // grow clusters until min distance between new instance
                                    // and cluster reaches quality threshold
                                    // if (diameter(Ai U {j}) > d)
                                    if (minDistance > qualityThreshold) {
                                        done = true;
                                    } else {
                                        // remove the instance from the ones to be considered
                                        final int instance = notClustered.remove(minDistanceInstanceIndex);
                                        // and add it to the newly formed cluster
                                        candidateCluster.add(instance);
                                    }
                                    if (logInnerLoopProgress) {
                                        innerLoopProgressLogger.update();
                                    }
                                }
                                if (logInnerLoopProgress) {
                                    innerLoopProgressLogger.stop("Inner loop completed.");
                                }
                                if (logOuterLoopProgress) {
                                    outerLoopProgressLogger.update();
                                }
                            }
                        }
                    });
                }
            });

            if (logOuterLoopProgress) {
                outerLoopProgressLogger.stop("Outer loop completed.");
            }

            // identify cluster (set C) with maximum cardinality
            int maxCardinality = 0;
            int selectedClusterIndex = -1;
            for (int i = 0; i < instanceList.size(); i++) {
                final int size = candidateClusters[i].size();
                if (LOGGER.isTraceEnabled() && size > 0) {
                    LOGGER.trace("potential cluster " + i + ": " + ArrayUtils.toString(candidateClusters[i]));
                }
                if (size > maxCardinality) {
                    maxCardinality = size;
                    selectedClusterIndex = i;
                }
            }

            final IntArrayList selectedCluster = candidateClusters[selectedClusterIndex];

            if (LOGGER.isTraceEnabled()) {
                LOGGER.trace("adding " + selectedCluster.size() + " instances to cluster " + clusterCount);
            }
            // and add that cluster to the final result
            clusters[clusterCount].addAll(selectedCluster);

            // remove instances in cluster C so they are no longer considered
            instanceList.removeAll(selectedCluster);

            if (logClusterProgress) {
                final int selectedClusterSize = selectedCluster.size();
                int i = 0;
                while (i < selectedClusterSize - 1) {
                    clusterProgressLogger.lightUpdate();
                    i++;
                }
                // make sure there is at least one "full" update per loop
                if (i < selectedClusterSize) {
                    clusterProgressLogger.update();
                }
            }

            // we just created a new cluster
            clusterCount++;

            // next iteration is over (G - C)
        }
    } catch (RuntimeException e) {
        LOGGER.error("Caught runtime exception - rethrowing", e);
        throw e;
    } catch (Exception e) {
        LOGGER.error("Caught exception - rethrowing as ClusteringException", e);
        throw new ClusteringException(e);
    }

    // add singleton clusters to the end so the largest clusters are at the start of the list
    for (final int singleton : singletonClusters) {
        clusters[clusterCount].add(singleton);
        clusterCount++;
    }

    clusterProgressLogger.stop("Clustering completed.");
    return getClusters();
}
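
Inside the parallel region, each worker clones instanceList so it can destructively remove() candidates from a private copy without coordinating with other threads. A standalone sketch of that per-thread copy idiom (the thread setup and names are illustrative, not from QTClusterer; the clone is guarded here for generality):

import java.util.LinkedList;
import java.util.List;

public class PerThreadCloneDemo {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) throws InterruptedException {
        LinkedList<Integer> shared = new LinkedList<>(List.of(0, 1, 2, 3));

        Runnable worker = () -> {
            LinkedList<Integer> mine;
            synchronized (shared) {
                mine = (LinkedList<Integer>) shared.clone(); // private, mutable copy
            }
            mine.removeFirst(); // destructive work touches only the copy
        };

        Thread t1 = new Thread(worker);
        Thread t2 = new Thread(worker);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        System.out.println(shared); // still [0, 1, 2, 3]
    }
}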

From source file:org.kuali.kra.test.infrastructure.ApplicationServer.java

/**
 * The jetty server's jsp compiler does not have access to the classpath artifacts to compile the jsps.
 * This method takes the current webapp classloader and creates one containing all of the
 * classpath artifacts on the test's classpath.
 *
 * See http://stackoverflow.com/questions/17685330/how-do-you-get-embedded-jetty-9-to-successfully-resolve-the-jstl-uri
 *
 * @param current the current webapp classpath
 * @return a classloader to replace it with
 * @throws IOException if an error occurs creating the classloader
 */
private static ClassLoader createClassLoaderForJasper(ClassLoader current) throws IOException {
    // Replace classloader with a new classloader with all URLs in manifests
    // from the parent loader bubbled up so Jasper looks at them.
    final ClassLoader parentLoader = current.getParent();
    if (current instanceof WebAppClassLoader && parentLoader instanceof URLClassLoader) {
        final LinkedList<URL> allURLs = new LinkedList<URL>(
                Arrays.asList(((URLClassLoader) parentLoader).getURLs()));

        for (URL url : ((LinkedList<URL>) allURLs.clone())) {
            try {
                final URLConnection conn = new URL("jar:" + url.toString() + "!/").openConnection();
                if (conn instanceof JarURLConnection) {
                    final JarURLConnection jconn = (JarURLConnection) conn;
                    final Manifest jarManifest = jconn.getManifest();
                    final String[] classPath = ((String) jarManifest.getMainAttributes().getValue("Class-Path"))
                            .split(" ");

                    for (String cpurl : classPath) {
                        allURLs.add(new URL(url, cpurl));
                    }
                }
            } catch (IOException | NullPointerException e) {
                //do nothing
            }
        }
        LOG.info("Creating new classloader for Application Server");
        return new WebAppClassLoader(new URLClassLoader(allURLs.toArray(new URL[] {}), parentLoader),
                ((WebAppClassLoader) current).getContext());
    }
    LOG.warn("Cannot create new classloader for app server " + current);
    return current;
}
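
The loop above iterates a clone of allURLs while appending newly discovered Class-Path entries to allURLs itself; iterating the live list would fail fast with ConcurrentModificationException. A minimal sketch of that snapshot-iteration idiom:

import java.util.LinkedList;

public class SnapshotIterationDemo {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        LinkedList<String> urls = new LinkedList<>();
        urls.add("a.jar");
        urls.add("b.jar");

        // Iterate a clone so the loop body may append to 'urls' freely.
        for (String url : (LinkedList<String>) urls.clone()) {
            urls.add(url + ".dependency");
        }

        System.out.println(urls); // [a.jar, b.jar, a.jar.dependency, b.jar.dependency]
    }
}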

From source file:org.paxle.data.db.impl.CommandDBTest.java

@SuppressWarnings("unchecked")
private void storeUnknownLocation() throws InterruptedException {
    final int MAX = 10;

    // command-tracker must be called MAX times
    checking(new Expectations() {
        {
            exactly(MAX).of(cmdTracker).commandCreated(with(equal("org.paxle.data.db.ICommandDB")),
                    with(any(ICommand.class)));
        }
    });

    // generated test URI
    LinkedList<URI> knownURIs;
    LinkedList<URI> testURI = new LinkedList<URI>();
    for (int i = 0; i < MAX; i++) {
        testURI.add(URI.create("http://test.paxle.net/" + i));
    }
    knownURIs = (LinkedList<URI>) testURI.clone();

    // store them to DB
    int knownCount = this.cmdDB.storeUnknownLocations(0, 1, testURI);
    assertEquals(0, knownCount);

    // create a dummy data-sink
    Semaphore s = null;
    this.cmdDB.setDataSink(new DummyDataSink(s = new Semaphore(-MAX + 1)));

    // wait for all commands to be enqueued
    boolean acquired = s.tryAcquire(3, TimeUnit.SECONDS);
    assertTrue(acquired);

    // testing if all URI are known to the DB
    for (URI knownURI : knownURIs) {
        // command must be marked as crawled
        boolean known = this.cmdDB.isKnownInDB(knownURI, "CrawledCommand");
        assertTrue("Unkown URI: " + knownURI, known);

        // command must not be enqueued
        known = this.cmdDB.isKnownInDB(knownURI, "EnqueuedCommand");
        assertFalse("Unkown URI: " + knownURI, known);

        // command must be known to the cache
        known = this.cmdDB.isKnownInCache(knownURI);
        assertTrue(known);

        // command must be known to the bloom filter
        known = this.cmdDB.isKnownInDoubleURLs(knownURI);
        assertTrue(known);
    }
}
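
The test clones testURI into knownURIs up front so the closing assertions run against the original URI set even if storeUnknownLocations mutates the list it receives. A generic sketch of that keep-a-pristine-copy idiom (consume() is a hypothetical mutating call, not part of CommandDB):

    LinkedList<URI> input = new LinkedList<URI>();
    input.add(URI.create("http://test.paxle.net/0"));

    @SuppressWarnings("unchecked")
    LinkedList<URI> pristine = (LinkedList<URI>) input.clone();

    consume(input); // hypothetical call that may clear or reorder 'input'
    // 'pristine' still holds the original entries for later assertions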