Example usage for java.util Collections shuffle

Introduction

On this page you can find example usage for java.util Collections shuffle.

Prototype

public static void shuffle(List<?> list) 

Document

Randomly permutes the specified list using a default source of randomness.
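
Before the examples from real projects below, here is a minimal, self-contained sketch of the call (the class name and list contents are illustrative only):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class ShuffleDemo {
    public static void main(String[] args) {
        // shuffle(List) permutes the list in place using a default
        // source of randomness, so the list must be mutable.
        List<String> names = new ArrayList<>(Arrays.asList("alice", "bob", "carol", "dave"));
        Collections.shuffle(names);
        System.out.println(names); // e.g. [carol, alice, dave, bob]
    }
}

If the list (or its list-iterator) does not support the set operation, for example an immutable list from List.of, the call fails with UnsupportedOperationException.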

Usage

From source file:com.digitalpebble.storm.crawler.elasticsearch.persistence.AggregationSpout.java

/** run a query on ES to populate the internal buffer **/
private void populateBuffer() {

    Date now = new Date();

    // check that we allowed some time between queries
    if (timePreviousQuery != null) {
        long difference = now.getTime() - timePreviousQuery.getTime();
        if (difference < minDelayBetweenQueries) {
            long sleepTime = minDelayBetweenQueries - difference;
            LOG.info("{} Not enough time elapsed since {} - sleeping for {}", logIdprefix, timePreviousQuery,
                    sleepTime);
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                LOG.error("{} InterruptedException caught while waiting", logIdprefix);
            }
            return;
        }
    }

    timePreviousQuery = now;

    LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, now);

    QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery("nextFetchDate").lte(now);

    SearchRequestBuilder srb = client.prepareSearch(indexName).setTypes(docType)
            // expensive as it builds global Term/Document Frequencies
            // TODO look for a more appropriate method
            .setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(rangeQueryBuilder).setFrom(0).setSize(0)
            .setExplain(false);

    TermsBuilder aggregations = AggregationBuilders.terms("partition").field(partitionField).size(maxBucketNum);
    TopHitsBuilder tophits = AggregationBuilders.topHits("docs").setSize(maxURLsPerBucket).setExplain(false);

    // sort within a bucket
    if (StringUtils.isNotBlank(bucketSortField)) {
        FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField).order(SortOrder.ASC);
        tophits.addSort(sorter);
    }

    aggregations.subAggregation(tophits);
    srb.addAggregation(aggregations);

    // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
    // _shards:2,3
    if (shardID != -1) {
        srb.setPreference("_shards:" + shardID);
    }

    long start = System.currentTimeMillis();
    SearchResponse response = srb.execute().actionGet();
    long end = System.currentTimeMillis();

    eventCounter.scope("ES_query_time_msec").incrBy(end - start);

    Aggregations aggregs = response.getAggregations();

    Terms agg = aggregs.get("partition");

    int numhits = 0;
    int numBuckets = 0;
    int alreadyprocessed = 0;

    // For each entry
    for (Terms.Bucket entry : agg.getBuckets()) {
        String key = entry.getKey(); // bucket key
        long docCount = entry.getDocCount(); // Doc count

        numBuckets++;

        int hitsForThisBucket = 0;

        // filter results so that we don't include URLs that are already
        // being processed
        TopHits topHits = entry.getAggregations().get("docs");
        for (SearchHit hit : topHits.getHits().getHits()) {
            hitsForThisBucket++;

            Map<String, Object> keyValues = hit.sourceAsMap();
            String url = (String) keyValues.get("url");

            LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString());

            // is already being processed - skip it!
            if (beingProcessed.contains(url)) {
                alreadyprocessed++;
                continue;
            }
            Metadata metadata = fromKeyValues(keyValues);
            buffer.add(new Values(url, metadata));
        }

        numhits += hitsForThisBucket;

        LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount,
                alreadyprocessed);
    }

    // Shuffle the URLs so that we don't get blocks of URLs from the same
    // host or domain
    Collections.shuffle((List) buffer);

    LOG.info("{} ES query returned {} hits from {} buckets in {} msec but {} already being processed",
            logIdprefix, numhits, numBuckets, end - start, alreadyprocessed);

    eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
    eventCounter.scope("ES_queries").incrBy(1);
    eventCounter.scope("ES_docs").incrBy(numhits);
}
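
A side note on the (List) cast above: Collections.shuffle accepts any List implementation. For lists that do not implement RandomAccess (above a small size threshold) it copies the elements into an array, shuffles the array, and writes the result back, so even a linked-list-backed buffer shuffles in linear time. A hedged illustration follows; the concrete type of the spout's buffer is an assumption here.

import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

public class BufferShuffleSketch {
    public static void main(String[] args) {
        // Assumption: the buffer is a List-backed queue such as LinkedList,
        // which is what makes the (List) cast in populateBuffer() legal.
        Queue<String> buffer = new LinkedList<>();
        buffer.add("http://a.example/1");
        buffer.add("http://a.example/2");
        buffer.add("http://b.example/1");
        Collections.shuffle((List<String>) buffer); // in place, linear time
        System.out.println(buffer);
    }
}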

From source file:com.github.vatbub.tictactoe.Board.java

public List<Move> getAvailableMoves() {
    List<Move> res = new ArrayList<>();
    for (int row = 0; row < getRowCount(); row++) {
        for (int column = 0; column < getColumnCount(); column++) {
            if (getPlayerAt(row, column) == null) {
                // cell is empty so it is a valid move
                res.add(new Move(row, column));
            }
        }
    }

    Collections.shuffle(res);
    return res;
}

From source file:clummy.classes.DataHandlingClass.java

/**
 * Gets a shuffled marital status from the list.
 * @param age the person's age
 * @return a marital status string appropriate for the given age
 */
public String getShuffledMaritalString(int age) {
    List<String> maritalList = defaultList.getMaritalStatusList();
    Collections.shuffle(maritalList);
    Collections.shuffle(maritalList);
    if (age <= 17)
        return "Single";
    else {
        // reduce the chance that people over 65 end up listed as single
        if (age > 65) {
            if (maritalList.get(randBetween(0, maritalList.size() - 1)).equalsIgnoreCase("Single")) {
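                // six nested coin flips: with probability 1/64 this returns
                // another random draw (which may itself be "Single");
                // otherwise execution falls through to the no-Single list below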
                if (random.nextBoolean())
                    if (random.nextBoolean())
                        if (random.nextBoolean())
                            if (random.nextBoolean())
                                if (random.nextBoolean())
                                    if (random.nextBoolean())
                                        return maritalList.get(randBetween(0, maritalList.size() - 1));
            }
        }
        Collections.shuffle(defaultList.getMaritalStatusListWitoutSingle());
        return defaultList.getMaritalStatusListWitoutSingle()
                .get(randBetween(0, defaultList.getMaritalStatusListWitoutSingle().size() - 1));
    }
}

From source file:edu.uga.cs.fluxbuster.clustering.ClusterGenerator.java

/**
 * Load candidate flux domains from the data files for the time period
 * between the start and end times.
 *
 * @param startTime the start time in sec.
 * @param endTime the end time in sec.
 * @param domainfile a file containing the list of domains that should
 *       be clustered regardless of the candidate score.  If null the list
 *       is ignored.
 * @return the list of candidate flux domains
 * @throws Exception if there is an error reading the ClusterGenerator.localprops
 *       or data files
 */
public List<CandidateFluxDomain> loadCandidateFluxDomains(long startTime, long endTime, String domainfile)
        throws Exception {
    ArrayList<CandidateFluxDomain> retval = new ArrayList<CandidateFluxDomain>();
    HashMap<String, CandidateFluxDomain> seenDomains = new HashMap<String, CandidateFluxDomain>();
    Set<String> recentFluxDomains = loadRecentFluxDomains(startTime);
    String dirPath = appprops.getProperty(FLUXDIRKEY);
    double goodCandidateThreshold = Double.parseDouble(appprops.getProperty(CANDIDATETHRESHKEY));
    int maxCandidateDomains = Integer.parseInt(appprops.getProperty(MAXDOMAINSKEY));

    for (String filename : getFileNames(dirPath, startTime, endTime)) {
        BufferedReader br = null;
        try {
            GZIPInputStream gis = new GZIPInputStream(new FileInputStream(filename));
            br = new BufferedReader(new InputStreamReader(gis));
            String line;
            while ((line = br.readLine()) != null) {
                CandidateFluxDomain cfd = CandidateFluxDomain.parseFromLog(line);

                if (isWhiteListable(cfd.getDomainName())) {
                    if (log.isDebugEnabled()) {
                        log.debug(cfd.getDomainName() + " is whitelisted.");
                    }
                    continue;
                }

                String domainname = cfd.getDomainName();
                if (seenDomains.containsKey(domainname)) {
                    CandidateFluxDomain prev = seenDomains.get(domainname);
                    seenDomains.put(domainname, prev.merge(cfd));
                } else {
                    seenDomains.put(domainname, cfd);
                }

            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (br != null) {
                br.close();
            }
        }
    }

    //add all domains from a file
    if (domainfile != null) {
        addDomainsFromFile(domainfile, maxCandidateDomains, retval, seenDomains);
    }

    ArrayList<String> allDomains = new ArrayList<String>();
    allDomains.addAll(seenDomains.keySet());

    // add all domains from recently seen flux domains
    if (retval.size() < maxCandidateDomains && recentFluxDomains.size() > 0) {
        addRecentFluxDomains(recentFluxDomains, maxCandidateDomains, retval, seenDomains, allDomains);
    }

    // then add the non-recent ones that meet the score threshold
    if (retval.size() < maxCandidateDomains) {
        addThresholdMeetingDomains(maxCandidateDomains, goodCandidateThreshold, retval, seenDomains,
                allDomains);
    }

    // then fill the rest randomly from what's left over
    if (retval.size() < maxCandidateDomains) {
        Collections.shuffle(allDomains);
        for (String domainname : allDomains) {
            if (retval.size() == maxCandidateDomains) {
                break;
            }
            retval.add(seenDomains.get(domainname));
        }
    }

    return retval;
}

From source file:org.jboss.aerogear.unifiedpush.utils.installation.InstallationUtils.java

private static Set<String> getRandomCategories(List<String> categories, int categoriesPerInstallation) {

    Set<String> picked = new HashSet<String>();

    Collections.shuffle(categories);

    picked.addAll(categories.subList(0, categoriesPerInstallation));

    return picked;
}
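
One caveat with this pattern: shuffle mutates the caller's categories list. A hedged variant (a hypothetical helper, assuming the usual java.util imports) copies the list first so the caller's ordering is preserved:

private static Set<String> getRandomCategoriesCopy(List<String> categories, int categoriesPerInstallation) {
    // shuffle a defensive copy rather than the caller's list
    List<String> copy = new ArrayList<String>(categories);
    Collections.shuffle(copy);
    return new HashSet<String>(copy.subList(0, categoriesPerInstallation));
}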

From source file:com.digitalpebble.stormcrawler.elasticsearch.persistence.AggregationSpout.java

/** run a query on ES to populate the internal buffer **/
protected void populateBuffer() {

    Date now = new Date();

    // check that we allowed some time between queries
    if (timePreviousQuery != null) {
        long difference = now.getTime() - timePreviousQuery.getTime();
        if (difference < minDelayBetweenQueries) {
            long sleepTime = minDelayBetweenQueries - difference;
            LOG.info("{} Not enough time elapsed since {} - sleeping for {}", logIdprefix, timePreviousQuery,
                    sleepTime);
            try {
                Thread.sleep(sleepTime);
            } catch (InterruptedException e) {
                LOG.error("{} InterruptedException caught while waiting", logIdprefix);
            }
            return;
        }
    }

    timePreviousQuery = now;

    LOG.info("{} Populating buffer with nextFetchDate <= {}", logIdprefix, now);

    QueryBuilder rangeQueryBuilder = QueryBuilders.rangeQuery("nextFetchDate").lte(now);

    SearchRequestBuilder srb = client.prepareSearch(indexName).setTypes(docType)
            .setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(rangeQueryBuilder).setFrom(0).setSize(0)
            .setExplain(false);

    TermsBuilder aggregations = AggregationBuilders.terms("partition").field("metadata." + partitionField)
            .size(maxBucketNum);

    TopHitsBuilder tophits = AggregationBuilders.topHits("docs").setSize(maxURLsPerBucket).setExplain(false);
    // sort within a bucket
    if (StringUtils.isNotBlank(bucketSortField)) {
        FieldSortBuilder sorter = SortBuilders.fieldSort(bucketSortField).order(SortOrder.ASC);
        tophits.addSort(sorter);
    }

    aggregations.subAggregation(tophits);

    // sort between buckets
    if (StringUtils.isNotBlank(totalSortField)) {
        MinBuilder minBuilder = AggregationBuilders.min("top_hit").field(totalSortField);
        aggregations.subAggregation(minBuilder);
        aggregations.order(Terms.Order.aggregation("top_hit", true));
    }

    srb.addAggregation(aggregations);

    // https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-preference.html
    // _shards:2,3
    if (shardID != -1) {
        srb.setPreference("_shards:" + shardID);
    }

    // dump query to log
    LOG.debug("{} ES query {}", logIdprefix, srb.toString());

    long start = System.currentTimeMillis();
    SearchResponse response = srb.execute().actionGet();
    long end = System.currentTimeMillis();

    eventCounter.scope("ES_query_time_msec").incrBy(end - start);

    Aggregations aggregs = response.getAggregations();

    Terms agg = aggregs.get("partition");

    int numhits = 0;
    int numBuckets = 0;
    int alreadyprocessed = 0;

    // For each entry
    for (Terms.Bucket entry : agg.getBuckets()) {
        String key = (String) entry.getKey(); // bucket key
        long docCount = entry.getDocCount(); // Doc count

        int hitsForThisBucket = 0;

        // filter results so that we don't include URLs that are already
        // being processed
        TopHits topHits = entry.getAggregations().get("docs");
        for (SearchHit hit : topHits.getHits().getHits()) {
            hitsForThisBucket++;

            Map<String, Object> keyValues = hit.sourceAsMap();
            String url = (String) keyValues.get("url");

            LOG.debug("{} -> id [{}], _source [{}]", logIdprefix, hit.getId(), hit.getSourceAsString());

            // is already being processed - skip it!
            if (beingProcessed.contains(url)) {
                alreadyprocessed++;
                continue;
            }
            Metadata metadata = fromKeyValues(keyValues);
            buffer.add(new Values(url, metadata));
        }

        if (hitsForThisBucket > 0)
            numBuckets++;

        numhits += hitsForThisBucket;

        LOG.debug("{} key [{}], hits[{}], doc_count [{}]", logIdprefix, key, hitsForThisBucket, docCount,
                alreadyprocessed);
    }

    // Shuffle the URLs so that we don't get blocks of URLs from the same
    // host or domain
    Collections.shuffle((List) buffer);

    LOG.info("{} ES query returned {} hits from {} buckets in {} msec with {} already being processed",
            logIdprefix, numhits, numBuckets, end - start, alreadyprocessed);

    eventCounter.scope("already_being_processed").incrBy(alreadyprocessed);
    eventCounter.scope("ES_queries").incrBy(1);
    eventCounter.scope("ES_docs").incrBy(numhits);
}

From source file:io.bifroest.commons.boot.BootLoaderNG.java

/**
 * Calculates the system boot order. This is an iterative process: from a
 * list of all available systems, every system with no unresolved
 * dependencies is removed, and the step is repeated until the list is
 * empty. If at some point no system can be removed but some still remain,
 * there is a dependency misconfiguration and a CircularDependencyException
 * is raised.
 *
 * @return A list with systems ordered by boot priority. The first element
 * needs to start first, the second after and so on.
 * @throws CircularDependencyException If two or more systems are
 * misconfigured, a circular dependency can occur. This happens e.g. if
 * system A depends on system B and system B also requires system A. This
 * cannot be resolved and an exception is thrown.
 */
private List<Subsystem<E>> getBootOrder() throws CircularDependencyException {
    HashMap<String, Subsystem<E>> bootSystems = new HashMap<>();
    HashMap<String, List<String>> systemDependencies = new HashMap<>();
    List<Subsystem<E>> result = new ArrayList<>();

    // shuffle the systems to boot so that undeclared dependencies surface
    // as failures instead of being masked by a lucky declaration order
    Collections.shuffle(this.systemsToBoot);

    this.systemsToBoot.stream().forEach((system) -> {
        bootSystems.put(system.getSystemIdentifier(), system);
        systemDependencies.put(system.getSystemIdentifier(), system.getRequiredSystems().stream()
                .filter(dep -> !dep.equals(system.getSystemIdentifier())).collect(Collectors.toList()));
    });
    // while there are dependencies to solve
    while (!systemDependencies.isEmpty()) {
        // Get all nodes without any dependency            
        Set<String> keys = systemDependencies.keySet();
        List<String> resolved = new ArrayList<>();
        keys.stream().forEach((key) -> {
            log.trace("Trying to resolve {}", key);
            Collection<String> dependencies = systemDependencies.get(key);
            log.trace("Found dependencies: {}", dependencies);
            if (dependencies == null || dependencies.isEmpty()) {
                log.trace("Marking {} as resolved", key);
                resolved.add(key);
            }
        });
        // if resolved is empty, we have a loop in the graph            
        if (resolved.isEmpty()) {
            String msg = "Loop in graph! This should not happen. Check your dependencies! Remaining systems: "
                    + keys.toString();
            throw new CircularDependencyException(msg, systemDependencies);
        }

        // remove systemsToBoot found from dependency graph
        resolved.stream().forEach((systemIdentifier) -> {
            systemDependencies.remove(systemIdentifier);
            result.add(bootSystems.get(systemIdentifier));
        });

        // remove dependencies
        Set<String> systemDependenciesKeys = systemDependencies.keySet();
        systemDependenciesKeys.stream().map((key) -> systemDependencies.get(key)).forEach((values) -> {
            resolved.stream().forEach((resolvedValue) -> {
                values.removeIf(v -> v.equals(resolvedValue));
            });
        });
    }
    return result;
}

From source file:edu.cmu.tetrad.search.IndTestCorrelationT.java

public void shuffleVariables() {
    ArrayList<Node> nodes = new ArrayList<Node>(this.variables);
    Collections.shuffle(nodes);
    this.variables = Collections.unmodifiableList(nodes);
}

From source file:ch.epfl.eagle.daemon.nodemonitor.NodeMonitor.java

private void gossipNotExecutingLong(int round) {
    List<InetSocketAddress> listBackends = getCleanWorkersList();
    // Choose randomly log(n) workers
    Collections.shuffle(listBackends);
    int gossip_fanout = (int) (Math.ceil(Math.log(listBackends.size())));
    for (int i = 0; i < gossip_fanout; i++) {
        InetSocketAddress chosenBackend = listBackends.get(i);
        try {
            InternalService.AsyncClient client = nodeMonitorClientPool.borrowClient(chosenBackend);
            LOG.debug(
                    "STEALING: Launching gossipNotExecutingLong on node: " + chosenBackend + " round " + round);
            client.receiveGossip(notExecutingLong, longStatusTimestamp, round,
                    new ReceiveGossipCallback(chosenBackend, client));
        } catch (Exception e) {
            LOG.error("Error enqueuing task on node " + chosenBackend.toString() + ":" + e);
        }
    }
}

From source file:com.twitter.graphjet.bipartite.GraphConcurrentTestHelper.java

/**
 * This helper method sets up a concurrent read-write situation with a single writer and multiple
 * readers that access the same underlying bipartiteGraph, and tests for correct edge access during
 * simultaneous edge writes. This helps test read consistency at arbitrary
 * points of edge insertion. Note that the exact read-write sequence here is
 * non-deterministic and would
 * vary depending on the machine, but the hope is that given the large number of readers the reads
 * would be done at many different points of edge insertion. The test itself checks only for
 * partial correctness (it could have false positives) so this should only be used as a supplement
 * to other testing.
 *
 * @param graph              is the underlying
 *                           {@link BipartiteGraph}
 * @param numReadersPerNode  is the number of reader threads to use per node
 * @param leftSize           is the number of left nodes
 * @param rightSize          is the number of right nodes
 * @param edgeProbability    is the probability of an edge between a left-right node pair
 * @param random             is the random number generator to use for generating a random graph
 */
public static <T extends BipartiteGraph & DynamicBipartiteGraph> void testRandomConcurrentReadWriteThreads(
        T graph, int numReadersPerNode, int leftSize, int rightSize, double edgeProbability, Random random) {
    int maxWaitingTimeForThreads = 20; // in milliseconds
    int numLeftReaders = leftSize * numReadersPerNode;
    int numRightReaders = rightSize * numReadersPerNode;
    int totalNumReaders = numLeftReaders + numRightReaders;
    CountDownLatch readersDoneLatch = new CountDownLatch(totalNumReaders);
    // First, construct a random set of edges to insert in the graph
    Set<Pair<Long, Long>> edges = Sets
            .newHashSetWithExpectedSize((int) (leftSize * rightSize * edgeProbability));
    List<BipartiteGraphReader> leftReaders = Lists.newArrayListWithCapacity(numLeftReaders);
    List<BipartiteGraphReader> rightReaders = Lists.newArrayListWithCapacity(numRightReaders);
    Long2ObjectMap<LongSet> leftSideGraph = new Long2ObjectOpenHashMap<LongSet>(leftSize);
    Long2ObjectMap<LongSet> rightSideGraph = new Long2ObjectOpenHashMap<LongSet>(leftSize);
    int averageLeftDegree = (int) (rightSize * edgeProbability);
    for (int i = 0; i < leftSize; i++) {
        LongSet nodeEdges = new LongOpenHashSet(averageLeftDegree);
        for (int j = 0; j < rightSize; j++) {
            if (random.nextDouble() < edgeProbability) {
                nodeEdges.add(j);
                if (!rightSideGraph.containsKey(j)) {
                    rightSideGraph.put(j, new LongOpenHashSet(new long[] { i }));
                } else {
                    rightSideGraph.get(j).add(i);
                }
                edges.add(Pair.of((long) i, (long) j));
            }
        }
        leftSideGraph.put(i, nodeEdges);
    }

    // Create a bunch of leftReaders per node that'll read from the graph at random
    for (int i = 0; i < leftSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            leftReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, true,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a bunch of rightReaders per node that'll read from the graph at random
    for (int i = 0; i < rightSize; i++) {
        for (int j = 0; j < numReadersPerNode; j++) {
            rightReaders.add(new BipartiteGraphReader(graph, new CountDownLatch(0), readersDoneLatch, i, false,
                    random.nextInt(maxWaitingTimeForThreads)));
        }
    }

    // Create a single writer that will insert these edges in random order
    List<WriterInfo> writerInfo = Lists.newArrayListWithCapacity(edges.size());
    List<Pair<Long, Long>> edgesList = Lists.newArrayList(edges);
    Collections.shuffle(edgesList);
    CountDownLatch writerDoneLatch = new CountDownLatch(edgesList.size());
    for (Pair<Long, Long> edge : edgesList) {
        writerInfo.add(new WriterInfo(edge.getLeft(), edge.getRight(), new CountDownLatch(0), writerDoneLatch));
    }

    ExecutorService executor = Executors.newFixedThreadPool(totalNumReaders + 1); // single writer
    List<Callable<Integer>> allThreads = Lists.newArrayListWithCapacity(totalNumReaders + 1);
    // First, we add the writer
    allThreads.add(Executors.callable(new BipartiteGraphWriter(graph, writerInfo), 1));
    // then the readers
    for (int i = 0; i < numLeftReaders; i++) {
        allThreads.add(Executors.callable(leftReaders.get(i), 1));
    }
    for (int i = 0; i < numRightReaders; i++) {
        allThreads.add(Executors.callable(rightReaders.get(i), 1));
    }
    // these will execute in some non-deterministic order
    Collections.shuffle(allThreads, random);

    // Wait for all the processes to finish
    try {
        List<Future<Integer>> results = executor.invokeAll(allThreads, 10, TimeUnit.SECONDS);
        for (Future<Integer> result : results) {
            assertTrue(result.isDone());
            assertEquals(1, result.get().intValue());
        }
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a thread was interrupted: ", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Execution issue in an executor thread: ", e);
    }

    // confirm that these worked as expected
    try {
        readersDoneLatch.await();
        writerDoneLatch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException("Execution for a latch was interrupted: ", e);
    }

    // Check that all readers' read info is consistent with the graph
    // first check the left side
    for (int i = 0; i < numLeftReaders; i++) {
        LongSet expectedLeftEdges = leftSideGraph.get(leftReaders.get(i).queryNode);
        assertTrue(leftReaders.get(i).getQueryNodeDegree() <= expectedLeftEdges.size());
        if (leftReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(leftReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : leftReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedLeftEdges.contains(edge));
            }
        }
    }

    // then the right side
    for (int i = 0; i < numRightReaders; i++) {
        LongSet expectedRightEdges = rightSideGraph.get(rightReaders.get(i).queryNode);
        assertTrue(rightReaders.get(i).getQueryNodeDegree() <= expectedRightEdges.size());
        if (rightReaders.get(i).getQueryNodeDegree() == 0) {
            assertNull(rightReaders.get(i).getQueryNodeEdges());
        } else {
            for (long edge : rightReaders.get(i).getQueryNodeEdges()) {
                assertTrue(expectedRightEdges.contains(edge));
            }
        }
    }
}
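
The Collections.shuffle(allThreads, random) call above uses the two-argument overload, public static void shuffle(List<?> list, Random rnd), which draws all of its randomness from the supplied generator. Seeding that generator makes the permutation, and therefore a failing randomized test, reproducible. A minimal sketch:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class SeededShuffleDemo {
    public static void main(String[] args) {
        List<Integer> order = new ArrayList<>(Arrays.asList(1, 2, 3, 4, 5));
        // same seed, same permutation on every run, which is useful for
        // replaying a failing randomized test
        Collections.shuffle(order, new Random(42L));
        System.out.println(order);
    }
}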