Example usage for java.util.Queue.remove()

List of usage examples for java.util.Queue.remove()

Introduction

This page collects example usages of java.util.Queue.remove() from open-source projects.

Prototype

E remove();

Document

Retrieves and removes the head of this queue. This method differs from poll() only in that it throws a NoSuchElementException if the queue is empty.
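
Before the project examples below, here is a minimal self-contained sketch (the class name QueueRemoveDemo is chosen purely for illustration) contrasting remove() with poll() on an empty queue:

import java.util.ArrayDeque;
import java.util.NoSuchElementException;
import java.util.Queue;

public class QueueRemoveDemo {
    public static void main(String[] args) {
        Queue<String> queue = new ArrayDeque<>();
        queue.add("first");
        queue.add("second");

        // remove() retrieves and removes the head of the queue (FIFO order).
        System.out.println(queue.remove()); // prints "first"
        System.out.println(queue.remove()); // prints "second"

        // poll() returns null on an empty queue ...
        System.out.println(queue.poll()); // prints "null"

        // ... while remove() throws NoSuchElementException.
        try {
            queue.remove();
        } catch (NoSuchElementException e) {
            System.out.println("remove() on an empty queue threw " + e.getClass().getSimpleName());
        }
    }
}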

Usage

From source file:it.geosolutions.geobatch.actions.xstream.XstreamAction.java

public Queue<EventObject> execute(Queue<EventObject> events) throws ActionException {

    // the output
    final Queue<EventObject> ret = new LinkedList<EventObject>();
    listenerForwarder.started();
    while (!events.isEmpty()) {
        final EventObject event = events.remove();
        if (event == null) {
            final String message = "The passed event object is null";
            if (LOGGER.isWarnEnabled())
                LOGGER.warn(message);
            if (conf.isFailIgnored()) {
                continue;
            } else {
                final ActionException e = new ActionException(this, message);
                listenerForwarder.failed(e);
                throw e;
            }
        }

        if (event instanceof FileSystemEvent) {
            // generate an object
            final File sourceFile = File.class.cast(event.getSource());
            if (!sourceFile.exists() || !sourceFile.canRead()) {
                final String message = "XstreamAction.adapter(): The passed FileSystemEvent "
                        + "reference to a not readable or not existent file: " + sourceFile.getAbsolutePath();
                if (LOGGER.isWarnEnabled())
                    LOGGER.warn(message);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    final ActionException e = new ActionException(this, message);
                    listenerForwarder.failed(e);
                    throw e;
                }
            }
            FileInputStream inputStream = null;
            try {
                inputStream = new FileInputStream(sourceFile);
                final Map<String, String> aliases = conf.getAlias();
                if (aliases != null && aliases.size() > 0) {
                    for (String alias : aliases.keySet()) {
                        final Class<?> clazz = Class.forName(aliases.get(alias));
                        xstream.alias(alias, clazz);
                    }
                }

                listenerForwarder.setTask("Converting file to a java object");

                // deserialize
                final Object res = xstream.fromXML(inputStream);
                // generate event
                final EventObject eo = new EventObject(res);
                // append to the output
                ret.add(eo);

            } catch (XStreamException e) {
                // the object cannot be deserialized
                if (LOGGER.isErrorEnabled())
                    LOGGER.error("The passed FileSystemEvent reference to a not deserializable file: "
                            + sourceFile.getAbsolutePath(), e);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(e);
                    throw new ActionException(this, e.getLocalizedMessage());
                }
            } catch (Throwable e) {
                // the object cannot be deserialized
                if (LOGGER.isErrorEnabled())
                    LOGGER.error("XstreamAction.adapter(): " + e.getLocalizedMessage(), e);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(e);
                    throw new ActionException(this, e.getLocalizedMessage());
                }
            } finally {
                IOUtils.closeQuietly(inputStream);
            }

        } else {

            // try to serialize
            // build the output absolute file name
            File outputDir;
            try {
                outputDir = new File(conf.getOutput());
                // the output
                if (!outputDir.isAbsolute())
                    outputDir = it.geosolutions.tools.commons.file.Path.findLocation(outputDir, getTempDir());

                if (!outputDir.exists()) {
                    if (!outputDir.mkdirs()) {
                        final String message = "Unable to create the ouptut dir named: " + outputDir.toString();
                        if (LOGGER.isInfoEnabled())
                            LOGGER.info(message);
                        if (conf.isFailIgnored()) {
                            continue;
                        } else {
                            final ActionException e = new ActionException(this, message);
                            listenerForwarder.failed(e);
                            throw e;
                        }
                    }
                }
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info("Output dir name: " + outputDir.toString());
                }

            } catch (NullPointerException npe) {
                final String message = "Unable to get the output file path from :" + conf.getOutput();
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(message, npe);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(npe);
                    throw new ActionException(this, npe.getLocalizedMessage());
                }
            }

            final File outputFile;
            try {
                outputFile = File.createTempFile(conf.getOutput(), null, outputDir);
            } catch (IOException ioe) {
                final String message = "Unable to build the output file writer: " + ioe.getLocalizedMessage();
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(message, ioe);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(ioe);
                    throw new ActionException(this, ioe.getLocalizedMessage());
                }
            }

            // try to open the file to write into
            FileWriter fw = null;
            try {
                listenerForwarder.setTask("Serializing java object to " + outputFile);
                fw = new FileWriter(outputFile);

                final Map<String, String> aliases = conf.getAlias();
                if (aliases != null && aliases.size() > 0) {
                    for (String alias : aliases.keySet()) {
                        final Class<?> clazz = Class.forName(aliases.get(alias));
                        xstream.alias(alias, clazz);
                    }
                }
                xstream.toXML(event.getSource(), fw);

            } catch (XStreamException e) {
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(
                            "The passed event object cannot be serialized to: " + outputFile.getAbsolutePath(),
                            e);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(e);
                    throw new ActionException(this, e.getLocalizedMessage());
                }
            } catch (Throwable e) {
                // the object cannot be deserialized
                if (LOGGER.isErrorEnabled())
                    LOGGER.error(e.getLocalizedMessage(), e);
                if (conf.isFailIgnored()) {
                    continue;
                } else {
                    listenerForwarder.failed(e);
                    throw new ActionException(this, e.getLocalizedMessage());
                }
            } finally {
                IOUtils.closeQuietly(fw);
            }

            // add the file to the queue
            ret.add(new FileSystemEvent(outputFile.getAbsoluteFile(), FileSystemEventType.FILE_ADDED));

        }
    }
    listenerForwarder.completed();
    return ret;
}

From source file:com.github.pierods.ramltoapidocconverter.RAMLToApidocConverter.java

private List<Apidoc.Operation> walkSubresources(Resource rootResource) {

    List<Apidoc.Operation> operations = new ArrayList<>();

    class NameAndResource {

        public NameAndResource(String name, Resource resource) {
            this.name = name;
            this.resource = resource;
        }

        public String name;
        public Resource resource;
    }

    Queue<NameAndResource> bfsAccumulator = new LinkedList<>();

    // The path is specified only once per resource; sub-paths are spelled out only when they contain parameters (:xxx), see getOperations()
    bfsAccumulator.add(new NameAndResource("", rootResource));

    while (!bfsAccumulator.isEmpty()) {

        NameAndResource nr = bfsAccumulator.remove();

        operations.addAll(getOperations(nr.name, nr.resource));

        Map<String, Resource> subresources = nr.resource.getResources();

        for (String resourceName : subresources.keySet()) {
            bfsAccumulator.add(new NameAndResource(nr.name + resourceName, subresources.get(resourceName)));
        }

    }

    operations.sort((operation1, operation2) -> {
        if (operation1.path == null) {
            return 1;
        }
        if (operation2.path == null) {
            return -1;
        }

        return operation1.path.compareTo(operation2.path);
    });

    return operations;
}

From source file:org.apache.hadoop.hbase.chaos.actions.RollingBatchRestartRsAction.java

@Override
public void perform() throws Exception {
    LOG.info(String.format("Performing action: Rolling batch restarting %d%% of region servers",
            (int) (ratio * 100)));
    List<ServerName> selectedServers = PolicyBasedChaosMonkey.selectRandomItems(getCurrentServers(), ratio);

    Queue<ServerName> serversToBeKilled = new LinkedList<ServerName>(selectedServers);
    Queue<ServerName> deadServers = new LinkedList<ServerName>();

    while (!serversToBeKilled.isEmpty() || !deadServers.isEmpty()) {
        boolean action = true; //action true = kill server, false = start server

        if (serversToBeKilled.isEmpty() || deadServers.isEmpty()) {
            action = deadServers.isEmpty();
        } else {
            action = RandomUtils.nextBoolean();
        }

        if (action) {
            ServerName server = serversToBeKilled.remove();
            try {
                killRs(server);
            } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
                // We've seen this in test runs where we timeout but the kill went through. HBASE-9743
                // So, add to deadServers even if exception so the start gets called.
                LOG.info("Problem killing but presume successful; code=" + e.getExitCode(), e);
            }
            deadServers.add(server);
        } else {
            try {
                ServerName server = deadServers.remove();
                startRs(server);
            } catch (org.apache.hadoop.util.Shell.ExitCodeException e) {
                // The start may fail, but it's better to keep going even though we may lose a server.
                LOG.info("Problem starting, will retry; code=" + e.getExitCode(), e);
            }
        }

        sleep(RandomUtils.nextInt((int) sleepTime));
    }
}

From source file:com.github.braully.graph.hn.GraphWS.java

public boolean checkIfHullSet(UndirectedSparseGraphTO<Integer, Integer> graph, int[] currentSet) {
    if (currentSet == null || currentSet.length == 0) {
        return false;
    }
    Set<Integer> fecho = new HashSet<>();
    Collection vertices = graph.getVertices();
    int[] aux = new int[graph.getVertexCount()];
    for (int i = 0; i < aux.length; i++) {
        aux[i] = 0;
    }

    Queue<Integer> mustBeIncluded = new ArrayDeque<>();
    for (Integer v : currentSet) {
        mustBeIncluded.add(v);
    }
    while (!mustBeIncluded.isEmpty()) {
        Integer verti = mustBeIncluded.remove();
        fecho.add(verti);
        aux[verti] = INCLUDED;
        Collection<Integer> neighbors = graph.getNeighbors(verti);
        for (int vertn : neighbors) {
            if (vertn != verti) {
                int previousValue = aux[vertn];
                aux[vertn] = aux[vertn] + NEIGHBOOR_COUNT_INCLUDED;
                if (previousValue < INCLUDED && aux[vertn] >= INCLUDED) {
                    //                        includeVertex(graph, fecho, aux, verti);
                    mustBeIncluded.add(vertn);
                }
            }
        }
    }

    //        for (int i : currentSet) {
    //            includeVertex(graph, fecho, aux, i);
    //        }
    return fecho.size() == graph.getVertexCount();
}

From source file:org.grouplens.grapht.graph.DAGNode.java

/**
 * Do a breadth-first search for an edge.
 *
 * @param pred The predicate for matching edges.
 * @return The first edge matching {@code pred} in a breadth-first search, or {@code null} if no
 *         such edge is found.
 */
public DAGEdge<V, E> findEdgeBFS(@Nonnull Predicate<? super DAGEdge<V, E>> pred) {
    Queue<DAGNode<V, E>> work = Lists.newLinkedList();
    Set<DAGNode<V, E>> seen = Sets.newHashSet();
    work.add(this);
    seen.add(this);
    while (!work.isEmpty()) {
        DAGNode<V, E> node = work.remove();
        for (DAGEdge<V, E> e : node.getOutgoingEdges()) {
            // is this the edge we are looking for?
            if (pred.apply(e)) {
                return e;
            } else if (!seen.contains(e.getTail())) {
                seen.add(e.getTail());
                work.add(e.getTail());
            }
        }
    }

    // no node found
    return null;
}

From source file:org.grouplens.grapht.graph.DAGNode.java

/**
 * Do a breadth-first search for a node.
 *
 * @param pred The predicate for matching nodes.
 * @return The first node matching {@code pred} in a breadth-first search, or {@code null} if no
 *         such node is found.
 */
public DAGNode<V, E> findNodeBFS(@Nonnull Predicate<? super DAGNode<V, E>> pred) {
    if (pred.apply(this)) {
        return this;
    }

    Queue<DAGNode<V, E>> work = Lists.newLinkedList();
    Set<DAGNode<V, E>> seen = Sets.newHashSet();
    work.add(this);
    seen.add(this);
    while (!work.isEmpty()) {
        DAGNode<V, E> node = work.remove();
        for (DAGEdge<V, E> e : node.getOutgoingEdges()) {
            // is this the node we are looking for?
            DAGNode<V, E> nbr = e.getTail();
            if (!seen.contains(nbr)) {
                if (pred.apply(nbr)) {
                    return nbr;
                } else {
                    seen.add(nbr);
                    work.add(nbr);
                }
            }
        }
    }

    // no node found
    return null;
}

From source file:hudson.plugins.emailext.plugins.content.BuildLogRegexContent.java

String getContent(BufferedReader reader) throws IOException {

    final boolean asHtml = matchedLineHtmlStyle != null;
    escapeHtml = asHtml || escapeHtml;

    final Pattern pattern = Pattern.compile(regex);
    final StringBuffer buffer = new StringBuffer();
    int numLinesTruncated = 0;
    int numMatches = 0;
    int numLinesStillNeeded = 0;
    boolean insidePre = false;
    Queue<String> linesBeforeList = new LinkedList<String>();
    String line = null;
    while ((line = reader.readLine()) != null) {
        // Remove console notes (JENKINS-7402)
        line = ConsoleNote.removeNotes(line);

        // Remove any lines before that are no longer needed.
        while (linesBeforeList.size() > linesBefore) {
            linesBeforeList.remove();
            ++numLinesTruncated;
        }
        final Matcher matcher = pattern.matcher(line);
        final StringBuffer sb = new StringBuffer();
        boolean matched = false;
        while (matcher.find()) {
            matched = true;
            if (substText != null) {
                matcher.appendReplacement(sb, substText);
            } else {
                break;
            }
        }
        if (matched) {
            // The current line matches.
            if (showTruncatedLines == true && numLinesTruncated > 0) {
                // Append information about truncated lines.
                insidePre = stopPre(buffer, insidePre);
                appendLinesTruncated(buffer, numLinesTruncated, asHtml);
                numLinesTruncated = 0;
            }
            if (asHtml) {
                insidePre = startPre(buffer, insidePre);
            }
            while (!linesBeforeList.isEmpty()) {
                appendContextLine(buffer, linesBeforeList.remove(), escapeHtml);
            }
            // Append the (possibly transformed) current line.
            if (substText != null) {
                matcher.appendTail(sb);
                line = sb.toString();
            }
            appendMatchedLine(buffer, line, escapeHtml, matchedLineHtmlStyle, addNewline);
            ++numMatches;
            // Set up to add numLinesStillNeeded
            numLinesStillNeeded = linesAfter;
        } else {
            // The current line did not match.
            if (numLinesStillNeeded > 0) {
                // Append this line as a line after.
                appendContextLine(buffer, line, escapeHtml);
                --numLinesStillNeeded;
            } else {
                // Store this line as a possible line before.
                linesBeforeList.offer(line);
            }
        }
        if (maxMatches != 0 && numMatches >= maxMatches && numLinesStillNeeded == 0) {
            break;
        }
    }
    if (showTruncatedLines) {
        // Count the rest of the lines.
        // Include any lines in linesBefore.
        while (!linesBeforeList.isEmpty()) {
            linesBeforeList.remove();
            ++numLinesTruncated;
        }
        if (line != null) {
            // Include the rest of the lines that haven't been read in.
            while ((line = reader.readLine()) != null) {
                ++numLinesTruncated;
            }
        }
        if (numLinesTruncated > 0) {
            insidePre = stopPre(buffer, insidePre);
            appendLinesTruncated(buffer, numLinesTruncated, asHtml);
        }
    }
    insidePre = stopPre(buffer, insidePre);
    if (buffer.length() == 0) {
        return defaultValue;
    }
    return buffer.toString();
}

From source file:org.paxle.indexer.impl.IndexerWorker.java

@Override
protected void execute(ICommand command) {
    final long start = System.currentTimeMillis();

    IIndexerDocument indexerDoc = null;
    ArrayList<IIndexerDocument> indexerSubDocs = null;
    try {
        /* ================================================================
         * Input Parameter Check
         * ================================================================ */
        String errorMsg = null;
        if (command.getResult() != ICommand.Result.Passed) {
            errorMsg = String.format("Won't index resource '%s'. Command status is: '%s' (%s)",
                    command.getLocation(), command.getResult(), command.getResultText());
        } else if (command.getCrawlerDocument() == null) {
            errorMsg = String.format("Won't index resource '%s'. Crawler-document is null",
                    command.getLocation());
        } else if (command.getCrawlerDocument().getStatus() != ICrawlerDocument.Status.OK) {
            errorMsg = String.format("Won't index resource '%s'. Crawler-document status is: '%s' (%s)",
                    command.getLocation(), command.getCrawlerDocument().getStatus(),
                    command.getCrawlerDocument().getStatusText());
        } else if (command.getParserDocument() == null) {
            errorMsg = String.format("Won't index resource '%s'. Parser-document is null",
                    command.getLocation());
        } else if (command.getParserDocument().getStatus() != IParserDocument.Status.OK) {
            errorMsg = String.format("Won't index resource '%s'. Parser-document status is: '%s' (%s)",
                    command.getLocation(), command.getParserDocument().getStatus(),
                    command.getParserDocument().getStatusText());
        }

        if (errorMsg != null) {
            this.logger.warn(errorMsg);
            return;
        }

        /* ================================================================
         * Generate Indexer Document
         * ================================================================ */

        // generate the "main" indexer document from the "main" parser document including the
        // data from the command object
        if ((command.getParserDocument().getFlags() & IParserDocument.FLAG_NOINDEX) == 0) {
            this.logger.debug(String.format("Indexing of URL '%s' (%s) ...", command.getLocation(),
                    command.getCrawlerDocument().getMimeType()));

            indexerDoc = this.generateIIndexerDoc(command.getLocation(),
                    command.getCrawlerDocument().getCrawlerDate(), null, command.getParserDocument());
        } else {
            this.logger.info(String.format("Indexing of URL '%s' (%s) ommitted due to 'noindex'-flag",
                    command.getLocation(), command.getCrawlerDocument().getMimeType()));

            // don't exit here already, we still have to process the sub-parser-docs
        }

        // generate indexer docs from all parser-sub-documents and add them to the command
        indexerSubDocs = new ArrayList<IIndexerDocument>();

        final class Entry {
            public String key;
            public IParserDocument pdoc;

            public Entry(final String key, final IParserDocument pdoc) {
                this.key = key;
                this.pdoc = pdoc;
            }
        }

        // traverse the tree of sub-documents
        final Queue<Entry> queue = new LinkedList<Entry>();
        for (Map.Entry<String, IParserDocument> pdoce : command.getParserDocument().getSubDocs().entrySet())
            queue.add(new Entry(pdoce.getKey(), pdoce.getValue()));

        while (!queue.isEmpty()) {
            Entry e = queue.remove();
            if ((e.pdoc.getFlags() & IParserDocument.FLAG_NOINDEX) == 0) {
                IIndexerDocument indexerSubDoc = this.generateIIndexerDoc(command.getLocation(),
                        command.getCrawlerDocument().getCrawlerDate(), e.key, e.pdoc);
                indexerSubDocs.add(indexerSubDoc);
            }

            for (final Map.Entry<String, IParserDocument> pdoce : e.pdoc.getSubDocs().entrySet())
                queue.add(new Entry(e.key + "/" + pdoce.getKey(), pdoce.getValue()));
        }

        /* ================================================================
         * Process indexer response
         * ================================================================ */

        /* It can happen - e.g. due to a restriction of a document or its parser - that the main
         * document, from which the sub-docs are retrieved, is not indexed, while its links, and
         * therefore its sub-docs, are still followed.
         * In this case we simply omit the main document. If the document has no children, this is
         * the only thing we need to check for correctness. */
        if (indexerSubDocs.size() == 0) {

            if (indexerDoc == null) {
                command.setResult(ICommand.Result.Failure, "Indexer returned no indexer-document.");
                return;
            } else if (indexerDoc.getStatus() == null || indexerDoc.getStatus() != IIndexerDocument.Status.OK) {
                command.setResult(ICommand.Result.Failure,
                        String.format("Indexer-document status is '%s'.", indexerDoc.getStatus()));
                return;
            }

        }

        // XXX: which value should we take if pdoc and cdoc contain different last-modified values?
        if (command.getCrawlerDocument().getLastModDate() != null) {
            indexerDoc.set(IIndexerDocument.LAST_MODIFIED, command.getCrawlerDocument().getLastModDate());
        }
        indexerDoc.set(IIndexerDocument.SIZE, Long.valueOf(command.getCrawlerDocument().getSize()));

        // setting command status to passed
        command.setResult(ICommand.Result.Passed);

    } catch (Throwable e) {
        // setting command status
        command.setResult(ICommand.Result.Failure, String.format("Unexpected '%s' while indexing resource. %s",
                e.getClass().getName(), e.getMessage()));

        // log error
        this.logger.warn(String.format("Unexpected '%s' while indexing resource '%s'.", e.getClass().getName(),
                command.getLocation()), e);
    } finally {
        /* Add indexer-docs to command-object.
         * 
         * This must be done even in error situations to 
         * - allow filters to correct the error (if possible)
         * - to report the error back properly (e.g. to store it into db
         *   or send it back to a remote peer). 
         */
        if (indexerDoc != null) {
            command.addIndexerDocument(indexerDoc);
        }

        if (indexerSubDocs != null) {
            // get all indexer-sub-docs and add them to the command
            for (IIndexerDocument indexerSubDoc : indexerSubDocs) {
                // XXX: do sub-docs need a size-field, too?
                command.addIndexerDocument(indexerSubDoc);
            }
        }

        ICrawlerDocument crawlerDoc = command.getCrawlerDocument();
        IParserDocument parserDoc = command.getParserDocument();

        if (logger.isDebugEnabled()) {
            this.logger.info(String.format(
                    "Finished indexing of resource '%s' in %d ms.\r\n" + "\tCrawler-Status: '%s' %s\r\n"
                            + "\tParser-Status:  '%s' %s\r\n" + "\tIndexer-Status: '%s' %s",
                    command.getLocation(), Long.valueOf(System.currentTimeMillis() - start),
                    (crawlerDoc == null) ? "unknown" : crawlerDoc.getStatus().toString(),
                    (crawlerDoc == null) ? ""
                            : (crawlerDoc.getStatusText() == null) ? "" : crawlerDoc.getStatusText(),
                    (parserDoc == null) ? "unknown" : parserDoc.getStatus().toString(),
                    (parserDoc == null) ? ""
                            : (parserDoc.getStatusText() == null) ? "" : parserDoc.getStatusText(),
                    (indexerDoc == null) ? "unknown" : indexerDoc.getStatus().toString(),
                    (indexerDoc == null) ? ""
                            : (indexerDoc.getStatusText() == null) ? "" : indexerDoc.getStatusText()));
        } else if (logger.isInfoEnabled()) {
            this.logger.info(String.format(
                    "Finished indexing of resource '%s' in %d ms.\r\n" + "\tIndexer-Status: '%s' %s",
                    command.getLocation(), Long.valueOf(System.currentTimeMillis() - start),
                    (indexerDoc == null) ? "unknown" : indexerDoc.getStatus().toString(),
                    (indexerDoc == null) ? ""
                            : (indexerDoc.getStatusText() == null) ? "" : indexerDoc.getStatusText()));
        }
    }
}

From source file:fungus.MycoGraph.java

public Set<Set<MycoNode>> findConnectedComponents() {
    Set<Set<MycoNode>> components = new HashSet<Set<MycoNode>>();
    Set<MycoNode> unseen = new HashSet<MycoNode>(this.getVertices());
    Queue<MycoNode> queue = new LinkedList<MycoNode>();

    Set<MycoNode> workingComponent = null;
    MycoNode current;
    while ((!unseen.isEmpty()) || (!queue.isEmpty())) {
        if (queue.isEmpty()) {
            // Queue an arbitrary unvisited node
            MycoNode n = unseen.iterator().next();
            queue.offer(n);
            unseen.remove(n);
            // Start new component
            workingComponent = new HashSet<MycoNode>();
            components.add(workingComponent);
        }
        current = queue.remove();
        workingComponent.add(current);
        for (MycoNode neighbor : current.getHyphaLink().getNeighbors()) {
            if (unseen.contains(neighbor)) {
                queue.offer(neighbor);
                unseen.remove(neighbor);
            }
        }
    }
    return components;
}