Example usage for java.util Queue add

Introduction

This page presents usage examples for the java.util.Queue#add method.

Prototype

boolean add(E e);

Document

Inserts the specified element into this queue if it is possible to do so immediately without violating capacity restrictions, returning true upon success and throwing an IllegalStateException if no space is currently available.
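
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the sources below) contrasting add with offer on a capacity-restricted queue. ArrayBlockingQueue is used only because it is a bounded Queue implementation shipped with the JDK.

import java.util.Queue;
import java.util.concurrent.ArrayBlockingQueue;

public class QueueAddDemo {
    public static void main(String[] args) {
        // a bounded queue with capacity 1
        Queue<String> queue = new ArrayBlockingQueue<>(1);

        queue.add("first"); // succeeds and returns true

        // offer is the non-throwing alternative: it returns false when the queue is full
        System.out.println(queue.offer("second")); // prints: false

        try {
            queue.add("second"); // no space available
        } catch (IllegalStateException e) {
            System.out.println("add failed: " + e.getMessage()); // prints: add failed: Queue full
        }
    }
}

For unbounded implementations such as LinkedList or ArrayDeque (used in several of the examples below), add never throws for capacity reasons and behaves like offer.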

Usage

From source file:com.github.braully.graph.hn.GraphWS.java

public boolean checkIfHullSet(UndirectedSparseGraphTO<Integer, Integer> graph, int[] currentSet) {
    if (currentSet == null || currentSet.length == 0) {
        return false;
    }
    Set<Integer> fecho = new HashSet<>();
    // aux[v] accumulates NEIGHBOOR_COUNT_INCLUDED once for each included neighbor of v;
    // when it reaches INCLUDED, v joins the closure (int arrays start zero-filled)
    int[] aux = new int[graph.getVertexCount()];

    Queue<Integer> mustBeIncluded = new ArrayDeque<>();
    for (Integer v : currentSet) {
        mustBeIncluded.add(v);
    }
    while (!mustBeIncluded.isEmpty()) {
        Integer verti = mustBeIncluded.remove();
        fecho.add(verti);
        aux[verti] = INCLUDED;
        Collection<Integer> neighbors = graph.getNeighbors(verti);
        for (int vertn : neighbors) {
            if (vertn != verti) {
                int previousValue = aux[vertn];
                aux[vertn] = aux[vertn] + NEIGHBOOR_COUNT_INCLUDED;
                if (previousValue < INCLUDED && aux[vertn] >= INCLUDED) {
                    mustBeIncluded.add(vertn);
                }
            }
        }
    }

    return fecho.size() == graph.getVertexCount();
}

From source file:org.grouplens.grapht.graph.DAGNode.java

/**
 * Do a breadth-first search for an edge.
 *
 * @param pred The predicate for matching edges.
 * @return The first edge matching {@code pred} in a breadth-first search, or {@code null} if no
 *         such edge is found.
 */
public DAGEdge<V, E> findEdgeBFS(@Nonnull Predicate<? super DAGEdge<V, E>> pred) {
    Queue<DAGNode<V, E>> work = Lists.newLinkedList();
    Set<DAGNode<V, E>> seen = Sets.newHashSet();
    work.add(this);
    seen.add(this);
    while (!work.isEmpty()) {
        DAGNode<V, E> node = work.remove();
        for (DAGEdge<V, E> e : node.getOutgoingEdges()) {
            // is this the edge we are looking for?
            if (pred.apply(e)) {
                return e;
            } else if (!seen.contains(e.getTail())) {
                seen.add(e.getTail());
                work.add(e.getTail());
            }
        }
    }

    // no node found
    return null;
}

From source file:org.paxle.indexer.impl.IndexerWorker.java

@Override
protected void execute(ICommand command) {
    final long start = System.currentTimeMillis();

    IIndexerDocument indexerDoc = null;
    ArrayList<IIndexerDocument> indexerSubDocs = null;
    try {
        /* ================================================================
         * Input Parameter Check
         * ================================================================ */
        String errorMsg = null;
        if (command.getResult() != ICommand.Result.Passed) {
            errorMsg = String.format("Won't index resource '%s'. Command status is: '%s' (%s)",
                    command.getLocation(), command.getResult(), command.getResultText());
        } else if (command.getCrawlerDocument() == null) {
            errorMsg = String.format("Won't index resource '%s'. Crawler-document is null",
                    command.getLocation());
        } else if (command.getCrawlerDocument().getStatus() != ICrawlerDocument.Status.OK) {
            errorMsg = String.format("Won't index resource '%s'. Crawler-document status is: '%s' (%s)",
                    command.getLocation(), command.getCrawlerDocument().getStatus(),
                    command.getCrawlerDocument().getStatusText());
        } else if (command.getParserDocument() == null) {
            errorMsg = String.format("Won't index resource '%s'. Parser-document is null",
                    command.getLocation());
        } else if (command.getParserDocument().getStatus() != IParserDocument.Status.OK) {
            errorMsg = String.format("Won't index resource '%s'. Parser-document status is: '%s' (%s)",
                    command.getLocation(), command.getParserDocument().getStatus(),
                    command.getParserDocument().getStatusText());
        }
        }

        if (errorMsg != null) {
            this.logger.warn(errorMsg);
            return;
        }

        /* ================================================================
         * Generate Indexer Document
         * ================================================================ */

        // generate the "main" indexer document from the "main" parser document including the
        // data from the command object
        if ((command.getParserDocument().getFlags() & IParserDocument.FLAG_NOINDEX) == 0) {
            this.logger.debug(String.format("Indexing of URL '%s' (%s) ...", command.getLocation(),
                    command.getCrawlerDocument().getMimeType()));

            indexerDoc = this.generateIIndexerDoc(command.getLocation(),
                    command.getCrawlerDocument().getCrawlerDate(), null, command.getParserDocument());
        } else {
            this.logger.info(String.format("Indexing of URL '%s' (%s) ommitted due to 'noindex'-flag",
                    command.getLocation(), command.getCrawlerDocument().getMimeType()));

            // don't exit here already, we still have to process the sub-parser-docs
        }

        // generate indexer docs from all parser-sub-documents and add them to the command
        indexerSubDocs = new ArrayList<IIndexerDocument>();

        final class Entry {
            public String key;
            public IParserDocument pdoc;

            public Entry(final String key, final IParserDocument pdoc) {
                this.key = key;
                this.pdoc = pdoc;
            }
        }

        // traverse the tree of sub-documents
        final Queue<Entry> queue = new LinkedList<Entry>();
        for (Map.Entry<String, IParserDocument> pdoce : command.getParserDocument().getSubDocs().entrySet())
            queue.add(new Entry(pdoce.getKey(), pdoce.getValue()));

        while (!queue.isEmpty()) {
            Entry e = queue.remove();
            if ((e.pdoc.getFlags() & IParserDocument.FLAG_NOINDEX) == 0) {
                IIndexerDocument indexerSubDoc = this.generateIIndexerDoc(command.getLocation(),
                        command.getCrawlerDocument().getCrawlerDate(), e.key, e.pdoc);
                indexerSubDocs.add(indexerSubDoc);
            }

            for (final Map.Entry<String, IParserDocument> pdoce : e.pdoc.getSubDocs().entrySet())
                queue.add(new Entry(e.key + "/" + pdoce.getKey(), pdoce.getValue()));
        }

        /* ================================================================
         * Process indexer response
         * ================================================================ */

        /* It may be the case (e.g. due to a restriction of the document or its parser) that the main
         * document, from which the sub-docs were retrieved, must not be indexed, while its links, and
         * therefore its sub-docs, may still be followed.
         * In this case we simply omit the main document. If the document has no children, this is the
         * only correctness check we need. */
        if (indexerSubDocs.size() == 0) {

            if (indexerDoc == null) {
                command.setResult(ICommand.Result.Failure, "Indexer returned no indexer-document.");
                return;
            } else if (indexerDoc.getStatus() == null || indexerDoc.getStatus() != IIndexerDocument.Status.OK) {
                command.setResult(ICommand.Result.Failure,
                        String.format("Indexer-document status is '%s'.", indexerDoc.getStatus()));
                return;
            }

        }

        // XXX: what to take if both (pdoc and cdoc) contain a different value for last mod?
        // guard: indexerDoc may still be null if the main document carried the 'noindex' flag
        if (indexerDoc != null) {
            if (command.getCrawlerDocument().getLastModDate() != null) {
                indexerDoc.set(IIndexerDocument.LAST_MODIFIED, command.getCrawlerDocument().getLastModDate());
            }
            indexerDoc.set(IIndexerDocument.SIZE, Long.valueOf(command.getCrawlerDocument().getSize()));
        }

        // setting command status to passed
        command.setResult(ICommand.Result.Passed);

    } catch (Throwable e) {
        // setting command status
        command.setResult(ICommand.Result.Failure, String.format("Unexpected '%s' while indexing resource. %s",
                e.getClass().getName(), e.getMessage()));

        // log error
        this.logger.warn(String.format("Unexpected '%s' while indexing resource '%s'.", e.getClass().getName(),
                command.getLocation()), e);
    } finally {
        /* Add indexer-docs to command-object.
         * 
         * This must be done even in error situations to 
         * - allow filters to correct the error (if possible)
         * - to report the error back properly (e.g. to store it into db
         *   or send it back to a remote peer). 
         */
        if (indexerDoc != null) {
            command.addIndexerDocument(indexerDoc);
        }

        if (indexerSubDocs != null) {
            // get all indexer-sub-docs and add them to the command
            for (IIndexerDocument indexerSubDoc : indexerSubDocs) {
                // XXX: do sub-docs need a size-field, too?
                command.addIndexerDocument(indexerSubDoc);
            }
        }

        ICrawlerDocument crawlerDoc = command.getCrawlerDocument();
        IParserDocument parserDoc = command.getParserDocument();

        if (logger.isDebugEnabled()) {
            this.logger.info(String.format(
                    "Finished indexing of resource '%s' in %d ms.\r\n" + "\tCrawler-Status: '%s' %s\r\n"
                            + "\tParser-Status:  '%s' %s\r\n" + "\tIndexer-Status: '%s' %s",
                    command.getLocation(), Long.valueOf(System.currentTimeMillis() - start),
                    (crawlerDoc == null) ? "unknown" : crawlerDoc.getStatus().toString(),
                    (crawlerDoc == null) ? ""
                            : (crawlerDoc.getStatusText() == null) ? "" : crawlerDoc.getStatusText(),
                    (parserDoc == null) ? "unknown" : parserDoc.getStatus().toString(),
                    (parserDoc == null) ? ""
                            : (parserDoc.getStatusText() == null) ? "" : parserDoc.getStatusText(),
                    (indexerDoc == null) ? "unknown" : indexerDoc.getStatus().toString(),
                    (indexerDoc == null) ? ""
                            : (indexerDoc.getStatusText() == null) ? "" : indexerDoc.getStatusText()));
        } else if (logger.isInfoEnabled()) {
            this.logger.info(String.format(
                    "Finished indexing of resource '%s' in %d ms.\r\n" + "\tIndexer-Status: '%s' %s",
                    command.getLocation(), Long.valueOf(System.currentTimeMillis() - start),
                    (indexerDoc == null) ? "unknown" : indexerDoc.getStatus().toString(),
                    (indexerDoc == null) ? ""
                            : (indexerDoc.getStatusText() == null) ? "" : indexerDoc.getStatusText()));
        }
    }
}

From source file:org.grouplens.grapht.graph.DAGNode.java

/**
 * Do a breadth-first search for a node.
 *
 * @param pred The predicate for matching nodes.
 * @return The first node matching {@code pred} in a breadth-first search, or {@code null} if no
 *         such node is found.
 */
public DAGNode<V, E> findNodeBFS(@Nonnull Predicate<? super DAGNode<V, E>> pred) {
    if (pred.apply(this)) {
        return this;
    }

    Queue<DAGNode<V, E>> work = Lists.newLinkedList();
    Set<DAGNode<V, E>> seen = Sets.newHashSet();
    work.add(this);
    seen.add(this);
    while (!work.isEmpty()) {
        DAGNode<V, E> node = work.remove();
        for (DAGEdge<V, E> e : node.getOutgoingEdges()) {
            // is this the node we are looking for?
            DAGNode<V, E> nbr = e.getTail();
            if (!seen.contains(nbr)) {
                if (pred.apply(nbr)) {
                    return nbr;
                } else {
                    seen.add(nbr);
                    work.add(nbr);
                }
            }
        }
    }

    // no node found
    return null;
}

From source file:dendroscope.autumn.hybridnetwork.ComputeHybridizationNetwork.java

/**
 * get all alive leaves below the given root
 *
 * @param root the root below which alive leaves are collected
 * @return leaves
 */
private List<Root> getAllAliveLeaves(Root root) {
    List<Root> leaves = new LinkedList<Root>();
    if (root.getTaxa().cardinality() > 0) {
        if (root.getOutDegree() == 0)
            leaves.add(root);
        else {
            Queue<Root> queue = new LinkedList<Root>();
            queue.add(root);
            while (!queue.isEmpty()) {
                root = queue.poll();
                for (Edge e = root.getFirstOutEdge(); e != null; e = root.getNextOutEdge(e)) {
                    Root w = (Root) e.getTarget();
                    if (w.getTaxa().cardinality() > 0) {
                        if (w.getOutDegree() == 0)
                            leaves.add(w);
                        else
                            queue.add(w);
                    }
                }
            }
        }
    }
    return leaves;
}

From source file:org.eclipse.skalli.core.search.LuceneIndex.java

private List<IndexEntry> indexEntity(T entity) {
    List<IndexEntry> fields = new LinkedList<IndexEntry>();

    Queue<EntityBase> queue = new LinkedList<EntityBase>();
    queue.add(entity);

    while (!queue.isEmpty()) {
        EntityBase currentEntity = queue.poll();

        for (ExtensionService<?> extensionService : ExtensionServices.getAll()) {
            if (currentEntity.getClass().equals(extensionService.getExtensionClass())) {
                Indexer<?> indexer = extensionService.getIndexer();
                if (indexer != null) {
                    indexer.indexEntity(fields, currentEntity);
                }
            }
        }

        if (currentEntity instanceof ExtensibleEntityBase) {
            queue.addAll(((ExtensibleEntityBase) currentEntity).getAllExtensions());
        }
    }
    return fields;
}

From source file:org.protempa.dest.table.Derivation.java

@Override
public String[] getInferredPropositionIds(KnowledgeSource knowledgeSource, String[] inPropIds)
        throws KnowledgeSourceReadException {
    String[] explicitPropIds = getPropositionIds();
    if (explicitPropIds.length > 0) {
        return explicitPropIds;
    } else {
        Set<String> result = new HashSet<>();
        for (String propId : inPropIds) {
            PropositionDefinition propDef = knowledgeSource.readPropositionDefinition(propId);
            if (propDef == null) {
                throw new IllegalArgumentException("Invalid propId: " + propId);
            }
            switch (this.behavior) {
            case SINGLE_BACKWARD:
                Arrays.addAll(result, propDef.getChildren());
                break;
            case MULT_BACKWARD:
                Queue<String> backwardProps = new LinkedList<>();
                Arrays.addAll(backwardProps, propDef.getChildren());
                String pId;
                while ((pId = backwardProps.poll()) != null) {
                    // collect each visited propId here; the queue is empty once the loop ends
                    result.add(pId);
                    PropositionDefinition pDef = knowledgeSource.readPropositionDefinition(pId);
                    Arrays.addAll(backwardProps, pDef.getChildren());
                }
                break;
            case SINGLE_FORWARD:
                for (PropositionDefinition def : knowledgeSource.readParents(propDef)) {
                    result.add(def.getId());
                }
                break;
            case MULT_FORWARD:
                Queue<String> forwardProps = new LinkedList<>();
                for (PropositionDefinition def : knowledgeSource.readParents(propDef)) {
                    forwardProps.add(def.getId());
                }
                // pId is declared in the MULT_BACKWARD case above.
                while ((pId = forwardProps.poll()) != null) {
                    // collect each visited propId here; the queue is empty once the loop ends
                    result.add(pId);
                    PropositionDefinition pDef = knowledgeSource.readPropositionDefinition(pId);
                    for (PropositionDefinition def : knowledgeSource.readParents(pDef)) {
                        forwardProps.add(def.getId());
                    }
                }
                break;
            default:
                throw new AssertionError("Invalid derivation behavior specified");
            }
        }
        return result.toArray(new String[result.size()]);
    }
}

From source file:ch.mlutz.plugins.t4e.index.TapestryIndexer.java

/**
 * Iterates all folders and looks for *.specification files in a folder
 * named WEB-INF.
 *
 * @param project the project whose folders are scanned
 * @return the Tapestry modules created from the app specification files found
 */
public List<TapestryModule> createModulesForProject(IProject project) {

    // the queue storing the potential app specification files
    Queue<IFile> appSpecificationFiles = new LinkedList<IFile>();

    // the queue used for breadth first search
    Queue<IContainer> containerQueue = new LinkedList<IContainer>();
    containerQueue.add(project);

    IContainer currentContainer;
    while ((currentContainer = containerQueue.poll()) != null) {

        try {
            if (!TapestryModule.WEB_INF_FOLDER_NAME.equals(currentContainer.getName())) {

                // add all children folders of project
                for (IResource member : currentContainer.members()) {
                    if (member.getType() == IResource.FOLDER) {
                        containerQueue.add((IContainer) member);
                    }
                }
            } else {
                // add all children folders of project and check files for
                // specification
                for (IResource member : currentContainer.members()) {
                    if (member.getType() == IResource.FOLDER) {
                        containerQueue.add((IContainer) member);
                    } else if (member.getType() == IResource.FILE && isAppSpecification((IFile) member)) {
                        appSpecificationFiles.add((IFile) member);
                    }
                }

            }
        } catch (CoreException e) {
            log.warn("Couldn't iterate container " + currentContainer.getName(), e);
        }
    } // while

    List<TapestryModule> result = new ArrayList<TapestryModule>();

    IFile currentFile;
    while ((currentFile = appSpecificationFiles.poll()) != null) {

        try {
            TapestryModule tapestryModule = new TapestryModule(currentFile, this);
            result.add(tapestryModule);
        } catch (TapestryException e) {
            log.warn("Couldn't validate tapestryModule for app specification" + " file " + currentFile.getName()
                    + " in project " + project.getName() + ": ", e);
        }
    }

    return result;
}

From source file:it.geosolutions.geobatch.nrl.csvingest.CSVIngestActionTest.java

/**
 * Test of execute method, of class CSVIngestAction.
 */
//    @Test
public void testExecute() throws Exception {

    Queue<EventObject> events = new LinkedList<EventObject>();
    File cropFile = loadFile("testdata/cropdistr.csv");
    assertNotNull(cropFile);

    { // create FK crop descriptor
        CropDescriptor cd = new CropDescriptor();
        cd.setId("crop0");
        cd.setLabel("label0");
        cd.setSeasons(Season.KHARIF);
        cropDescriptorDAO.persist(cd);
    }

    FileSystemEvent event = new FileSystemEvent(cropFile, FileSystemEventType.FILE_ADDED);
    events.add(event);

    CSVIngestAction action = new CSVIngestAction(new CSVIngestConfiguration(null, null, null));
    action.setCropDataDao(cropDataDAO);
    action.setCropDescriptorDao(cropDescriptorDAO);
    action.afterPropertiesSet();

    Queue result = action.execute(events);

    assertEquals(1, cropDataDAO.count(null));
}

From source file:org.commonjava.maven.ext.io.rest.DefaultTranslator.java

/**
 * Translate the versions.
 * <pre>{@code
 * [ {
 *     "groupId": "com.google.guava",
 *     "artifactId": "guava",
 *     "version": "13.0.1"
 * } ]
 * }</pre>
 * This equates to a List of ProjectVersionRef.
 *
 * <pre>{@code
 * {
 *     "productNames": [],
 *     "productVersionIds": [],
 *     "repositoryGroup": "",
 *     "gavs": [
 *     {
 *         "groupId": "com.google.guava",
 *         "artifactId": "guava",
 *         "version": "13.0.1"
 *     } ]
 * }
 * }</pre>
 * There may be a lot of them, possibly causing timeouts or other issues.
 * This is mitigated by splitting them into smaller chunks when an error occurs and retrying.
 */
public Map<ProjectVersionRef, String> translateVersions(List<ProjectVersionRef> projects) {
    init(rgm);

    final Map<ProjectVersionRef, String> result = new HashMap<>();
    final Queue<Task> queue = new ArrayDeque<>();
    if (initialRestMaxSize != 0) {
        // Presplit
        final List<List<ProjectVersionRef>> partition = ListUtils.partition(projects, initialRestMaxSize);
        for (List<ProjectVersionRef> p : partition) {
            queue.add(new Task(rgm, p, endpointUrl + REPORTS_LOOKUP_GAVS));
        }
        logger.debug("For initial sizing of {} have split the queue into {} ", initialRestMaxSize,
                queue.size());
    } else {
        queue.add(new Task(rgm, projects, endpointUrl + REPORTS_LOOKUP_GAVS));
    }

    while (!queue.isEmpty()) {
        Task task = queue.remove();
        task.executeTranslate();
        if (task.isSuccess()) {
            result.putAll(task.getResult());
        } else {
            if (task.canSplit() && task.getStatus() == 504) {
                List<Task> tasks = task.split();

                logger.warn(
                        "Failed to translate versions for task @{} due to {}, splitting and retrying. Chunk size was: {} and new chunk size {} in {} segments.",
                        task.hashCode(), task.getStatus(), task.getChunkSize(), tasks.get(0).getChunkSize(),
                        tasks.size());
                queue.addAll(tasks);
            } else {
                if (task.getStatus() < 0) {
                    logger.debug("Caught exception calling server with message {}", task.getErrorMessage());
                } else {
                    logger.debug("Did not get status {} but received {}", SC_OK, task.getStatus());
                }

                throw new RestException("Received response status " + task.getStatus() + " with message: "
                        + task.getErrorMessage());
            }
        }
    }
    return result;
}