Example usage for java.util Queue poll

Introduction

On this page you can find example usage for java.util.Queue.poll().

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.
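
For reference, here is a minimal, self-contained sketch of this contract. Unlike Queue.remove(), which throws NoSuchElementException on an empty queue, poll() simply returns null (the class and variable names below are illustrative):

import java.util.LinkedList;
import java.util.Queue;

public class QueuePollExample {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        queue.offer("first");
        queue.offer("second");

        // Each call retrieves and removes the current head.
        System.out.println(queue.poll()); // prints "first"
        System.out.println(queue.poll()); // prints "second"

        // On an empty queue, poll() returns null rather than throwing.
        System.out.println(queue.poll()); // prints "null"
    }
}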

Usage

From source file:org.dspace.app.xmlui.aspect.administrative.group.EditGroupForm.java

/**
 * Checks extensively whether the first group has the second group as a distant
 * parent. This is used to avoid creating cycles like A->B, B->C, C->D, D->A,
 * which would cause all the groups involved to effectively include themselves.
 */
private boolean isDescendant(Group descendant, Group ancestor, List<UUID> memberGroupIDs) throws SQLException {
    Queue<Group> toVisit = new LinkedList<Group>();
    Group currentGroup;

    toVisit.offer(ancestor);

    // Initialize the queue with our current list of group members.
    for (UUID groupid : memberGroupIDs) {
        Group member = groupService.find(context, groupid);
        toVisit.offer(member);
    }

    while (!toVisit.isEmpty()) {
        // 1. Grab a group from the queue
        currentGroup = toVisit.poll();

        // 2. See if it's the descendant we're looking for
        if (currentGroup.equals(descendant)) {
            return true;
        }

        // 3. If not, add that group's children to the queue
        for (Group nextBatch : currentGroup.getMemberGroups()) {
            toVisit.offer(nextBatch);
        }
    }
    return false;
}

From source file:com.baifendian.swordfish.common.utils.graph.Graph.java

/**
 * Performs a breadth-first search over the graph, starting from the start vertices.
 *
 * @return the vertex keys in visit order
 * @throws Exception if the search cannot reach every vertex
 */
public List<VK> broadFirstSearch() throws Exception {
    List<VK> visit = new ArrayList<>();
    Queue<VK> q = new LinkedList<>();
    Set<VK> hasVisited = new HashSet<>();

    synchronized (this) {
        // seed the queue with the start vertices
        for (VK key : getStartVertex()) {
            q.add(key);
            hasVisited.add(key);
            visit.add(key);
        }

        while (!q.isEmpty()) {
            VK key = q.poll();

            // enqueue any unvisited successors of the current vertex
            for (VK postKey : getPostNode(key)) {
                if (!hasVisited.contains(postKey)) {
                    q.add(postKey);
                    hasVisited.add(postKey);
                    visit.add(postKey);
                }
            }
        }

        // verify that every vertex was reached
        if (visit.size() != getVertexNumber()) {
            throw new Exception("Broad first search can't search complete.");
        }
    }

    return visit;
}

From source file:org.exoplatform.services.cms.webdav.WebDavServiceImpl.java

@DELETE
@Path("/{repoName}/{repoPath:.*}/")
public Response delete(@PathParam("repoName") String repoName, @PathParam("repoPath") String repoPath,
        @HeaderParam(ExtHttpHeaders.LOCKTOKEN) String lockTokenHeader,
        @HeaderParam(ExtHttpHeaders.IF) String ifHeader) {
    Item item = null;
    try {
        repoName = repositoryService.getCurrentRepository().getConfiguration().getName();
        repoPath = convertRepoPath(repoPath, false);

        try {
            item = nodeFinder.getItem(workspaceName(repoPath), path(normalizePath(repoPath)), true);
        } catch (PathNotFoundException e) {
            item = nodeFinder.getItem(workspaceName(repoPath), path(Text.escapeIllegalJcrChars(repoPath)),
                    true);
        }

    } catch (PathNotFoundException exc) {
        return Response.status(HTTPStatus.NOT_FOUND).entity(exc.getMessage()).build();
    } catch (NoSuchWorkspaceException exc) {
        return Response.status(HTTPStatus.NOT_FOUND).entity(exc.getMessage()).build();
    } catch (Exception e) {
        if (LOG.isWarnEnabled()) {
            LOG.warn("Cannot find the item at " + repoName + "/" + repoPath, e);
        }
        return Response.serverError().build();
    }

    try {
        // Broadcast the event when the user moves a node to the Trash
        Node node = (Node) item;
        ListenerService listenerService = WCMCoreUtils.getService(ListenerService.class);
        ActivityCommonService activityService = WCMCoreUtils.getService(ActivityCommonService.class);
        Node parent = node.getParent();
        if (node.getPrimaryNodeType().getName().equals(NodetypeConstant.NT_FILE)) {
            if (activityService.isBroadcastNTFileEvents(node)) {
                listenerService.broadcast(ActivityCommonService.FILE_REMOVE_ACTIVITY, parent, node);
            }
        } else if (!WCMCoreUtils.isDocumentNodeType(node)) {
            Queue<Node> queue = new LinkedList<Node>();
            queue.add(node);

            //Broadcast event to remove file activities
            Node tempNode = null;
            try {
                while (!queue.isEmpty()) {
                    tempNode = queue.poll();
                    if (WCMCoreUtils.isDocumentNodeType(tempNode)
                            || tempNode.getPrimaryNodeType().getName().equals(NodetypeConstant.NT_FILE)) {
                        listenerService.broadcast(ActivityCommonService.FILE_REMOVE_ACTIVITY,
                                tempNode.getParent(), tempNode);
                    } else {
                        for (NodeIterator iter = tempNode.getNodes(); iter.hasNext();) {
                            Node childNode = iter.nextNode();
                            if (WCMCoreUtils.isDocumentNodeType(childNode)
                                    || childNode.isNodeType(NodetypeConstant.NT_UNSTRUCTURED)
                                    || childNode.isNodeType(NodetypeConstant.NT_FOLDER))
                                queue.add(childNode);
                        }
                    }
                }
            } catch (Exception e) {
                if (LOG.isWarnEnabled()) {
                    LOG.warn(e.getMessage());
                }
            }
        }
        // Remove the symlinks of the deleted node.
        Utils.removeSymlinks(node);
    } catch (Exception ex) {
        if (LOG.isWarnEnabled()) {
            LOG.warn(ex.getMessage());
        }
    }
    return super.delete(repoName, repoPath, lockTokenHeader, ifHeader);
}

From source file:bwem.Graph.java

private int[] computeDistances(final ChokePoint start, final List<ChokePoint> targets) {
    final int[] distances = new int[targets.size()];

    TileImpl.getStaticMarkable().unmarkAll();

    final Queue<Pair<Integer, ChokePoint>> toVisit = new PriorityQueue<>(Comparator.comparingInt(a -> a.first));
    toVisit.offer(new Pair<>(0, start));

    int remainingTargets = targets.size();
    while (!toVisit.isEmpty()) {
        final Pair<Integer, ChokePoint> distanceAndChokePoint = toVisit.poll();
        final int currentDist = distanceAndChokePoint.first;
        final ChokePoint current = distanceAndChokePoint.second;
        final Tile currentTile = getMap().getData().getTile(current.getCenter().toTilePosition(),
                CheckMode.NO_CHECK);
        //            bwem_assert(currentTile.InternalData() == currentDist);
        if (!(((TileImpl) currentTile).getInternalData() == currentDist)) {
            throw new IllegalStateException();
        }
        ((TileImpl) currentTile).setInternalData(0); // resets Tile::m_internalData for future usage
        ((TileImpl) currentTile).getMarkable().setMarked();

        for (int i = 0; i < targets.size(); ++i) {
            if (current == targets.get(i)) {
                distances[i] = currentDist;
                --remainingTargets;
            }
        }
        if (remainingTargets == 0) {
            break;
        }

        if (current.isBlocked() && (!current.equals(start))) {
            continue;
        }

        for (final Area pArea : new Area[] { current.getAreas().getFirst(), current.getAreas().getSecond() }) {
            for (final ChokePoint next : pArea.getChokePoints()) {
                if (!next.equals(current)) {
                    final int newNextDist = currentDist + distance(current, next);
                    final Tile nextTile = getMap().getData().getTile(next.getCenter().toTilePosition(),
                            CheckMode.NO_CHECK);
                    if (!((TileImpl) nextTile).getMarkable().isMarked()) {
                        if (((TileImpl) nextTile).getInternalData() != 0) { // next already in toVisit
                            if (newNextDist < ((TileImpl) nextTile).getInternalData()) { // nextNewDist < nextOldDist
                                                                                         // To update next's distance, we need to remove-insert it from toVisit:
                                                                                         //                                    bwem_assert(iNext != range.second);
                                final boolean removed = toVisit
                                        .remove(new Pair<>(((TileImpl) nextTile).getInternalData(), next));
                                if (!removed) {
                                    throw new IllegalStateException();
                                }
                                ((TileImpl) nextTile).setInternalData(newNextDist);
                                ((ChokePointImpl) next).setPathBackTrace(current);
                                toVisit.offer(new Pair<>(newNextDist, next));
                            }
                        } else {
                            ((TileImpl) nextTile).setInternalData(newNextDist);
                            ((ChokePointImpl) next).setPathBackTrace(current);
                            toVisit.offer(new Pair<>(newNextDist, next));
                        }
                    }
                }
            }
        }
    }

    //    //   bwem_assert(!remainingTargets);
    //        if (!(remainingTargets == 0)) {
    //            throw new IllegalStateException();
    //        }

    // reset Tile::m_internalData for future usage
    for (Pair<Integer, ChokePoint> distanceToChokePoint : toVisit) {
        ((TileImpl) getMap().getData().getTile(distanceToChokePoint.second.getCenter().toTilePosition(),
                CheckMode.NO_CHECK)).setInternalData(0);
    }

    return distances;
}

From source file:gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java

/**
 * Get all pages in an asynchronous mode.
 */
private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
        ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess) throws IOException {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);

    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
    int r = 0;
    while (r <= RETRY) {
        ++r;
        log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
        ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
        ExecutorService es = Executors.newFixedThreadPool(10, ExecutorsUtils
                .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));

        while (!toProcess.isEmpty()) {
            submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages, nextRound);
        }
        // Wait for the jobs to finish and start the next round if necessary.
        try {
            es.shutdown();
            boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
            if (!terminated) {
                es.shutdownNow();
                log.warn(String.format(
                        "Timed out while getting all pages for country-%s at round %d. Next round now has size %d.",
                        country, r, nextRound.size()));
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }

        if (nextRound.isEmpty()) {
            break;
        }
        toProcess = nextRound;
    }
    if (r == RETRY) {
        throw new RuntimeException(String.format(
                "Getting all pages reaches the maximum number of retires %d. Date range: %s ~ %s. Country: %s.",
                RETRY, startDate, endDate, country));
    }
    return allPages;
}

From source file:org.mobicents.servlet.restcomm.ussd.interpreter.UssdInterpreter.java

private StringBuffer processUssdMessageTags(Queue<Tag> messageTags) {
    StringBuffer message = new StringBuffer();
    while (!messageTags.isEmpty()) {
        Tag tag = messageTags.poll();
        if (tag != null) {
            message.append(tag.text());
            if (!messageTags.isEmpty())
                message.append("\n");
        } else {
            return message;
        }
    }
    return message;
}

From source file:com.thoughtworks.go.server.service.dd.DependencyFanInNode.java

private Pair<StageIdentifier, List<FaninScmMaterial>> getRevisionNthFor(int n, FanInGraphContext context) {
    List<FaninScmMaterial> scmMaterials = new ArrayList<>();
    PipelineTimeline pipelineTimeline = context.pipelineTimeline;
    Queue<PipelineTimelineEntry.Revision> revisionQueue = new ConcurrentLinkedQueue<>();
    DependencyMaterialConfig dependencyMaterial = (DependencyMaterialConfig) materialConfig;
    PipelineTimelineEntry entry = pipelineTimeline.instanceFor(dependencyMaterial.getPipelineName(),
            totalInstanceCount - n);

    Set<CaseInsensitiveString> visitedNodes = new HashSet<>();

    StageIdentifier dependentStageIdentifier = dependentStageIdentifier(context, entry,
            CaseInsensitiveString.str(dependencyMaterial.getStageName()));
    if (!StageIdentifier.NULL.equals(dependentStageIdentifier)) {
        addToRevisionQueue(entry, revisionQueue, scmMaterials, context, visitedNodes);
    } else {
        return null;
    }
    while (!revisionQueue.isEmpty()) {
        PipelineTimelineEntry.Revision revision = revisionQueue.poll();
        DependencyMaterialRevision dmr = DependencyMaterialRevision.create(revision.revision, null);
        PipelineTimelineEntry pte = pipelineTimeline
                .getEntryFor(new CaseInsensitiveString(dmr.getPipelineName()), dmr.getPipelineCounter());
        addToRevisionQueue(pte, revisionQueue, scmMaterials, context, visitedNodes);
    }

    return new Pair<>(dependentStageIdentifier, scmMaterials);
}

From source file:com.norconex.committer.core.AbstractFileQueueCommitter.java

@Override
public void commit() {

    // Get all files to be committed, relying on natural ordering, which
    // will match file creation order.
    final Queue<File> filesPending = new ConcurrentLinkedQueue<File>();
    FileUtil.visitAllFiles(new File(queue.getDirectory()), new IFileVisitor() {
        @Override
        public void visit(File file) {
            filesPending.add(file);
        }
    }, REF_FILTER);

    // Nothing left to commit. This happens if multiple threads are 
    // committing at the same time and no more files are available for the 
    // current thread to commit. This should happen rarely in practice.
    if (filesPending.isEmpty()) {
        return;
    }

    // Don't commit more than queue size
    List<ICommitOperation> filesToCommit = new ArrayList<>();
    while (filesToCommit.size() < queueSize) {

        File file = filesPending.poll();

        // If no more files are available in either list, quit the loop. This
        // happens if multiple threads try to commit at once and there are
        // fewer than queueSize files to commit. This should happen rarely in
        // practice.
        if (file == null) {
            break;
        }

        // Current thread tries to own this file. If the file is already owned
        // by another thread, continue and attempt to grab another file.
        if (filesCommitting.putIfAbsent(file, Thread.currentThread()) != null) {
            continue;
        }

        // A file might have already been committed and cleaned up from
        // the map, but still be returned by the directory listing. Ignore
        // those. It is important to make this check AFTER the current
        // thread got ownership of the file.
        if (!file.exists()) {
            continue;
        }

        // Current thread will be committing this file
        if (file.getAbsolutePath().contains(FileSystemCommitter.FILE_SUFFIX_ADD)) {
            filesToCommit.add(new FileAddOperation(file));
        } else if (file.getAbsolutePath().contains(FileSystemCommitter.FILE_SUFFIX_REMOVE)) {
            filesToCommit.add(new FileDeleteOperation(file));
        } else {
            LOG.error("Unsupported file to commit: " + file);
        }
    }

    if (LOG.isInfoEnabled()) {
        LOG.info(String.format("Committing %s files", filesToCommit.size()));
    }
    for (ICommitOperation op : filesToCommit) {
        try {
            if (op instanceof FileAddOperation) {
                prepareCommitAddition((IAddOperation) op);
                commitAddition((IAddOperation) op);
            } else {
                prepareCommitDeletion((IDeleteOperation) op);
                commitDeletion((IDeleteOperation) op);
            }
        } catch (IOException e) {
            throw new CommitterException("Cannot read reference from : " + op, e);
        }
    }

    commitComplete();

    deleteEmptyOldDirs(new File(queue.getDirectory()));

    // Clean up committed files from the map that might have been deleted
    Enumeration<File> en = filesCommitting.keys();
    while (en.hasMoreElements()) {
        File file = (File) en.nextElement();
        if (!file.exists()) {
            filesCommitting.remove(file);
        }
    }
}

From source file:org.apache.gobblin.ingestion.google.webmaster.GoogleWebmasterDataFetcherImpl.java

/**
 * Get all pages in an asynchronous mode.
 */
private Collection<String> getPages(String startDate, String endDate, List<Dimension> dimensions,
        ApiDimensionFilter countryFilter, Queue<Pair<String, FilterOperator>> toProcess, int rowLimit)
        throws IOException {
    String country = GoogleWebmasterFilter.countryFilterToString(countryFilter);

    ConcurrentLinkedDeque<String> allPages = new ConcurrentLinkedDeque<>();
    int r = 0;
    while (r <= GET_PAGES_RETRIES) {
        ++r;
        log.info(String.format("Get pages at round %d with size %d.", r, toProcess.size()));
        ConcurrentLinkedDeque<Pair<String, FilterOperator>> nextRound = new ConcurrentLinkedDeque<>();
        ExecutorService es = Executors.newFixedThreadPool(10, ExecutorsUtils
                .newDaemonThreadFactory(Optional.of(log), Optional.of(this.getClass().getSimpleName())));

        while (!toProcess.isEmpty()) {
            submitJob(toProcess.poll(), countryFilter, startDate, endDate, dimensions, es, allPages, nextRound,
                    rowLimit);
        }
        // Wait for the jobs to finish and start the next round if necessary.
        try {
            es.shutdown();
            boolean terminated = es.awaitTermination(5, TimeUnit.MINUTES);
            if (!terminated) {
                es.shutdownNow();
                log.warn(String.format(
                        "Timed out while getting all pages for country-%s at round %d. Next round now has size %d.",
                        country, r, nextRound.size()));
            }
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }

        if (nextRound.isEmpty()) {
            break;
        }
        toProcess = nextRound;
    }
    if (r == GET_PAGES_RETRIES) {
        throw new RuntimeException(String.format(
                "Getting all pages reaches the maximum number of retires %d. Date range: %s ~ %s. Country: %s.",
                GET_PAGES_RETRIES, startDate, endDate, country));
    }
    return allPages;
}

From source file:edu.uci.ics.jung.algorithms.scoring.BetweennessCentrality.java

protected void computeBetweenness(Queue<V> queue, Transformer<E, ? extends Number> edge_weights) {
    for (V v : graph.getVertices()) {
        // initialize the betweenness data for this new vertex
        for (V s : graph.getVertices())
            this.vertex_data.put(s, new BetweennessData());

        //         if (v.equals(new Integer(0)))
        //            System.out.println("pause");

        vertex_data.get(v).numSPs = 1;
        vertex_data.get(v).distance = 0;

        Stack<V> stack = new Stack<V>();
        //            Buffer<V> queue = new UnboundedFifoBuffer<V>();
        //            queue.add(v);
        queue.offer(v);

        while (!queue.isEmpty()) {
            //                V w = queue.remove();
            V w = queue.poll();
            stack.push(w);
            BetweennessData w_data = vertex_data.get(w);

            for (E e : graph.getOutEdges(w)) {
                // TODO (jrtom): change this to getOtherVertices(w, e)
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double wx_weight = edge_weights.transform(e).doubleValue();

                //                for(V x : graph.getSuccessors(w)) 
                //                {
                //                   if (x.equals(w))
                //                      continue;

                // FIXME: the other problem is that I need to 
                // keep putting the neighbors of things we've just 
                // discovered in the queue, if they're undiscovered or
                // at greater distance.

                // FIXME: this is the problem, right here, I think: 
                // need to update position in queue if distance changes
                // (which can only happen with weighted edges).
                // for each outgoing edge e from w, get other end x
                // if x not already visited (dist x < 0)
                //   set x's distance to w's dist + edge weight
                //   add x to queue; pri in queue is x's dist
                // if w's dist + edge weight < x's dist 
                //   update x's dist
                //   update x in queue (MapBinaryHeap)
                //   clear x's incoming edge list
                // if w's dist + edge weight = x's dist
                //   add e to x's incoming edge list

                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + wx_weight;

                if (x_data.distance < 0) {
                    //                        queue.add(x);
                    //                        vertex_data.get(x).distance = vertex_data.get(w).distance + 1;
                    x_data.distance = x_potential_dist;
                    queue.offer(x);
                }

                // note:
                // (1) this can only happen with weighted edges
                // (2) x's SP count and incoming edges are updated below 
                if (x_data.distance > x_potential_dist) {
                    x_data.distance = x_potential_dist;
                    // invalidate previously identified incoming edges
                    // (we have a new shortest path distance to x)
                    x_data.incomingEdges.clear();
                    // update x's position in queue
                    ((MapBinaryHeap<V>) queue).update(x);
                }
                //                  if (vertex_data.get(x).distance == vertex_data.get(w).distance + 1) 
                // 
                //                    if (x_data.distance == x_potential_dist) 
                //                    {
                //                        x_data.numSPs += w_data.numSPs;
                ////                        vertex_data.get(x).predecessors.add(w);
                //                        x_data.incomingEdges.add(e);
                //                    }
            }
            for (E e : graph.getOutEdges(w)) {
                V x = graph.getOpposite(w, e);
                if (x.equals(w))
                    continue;
                double e_weight = edge_weights.transform(e).doubleValue();
                BetweennessData x_data = vertex_data.get(x);
                double x_potential_dist = w_data.distance + e_weight;
                if (x_data.distance == x_potential_dist) {
                    x_data.numSPs += w_data.numSPs;
                    //                        vertex_data.get(x).predecessors.add(w);
                    x_data.incomingEdges.add(e);
                }
            }
        }
        while (!stack.isEmpty()) {
            V x = stack.pop();

            //              for (V w : vertex_data.get(x).predecessors) 
            for (E e : vertex_data.get(x).incomingEdges) {
                V w = graph.getOpposite(x, e);
                double partialDependency = vertex_data.get(w).numSPs / vertex_data.get(x).numSPs
                        * (1.0 + vertex_data.get(x).dependency);
                vertex_data.get(w).dependency += partialDependency;
                //                  E w_x = graph.findEdge(w, x);
                //                  double w_x_score = edge_scores.get(w_x).doubleValue();
                //                  w_x_score += partialDependency;
                //                  edge_scores.put(w_x, w_x_score);
                double e_score = edge_scores.get(e).doubleValue();
                edge_scores.put(e, e_score + partialDependency);
            }
            if (!x.equals(v)) {
                double x_score = vertex_scores.get(x).doubleValue();
                x_score += vertex_data.get(x).dependency;
                vertex_scores.put(x, x_score);
            }
        }
    }

    if (graph instanceof UndirectedGraph) {
        for (V v : graph.getVertices()) {
            double v_score = vertex_scores.get(v).doubleValue();
            v_score /= 2.0;
            vertex_scores.put(v, v_score);
        }
        for (E e : graph.getEdges()) {
            double e_score = edge_scores.get(e).doubleValue();
            e_score /= 2.0;
            edge_scores.put(e, e_score);
        }
    }

    vertex_data.clear();
}