Example usage for java.util Queue poll

Introduction

On this page you can find example usages of java.util.Queue.poll().

Prototype

E poll();

Document

Retrieves and removes the head of this queue, or returns null if this queue is empty.
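
The following is a minimal, self-contained sketch (the class name PollDemo is ours) illustrating this contract: poll() returns each element in FIFO order and, unlike remove(), returns null rather than throwing NoSuchElementException when the queue is empty.

import java.util.LinkedList;
import java.util.Queue;

public class PollDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<String>();
        queue.add("first");
        queue.add("second");

        // Retrieves and removes the head of the queue
        System.out.println(queue.poll()); // first
        System.out.println(queue.poll()); // second

        // Empty queue: poll() returns null instead of throwing
        System.out.println(queue.poll()); // null
    }
}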

Usage

From source file:org.apache.karaf.tooling.exam.container.internal.KarafTestContainer.java

/**
 * Since we might get quite deep, use a simple breadth-first search algorithm.
 */
private File searchKarafBase(File _targetFolder) {
    Queue<File> searchNext = new LinkedList<File>();
    searchNext.add(_targetFolder);
    while (!searchNext.isEmpty()) {
        File head = searchNext.poll();
        if (!head.isDirectory()) {
            continue;
        }
        boolean isSystem = false;
        boolean etc = false;
        File[] children = head.listFiles();
        if (children == null) {
            continue; // listFiles() returns null on I/O error
        }
        for (File file : children) {
            if (file.isDirectory() && file.getName().equals("system")) {
                isSystem = true;
            }
            if (file.isDirectory() && file.getName().equals("etc")) {
                etc = true;
            }
        }
        if (isSystem && etc) {
            return head;
        }
        searchNext.addAll(Arrays.asList(children));
    }
    throw new IllegalStateException("No karaf base dir found in extracted distribution.");
}
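
This is the standard Queue-based breadth-first traversal: seed the queue, poll() the head, test it, and enqueue its children. A stripped-down sketch of the same idiom (the starting directory is chosen arbitrarily):

import java.io.File;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

public class BfsDemo {
    public static void main(String[] args) {
        Queue<File> queue = new LinkedList<File>();
        queue.add(new File("."));        // start at the current directory
        while (!queue.isEmpty()) {
            File dir = queue.poll();     // head of the queue
            File[] children = dir.listFiles();
            if (children == null) {
                continue;                // a plain file, or an I/O error
            }
            System.out.println(dir.getPath());
            queue.addAll(Arrays.asList(children));
        }
    }
}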

From source file:org.apache.drill.exec.store.mongo.MongoGroupScan.java

@Override
public void applyAssignments(List<DrillbitEndpoint> endpoints) throws PhysicalOperatorSetupException {
    logger.debug("Incoming endpoints :" + endpoints);
    watch.reset();
    watch.start();

    final int numSlots = endpoints.size();
    int totalAssignmentsTobeDone = chunksMapping.size();

    Preconditions.checkArgument(numSlots <= totalAssignmentsTobeDone, String.format(
            "Number of endpoints (%d) exceeds number of chunks (%d)", numSlots, totalAssignmentsTobeDone));

    final int minPerEndpointSlot = (int) Math.floor((double) totalAssignmentsTobeDone / numSlots);
    final int maxPerEndpointSlot = (int) Math.ceil((double) totalAssignmentsTobeDone / numSlots);

    endpointFragmentMapping = Maps.newHashMapWithExpectedSize(numSlots);
    Map<String, Queue<Integer>> endpointHostIndexListMap = Maps.newHashMap();

    for (int i = 0; i < numSlots; ++i) {
        endpointFragmentMapping.put(i, new ArrayList<MongoSubScanSpec>(maxPerEndpointSlot));
        String hostname = endpoints.get(i).getAddress();
        Queue<Integer> hostIndexQueue = endpointHostIndexListMap.get(hostname);
        if (hostIndexQueue == null) {
            hostIndexQueue = Lists.newLinkedList();
            endpointHostIndexListMap.put(hostname, hostIndexQueue);
        }
        hostIndexQueue.add(i);
    }

    Set<Entry<String, List<ChunkInfo>>> chunksToAssignSet = Sets.newHashSet(chunksInverseMapping.entrySet());

    for (Iterator<Entry<String, List<ChunkInfo>>> chunksIterator = chunksToAssignSet.iterator(); chunksIterator
            .hasNext();) {
        Entry<String, List<ChunkInfo>> chunkEntry = chunksIterator.next();
        Queue<Integer> slots = endpointHostIndexListMap.get(chunkEntry.getKey());
        if (slots != null) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                Integer slotIndex = slots.poll();
                List<MongoSubScanSpec> subScanSpecList = endpointFragmentMapping.get(slotIndex);
                subScanSpecList.add(buildSubScanSpecAndGet(chunkInfo));
                slots.offer(slotIndex);
            }
            chunksIterator.remove();
        }
    }

    PriorityQueue<List<MongoSubScanSpec>> minHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR);
    PriorityQueue<List<MongoSubScanSpec>> maxHeap = new PriorityQueue<List<MongoSubScanSpec>>(numSlots,
            LIST_SIZE_COMPARATOR_REV);
    for (List<MongoSubScanSpec> listOfScan : endpointFragmentMapping.values()) {
        if (listOfScan.size() < minPerEndpointSlot) {
            minHeap.offer(listOfScan);
        } else if (listOfScan.size() > minPerEndpointSlot) {
            maxHeap.offer(listOfScan);
        }
    }

    if (chunksToAssignSet.size() > 0) {
        for (Entry<String, List<ChunkInfo>> chunkEntry : chunksToAssignSet) {
            for (ChunkInfo chunkInfo : chunkEntry.getValue()) {
                List<MongoSubScanSpec> smallestList = minHeap.poll();
                smallestList.add(buildSubScanSpecAndGet(chunkInfo));
                minHeap.offer(smallestList);
            }
        }
    }

    while (minHeap.peek() != null && minHeap.peek().size() < minPerEndpointSlot) {
        List<MongoSubScanSpec> smallestList = minHeap.poll();
        List<MongoSubScanSpec> largestList = maxHeap.poll();
        smallestList.add(largestList.remove(largestList.size() - 1));
        if (largestList.size() > minPerEndpointSlot) {
            maxHeap.offer(largestList);
        }
        if (smallestList.size() < minPerEndpointSlot) {
            minHeap.offer(smallestList);
        }
    }

    logger.debug("Built assignment map in {} s.\nEndpoints: {}.\nAssignment Map: {}",
            watch.elapsed(TimeUnit.NANOSECONDS) / 1000, endpoints, endpointFragmentMapping.toString());
}
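
Note the poll()/offer() pair in the chunk-assignment loop above: polling a slot index from the head and immediately offering it back to the tail rotates through a host's slots in round-robin fashion. A minimal sketch of that idiom (the names slots and work are ours):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

public class RoundRobinDemo {
    public static void main(String[] args) {
        Queue<Integer> slots = new LinkedList<Integer>(Arrays.asList(0, 1, 2));
        List<String> work = Arrays.asList("a", "b", "c", "d", "e");
        for (String item : work) {
            Integer slot = slots.poll();   // take the next slot from the head
            System.out.println(item + " -> slot " + slot);
            slots.offer(slot);             // re-queue it at the tail
        }
    }
}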

From source file:com.microsoft.office.plugin.MetadataMojo.java

/**
 * Generates a single entity set.
 * 
 * @param schema Schema to which the entity set will be related.
 * @param base Base directory for output classes.
 * @param pkg Package.
 * @param objs Objects to be put into the velocity context.
 * @param generatedEntitySets Tracks whether an entity set for a given type has already been generated, to prevent duplicates.
 * @param entitySetNames Maps each entity type to its set name.
 * @param type Fully qualified entity type (contains schema namespace and class name).
 * @param paths Maps each entity type to the path of its set relative to the service root.
 * @param additionalSets List of all sets that were generated.
 * @throws MojoExecutionException
 */
private void generateEntitySet(Schema schema, final File base, final String pkg, final Map<String, Object> objs,
        Map<String, Boolean> generatedEntitySets, Map<String, String> entitySetNames, String type,
        Map<String, String> paths, List<EntitySet> additionalSets) throws MojoExecutionException {
    Queue<String> typesQueue = new LinkedList<String>();

    for (NavigationProperty np : schema.getEntityType(utility.getNameFromNS(type)).getNavigationProperties()) {
        addTypeToQueue(entitySetNames, type, paths, typesQueue, np);
    }

    while (!typesQueue.isEmpty()) {
        String currentType = typesQueue.poll();
        if (Boolean.TRUE.equals(generatedEntitySets.get(currentType))) { // null-safe: the type may not be tracked yet
            continue;
        }

        EntitySet generatedSet = new EntitySet();
        generatedSet.setEntityType(utility.getNameInNamespace(currentType));
        String name = entitySetNames.get(currentType);
        generatedSet.setName(name);
        additionalSets.add(generatedSet);

        objs.clear();
        objs.put("entitySet", generatedSet);

        EntityType currentEntityType = schema.getEntityType(currentType);
        while (true) {
            for (NavigationProperty np : currentEntityType.getNavigationProperties()) {
                addTypeToQueue(entitySetNames, currentType, paths, typesQueue, np);
            }
            if (currentEntityType.getBaseType() != null) {
                currentEntityType = schema
                        .getEntityType(utility.getNameFromNS(currentEntityType.getBaseType()));
            } else {
                break;
            }
        }

        /******************************* EXCHANGE-SPECIFIC ******************************************/
        // As we know from the spec, we cannot create a message directly inside /Me/Messages;
        // we must create it inside /Me/path/to/some/folder/Messages.
        // the path may be one of:
        // 1. Predefined folder name - as described in metadata in navigation properties of User entity
        // example: Inbox -> /Me/Inbox/Messages
        // 2. Folder with given id
        // example: Folders('abc') -> /Me/Folders('abc')/Messages
        // 3. A child folder (may be recursively)
        // example: Folders('abc')/ChildFolders('xyz') -> /Me/Folders('abc')/ChildFolders('xyz')/Messages

        if (name.equals("Messages")) {
            objs.put("pathToSet", "Me/");
            objs.put("createPath", "Me/%s/Messages");
            objs.put("overridePath", true);
        } else if (name.equals("Events")) {
            objs.put("pathToSet", "Me/");
            objs.put("createPath", "Me/Calendars('%s')/Events");
            objs.put("overridePath", true);
        }

        if (!paths.get(currentType).equals("")) {
            objs.put("pathToSet", paths.get(currentType));
        }
        if (utility.capitalize(name).equals("Folders")) {
            objs.put("userType", schema.getEntityType("User"));
        }

        /*************************** END OF EXCHANGE-SPECIFIC BLOCK ********************************/
        if (generateInterfaces) {
            parseObj(base, pkg, "entitySet", "I" + utility.capitalize(name) + ".java", objs);
        }
        if (generateClasses) {
            parseObj(base, pkg, "entitySetImpl", utility.capitalize(name) + ".java", objs);
        }
        generatedEntitySets.put(currentType, true);
    }
}

From source file:org.aksw.simba.cetus.yago.YagoBasedTypeSearcher.java

protected void searchDolceSuperClasses(Set<Resource> types) {
    Queue<Resource> queue = new LinkedList<Resource>(types);
    Resource classResource, superClass;
    RDFNode node;
    NodeIterator nodeIterator;
    Set<Resource> yagoSuperClasses = new HashSet<Resource>();
    Set<Resource> dolceSuperClasses = new HashSet<Resource>();
    boolean dolceClassFound = false;
    while (!queue.isEmpty()) {
        classResource = queue.poll();
        // If this resource is a DOLCE resource
        if (dolceClassModel.containsResource(classResource)) {
            dolceClassFound = true;
        } else {
            nodeIterator = classesModel.listObjectsOfProperty(classResource, RDFS.subClassOf);
            yagoSuperClasses.clear();
            dolceSuperClasses.clear();
            while (nodeIterator.hasNext()) {
                node = nodeIterator.next();
                if (node.isResource()) {
                    superClass = node.asResource();
                    if (dolceClassModel.containsResource(superClass)) {
                        dolceSuperClasses.add(superClass);
                    } else {
                        yagoSuperClasses.add(superClass);
                    }
                } else {
                    LOGGER.error("Expected a resource in the statement (" + classResource
                            + ", rdfs:subClassOf, " + node + "). Ignoring this statement.");
                }
            }

            // If a DOLCE class has been found
            if (dolceSuperClasses.size() > 0) {
                // add only the DOLCE classes and discard all others
                types.addAll(dolceSuperClasses);
                dolceClassFound = true;
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Added super classes of " + classResource.getURI() + " --> "
                            + Arrays.toString(dolceSuperClasses.toArray()));
                }
            } else {
                for (Resource r : yagoSuperClasses) {
                    // If it has not been seen before (and therefore not yet
                    // been added to the queue)
                    if (!types.contains(r)) {
                        types.add(r);
                        queue.add(r);
                    }
                    if (LOGGER.isDebugEnabled()) {
                        LOGGER.debug("Added super classes of " + classResource.getURI() + " --> "
                                + Arrays.toString(yagoSuperClasses.toArray()));
                    }
                }
            }
        }
    }
    if (!dolceClassFound) {
        LOGGER.warn("Couldn't find a DOLCE class for the following list of types: "
                + Arrays.toString(types.toArray()));
    }
}

From source file:org.dkpro.lab.engine.impl.BatchTaskEngine.java

/**
 * Execute the subtasks of the given batch task with the given parameter configuration.
 * 
 * @param aConfiguration
 *            the batch task whose subtasks are to be executed.
 * @param aContext
 *            the context of the current batch task.
 * @param aConfig
 *            the current parameter configuration.
 * @param aExecutedSubtasks
 *            already executed subtasks.
 */
protected void executeConfiguration(BatchTask aConfiguration, TaskContext aContext, Map<String, Object> aConfig,
        Set<String> aExecutedSubtasks) throws ExecutionException, LifeCycleException {
    if (log.isTraceEnabled()) {
        // Show all subtasks executed so far
        for (String est : aExecutedSubtasks) {
            log.trace("-- Already executed: " + est);
        }
    }

    // Set up initial scope used by sub-batch-tasks using the inherited scope. The scope is
    // extended as the subtasks of this batch are executed with the present configuration.
    // FIXME: That means that sub-batch-tasks in two different configurations cannot see
    // each other. Is that intended? Mind that the "executedSubtasks" set is intentionally
    // maintained *across* configurations, so maybe the scope should also be maintained
    // *across* configurations? - REC 2014-06-15
    Set<String> scope = new HashSet<String>();
    if (aConfiguration.getScope() != null) {
        scope.addAll(aConfiguration.getScope());
    }

    // Configure subtasks
    for (Task task : aConfiguration.getTasks()) {
        aContext.getLifeCycleManager().configure(aContext, task, aConfig);
    }

    Queue<Task> queue = new LinkedList<Task>(aConfiguration.getTasks());
    Set<Task> loopDetection = new HashSet<Task>();

    List<UnresolvedImportException> deferralReasons = new ArrayList<UnresolvedImportException>();
    while (!queue.isEmpty()) {
        Task task = queue.poll();

        try {
            // Check whether a subtask execution compatible with the present configuration
            // already exists ...
            TaskContextMetadata execution = getExistingExecution(aConfiguration, aContext, task, aConfig,
                    aExecutedSubtasks);
            if (execution == null) {
                // ... otherwise execute it with the present configuration
                log.info("Executing task [" + task.getType() + "]");

                // set the scope here so that the inherited scope and any tasks added
                // to the scope in this loop are both considered
                if (task instanceof BatchTask) {
                    ((BatchTask) task).setScope(scope);
                }

                execution = runNewExecution(aContext, task, aConfig, aExecutedSubtasks);
            } else {
                log.debug("Using existing execution [" + execution.getId() + "]");
            }

            // Record new/existing execution
            aExecutedSubtasks.add(execution.getId());
            scope.add(execution.getId());
            loopDetection.clear();
            deferralReasons.clear();
        } catch (UnresolvedImportException e) {
            // Add task back to queue
            log.debug("Deferring execution of task [" + task.getType() + "]: " + e.getMessage());
            queue.add(task);

            // Detect endless loop
            if (loopDetection.contains(task)) {
                StringBuilder details = new StringBuilder();
                for (UnresolvedImportException r : deferralReasons) {
                    details.append("\n -");
                    details.append(r.getMessage());
                }

                // throw an UnresolvedImportException in case there is an outer BatchTask which needs to be executed first 
                throw new UnresolvedImportException(e, details.toString());
            }

            // Record failed execution
            loopDetection.add(task);
            deferralReasons.add(e);
        }
    }
}
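
The queue here doubles as a retry mechanism: a task whose imports cannot be resolved yet is polled, then added back to the tail, and a separate set detects when deferrals stop making progress. A reduced sketch of the pattern, under the assumption of a single dependency ("b" runs only after "a"):

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Set;

public class DeferralDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<String>(Arrays.asList("b", "a"));
        Set<String> completed = new HashSet<String>();
        Set<String> loopDetection = new HashSet<String>();
        while (!queue.isEmpty()) {
            String task = queue.poll();
            boolean ready = !task.equals("b") || completed.contains("a");
            if (!ready) {
                queue.add(task); // defer: push to the back and retry later
                if (!loopDetection.add(task)) {
                    // the same task was deferred twice with no progress in between
                    throw new IllegalStateException("Tasks cannot make progress: " + task);
                }
                continue;
            }
            completed.add(task);
            loopDetection.clear(); // progress was made; reset the detector
            System.out.println("Executed " + task);
        }
    }
}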

From source file:org.protempa.Executor.java

private void retain(Set<PropositionDefinition> propDefs) {
    if (this.propIdsToRetain != null) {
        Map<String, PropositionDefinition> propDefMap = new HashMap<>();
        for (PropositionDefinition and : propDefs) {
            propDefMap.put(and.getId(), and);
        }
        Queue<String> propIdsQueue = new LinkedList<>(this.propIds);
        String pid;
        Set<PropositionDefinition> propDefsToKeep = new HashSet<>();
        while ((pid = propIdsQueue.poll()) != null) {
            if (this.propIdsToRetain.contains(pid)) {
                Queue<String> propIdsToKeep = new LinkedList<>();
                propIdsToKeep.add(pid);
                String pid2;
                while ((pid2 = propIdsToKeep.poll()) != null) {
                    PropositionDefinition get = propDefMap.get(pid2);
                    propDefsToKeep.add(get);
                    Arrays.addAll(propIdsToKeep, get.getChildren()); // a Collection-accepting Arrays.addAll utility, not java.util.Arrays
                }
            } else {
                PropositionDefinition get = propDefMap.get(pid);
                Arrays.addAll(propIdsQueue, get.getChildren());
            }
        }
        allNarrowerDescendants = propDefsToKeep;
    } else {
        allNarrowerDescendants = propDefs;
    }
}
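
This example uses poll() directly in the loop condition, draining the queue until it returns null. A minimal version of the idiom; note it is only safe when the queue cannot contain null elements (LinkedList permits them):

import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;

public class DrainDemo {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<String>(Arrays.asList("x", "y", "z"));
        String item;
        // poll() returns null once the queue is empty, ending the loop
        while ((item = queue.poll()) != null) {
            System.out.println(item);
        }
    }
}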

From source file:edu.uci.ics.hyracks.api.rewriter.ActivityClusterGraphRewriter.java

/**
 * Rewrite an activity cluster internally.
 * 
 * @param ac
 *            the activity cluster to be rewritten
 */
private void rewriteIntraActivityCluster(ActivityCluster ac,
        Map<IActivity, SuperActivity> invertedActivitySuperActivityMap) {
    Map<ActivityId, IActivity> activities = ac.getActivityMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityInputMap = ac.getActivityInputMap();
    Map<ActivityId, List<IConnectorDescriptor>> activityOutputMap = ac.getActivityOutputMap();
    Map<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> connectorActivityMap = ac
            .getConnectorActivityMap();
    ActivityClusterGraph acg = ac.getActivityClusterGraph();
    Map<ActivityId, IActivity> startActivities = new HashMap<ActivityId, IActivity>();
    Map<ActivityId, SuperActivity> superActivities = new HashMap<ActivityId, SuperActivity>();
    Map<ActivityId, Queue<IActivity>> toBeExpendedMap = new HashMap<ActivityId, Queue<IActivity>>();

    /**
     * Build the initial super activities
     */
    for (Entry<ActivityId, IActivity> entry : activities.entrySet()) {
        ActivityId activityId = entry.getKey();
        IActivity activity = entry.getValue();
        if (activityInputMap.get(activityId) == null) {
            startActivities.put(activityId, activity);
            /**
             * use the start activity's id as the id of the super activity
             */
            createNewSuperActivity(ac, superActivities, toBeExpendedMap, invertedActivitySuperActivityMap,
                    activityId, activity);
        }
    }

    /**
     * Expand one-to-one connected activity clusters in BFS order.
     * After the while-loop, the original activities are partitioned
     * into equivalence classes, one per super activity.
     */
    Map<ActivityId, SuperActivity> clonedSuperActivities = new HashMap<ActivityId, SuperActivity>();
    while (toBeExpendedMap.size() > 0) {
        clonedSuperActivities.clear();
        clonedSuperActivities.putAll(superActivities);
        for (Entry<ActivityId, SuperActivity> entry : clonedSuperActivities.entrySet()) {
            ActivityId superActivityId = entry.getKey();
            SuperActivity superActivity = entry.getValue();

            /**
             * for the case where the super activity has already been swallowed
             */
            if (superActivities.get(superActivityId) == null) {
                continue;
            }

            /**
             * expand the super activity
             */
            Queue<IActivity> toBeExpended = toBeExpendedMap.get(superActivityId);
            if (toBeExpended == null) {
                /**
                 * Nothing to expand
                 */
                continue;
            }
            IActivity expendingActivity = toBeExpended.poll();
            List<IConnectorDescriptor> outputConnectors = activityOutputMap
                    .get(expendingActivity.getActivityId());
            if (outputConnectors != null) {
                for (IConnectorDescriptor outputConn : outputConnectors) {
                    Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = connectorActivityMap
                            .get(outputConn.getConnectorId());
                    IActivity newActivity = endPoints.getRight().getLeft();
                    SuperActivity existingSuperActivity = invertedActivitySuperActivityMap.get(newActivity);
                    if (outputConn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
                        /**
                         * expand the super activity cluster along a one-to-one outbound connection
                         */
                        if (existingSuperActivity == null) {
                            superActivity.addActivity(newActivity);
                            toBeExpended.add(newActivity);
                            invertedActivitySuperActivityMap.put(newActivity, superActivity);
                        } else {
                            /**
                             * the two activities are already in the same super activity
                             */
                            if (existingSuperActivity == superActivity) {
                                continue;
                            }
                            /**
                             * swallow an existing super activity
                             */
                            swallowExistingSuperActivity(superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, superActivity, superActivityId,
                                    existingSuperActivity);
                        }
                    } else {
                        if (existingSuperActivity == null) {
                            /**
                             * create a new super activity
                             */
                            createNewSuperActivity(ac, superActivities, toBeExpendedMap,
                                    invertedActivitySuperActivityMap, newActivity.getActivityId(), newActivity);
                        }
                    }
                }
            }

            /**
             * remove the to-be-expanded queue if it is empty
             */
            if (toBeExpended.size() == 0) {
                toBeExpendedMap.remove(superActivityId);
            }
        }
    }

    Map<ConnectorDescriptorId, IConnectorDescriptor> connMap = ac.getConnectorMap();
    Map<ConnectorDescriptorId, RecordDescriptor> connRecordDesc = ac.getConnectorRecordDescriptorMap();
    Map<SuperActivity, Integer> superActivityProducerPort = new HashMap<SuperActivity, Integer>();
    Map<SuperActivity, Integer> superActivityConsumerPort = new HashMap<SuperActivity, Integer>();
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        superActivityProducerPort.put(entry.getValue(), 0);
        superActivityConsumerPort.put(entry.getValue(), 0);
    }

    /**
     * create a new activity cluster to replace the old activity cluster
     */
    ActivityCluster newActivityCluster = new ActivityCluster(acg, ac.getId());
    newActivityCluster.setConnectorPolicyAssignmentPolicy(ac.getConnectorPolicyAssignmentPolicy());
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        newActivityCluster.addActivity(entry.getValue());
        acg.getActivityMap().put(entry.getKey(), newActivityCluster);
    }

    /**
     * Setup connectors: either inside a super activity or among super activities
     */
    for (Entry<ConnectorDescriptorId, Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>>> entry : connectorActivityMap
            .entrySet()) {
        ConnectorDescriptorId connectorId = entry.getKey();
        Pair<Pair<IActivity, Integer>, Pair<IActivity, Integer>> endPoints = entry.getValue();
        IActivity producerActivity = endPoints.getLeft().getLeft();
        IActivity consumerActivity = endPoints.getRight().getLeft();
        int producerPort = endPoints.getLeft().getRight();
        int consumerPort = endPoints.getRight().getRight();
        RecordDescriptor recordDescriptor = connRecordDesc.get(connectorId);
        IConnectorDescriptor conn = connMap.get(connectorId);
        if (conn.getClass().getName().contains(ONE_TO_ONE_CONNECTOR)) {
            /**
             * connection edge between inner activities
             */
            SuperActivity residingSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            residingSuperActivity.connect(conn, producerActivity, producerPort, consumerActivity, consumerPort,
                    recordDescriptor);
        } else {
            /**
             * connection edge between super activities
             */
            SuperActivity producerSuperActivity = invertedActivitySuperActivityMap.get(producerActivity);
            SuperActivity consumerSuperActivity = invertedActivitySuperActivityMap.get(consumerActivity);
            int producerSAPort = superActivityProducerPort.get(producerSuperActivity);
            int consumerSAPort = superActivityConsumerPort.get(consumerSuperActivity);
            newActivityCluster.addConnector(conn);
            newActivityCluster.connect(conn, producerSuperActivity, producerSAPort, consumerSuperActivity,
                    consumerSAPort, recordDescriptor);

            /**
             * bridge the port
             */
            producerSuperActivity.setClusterOutputIndex(producerSAPort, producerActivity.getActivityId(),
                    producerPort);
            consumerSuperActivity.setClusterInputIndex(consumerSAPort, consumerActivity.getActivityId(),
                    consumerPort);
            acg.getConnectorMap().put(connectorId, newActivityCluster);

            /**
             * increase the port numbers for the producer and the consumer
             */
            superActivityProducerPort.put(producerSuperActivity, ++producerSAPort);
            superActivityConsumerPort.put(consumerSuperActivity, ++consumerSAPort);
        }
    }

    /**
     * Set up the roots of the new activity cluster
     */
    for (Entry<ActivityId, SuperActivity> entry : superActivities.entrySet()) {
        List<IConnectorDescriptor> connIds = newActivityCluster.getActivityOutputMap().get(entry.getKey());
        if (connIds == null || connIds.size() == 0) {
            newActivityCluster.addRoot(entry.getValue());
        }
    }

    /**
     * set up the blocked2Blocker mapping, which will be updated in the rewriteInterActivityCluster call
     */
    newActivityCluster.getBlocked2BlockerMap().putAll(ac.getBlocked2BlockerMap());

    /**
     * replace the old activity cluster with the new activity cluster
     */
    acg.getActivityClusterMap().put(ac.getId(), newActivityCluster);
}

From source file:org.phenotips.data.permissions.internal.DefaultPatientAccessHelper.java

@Override
public AccessLevel getAccessLevel(Patient patient, EntityReference user) {
    AccessLevel result = this.manager.resolveAccessLevel("none");
    if (patient == null || user == null) {
        return result;
    }
    try {
        EntityReference owner = getOwner(patient).getUser();
        Collection<Collaborator> collaborators = getCollaborators(patient);
        Set<DocumentReference> processedEntities = new HashSet<DocumentReference>();
        Queue<DocumentReference> entitiesToCheck = new LinkedList<DocumentReference>();
        entitiesToCheck.add((DocumentReference) user);
        AccessLevel currentItemAccess = null;
        DocumentReference currentItem;
        XWikiContext context = getXWikiContext();
        XWikiGroupService groupService = context.getWiki().getGroupService(context);
        while (!entitiesToCheck.isEmpty()) {
            currentItem = entitiesToCheck.poll();
            currentItemAccess = getAccessLevel(currentItem, owner, collaborators);
            if (currentItemAccess.compareTo(result) > 0) {
                result = currentItemAccess;
            }
            processedEntities.add(currentItem);
            Collection<DocumentReference> groups = groupService.getAllGroupsReferencesForMember(currentItem, 0,
                    0, context);
            groups.removeAll(processedEntities);
            entitiesToCheck.addAll(groups);
        }
    } catch (XWikiException ex) {
        this.logger.warn("Failed to compute access level for [{}] on [{}]: {}", user, patient.getId(),
                ex.getMessage());
    }
    return result;
}

From source file:org.cleverbus.component.externalcall.ExternalCallComponentTest.java

private boolean sendAndVerifyBatch(Message[] messages) throws Exception {
    boolean lockFailureEncountered = false;
    HashMap<Message, Future<String>> replies = new HashMap<Message, Future<String>>();
    // send messages that have no reply, resend messages that have LockFailureException instead of a reply
    // verify results and re-send failures - test has timeout set because this is potentially endless
    Queue<Message> unverifiedMessages = new LinkedList<Message>(Arrays.asList(messages));
    while (!unverifiedMessages.isEmpty()) {
        Message message = unverifiedMessages.poll();
        boolean replyAvailable = replies.containsKey(message);
        if (replyAvailable) {
            Future<String> reply = replies.get(message);
            try {
                reply.get(); // this will throw an exception if it occurred during processing
            } catch (Exception exc) {
                if (ExceptionUtils.indexOfType(exc, LockFailureException.class) != -1) {
                    // expected cause - this test verifies that this scenario happens and is handled properly
                    lockFailureEncountered = true;
                    replyAvailable = false; // mark reply unavailable to resend the original message
                } else {
                    // fail by rethrowing
                    Log.error("Unexpected failure for message {} --", message, exc);
                    throw exc;
                }
            }
        }
        if (!replyAvailable) {
            unverifiedMessages.add(message); // mark message as still unverified
            replies.put(message, requestViaExternalCallAsync(message, "mock:test", "concurrentKey",
                    "external call original body"));
        }
    }
    // check the call is now in DB as OK and with the correct LAST msg timestamp
    assertExtCallStateInDB(extCallId, ExternalCallStateEnum.OK, messages[messages.length - 1]);
    return lockFailureEncountered;
}

From source file:org.unitime.timetable.solver.curricula.CurriculaLastLikeCourseDemands.java

protected void computeTargetShare(CurriculumClassification clasf, Collection<CurriculumCourse> courses,
        CurriculumCourseGroupsProvider course2groups, CurModel model) {
    for (CurriculumCourse c1 : courses) {
        double x1 = clasf.getNrStudents() * c1.getPercShare();
        Set<CurriculumCourse>[] group = new HashSet[] { new HashSet<CurriculumCourse>(),
                new HashSet<CurriculumCourse>() };
        Queue<CurriculumCourse> queue = new LinkedList<CurriculumCourse>();
        queue.add(c1);
        Set<CurriculumCourseGroup> done = new HashSet<CurriculumCourseGroup>();
        while (!queue.isEmpty()) {
            CurriculumCourse c = queue.poll();
            for (CurriculumCourseGroup g : course2groups.getGroups(c)) {
                if (done.add(g)) {
                    for (CurriculumCourse x : courses) {
                        if (!x.equals(c) && !x.equals(c1) && course2groups.getGroups(x).contains(g)
                                && group[group[0].contains(c) ? 0 : g.getType()].add(x)) {
                            queue.add(x);
                        }
                    }
                }
            }
        }
        for (CurriculumCourse c2 : courses) {
            double x2 = clasf.getNrStudents() * c2.getPercShare();
            if (c1.getUniqueId() >= c2.getUniqueId())
                continue;
            double share = 0;
            Set<WeightedStudentId> s1 = iProjectedDemands.getDemands(c1.getCourse());
            Set<WeightedStudentId> s2 = iProjectedDemands.getDemands(c2.getCourse());
            double sharedStudents = 0, lastLike = 0;
            if (s1 != null && !s1.isEmpty() && s2 != null && !s2.isEmpty()) {
                for (WeightedStudentId s : s1) {
                    if (s.match(clasf)) {
                        lastLike += s.getWeight();
                        if (s2.contains(s))
                            sharedStudents += s.getWeight();
                    }
                }
            }
            if (lastLike > 0) {
                double requested = c1.getPercShare() * clasf.getNrStudents();
                share = (requested / lastLike) * sharedStudents;
            } else {
                share = c1.getPercShare() * c2.getPercShare() * clasf.getNrStudents();
            }
            boolean opt = group[0].contains(c2);
            boolean req = !opt && group[1].contains(c2);
            model.setTargetShare(c1.getCourse().getUniqueId(), c2.getCourse().getUniqueId(),
                    opt ? 0.0 : req ? Math.min(x1, x2) : share, false);
        }
    }
}