Example usage for java.util Queue isEmpty

List of usage examples for java.util Queue isEmpty

Introduction

On this page you can find usage examples for java.util.Queue.isEmpty().

Prototype

boolean isEmpty();

Document

Returns true if this collection contains no elements.
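
A minimal, self-contained sketch of the method in isolation (assuming a LinkedList-backed queue; any java.util.Queue implementation behaves the same way):

import java.util.LinkedList;
import java.util.Queue;

public class QueueIsEmptyExample {
    public static void main(String[] args) {
        Queue<String> queue = new LinkedList<>();
        System.out.println(queue.isEmpty()); // true: nothing has been added yet

        queue.add("first");
        System.out.println(queue.isEmpty()); // false: one element is queued

        // A common idiom: drain the queue until it reports empty
        while (!queue.isEmpty()) {
            System.out.println(queue.poll());
        }
        System.out.println(queue.isEmpty()); // true again after draining
    }
}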

Usage

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Load saved partitions in multiple threads.
 * @param superstep superstep to load
 * @param partitions list of partitions to load
 */
private void loadCheckpointVertices(final long superstep, List<Integer> partitions) {
    int numThreads = Math.min(GiraphConstants.NUM_CHECKPOINT_IO_THREADS.get(getConfiguration()),
            partitions.size());

    final Queue<Integer> partitionIdQueue = new ConcurrentLinkedQueue<>(partitions);

    final CompressionCodec codec = new CompressionCodecFactory(getConfiguration())
            .getCodec(new Path(GiraphConstants.CHECKPOINT_COMPRESSION_CODEC.get(getConfiguration())));

    long t0 = System.currentTimeMillis();

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }
                        Path path = getSavedCheckpoint(superstep,
                                "_" + partitionId + CheckpointingUtils.CHECKPOINT_VERTICES_POSTFIX);

                        FSDataInputStream compressedStream = getFs().open(path);

                        DataInputStream stream = codec == null ? compressedStream
                                : new DataInputStream(codec.createInputStream(compressedStream));

                        Partition<I, V, E> partition = getConfiguration().createPartition(partitionId,
                                getContext());

                        partition.readFields(stream);

                        getPartitionStore().addPartition(partition);

                        stream.close();
                    }
                    return null;
                }

            };
        }
    };

    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "load-vertices-%d", getContext());

    LOG.info("Loaded checkpoint in " + (System.currentTimeMillis() - t0) + " ms, using " + numThreads
            + " threads");
}
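
Note the null check after poll() in the example above: with a ConcurrentLinkedQueue shared between worker threads, another thread may drain the queue between the isEmpty() check and the poll() call, so isEmpty() alone is not a reliable guard. A minimal sketch of that drain pattern (the class and variable names here are illustrative, not from the Giraph source):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class ConcurrentDrainSketch {
    public static void main(String[] args) throws InterruptedException {
        final Queue<Integer> work = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < 100; i++) {
            work.add(i);
        }

        Runnable drainer = () -> {
            while (!work.isEmpty()) {
                Integer item = work.poll();
                if (item == null) {
                    break; // another thread won the race; nothing left to do
                }
                // process item here
            }
        };

        Thread t1 = new Thread(drainer);
        Thread t2 = new Thread(drainer);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
    }
}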

From source file:org.joox.test.JOOXTest.java

@Test
public void testEachCallback() {
    final Queue<Integer> queue = new LinkedList<Integer>();

    queue.addAll(Arrays.asList(0));
    $.each(new Each() {
        @Override
        public void each(Context context) {
            assertEquals(context.element(), context.match());
            assertEquals(context.elementIndex(), context.matchIndex());
            assertEquals(context.elementSize(), context.matchSize());

            assertEquals((int) queue.poll(), context.matchIndex());
            assertEquals(1, context.matchSize());
            assertEquals("document", context.element().getTagName());
        }
    });

    assertTrue(queue.isEmpty());
    queue.addAll(Arrays.asList(0, 1, 2));

    $.children().each(new Each() {
        @Override
        public void each(Context context) {
            assertEquals(context.element(), context.match());
            assertEquals(context.elementIndex(), context.matchIndex());
            assertEquals(context.elementSize(), context.matchSize());

            assertEquals((int) queue.poll(), context.matchIndex());
            assertEquals(3, context.matchSize());
            assertEquals("library", context.element().getTagName());
        }
    });

    assertTrue(queue.isEmpty());
}

From source file:tachyon.master.MasterInfo.java

/**
 * Gets the ids of the files at the given path. If recursive, the subdirectories are scanned as well.
 *
 * @param path The path to start looking at
 * @param recursive If true, recursively scan the subdirectories at the given path as well
 * @return the list of inode ids at the path
 * @throws InvalidPathException
 * @throws FileDoesNotExistException
 */
public List<Integer> listFiles(TachyonURI path, boolean recursive)
        throws InvalidPathException, FileDoesNotExistException {
    List<Integer> ret = new ArrayList<Integer>();
    synchronized (mRootLock) {
        Inode inode = getInode(path);
        if (inode == null) {
            throw new FileDoesNotExistException(path.toString());
        }

        if (inode.isFile()) {
            ret.add(inode.getId());
        } else if (recursive) {
            Queue<Inode> queue = new LinkedList<Inode>();
            queue.addAll(((InodeFolder) inode).getChildren());

            while (!queue.isEmpty()) {
                Inode qinode = queue.poll();
                if (qinode.isDirectory()) {
                    queue.addAll(((InodeFolder) qinode).getChildren());
                } else {
                    ret.add(qinode.getId());
                }
            }
        } else {
            for (Inode child : ((InodeFolder) inode).getChildren()) {
                ret.add(child.getId());
            }
        }
    }

    return ret;
}
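
The method above is a breadth-first traversal driven by isEmpty(): directories push their children onto the queue, files are collected, and the loop ends once the queue drains. Stripped of the Tachyon types, the same pattern might look like this (a sketch over the local filesystem, with File standing in for Inode):

import java.io.File;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;

public class BfsListFiles {
    /** Collects all regular files under root, breadth-first. */
    public static List<File> listFiles(File root) {
        List<File> result = new ArrayList<>();
        Queue<File> queue = new LinkedList<>();
        queue.add(root);
        while (!queue.isEmpty()) {
            File current = queue.poll();
            if (current.isDirectory()) {
                File[] children = current.listFiles();
                if (children != null) { // listFiles() returns null on I/O error
                    for (File child : children) {
                        queue.add(child);
                    }
                }
            } else {
                result.add(current);
            }
        }
        return result;
    }
}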

From source file:org.apache.giraph.worker.BspServiceSource.java

/**
 * Save partitions. To speed things up, this operation
 * runs in multiple threads.
 */
private void storeCheckpointVertices() {
    final int numPartitions = getPartitionStore().getNumPartitions();
    int numThreads = Math.min(GiraphConstants.NUM_CHECKPOINT_IO_THREADS.get(getConfiguration()), numPartitions);

    final Queue<Integer> partitionIdQueue = (numPartitions == 0) ? new LinkedList<Integer>()
            : new ArrayBlockingQueue<Integer>(numPartitions);
    Iterables.addAll(partitionIdQueue, getPartitionStore().getPartitionIds());

    final CompressionCodec codec = new CompressionCodecFactory(getConfiguration())
            .getCodec(new Path(GiraphConstants.CHECKPOINT_COMPRESSION_CODEC.get(getConfiguration())));

    long t0 = System.currentTimeMillis();

    CallableFactory<Void> callableFactory = new CallableFactory<Void>() {
        @Override
        public Callable<Void> newCallable(int callableId) {
            return new Callable<Void>() {

                @Override
                public Void call() throws Exception {
                    while (!partitionIdQueue.isEmpty()) {
                        Integer partitionId = partitionIdQueue.poll();
                        if (partitionId == null) {
                            break;
                        }
                        Path path = createCheckpointFilePathSafe(
                                "_" + partitionId + CheckpointingUtils.CHECKPOINT_VERTICES_POSTFIX);

                        FSDataOutputStream uncompressedStream = getFs().create(path);

                        DataOutputStream stream = codec == null ? uncompressedStream
                                : new DataOutputStream(codec.createOutputStream(uncompressedStream));

                        Partition<I, V, E> partition = getPartitionStore().getOrCreatePartition(partitionId);

                        partition.write(stream);

                        getPartitionStore().putPartition(partition);

                        stream.close();
                        uncompressedStream.close();
                    }
                    return null;
                }

            };
        }
    };

    ProgressableUtils.getResultsWithNCallables(callableFactory, numThreads, "checkpoint-vertices-%d",
            getContext());

    LOG.info("Save checkpoint in " + (System.currentTimeMillis() - t0) + " ms, using " + numThreads
            + " threads");
}

From source file:org.lockss.repository.RepositoryNodeImpl.java

private List enumerateEncodedChildren(File[] children, CachedUrlSetSpec filter, boolean includeInactive) {
    // holds fully decoded immediate children
    List<File> expandedDirectories = new ArrayList<File>();

    // holds immediate children that still need to be decoded, and may
    // yield more than one expanded child
    Queue<File> unexpandedDirectories = new LinkedList<File>();

    // add initial set of unexpanded directories
    for (File file : children) {
        if (file.getName().endsWith("\\")) {
            unexpandedDirectories.add(file);
        } else {
            expandedDirectories.add(file);
        }
    }

    // keep expanding directories until no more unexpanded directories exist
    // core algorithm: BFS
    while (!unexpandedDirectories.isEmpty()) {
        File child = unexpandedDirectories.poll();
        if (child.getName().endsWith("\\")) {
            File[] newChildren = child.listFiles();
            for (File newChild : newChildren) {
                unexpandedDirectories.add(newChild);
            }
        } else {
            expandedDirectories.add(child);
        }
    }

    // using iterator to traverse safely
    Iterator<File> iter = expandedDirectories.iterator();
    while (iter.hasNext()) {
        File child = iter.next();
        if ((child.getName().equals(CONTENT_DIR)) || (!child.isDirectory())) {
            // iter remove instead of list.remove
            iter.remove();
        }
    }

    // normalization needed?
    CheckUnnormalizedMode unnormMode = RepositoryManager.getCheckUnnormalizedMode();

    // Switch to holding strings representing the URLs
    // instead of the File objects
    List<String> subUrls = new ArrayList<String>();
    for (File child : expandedDirectories) {
        try {
            // http://root/child -> /child
            String location = child.getCanonicalPath()
                    .substring(nodeRootFile.getCanonicalFile().toString().length());
            location = decodeUrl(location);
            String oldLocation = location;
            switch (unnormMode) {
            case Log:
            case Fix:
                // Normalization done here against the url string, instead of
                // against the file in the repository. This alleviates us from
                // dealing with edge conditions where the file split occurs
                // around an encoding. e.g. %/5C is special in file, but decoded
                // URL string is %5C and we handle it correctly.
                location = normalizeTrailingQuestion(location);
                location = normalizeUrlEncodingCase(location);
                if (!oldLocation.equals(location)) {
                    switch (unnormMode) {
                    case Fix:
                        // most dangerous part done here, where we copy and
                // delete. Maybe we should move to a lost and found instead? :)
                        String newRepoLocation = LockssRepositoryImpl
                                .mapUrlToFileLocation(repository.getRootLocation(), url + location);
                        logger.debug("Fixing unnormalized " + oldLocation + " => " + location);
                        FileUtils.copyDirectory(child, new File(newRepoLocation));
                        FileUtils.deleteDirectory(child);
                        break;
                    case Log:
                        logger.debug("Detected unnormalized " + oldLocation + ", s.b. " + location);
                        break;
                    }
                }
                break;
            }
            location = url + location;
            subUrls.add(location);
        } catch (IOException e) {
            logger.error("Normalizing (" + unnormMode + ") " + child, e);
        } catch (NullPointerException ex) {
            logger.error("Normalizing (" + unnormMode + ") " + child, ex);
        }
    }

    int listSize;
    if (filter == null) {
        listSize = subUrls.size();
    } else {
        // give a reasonable minimum since, if it's filtered, the array size
        // may be much smaller than the total children, particularly in very
        // flat trees
        listSize = Math.min(40, subUrls.size());
    }

    // generate the arraylist with urls and return
    ArrayList childL = new ArrayList(listSize);
    for (String childUrl : subUrls) {
        if ((filter == null) || (filter.matches(childUrl))) {
            try {
                RepositoryNode node = repository.getNode(childUrl);
                if (node == null)
                    continue;
                // add all nodes which are internal or active leaves
                // deleted nodes never included
                // boolean activeInternal = !node.isLeaf() && !node.isDeleted();
                // boolean activeLeaf = node.isLeaf() && !node.isDeleted() &&
                // (!node.isContentInactive() || includeInactive);
                // if (activeInternal || activeLeaf) {
                if (!node.isDeleted() && (!node.isContentInactive() || (includeInactive || !node.isLeaf()))) {
                    childL.add(node);
                }
            } catch (MalformedURLException ignore) {
                // this can safely skip bad files because they will
                // eventually be trimmed by the repository integrity checker
                // and the content will be replaced by a poll repair
                logger.error("Malformed child url: " + childUrl);
            }
        }
    }
    return childL;
}

From source file:com.stehno.sanctuary.core.archive.DefaultFileArchiver.java

@Override
public MessageSet archiveChanges(final ChangeSet changeSet) {
    if (log.isDebugEnabled())
        log.debug("Archiving changes for " + changeSet.getRootDirectory() + "...");

    final MessageSet messageSet = new MessageSet(changeSet.getRootDirectory().getPath());

    final Queue<Future> futures = new LinkedList<Future>();

    int count = 0;
    for (final File file : changeSet.listFiles(FileStatus.NEW)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.addFile(changeSet.getRootDirectory(), file);
                    localStore.storeFile(file);
                    messageSet.addMessage(file.getPath(), "Added successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Add failed: " + ex.getMessage());
                }
            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Adds: " + count);

    count = 0;
    for (final File file : changeSet.listFiles(FileStatus.MODIFIED)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.updateFile(changeSet.getRootDirectory(), file);
                    localStore.storeFile(file);
                    messageSet.addMessage(file.getPath(), "Updated successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Update failed: " + ex.getMessage());
                }

            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Updates: " + count);

    count = 0;
    for (final File file : changeSet.listFiles(FileStatus.DELETED)) {
        futures.add(executor.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    remoteStore.deleteFile(changeSet.getRootDirectory(), file);
                    localStore.removeFile(file);
                    messageSet.addMessage(file.getPath(), "Deleted successfully");

                } catch (Exception ex) {
                    messageSet.addError(file.getPath(), "Delete failed: " + ex.getMessage());
                }

            }
        }));
        count++;
    }
    if (log.isDebugEnabled())
        log.debug("Scheduled Deletes: " + count);

    do {
        while (!futures.isEmpty()) {
            if (futures.peek().isDone()) {
                futures.poll();
            }
        }

        if (collectionWaitTime > 0) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException ie) {
            }
        }

    } while (!futures.isEmpty());

    return messageSet;
}
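
The completion loop above busy-waits, polling futures off the queue only once isDone() reports true. A possible blocking alternative is to wait on each future with Future.get(); the helper below is a hypothetical sketch, not part of the Sanctuary code:

import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class FutureDrainSketch {
    /** Blocks until every queued future has completed. */
    static void awaitAll(Queue<Future<?>> futures) {
        while (!futures.isEmpty()) {
            Future<?> future = futures.poll();
            try {
                future.get(); // blocks until this task completes
            } catch (ExecutionException ex) {
                // the task itself failed; in the example above, errors were
                // already recorded in messageSet, so just move on
            } catch (InterruptedException ie) {
                Thread.currentThread().interrupt(); // restore interrupt status
                return;
            }
        }
    }
}

Blocking on get() avoids spinning a CPU core while tasks are still running.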

From source file:nl.b3p.viewer.config.services.WMSService.java

/**
 * Update the tree structure of Layers by following the tree structure and
 * setting the parent and children accordingly. Reuses entities for layers
 * which are UNMODIFIED or UPDATED and inserts new entities for NEW layers.
 * <p>
 * Because virtual layers with null name cannot be updated, those are always
 * recreated and user set properties are lost, except those set on the top
 * layer which are preserved.
 * <p>
 * Interface should disallow setting user properties (especially authorizations)
 * on virtual layers.
 */
private void updateLayerTree(final WMSService update, final UpdateResult result) {

    Layer newTopLayer;

    String topLayerName = update.getTopLayer().getName();
    if (topLayerName == null) {
        // Start with a new no name topLayer
        newTopLayer = update.getTopLayer().pluckCopy();
    } else {
        // Old persistent top layer or new plucked copy from updated service
        newTopLayer = result.getLayerStatus().get(topLayerName).getLeft();
    }

    // Copy user set stuff over from old toplayer, even if name was changed
    // or topLayer has no name
    newTopLayer.copyUserModifiedProperties(getTopLayer());

    newTopLayer.setParent(null);
    newTopLayer.setService(this);
    newTopLayer.getChildren().clear();
    setTopLayer(newTopLayer);

    // Do a breadth-first traversal to set the parent and fill the children
    // list of all layers.
    // For the breadth-first traversal save layers from updated service to
    // visit with their (possibly persistent) parent layers from this service

    // XXX why did we need BFS?

    Queue<Pair<Layer, Layer>> q = new LinkedList();

    // Start at children of topLayer from updated service, topLayer handled
    // above
    for (Layer child : update.getTopLayer().getChildren()) {
        q.add(new ImmutablePair(child, newTopLayer));
    }

    Set<String> visitedLayerNames = new HashSet();

    do {
        // Remove from head of queue
        Pair<Layer, Layer> p = q.remove();

        Layer updateLayer = p.getLeft(); // layer from updated service
        Layer parent = p.getRight(); // parent layer from this

        Layer thisLayer;
        String layerName = updateLayer.getName();
        if (layerName == null) {
            // 'New' no name layer - we can't possibly guess if it is
            // the same as an already existing no name layer so always
            // new entity
            thisLayer = updateLayer.pluckCopy();
        } else {

            if (visitedLayerNames.contains(layerName)) {
                // Duplicate layer in updated service -- ignore this one
                thisLayer = null;
            } else {
                // Find possibly already persistent updated layer
                // (depth first) - if new already a pluckCopy()
                thisLayer = result.getLayerStatus().get(layerName).getLeft();
                visitedLayerNames.add(layerName);
            }
        }

        if (thisLayer != null) {
            thisLayer.setService(this);
            thisLayer.setParent(parent);
            parent.getChildren().add(thisLayer);
        }

        for (Layer child : updateLayer.getChildren()) {
            // Add at end of queue
            q.add(new ImmutablePair(child, thisLayer));
        }
    } while (!q.isEmpty());
}

From source file:org.zkoss.ganttz.data.GanttDiagramGraph.java

List<Recalculation> getRecalculationsNeededFrom(V task) {
    List<Recalculation> result = new ArrayList<>();
    Set<Recalculation> parentRecalculationsAlreadyDone = new HashSet<>();
    Recalculation first = recalculationFor(allPointsPotentiallyModified(task));
    first.couldHaveBeenModifiedBeforehand();

    result.addAll(getParentsRecalculations(parentRecalculationsAlreadyDone, first.taskPoint));
    result.add(first);

    Queue<Recalculation> pendingOfVisit = new LinkedList<>();
    pendingOfVisit.offer(first);

    Map<Recalculation, Recalculation> alreadyVisited = new HashMap<>();
    alreadyVisited.put(first, first);

    while (!pendingOfVisit.isEmpty()) {

        Recalculation current = pendingOfVisit.poll();

        for (TaskPoint each : current.taskPoint.getImmediateSuccessors()) {

            if (each.isImmediatelyDerivedFrom(current.taskPoint)) {
                continue;
            }

            Recalculation recalculationToAdd = getRecalcualtionToAdd(each, alreadyVisited);
            recalculationToAdd.comesFromPredecessor(current);

            if (!alreadyVisited.containsKey(recalculationToAdd)) {
                result.addAll(getParentsRecalculations(parentRecalculationsAlreadyDone, each));
                result.add(recalculationToAdd);
                pendingOfVisit.offer(recalculationToAdd);
                alreadyVisited.put(recalculationToAdd, recalculationToAdd);
            }
        }
    }

    return topologicalSorter.sort(result);
}

From source file:edu.emory.cci.aiw.umls.UMLSDatabaseConnection.java

@Override
public int getDistBF(ConceptUID cui1, ConceptUID cui2, String rela, SAB sab, int maxR)
        throws UMLSQueryException {
    Queue<ConceptUID> cuiQue = new LinkedList<ConceptUID>();
    Set<ConceptUID> visited = new HashSet<ConceptUID>();
    Map<Integer, Integer> radiusIdx = new HashMap<Integer, Integer>();
    int queIdx = 0;
    int r = 0;
    radiusIdx.put(r, 0);

    if (maxR <= 0) {
        maxR = 3;
    }

    try {
        setupConn();
        cuiQue.add(cui1);
        visited.add(cui1);

        List<UMLSQuerySearchUID> params = new ArrayList<UMLSQuerySearchUID>();
        StringBuilder sql = new StringBuilder(
                "select distinct(CUI2) from MRREL where CUI1 = ? and (rel='PAR' or rel='CHD')");
        params.add(ConceptUID.EMPTY_CUI);
        if (sab != null) {
            sql.append(" and SAB = ?");
            params.add(sab);
        }
        if (rela != null && !rela.equals("")) {
            sql.append(" and RELA = ?");
            params.add(UMLSQueryStringValue.fromString(rela));
        }

        while (!cuiQue.isEmpty()) {
            ConceptUID node = cuiQue.remove();
            params.set(0, node);
            if (node.equals(cui2)) {
                return r;
            }

            List<ConceptUID> adjNodes = new ArrayList<ConceptUID>();

            ResultSet rs = executeAndLogQuery(substParams(sql.toString(), params));
            while (rs.next()) {
                ConceptUID c2 = ConceptUID.fromString(rs.getString(1));
                if (!visited.contains(c2)) {
                    adjNodes.add(c2);
                }
            }

            if (!radiusIdx.containsKey(r + 1)) {
                radiusIdx.put(r + 1, queIdx + cuiQue.size());
            }
            radiusIdx.put(r + 1, adjNodes.size());

            if (queIdx == radiusIdx.get(r)) {
                r++;
            }
            queIdx++;

            for (ConceptUID c : adjNodes) {
                visited.add(c);
                cuiQue.add(c);
            }
            if (r > maxR) {
                return r;
            }
        }
    } catch (SQLException sqle) {
        throw new UMLSQueryException(sqle);
    } catch (MalformedUMLSUniqueIdentifierException muuie) {
        throw new UMLSQueryException(muuie);
    } finally {
        tearDownConn();
    }

    log(Level.FINEST, "Returning -1");
    return -1;
}
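
The radiusIdx and queIdx bookkeeping above tracks how far the breadth-first search has expanded from cui1. The same distance can also be computed by processing the queue one level at a time, using the queue's size at the start of each round. The sketch below is generic, not the UMLS implementation: an in-memory adjacency map stands in for the MRREL query, and plain integers stand in for ConceptUIDs:

import java.util.*;

public class BfsDistanceSketch {
    /** Returns the number of edges between start and goal, or -1 if not reachable within maxRadius. */
    static int distance(int start, int goal, Map<Integer, List<Integer>> graph, int maxRadius) {
        Queue<Integer> queue = new LinkedList<>();
        Set<Integer> visited = new HashSet<>();
        queue.add(start);
        visited.add(start);
        int radius = 0;
        while (!queue.isEmpty() && radius <= maxRadius) {
            int levelSize = queue.size(); // everything queued now sits at this radius
            for (int i = 0; i < levelSize; i++) {
                int node = queue.remove();
                if (node == goal) {
                    return radius;
                }
                for (int neighbor : graph.getOrDefault(node, Collections.emptyList())) {
                    if (visited.add(neighbor)) { // add() is false if already visited
                        queue.add(neighbor);
                    }
                }
            }
            radius++;
        }
        return -1;
    }
}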

From source file:org.kuali.rice.krad.service.impl.DictionaryValidationServiceImpl.java

/**
 * process constraints for the provided value using the provided constraint processors
 *
 * @param result - used to store the validation results
 * @param value - the object on which constraints are to be processed - a collection or the value of an attribute
 * @param definition - a Data Dictionary definition e.g. {@code ComplexAttributeDefinition} or {@code
 * CollectionDefinition}
 * @param attributeValueReader - a class that encapsulate access to both dictionary metadata and object field
 * values
 * @param doOptionalProcessing - true if the validation should do optional validation, false otherwise
 */
@SuppressWarnings("unchecked")
private void processConstraints(DictionaryValidationResult result,
        List<? extends ConstraintProcessor> constraintProcessors, Object value, Constrainable definition,
        AttributeValueReader attributeValueReader, boolean doOptionalProcessing, String validationState,
        StateMapping stateMapping) {
    //TODO: Implement custom validators

    if (constraintProcessors != null) {
        Constrainable selectedDefinition = definition;
        AttributeValueReader selectedAttributeValueReader = attributeValueReader;

        // First - take the constrainable definition and get its constraints

        Queue<Constraint> constraintQueue = new LinkedList<Constraint>();

        // Using a for loop to iterate through constraint processors because ordering is important
        for (ConstraintProcessor<Object, Constraint> processor : constraintProcessors) {

            // Let the calling method opt out of any optional processing
            if (!doOptionalProcessing && processor.isOptional()) {
                result.addSkipped(attributeValueReader, processor.getName());
                continue;
            }

            Class<? extends Constraint> constraintType = processor.getConstraintType();

            // Add all of the constraints for this constraint type for all providers to the queue
            for (ConstraintProvider constraintProvider : constraintProviders) {
                if (constraintProvider.isSupported(selectedDefinition)) {
                    Collection<Constraint> constraintList = constraintProvider
                            .getConstraints(selectedDefinition, constraintType);
                    if (constraintList != null) {
                        constraintQueue.addAll(constraintList);
                    }
                }
            }

            // If there are no constraints provided for this definition, then just skip it
            if (constraintQueue.isEmpty()) {
                result.addSkipped(attributeValueReader, processor.getName());
                continue;
            }

            Collection<Constraint> additionalConstraints = new LinkedList<Constraint>();

            // This loop is functionally identical to a for loop, but it has the advantage of letting us keep the queue around
            // and populate it with any new constraints contributed by the processor
            while (!constraintQueue.isEmpty()) {

                Constraint constraint = constraintQueue.poll();

                // If this constraint is not one that this processor handles, then skip it and add it to the queue for the next processor;
                // obviously this would be redundant (we're only looking at constraints that this processor can process) except that
                // the previous processor might have stuck a new constraint (or constraints) on the queue
                if (!constraintType.isInstance(constraint)) {
                    result.addSkipped(attributeValueReader, processor.getName());
                    additionalConstraints.add(constraint);
                    continue;
                }

                constraint = ConstraintStateUtils.getApplicableConstraint(constraint, validationState,
                        stateMapping);

                if (constraint != null) {
                    ProcessorResult processorResult = processor.process(result, value, constraint,
                            selectedAttributeValueReader);

                    Collection<Constraint> processorResultConstraints = processorResult.getConstraints();
                    if (processorResultConstraints != null && processorResultConstraints.size() > 0) {
                        constraintQueue.addAll(processorResultConstraints);
                    }

                    // Change the selected definition to whatever was returned from the processor
                    if (processorResult.isDefinitionProvided()) {
                        selectedDefinition = processorResult.getDefinition();
                    }
                    // Change the selected attribute value reader to whatever was returned from the processor
                    if (processorResult.isAttributeValueReaderProvided()) {
                        selectedAttributeValueReader = processorResult.getAttributeValueReader();
                    }
                }
            }

            // After iterating through all the constraints for this processor, add the ones that weren't consumed by this processor to the queue
            constraintQueue.addAll(additionalConstraints);
        }
    }
}
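
A detail worth noting in processConstraints: the processors may push new constraints onto the queue while it is being drained, which is exactly what the while/isEmpty/poll idiom supports and what an Iterator-based loop would reject with a ConcurrentModificationException. A stripped-down sketch of that self-feeding queue pattern (the task strings here are purely illustrative):

import java.util.LinkedList;
import java.util.Queue;

public class SelfFeedingQueueSketch {
    public static void main(String[] args) {
        Queue<String> tasks = new LinkedList<>();
        tasks.add("validate:root");

        while (!tasks.isEmpty()) {
            String task = tasks.poll();
            System.out.println("processing " + task);
            // processing one task may discover follow-up tasks and enqueue them
            if (task.equals("validate:root")) {
                tasks.add("validate:child-1");
                tasks.add("validate:child-2");
            }
        }
    }
}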