Example usage for java.util Deque isEmpty

Introduction

On this page you can find example usage for java.util Deque isEmpty.

Prototype

boolean isEmpty();

Document

Returns true if this collection contains no elements.
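
The snippet below is not taken from any of the projects listed under Usage; it is a minimal, self-contained sketch that just illustrates the contract: an ArrayDeque reports empty before anything is pushed and again once every element has been popped.

import java.util.ArrayDeque;
import java.util.Deque;

public class DequeIsEmptyDemo {
    public static void main(String[] args) {
        Deque<String> stack = new ArrayDeque<String>();
        System.out.println(stack.isEmpty()); // true: nothing pushed yet

        stack.push("first");
        stack.push("second");
        System.out.println(stack.isEmpty()); // false: two elements present

        // isEmpty() is the usual guard when draining a deque
        while (!stack.isEmpty()) {
            System.out.println("popped: " + stack.pop());
        }
        System.out.println(stack.isEmpty()); // true again
    }
}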

Usage

From source file:net.sf.jasperreports.engine.json.expression.member.evaluation.ObjectKeyExpressionEvaluator.java

private List<JRJsonNode> goAnywhereDown(JRJsonNode jrJsonNode) {
    if (log.isDebugEnabled()) {
        log.debug("going " + MemberExpression.DIRECTION.ANYWHERE_DOWN + " by "
                + (expression.isWildcard() ? "wildcard" : "key: [" + expression.getObjectKey() + "]") + " on "
                + jrJsonNode.getDataNode());
    }

    List<JRJsonNode> result = new ArrayList<>();
    Deque<JRJsonNode> stack = new ArrayDeque<>();
    JsonNode initialDataNode = jrJsonNode.getDataNode();

    if (log.isDebugEnabled()) {
        log.debug("initial stack population with: " + initialDataNode);
    }

    // populate the stack initially
    if (initialDataNode.isArray()) {
        for (JsonNode deeper : initialDataNode) {
            stack.addLast(jrJsonNode.createChild(deeper));
        }
    } else {
        stack.push(jrJsonNode);
    }

    while (!stack.isEmpty()) {
        JRJsonNode stackNode = stack.pop();
        JsonNode stackDataNode = stackNode.getDataNode();

        addChildrenToStack(stackNode, stack);

        if (log.isDebugEnabled()) {
            log.debug("processing stack element: " + stackDataNode);
        }

        // process the current stack item
        if (stackDataNode.isObject()) {
            if (log.isDebugEnabled()) {
                log.debug("stack element is object; wildcard: " + expression.isWildcard());
            }

            // if wildcard => only filter the parent; we already added the object keys to the stack
            if (expression.isWildcard()) {
                if (applyFilter(stackNode)) {
                    result.add(stackNode);
                }
            }
            // else go down and filter
            else {
                JRJsonNode deeperNode = goDeeperIntoObjectNode(stackNode, false);
                if (deeperNode != null) {
                    result.add(deeperNode);
                }
            }
        } else if (stackDataNode.isValueNode() || stackDataNode.isArray()) {
            if (log.isDebugEnabled()) {
                log.debug("stack element is " + (stackDataNode.isValueNode() ? "value node" : "array")
                        + "; wildcard: " + expression.isWildcard());
            }

            if (expression.isWildcard()) {
                if (applyFilter(stackNode)) {
                    result.add(stackNode);
                }
            }
        }
    }

    return result;
}
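
The same worklist idiom can be reduced to a few lines. The following is a simplified, hypothetical sketch of the pattern used above: push a root onto an ArrayDeque, then keep popping and pushing children until isEmpty() signals that the traversal is finished. The Node type is invented for illustration and is not part of the JasperReports code.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// hypothetical tree node, only for this sketch
class Node {
    final String name;
    final List<Node> children = new ArrayList<Node>();
    Node(String name) { this.name = name; }
}

class DepthFirstCollector {
    static List<String> collectNames(Node root) {
        List<String> result = new ArrayList<String>();
        Deque<Node> stack = new ArrayDeque<Node>();
        stack.push(root);
        while (!stack.isEmpty()) {      // drain the worklist
            Node current = stack.pop();
            result.add(current.name);
            for (Node child : current.children) {
                stack.push(child);      // children are processed in later iterations
            }
        }
        return result;
    }
}

Because push and pop both work on the head of the deque, the deque behaves as a LIFO stack, so the traversal is depth-first, just as in goAnywhereDown above.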

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * Perform a bulk load of the given directory into the given pre-existing table. This method is
 * not threadsafe.
 * @param hfofDir the directory that was provided as the output path of a job using
 *          HFileOutputFormat
 * @param table the table to load into
 * @throws TableNotFoundException if table does not yet exist
 */
public void doBulkLoad(Path hfofDir, final HTable table) throws TableNotFoundException, IOException {
    final HConnection conn = table.getConnection();

    if (!conn.isTableAvailable(table.getTableName())) {
        throw new TableNotFoundException(
                "Table " + Bytes.toStringBinary(table.getTableName()) + "is not currently available.");
    }

    // initialize thread pools
    int nrThreads = cfg.getInt("hbase.loadincremental.threads.max", Runtime.getRuntime().availableProcessors());
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("LoadIncrementalHFiles-%1$d");
    ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), builder.build());
    ((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);

    // LQI queue does not need to be threadsafe -- all operations on this queue
    // happen in this thread
    Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
    try {
        discoverLoadQueue(queue, hfofDir);
        int count = 0;

        if (queue.isEmpty()) {
            LOG.warn("Bulk load operation did not find any files to load in " + "directory " + hfofDir.toUri()
                    + ".  Does it contain files in "
                    + "subdirectories that correspond to column family names?");
            return;
        }

        // Assumes that region splits can happen while this occurs.
        while (!queue.isEmpty()) {
            // need to reload split keys each iteration.
            final Pair<byte[][], byte[][]> startEndKeys = table.getStartEndKeys();
            if (count != 0) {
                LOG.info("Split occured while grouping HFiles, retry attempt " + +count + " with "
                        + queue.size() + " files remaining to group or split");
            }

            int maxRetries = cfg.getInt("hbase.bulkload.retries.number", 0);
            if (maxRetries != 0 && count >= maxRetries) {
                LOG.error("Retry attempted " + count + " times without completing, bailing out");
                return;
            }
            count++;

            // Using ByteBuffer for byte[] equality semantics
            Multimap<ByteBuffer, LoadQueueItem> regionGroups = groupOrSplitPhase(table, pool, queue,
                    startEndKeys);

            bulkLoadPhase(table, conn, pool, queue, regionGroups);

            // NOTE: The next iteration's split / group could happen in parallel to
            // atomic bulkloads assuming that there are splits and no merges, and
            // that we can atomically pull out the groups we want to retry.
        }

    } finally {
        pool.shutdown();
        if (queue != null && !queue.isEmpty()) {
            StringBuilder err = new StringBuilder();
            err.append("-------------------------------------------------\n");
            err.append("Bulk load aborted with some files not yet loaded:\n");
            err.append("-------------------------------------------------\n");
            for (LoadQueueItem q : queue) {
                err.append("  ").append(q.hfilePath).append('\n');
            }
            LOG.error(err);
        }
    }
}
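
A much simplified sketch of the retry loop above: items stay on the deque until they are handled, failed items are re-queued, and isEmpty() together with a retry budget decides when to stop. The tryLoad helper and the per-item counter are illustrative only and do not correspond to any HBase API.

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;

class RetryingWorklist {

    // drain the queue, putting failed items back for another try,
    // and bail out once the retry budget is exhausted
    static void drainWithRetries(Deque<String> queue, int maxRetries) {
        int attempt = 0;
        while (!queue.isEmpty()) {
            if (maxRetries > 0 && attempt >= maxRetries) {
                System.err.println(queue.size() + " item(s) left after " + attempt + " attempts");
                return;
            }
            attempt++;
            String item = queue.removeFirst();
            if (!tryLoad(item)) {
                queue.addLast(item);   // failed item goes back on the queue
            }
        }
    }

    // stand-in for a real load attempt; here everything succeeds on the first try
    private static boolean tryLoad(String item) {
        System.out.println("loading " + item);
        return true;
    }

    public static void main(String[] args) {
        drainWithRetries(new ArrayDeque<String>(Arrays.asList("f1", "f2", "f3")), 10);
    }
}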

From source file:org.apache.hadoop.hive.ql.parse.spark.GenSparkUtils.java

public void removeUnionOperators(Configuration conf, GenSparkProcContext context, BaseWork work)
        throws SemanticException {

    List<Operator<?>> roots = new ArrayList<Operator<?>>();

    // For MapWork, getAllRootOperators is not suitable, since it checks
    // getPathToAliases, and will return null if this is empty. Here we are
    // replacing getAliasToWork, so should use that information instead.
    if (work instanceof MapWork) {
        roots.addAll(((MapWork) work).getAliasToWork().values());
    } else {
        roots.addAll(work.getAllRootOperators());
    }
    if (work.getDummyOps() != null) {
        roots.addAll(work.getDummyOps());
    }

    // need to clone the plan.
    List<Operator<?>> newRoots = Utilities.cloneOperatorTree(conf, roots);

    // Build a map to map the original FileSinkOperator and the cloned FileSinkOperators
    // This map is used for set the stats flag for the cloned FileSinkOperators in later process
    Iterator<Operator<?>> newRootsIt = newRoots.iterator();
    for (Operator<?> root : roots) {
        Operator<?> newRoot = newRootsIt.next();
        List<Operator<?>> newOpQueue = new LinkedList<Operator<?>>();
        collectOperators(newRoot, newOpQueue);
        List<Operator<?>> opQueue = new LinkedList<Operator<?>>();
        collectOperators(root, opQueue);
        Iterator<Operator<?>> newOpQueueIt = newOpQueue.iterator();
        for (Operator<?> op : opQueue) {
            Operator<?> newOp = newOpQueueIt.next();

            // We need to update rootToWorkMap in case the op is a key, since even
            // though we clone the op tree, we're still using the same MapWork/ReduceWork.
            if (context.rootToWorkMap.containsKey(op)) {
                context.rootToWorkMap.put(newOp, context.rootToWorkMap.get(op));
            }
            // Don't remove the old entry - in SparkPartitionPruningSink it still
            // refers to the old TS, and we need to lookup it later in
            // processPartitionPruningSink.

            if (op instanceof FileSinkOperator) {
                List<FileSinkOperator> fileSinkList = context.fileSinkMap.get(op);
                if (fileSinkList == null) {
                    fileSinkList = new LinkedList<FileSinkOperator>();
                }
                fileSinkList.add((FileSinkOperator) newOp);
                context.fileSinkMap.put((FileSinkOperator) op, fileSinkList);
            } else if (op instanceof SparkPartitionPruningSinkOperator) {
                SparkPartitionPruningSinkOperator oldPruningSink = (SparkPartitionPruningSinkOperator) op;
                SparkPartitionPruningSinkOperator newPruningSink = (SparkPartitionPruningSinkOperator) newOp;
                newPruningSink.getConf().setTableScan(oldPruningSink.getConf().getTableScan());
                context.pruningSinkSet.add(newPruningSink);
                context.pruningSinkSet.remove(oldPruningSink);
            }
        }
    }

    // we're cloning the operator plan but we're retaining the original work. That means
    // that root operators have to be replaced with the cloned ops. The replacement map
    // tells you what that mapping is.
    Map<Operator<?>, Operator<?>> replacementMap = new HashMap<Operator<?>, Operator<?>>();

    // there's some special handling for dummyOps required. Mapjoins won't be properly
    // initialized if their dummy parents aren't initialized. Since we cloned the plan
    // we need to replace the dummy operators in the work with the cloned ones.
    List<HashTableDummyOperator> dummyOps = new LinkedList<HashTableDummyOperator>();

    Iterator<Operator<?>> it = newRoots.iterator();
    for (Operator<?> orig : roots) {
        Operator<?> newRoot = it.next();
        if (newRoot instanceof HashTableDummyOperator) {
            dummyOps.add((HashTableDummyOperator) newRoot);
            it.remove();
        } else {
            replacementMap.put(orig, newRoot);
        }
    }

    // now we remove all the unions. we throw away any branch that's not reachable from
    // the current set of roots. The reason is that those branches will be handled in
    // different tasks.
    Deque<Operator<?>> operators = new LinkedList<Operator<?>>();
    operators.addAll(newRoots);

    Set<Operator<?>> seen = new HashSet<Operator<?>>();

    while (!operators.isEmpty()) {
        Operator<?> current = operators.pop();
        seen.add(current);

        if (current instanceof UnionOperator) {
            Operator<?> parent = null;
            int count = 0;

            for (Operator<?> op : current.getParentOperators()) {
                if (seen.contains(op)) {
                    ++count;
                    parent = op;
                }
            }

            // we should have been able to reach the union from only one side.
            Preconditions.checkArgument(count <= 1,
                    "AssertionError: expected count to be <= 1, but was " + count);

            if (parent == null) {
                // root operator is union (can happen in reducers)
                replacementMap.put(current, current.getChildOperators().get(0));
            } else {
                parent.removeChildAndAdoptItsChildren(current);
            }
        }

        if (current instanceof FileSinkOperator || current instanceof ReduceSinkOperator) {
            current.setChildOperators(null);
        } else {
            operators.addAll(current.getChildOperators());
        }
    }
    work.setDummyOps(dummyOps);
    work.replaceRoots(replacementMap);
}

From source file:com.streamsets.datacollector.definition.ConfigDefinitionExtractor.java

void resolveDependencies(String configPrefix, List<ConfigDefinition> defs, Object contextMsg) {
    Map<String, ConfigDefinition> definitionsMap = new HashMap<>();
    Map<String, Map<String, Set<Object>>> dependencyMap = new HashMap<>();
    Map<String, Boolean> isFullyProcessed = new HashMap<>();
    for (ConfigDefinition def : defs) {
        definitionsMap.put(def.getName(), def);
        dependencyMap.put(def.getName(), new HashMap<String, Set<Object>>());
        isFullyProcessed.put(def.getName(), false);
    }

    cycles.clear();

    for (ConfigDefinition def : defs) {
        String dependsOnKey = def.getDependsOn();
        if (!StringUtils.isEmpty(dependsOnKey)) {
            verifyDependencyExists(definitionsMap, def, dependsOnKey, contextMsg);
            ConfigDefinition dependsOnDef = definitionsMap.get(dependsOnKey);
            // evaluate dependsOn triggers
            ConfigDef annotation = def.getConfigField().getAnnotation(ConfigDef.class);
            Set<Object> triggers = new HashSet<>();
            for (String trigger : annotation.triggeredByValue()) {
                triggers.add(ConfigValueExtractor.get().extract(dependsOnDef.getConfigField(),
                        dependsOnDef.getType(), trigger, contextMsg, true));
            }
            dependencyMap.get(def.getName()).put(dependsOnDef.getName(), triggers);
        }
        // Add direct dependencies to dependencyMap
        if (!def.getDependsOnMap().isEmpty()) {
            // Copy same as above.
            for (Map.Entry<String, List<Object>> dependsOn : def.getDependsOnMap().entrySet()) {
                dependsOnKey = dependsOn.getKey();
                if (!StringUtils.isEmpty(dependsOnKey)) {
                    verifyDependencyExists(definitionsMap, def, dependsOnKey, contextMsg);
                    Set<Object> triggers = new HashSet<>();
                    ConfigDefinition dependsOnDef = definitionsMap.get(dependsOnKey);
                    for (Object trigger : dependsOn.getValue()) {
                        triggers.add(ConfigValueExtractor.get().extract(dependsOnDef.getConfigField(),
                                dependsOnDef.getType(), (String) trigger, contextMsg, true));
                    }
                    Map<String, Set<Object>> dependencies = dependencyMap.get(def.getName());
                    if (dependencies.containsKey(dependsOnKey)) {
                        dependencies.get(dependsOnKey).addAll(triggers);
                    } else {
                        dependencies.put(dependsOnKey, triggers);
                    }
                }
            }
        }
    }

    for (ConfigDefinition def : defs) {

        if (isFullyProcessed.get(def.getName())) {
            continue;
        }
        // Now find all indirect dependencies
        Deque<StackNode> stack = new ArrayDeque<>();
        stack.push(new StackNode(def, new LinkedHashSet<String>()));
        while (!stack.isEmpty()) {
            StackNode current = stack.peek();
            // We processed this one's dependencies before, don't bother adding its children
            // The dependencies of this one have all been processed
            if (current.childrenAddedToStack) {
                stack.pop();
                Map<String, Set<Object>> currentDependencies = dependencyMap.get(current.def.getName());
                Set<String> children = new HashSet<>(current.def.getDependsOnMap().keySet());
                for (String child : children) {
                    if (StringUtils.isEmpty(child)) {
                        continue;
                    }
                    Map<String, Set<Object>> depsOfChild = dependencyMap.get(child);
                    for (Map.Entry<String, Set<Object>> depOfChild : depsOfChild.entrySet()) {
                        if (currentDependencies.containsKey(depOfChild.getKey())) {
                            // Add only the common trigger values,
                            // since it has to be one of those for both these to be triggered.
                            Set<Object> currentTriggers = currentDependencies.get(depOfChild.getKey());
                            Set<Object> childTriggers = depOfChild.getValue();
                            currentDependencies.put(depOfChild.getKey(),
                                    Sets.intersection(currentTriggers, childTriggers));
                        } else {
                            currentDependencies.put(depOfChild.getKey(), new HashSet<>(depOfChild.getValue()));
                        }
                    }
                }
                isFullyProcessed.put(current.def.getName(), true);
            } else {
                Set<String> children = current.def.getDependsOnMap().keySet();
                String dependsOn = current.def.getDependsOn();
                LinkedHashSet<String> dependencyAncestors = new LinkedHashSet<>(current.ancestors);
                dependencyAncestors.add(current.def.getName());
                if (!StringUtils.isEmpty(dependsOn) && !isFullyProcessed.get(current.def.getDependsOn())
                        && !detectCycle(dependencyAncestors, cycles, dependsOn)) {
                    stack.push(
                            new StackNode(definitionsMap.get(current.def.getDependsOn()), dependencyAncestors));
                }
                for (String child : children) {
                    if (!StringUtils.isEmpty(child) && !isFullyProcessed.get(child)
                            && !detectCycle(dependencyAncestors, cycles, child)) {
                        stack.push(new StackNode(definitionsMap.get(child), dependencyAncestors));
                    }
                }
                current.childrenAddedToStack = true;
            }
        }
    }
    Preconditions.checkState(cycles.isEmpty(),
            "The following cycles were detected in the configuration dependencies:\n"
                    + Joiner.on("\n").join(cycles));
    for (Map.Entry<String, Map<String, Set<Object>>> entry : dependencyMap.entrySet()) {
        Map<String, List<Object>> dependencies = new HashMap<>();
        definitionsMap.get(entry.getKey()).setDependsOnMap(dependencies);
        for (Map.Entry<String, Set<Object>> trigger : entry.getValue().entrySet()) {
            List<Object> triggerValues = new ArrayList<>();
            triggerValues.addAll(trigger.getValue());
            dependencies.put(trigger.getKey(), triggerValues);
        }
        definitionsMap.get(entry.getKey()).setDependsOn("");
    }
}

From source file:io.anserini.index.IndexWebCollection.java

public int indexWithThreads(int numThreads) throws IOException, InterruptedException {

    LOG.info("Indexing with " + numThreads + " threads to directory '" + indexPath.toAbsolutePath() + "'...");

    final Directory dir = FSDirectory.open(indexPath);

    final IndexWriterConfig iwc = new IndexWriterConfig(new EnglishAnalyzer());

    iwc.setSimilarity(new BM25Similarity());
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(512);
    iwc.setUseCompoundFile(false);
    iwc.setMergeScheduler(new ConcurrentMergeScheduler());

    final IndexWriter writer = new IndexWriter(dir, iwc);

    final ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);
    final String suffix = Collection.GOV2.equals(collection) ? ".gz" : ".warc.gz";
    final Deque<Path> warcFiles = discoverWarcFiles(docDir, suffix);

    // trim the work list down to the configured document limit, if one is set
    if (doclimit > 0 && warcFiles.size() > doclimit)
        while (warcFiles.size() > doclimit)
            warcFiles.removeFirst();

    long totalWarcFiles = warcFiles.size();
    LOG.info(totalWarcFiles + " " + suffix + " files found under the docs path: " + docDir.toString());

    for (int i = 0; i < 2000; i++) {
        if (!warcFiles.isEmpty())
            executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
        else {
            if (!executor.isShutdown()) {
                Thread.sleep(30000);
                executor.shutdown();
            }
            break;
        }
    }

    long first = 0;
    // add a short delay to let the scheduler spawn some worker threads
    Thread.sleep(30000);

    try {
        // Wait for existing tasks to terminate
        while (!executor.awaitTermination(1, TimeUnit.MINUTES)) {

            final long completedTaskCount = executor.getCompletedTaskCount();

            LOG.info(String.format("%.2f percentage completed",
                    (double) completedTaskCount / totalWarcFiles * 100.0d));

            if (!warcFiles.isEmpty())
                for (long i = first; i < completedTaskCount; i++) {
                    if (!warcFiles.isEmpty())
                        executor.execute(new IndexerThread(writer, warcFiles.removeFirst()));
                    else {
                        if (!executor.isShutdown())
                            executor.shutdown();
                    }
                }

            first = completedTaskCount;
            Thread.sleep(1000);
        }
    } catch (InterruptedException ie) {
        // (Re-)Cancel if current thread also interrupted
        executor.shutdownNow();
        // Preserve interrupt status
        Thread.currentThread().interrupt();
    }

    if (totalWarcFiles != executor.getCompletedTaskCount())
        throw new RuntimeException("totalWarcFiles = " + totalWarcFiles
                + " is not equal to completedTaskCount =  " + executor.getCompletedTaskCount());

    int numIndexed = writer.maxDoc();

    try {
        writer.commit();
        if (optimize)
            writer.forceMerge(1);
    } finally {
        writer.close();
    }

    return numIndexed;
}
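
The core of the loop above is: feed an executor from a Deque of files and use isEmpty() to decide when there is nothing left to submit. Below is a self-contained sketch of just that part, with made-up file names and none of the progress accounting.

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

class DequeFedExecutor {
    public static void main(String[] args) throws InterruptedException {
        Deque<String> warcFiles = new ArrayDeque<String>(Arrays.asList("a.warc.gz", "b.warc.gz", "c.warc.gz"));
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // hand work to the pool until the deque reports empty
        while (!warcFiles.isEmpty()) {
            final String file = warcFiles.removeFirst();
            pool.execute(new Runnable() {
                @Override
                public void run() {
                    System.out.println("indexing " + file);
                }
            });
        }

        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}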

From source file:org.nuxeo.ecm.platform.routing.core.impl.GraphRouteImpl.java

/**
 * Finds which transitions are re-looping (feedback arc set).
 */
protected void computeLoopTransitions(String startNodeId) throws DocumentRouteException {
    if (startNodeId == null) {
        // incomplete graph
        return;
    }
    /*
     * Depth-first search. In the todo stack, each element records a list of the siblings left to visit at that
     * depth. After visiting the last sibling, we go back to the parent and at this point mark it as visited in
     * post-traversal order.
     */
    List<String> postOrder = new LinkedList<String>();
    Deque<Deque<String>> stack = new LinkedList<Deque<String>>();
    Deque<String> first = new LinkedList<String>();
    first.add(startNodeId);
    stack.push(first);
    Set<String> done = new HashSet<String>();
    for (;;) {
        // find next sibling
        String nodeId = stack.peek().peek();
        if (nodeId == null) {
            // last sibling done
            // go back up one level and mark post-traversal order
            stack.pop(); // pop empty children
            if (stack.isEmpty()) {
                // we are done
                break;
            }
            nodeId = stack.peek().pop(); // pop parent
            postOrder.add(nodeId); // mark post-traversal order
        } else if (done.add(nodeId)) {
            // traverse the next sibling
            Deque<String> children = new LinkedList<String>();
            for (Transition t : getNode(nodeId).getOutputTransitions()) {
                children.add(t.target);
            }
            // add children to stack and recurse
            stack.push(children);
        } else {
            // already traversed
            stack.peek().pop(); // skip it
        }
    }

    // reverse the post-order to find the topological ordering
    Collections.reverse(postOrder);
    Map<String, Integer> ordering = new HashMap<String, Integer>();
    int i = 1;
    for (String nodeId : postOrder) {
        ordering.put(nodeId, Integer.valueOf(i++));
    }

    // walk the graph and all transitions again
    // and mark as looping the transitions pointing to a node
    // with a smaller order than the source
    done.clear();
    Deque<String> todo = new LinkedList<String>();
    todo.add(startNodeId);
    while (!todo.isEmpty()) {
        String nodeId = todo.pop();
        if (done.add(nodeId)) {
            int source = ordering.get(nodeId).intValue();
            for (Transition t : getNode(nodeId).getOutputTransitions()) {
                todo.push(t.target);
                // compare orders to detect feedback arcs
                int target = ordering.get(t.target).intValue();
                if (target <= source) {
                    t.loop = true;
                }
            }
        }
    }
}
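
The depth-first, post-order bookkeeping used above can be isolated into a small sketch. The graph below is hypothetical; the point is how the stack of sibling deques is driven until isEmpty() says the traversal is complete.

import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

class PostOrderSketch {
    public static void main(String[] args) {
        // hypothetical graph: node id -> successor ids
        Map<String, List<String>> graph = new HashMap<String, List<String>>();
        graph.put("start", Arrays.asList("a", "b"));
        graph.put("a", Collections.singletonList("end"));
        graph.put("b", Collections.singletonList("end"));
        graph.put("end", Collections.<String>emptyList());

        List<String> postOrder = new LinkedList<String>();
        Deque<Deque<String>> stack = new LinkedList<Deque<String>>();
        Deque<String> first = new LinkedList<String>();
        first.add("start");
        stack.push(first);
        Set<String> done = new HashSet<String>();

        for (;;) {
            String nodeId = stack.peek().peek();          // next sibling at this depth
            if (nodeId == null) {
                stack.pop();                              // all siblings at this depth are done
                if (stack.isEmpty()) {
                    break;                                // traversal finished
                }
                postOrder.add(stack.peek().pop());        // record the parent in post-order
            } else if (done.add(nodeId)) {
                stack.push(new LinkedList<String>(graph.get(nodeId)));  // descend into children
            } else {
                stack.peek().pop();                       // already visited, skip it
            }
        }
        System.out.println(postOrder);                    // prints [end, a, b, start]
    }
}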

From source file:io.jmnarloch.spring.cloud.zuul.trie.AbstractTrie.java

private T put(N root, String key, T value) {

    N node = root;
    final Deque<N> stack = new LinkedList<N>();
    stack.push(node);
    N next;
    int index = 0;

    while (index < key.length()) {
        final char c = getChar(key, index);
        next = node.getNext(c);
        if (next == null) {
            next = createTrieNode();
            node.setNext(c, next);
        }
        node = next;
        stack.push(node);
        index++;
    }
    final boolean replaced = node.hasValue();
    final T old = node.getValue();
    node.setValue(value);
    if (replaced) {
        return old;
    }

    while (!stack.isEmpty()) {
        node = stack.pop();
        node.setSize(node.getSize() + 1);
    }
    return null;
}

From source file:org.teavm.flavour.templates.parsing.Parser.java

private void popVar(String name) {
    Deque<ValueType> stack = variables.get(name);
    if (stack != null) {
        stack.pop();
        if (stack.isEmpty()) {
            variables.remove(name);
        }
    }
}

From source file:hudson.plugins.nested_view.NestedView.java

/**
 * Returns the health of this nested view.
 * <p/>
 * <p>Notice that, if a job is contained in several sub-views of the current
 * view, then it is taken into account only once to get accurate stats.</p>
 * <p>This algorithm has been derecursified, hence the stack stuff.</p>
 */
public HealthReportContainer getHealth() {
    // we use a set to avoid taking into account several times the same job
    // when computing the health
    Set<TopLevelItem> items = new LinkedHashSet<TopLevelItem>(100);

    // retrieve all jobs to analyze (using DFS)
    Deque<View> viewsStack = new ArrayDeque<View>(20);
    viewsStack.push(this);
    do {
        View currentView = viewsStack.pop();
        if (currentView instanceof NestedView) {
            for (View v : ((NestedView) currentView).views) {
                viewsStack.push(v);
            }
        } else {
            items.addAll(currentView.getItems());
        }
    } while (!viewsStack.isEmpty());

    HealthReportContainer hrc = new HealthReportContainer();
    for (TopLevelItem item : items) {
        if (item instanceof Job) {
            hrc.sum += ((Job) item).getBuildHealth().getScore();
            hrc.count++;
        }
    }

    hrc.report = hrc.count > 0 ? new HealthReport(hrc.sum / hrc.count, Messages._ViewHealth(hrc.count))
            : new HealthReport(100, Messages._NoJobs());

    return hrc;
}

From source file:de.escalon.hypermedia.hydra.serialize.JacksonHydraSerializer.java

@Override
public void serialize(Object bean, JsonGenerator jgen, SerializerProvider serializerProvider)
        throws IOException {
    if (!isUnwrappingSerializer()) {
        jgen.writeStartObject();
    }
    Deque<String> deque = (Deque<String>) serializerProvider.getAttribute(KEY_LD_CONTEXT);
    if (deque == null) {
        deque = new ArrayDeque<String>();
        serializerProvider.setAttribute(KEY_LD_CONTEXT, deque);
    }

    serializeContext(bean, jgen, serializerProvider, deque);
    serializeType(bean, jgen, serializerProvider);
    serializeFields(bean, jgen, serializerProvider);
    if (!isUnwrappingSerializer()) {
        jgen.writeEndObject();
    }
    deque = (Deque<String>) serializerProvider.getAttribute(KEY_LD_CONTEXT);
    if (!deque.isEmpty()) {
        deque.pop();
    }
}
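
Note that the example only calls pop() after checking isEmpty(), because pop() on an empty ArrayDeque throws NoSuchElementException. Below is a standalone sketch of that guard, independent of Jackson; the ContextStack class is made up for illustration.

import java.util.ArrayDeque;
import java.util.Deque;

class ContextStack {
    private final Deque<String> contexts = new ArrayDeque<String>();

    void enter(String context) {
        contexts.push(context);
    }

    void leave() {
        // pop() on an empty ArrayDeque throws NoSuchElementException,
        // so check isEmpty() first, just like the serializer above
        if (!contexts.isEmpty()) {
            contexts.pop();
        }
    }

    String current() {
        return contexts.isEmpty() ? "(none)" : contexts.peek();
    }
}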