Example usage for java.util Deque removeLast

List of usage examples for java.util Deque removeLast

Introduction

On this page you can find example usage for java.util Deque removeLast.

Prototype

E removeLast();

Document

Retrieves and removes the last element of this deque. This method differs from pollLast only in that it throws an exception if this deque is empty.
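
The following is a minimal, self-contained sketch (not taken from any of the projects below) illustrating that behaviour: removeLast() returns and removes the tail element, and unlike pollLast() it throws NoSuchElementException when the deque is empty.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.NoSuchElementException;

public class RemoveLastDemo {
    public static void main(String[] args) {
        Deque<String> deque = new ArrayDeque<>();
        deque.addLast("first");
        deque.addLast("second");

        System.out.println(deque.removeLast()); // prints "second" (the tail)
        System.out.println(deque.removeLast()); // prints "first"; the deque is now empty

        System.out.println(deque.pollLast());   // pollLast() returns null on an empty deque
        try {
            deque.removeLast();                 // removeLast() throws instead
        } catch (NoSuchElementException e) {
            System.out.println("empty deque: " + e.getClass().getSimpleName());
        }
    }
}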

Usage

From source file:org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.TestProportionalCapacityPreemptionPolicy.java

ParentQueue mockNested(int[] abs, int[] maxCap, int[] used, int[] pending, int[] reserved, int[] apps,
        int[] gran, int[] queues) {
    float tot = leafAbsCapacities(abs, queues);
    Deque<ParentQueue> pqs = new LinkedList<ParentQueue>();
    ParentQueue root = mockParentQueue(null, queues[0], pqs);
    when(root.getQueueName()).thenReturn("/");
    when(root.getAbsoluteUsedCapacity()).thenReturn(used[0] / tot);
    when(root.getAbsoluteCapacity()).thenReturn(abs[0] / tot);
    when(root.getAbsoluteMaximumCapacity()).thenReturn(maxCap[0] / tot);
    when(root.getQueuePath()).thenReturn("root");
    boolean preemptionDisabled = mockPreemptionStatus("root");
    when(root.getPreemptionDisabled()).thenReturn(preemptionDisabled);

    for (int i = 1; i < queues.length; ++i) {
        final CSQueue q;
        final ParentQueue p = pqs.removeLast();
        final String queueName = "queue" + ((char) ('A' + i - 1));
        if (queues[i] > 0) {
            q = mockParentQueue(p, queues[i], pqs);
        } else {
            q = mockLeafQueue(p, tot, i, abs, used, pending, reserved, apps, gran);
        }
        when(q.getParent()).thenReturn(p);
        when(q.getQueueName()).thenReturn(queueName);
        when(q.getAbsoluteUsedCapacity()).thenReturn(used[i] / tot);
        when(q.getAbsoluteCapacity()).thenReturn(abs[i] / tot);
        when(q.getAbsoluteMaximumCapacity()).thenReturn(maxCap[i] / tot);
        String parentPathName = p.getQueuePath();
        parentPathName = (parentPathName == null) ? "root" : parentPathName;
        String queuePathName = (parentPathName + "." + queueName).replace("/", "root");
        when(q.getQueuePath()).thenReturn(queuePathName);
        preemptionDisabled = mockPreemptionStatus(queuePathName);
        when(q.getPreemptionDisabled()).thenReturn(preemptionDisabled);
    }
    assert 0 == pqs.size();
    return root;
}

From source file:org.apache.metron.stellar.common.StellarCompiler.java

private void exitLambda(boolean hasArgs) {
    final FrameContext.Context context = getArgContext();
    Token<?> t = expression.tokenDeque.pop();
    final Deque<Token<?>> instanceDeque = new ArrayDeque<>();
    for (; !expression.tokenDeque.isEmpty() && t != EXPRESSION_REFERENCE; t = expression.tokenDeque.pop()) {
        instanceDeque.addLast(t);
    }
    final List<String> variables = hasArgs ? (List<String>) instanceDeque.removeLast().getValue()
            : new ArrayList<>();
    expression.tokenDeque.push(new Token<>((tokenDeque, state) -> {
        LambdaExpression expr = new LambdaExpression(variables, instanceDeque, state);
        tokenDeque.push(new Token<>(expr, Object.class, context));
    }, DeferredFunction.class, context));
}

From source file:org.apache.oozie.workflow.lite.LiteWorkflowValidator.java

/**
 * This method recursively validates two things:
 * - fork/join methods are properly paired
 * - there are no multiple "okTo" paths to a given node
 *
 * Important: this method assumes that the workflow is acyclic - therefore this must run after performBasicValidation()
 *
 * @param app The WorkflowApp
 * @param node Current node we're checking
 * @param currentFork Current fork node (null if we are not under a fork path)
 * @param topDecisionParent The top (eldest) decision node along the path to this node, or null if there isn't one
 * @param okPath false if node (or an ancestor of node) was reached via an "error to" transition or via a join node that has
 * already been visited at least once before
 * @param forkJoins Map that contains a mapping of fork-join node pairs.
 * @param nodeAndDecisionParents Map that contains a mapping of nodes and their eldest decision node
 * @throws WorkflowException If any of the constraints described above is violated
 */
private void validateForkJoin(LiteWorkflowApp app, NodeDef node, NodeDef currentFork, String topDecisionParent,
        boolean okPath, Deque<String> path, Map<String, String> forkJoins,
        Map<String, Optional<String>> nodeAndDecisionParents) throws WorkflowException {
    final String nodeName = node.getName();

    path.addLast(nodeName);

    /* If we're walking an "okTo" path and the nodes are not Kill/Join/End, we have to make sure that only a single
     * "okTo" path exists to the current node.
     *
     * The "topDecisionParent" represents the eldest decision in the chain that we've gone through. For example, let's assume
     * that D1, D2, D3 are decision nodes and A is an action node.
     *
     * D1-->D2-->D3---> ... (rest of the WF)
     *  |   |    |
     *  |   |    |
     *  |   |    +----> +---+
     *  |   +---------> | A |
     *  +-------------> +---+
     *
     * In this case, there are three "okTo" paths to "A" but it's still a valid workflow because the eldest decision node
     * is D1 and during every run, there is only one possible execution path that leads to A (D1->A, D1->D2->A or
     * D1->D2->D3->A). In the code, if we encounter a decision node and we already have one, we don't update it. If it's null
     * then we set it to the current decision node we're under.
     *
     * If the "current" and "top" parents are null, it means that we reached the node from two separate "okTo" paths, which is
     * not acceptable.
     *
     * Also, if we have two distinct top decision parents it means that the node is reachable from two decision paths which
     * are not "chained" (like in the example).
     *
     * It's worth noting that the last two examples can only occur in case of fork-join when we start to execute at least
     * two separate paths in parallel. Without fork-join, multiple parents or two null parents would mean that there is a loop
     * in the workflow but that should not happen since it has been validated.
     */
    if (okPath && !(node instanceof KillNodeDef) && !(node instanceof JoinNodeDef)
            && !(node instanceof EndNodeDef)) {
        // using Optional here so we can distinguish between "non-visited" and "visited - no parent" state.
        Optional<String> decisionParentOpt = nodeAndDecisionParents.get(nodeName);
        if (decisionParentOpt == null) {
            nodeAndDecisionParents.put(node.getName(), Optional.fromNullable(topDecisionParent));
        } else {
            String decisionParent = decisionParentOpt.isPresent() ? decisionParentOpt.get() : null;

            if ((decisionParent == null && topDecisionParent == null)
                    || !Objects.equal(decisionParent, topDecisionParent)) {
                throw new WorkflowException(ErrorCode.E0743, nodeName);
            }
        }
    }

    /* Fork-Join validation logic:
     *
     * At each Fork node, we recurse into every possible path, changing the "currentFork" variable to the Fork node. We stop
     * walking as soon as we encounter a Join node. At the Join node, we update the forkJoin mapping, which maintains
     * the relationship between every fork-join pair (actually it's join->fork mapping). We check whether the join->fork
     * mapping already contains another Fork node, which means that the Join is reachable from at least two distinct
     * Fork nodes, so we terminate the validation.
     *
     * From the Join node, we don't recurse further. Therefore, all recursive calls return back to the point where we called
     * validateForkJoin() from the Fork node in question.
     *
     * At this point, we have to check how many different Join nodes we've found across the different paths. We collect them
     * into a set, then we make sure that we have only a single Join node for all Fork paths. Otherwise the workflow is broken.
     *
     * If we have only a single Join, then we get the transition node from the Join and go on with the recursive validation -
     * this time we use the original "currentFork" variable that we have on the stack. With this approach, nested
     * Fork-Joins are handled correctly.
     */
    if (node instanceof ForkNodeDef) {
        final List<String> transitions = node.getTransitions();

        checkForkTransitions(app, transitions, node);

        for (String t : transitions) {
            NodeDef transition = app.getNode(t);
            validateForkJoin(app, transition, node, topDecisionParent, okPath, path, forkJoins,
                    nodeAndDecisionParents);
        }

        // get the Join node for this ForkNode & validate it (we must have only one)
        Set<String> joins = new HashSet<String>();
        collectJoins(app, forkJoins, nodeName, joins);
        checkJoins(joins, nodeName);

        List<String> joinTransitions = app.getNode(joins.iterator().next()).getTransitions();
        NodeDef next = app.getNode(joinTransitions.get(0));

        validateForkJoin(app, next, currentFork, topDecisionParent, okPath, path, forkJoins,
                nodeAndDecisionParents);
    } else if (node instanceof JoinNodeDef) {
        if (currentFork == null) {
            throw new WorkflowException(ErrorCode.E0742, node.getName());
        }

        // join --> fork mapping
        String forkNode = forkJoins.get(nodeName);
        if (forkNode == null) {
            forkJoins.put(nodeName, currentFork.getName());
        } else if (!forkNode.equals(currentFork.getName())) {
            throw new WorkflowException(ErrorCode.E0758, node.getName(), forkNode + "," + currentFork);
        }
    } else if (node instanceof DecisionNodeDef) {
        List<String> transitions = node.getTransitions();

        // see explanation above - if we already have a topDecisionParent, we don't update it
        String parentDecisionNode = topDecisionParent;
        if (parentDecisionNode == null) {
            parentDecisionNode = nodeName;
        }

        for (String t : transitions) {
            NodeDef transition = app.getNode(t);
            validateForkJoin(app, transition, currentFork, parentDecisionNode, okPath, path, forkJoins,
                    nodeAndDecisionParents);
        }
    } else if (node instanceof KillNodeDef) {
        // no op
    } else if (node instanceof EndNodeDef) {
        // We can't end the WF if we're on a Fork path. From the "path" deque, we remove the last node (which
        // is the current "End") and look at the last node again so we know where we came from
        if (currentFork != null) {
            path.removeLast();
            String previous = path.peekLast();
            throw new WorkflowException(ErrorCode.E0737, previous, node.getName());
        }
    } else if (node instanceof ActionNodeDef) {
        String transition = node.getTransitions().get(0); // "ok to" transition
        NodeDef okNode = app.getNode(transition);
        validateForkJoin(app, okNode, currentFork, topDecisionParent, true, path, forkJoins,
                nodeAndDecisionParents);

        transition = node.getTransitions().get(1); // "error to" transition
        NodeDef errorNode = app.getNode(transition);
        validateForkJoin(app, errorNode, currentFork, topDecisionParent, false, path, forkJoins,
                nodeAndDecisionParents);
    } else if (node instanceof StartNodeDef) {
        String transition = node.getTransitions().get(0); // start always has only 1 transition
        NodeDef tranNode = app.getNode(transition);
        validateForkJoin(app, tranNode, currentFork, topDecisionParent, okPath, path, forkJoins,
                nodeAndDecisionParents);
    } else {
        throw new WorkflowException(ErrorCode.E0740, node.getClass());
    }

    path.remove(nodeName);
}

From source file:org.apache.sling.etcd.testing.tree.Node.java

@Nonnull
public static String parent(@Nonnull String path) {
    Deque<String> names = names(path);
    if (!names.isEmpty()) {
        names.removeLast();
    }
    return path(names);
}

From source file:org.apache.tajo.engine.planner.global.ParallelExecutionQueue.java

@Override
public synchronized ExecutionBlock[] first() {
    int max = Math.min(maximum, executable.size());
    List<ExecutionBlock> result = new ArrayList<>();
    for (Deque<ExecutionBlock> queue : executable) {
        if (result.size() < max && isExecutableNow(queue.peekLast())) {
            result.add(queue.removeLast());
        }
    }
    LOG.info("Initial executable blocks " + result);
    return result.toArray(new ExecutionBlock[result.size()]);
}

From source file:org.apache.tajo.engine.planner.global.ParallelExecutionQueue.java

@Override
public synchronized ExecutionBlock[] next(ExecutionBlockId doneNow) {
    executed.add(doneNow);

    int remaining = 0;
    for (Deque<ExecutionBlock> queue : executable) {
        if (!queue.isEmpty() && isExecutableNow(queue.peekLast())) {
            LOG.info("Next executable block " + queue.peekLast());
            return new ExecutionBlock[] { queue.removeLast() };
        }
        remaining += queue.size();
    }
    return remaining > 0 ? new ExecutionBlock[0] : null;
}
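
Both ParallelExecutionQueue methods above follow the same pattern: inspect the tail with peekLast() and only consume it with removeLast() once it passes a check. Below is a minimal standalone sketch of that peek-then-remove pattern, with a hypothetical isReady() check standing in for isExecutableNow():

import java.util.ArrayDeque;
import java.util.Deque;

public class ConditionalTailConsumer {

    // Hypothetical readiness check, standing in for isExecutableNow() above
    private static boolean isReady(String task) {
        return task != null && task.startsWith("ready");
    }

    public static void main(String[] args) {
        Deque<String> queue = new ArrayDeque<>();
        queue.addLast("pending-1");
        queue.addLast("ready-2");

        // Peek at the tail first; only remove it when it is actually consumable.
        if (isReady(queue.peekLast())) {
            System.out.println("Consumed " + queue.removeLast()); // removes "ready-2"
        } else {
            System.out.println("Tail not ready, leaving the queue untouched");
        }
    }
}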

From source file:org.lilyproject.indexer.engine.ValueEvaluator.java

private void extractContent(String table, Object value, Deque<Integer> indexes, Record record,
        FieldType fieldType, List<String> result, LRepository repository) {

    if (value instanceof List) { // this covers both LIST and PATH types
        List values = (List) value;
        for (int i = 0; i < values.size(); i++) {
            indexes.addLast(i);
            extractContent(table, values.get(i), indexes, record, fieldType, result, repository);
            indexes.removeLast();
        }
    } else {
        extractContent(table, value, record, fieldType, Ints.toArray(indexes), result, repository);
    }
}

From source file:org.mypsycho.util.PropertiesLoader.java

protected String resolveProperty(Bundle bundle, String key, Deque<String> refStack) {

    String fullKey = key;
    String localKey = key;

    int indexBundle = key.indexOf(MEMBER_TOKEN);
    Bundle definingBundle = bundle;

    if (indexBundle < 0) { // local name
        fullKey = bundle.getBasename() + MEMBER_TOKEN + key;
    } else if ((indexBundle > 0) && (key.indexOf(MEMBER_TOKEN, indexBundle + 1) < 0)) {
        String basename = key.substring(0, indexBundle);
        localKey = key.substring(indexBundle + 1);
        definingBundle = getBundle(basename, bundle.getLocale(), bundle.getContext());
    } else {
        handle("malformedKey", key + " in " + bundle.getBasename());
    } // else not a cross reference fullKey == localKey == key

    if (refStack.contains(fullKey)) {
        handle("recursivity", fullKey);
        return SUBST_TOKEN + key + END_TOKEN;
    }

    Object value = null;

    for (int fb = localKey.length(); (value == null)
            && (fb != -1); fb = localKey.lastIndexOf(FALLBACK_TOKEN, fb - 1)) {
        value = definingBundle.getDefinition(localKey.substring(0, fb));
    }

    // value = definingBundle.getDefinition(localKey);

    if (value == null) { // not defined
        if (env != null) { // Extension Point
            value = env.getProperty(key);
            if (value != null) {
                return (String) value;
            }
        }

        handle("undefined", fullKey);
        return SUBST_TOKEN + key + END_TOKEN;
    }

    if (value instanceof String) { // already substituted
        return (String) value;
    }
    // else unresolved value

    refStack.addLast(fullKey);
    String newValue = resolveExpression(definingBundle, ((String[]) value)[0], refStack);
    refStack.removeLast();
    definingBundle.putValue(localKey, newValue);

    return newValue;
}

From source file:org.openconcerto.sql.view.list.UpdateQueue.java

protected void willPut(final Runnable qr) throws InterruptedException {
    if (qr instanceof ChangeAllRunnable) {
        // if everything is being updated, there is no point in keeping the previous updates.
        // WARNING: for runnables that depend on the updates, if the updates are removed
        // they will run without their own update having been performed
        this.tasksDo(new IClosure<Deque<FutureTask<?>>>() {
            @Override
            public void executeChecked(final Deque<FutureTask<?>> tasks) {
                // start from the end and remove all updates until we find
                // a runnable that is not an UpdateRunnable
                FutureTask<?> current = tasks.peekLast();
                boolean onlyUpdateRunnable = true;
                while (current != null && onlyUpdateRunnable) {
                    onlyUpdateRunnable = isCancelableUpdate(current);
                    if (onlyUpdateRunnable) {
                        tasks.removeLast();
                        current = tasks.peekLast();
                    }
                }
                if (onlyUpdateRunnable) {
                    final FutureTask<?> br = getBeingRun();
                    if (br != null && isCancelableUpdate(br))
                        br.cancel(true);
                }
            }
        });
    }
}

From source file:org.polymap.core.data.pipeline.DefaultPipelineIncubator.java

protected boolean findTransformation(ProcessorDescription from, ProcessorDescription to, LayerUseCase usecase,
        Deque<ProcessorDescription> chain) {
    log.debug(StringUtils.repeat("    ", chain.size()) + "findTransformation: " + from + " => " + to + " -- "
            + usecase);

    // recursion break
    if (chain.size() > 16) {
        return false;
    }

    // recursion start
    if (from.getSignature().isCompatible(to.getSignature())) {
        chain.addLast(to);
        log.debug(StringUtils.repeat("    ", chain.size()) + "Transformation found: " + chain);
        return true;
    }

    // recursion step
    else {
        for (ProcessorDescription desc : allTransformers(usecase)) {
            if (from.getSignature().isCompatible(desc.getSignature()) && !chain.contains(desc)) {
                chain.addLast(desc);
                if (findTransformation(desc, to, usecase, chain)) {
                    //log.debug( "      transformation found: " + desc );
                    return true;
                }
                chain.removeLast();
            }
        }
        return false;
    }
}
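
The ValueEvaluator and DefaultPipelineIncubator examples above both use a Deque as an explicit stack during recursion: addLast() on the way down, removeLast() to backtrack. Below is a minimal standalone sketch of that backtracking pattern; the Node tree and the path format are purely illustrative.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.List;

public class DequeBacktracking {

    static class Node {
        final String name;
        final List<Node> children;

        Node(String name, List<Node> children) {
            this.name = name;
            this.children = children;
        }
    }

    // Collects every root-to-leaf path, using the deque as a mutable path stack.
    static void collectPaths(Node node, Deque<String> path, List<String> result) {
        path.addLast(node.name);            // push the current node
        if (node.children.isEmpty()) {
            result.add(String.join("/", path));
        } else {
            for (Node child : node.children) {
                collectPaths(child, path, result);
            }
        }
        path.removeLast();                  // backtrack on the way out
    }

    public static void main(String[] args) {
        Node tree = new Node("root", Arrays.asList(
                new Node("a", Collections.singletonList(new Node("leaf1", Collections.<Node>emptyList()))),
                new Node("b", Collections.<Node>emptyList())));
        List<String> paths = new ArrayList<>();
        collectPaths(tree, new ArrayDeque<String>(), paths);
        System.out.println(paths); // [root/a/leaf1, root/b]
    }
}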