Example usage for java.util Stack push

Introduction

This page collects usage examples for java.util.Stack.push, drawn from open-source projects.

Prototype

public E push(E item) 

Documentation

Pushes an item onto the top of this stack and returns the item itself.
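
Before the project examples below, here is a minimal, self-contained sketch of the call pattern (the class name and values are illustrative only):

import java.util.Stack;

public class StackPushDemo {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<>();

        // push returns its argument, so the pushed value can be captured or chained
        String pushed = stack.push("first");
        stack.push("second");

        System.out.println(pushed);       // prints "first"
        System.out.println(stack.peek()); // prints "second": the last item pushed is on top
        System.out.println(stack.pop());  // prints "second" and removes it from the stack
    }
}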

Usage

From source file:com.gargoylesoftware.htmlunit.javascript.host.xml.XMLHttpRequest.java
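
In this asynchronous send path, push places the starting scope on a thread-local stack of scopes (keyed by KEY_STARTING_SCOPE) before the request runs, and the matching pop in the finally block unwinds it.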

/**
 * Sends the specified content to the server in an HTTP request and receives the response.
 * @param content the body of the message being sent with the request
 */
@JsxFunction
public void send(final Object content) {
    if (webRequest_ == null) {
        return;
    }
    prepareRequest(content);

    final WebClient client = getWindow().getWebWindow().getWebClient();
    final AjaxController ajaxController = client.getAjaxController();
    final HtmlPage page = (HtmlPage) getWindow().getWebWindow().getEnclosedPage();
    final boolean synchron = ajaxController.processSynchron(page, webRequest_, async_);
    if (synchron) {
        doSend(Context.getCurrentContext());
    } else {
        if (getBrowserVersion().hasFeature(XHR_FIRE_STATE_OPENED_AGAIN_IN_ASYNC_MODE)) {
            // quite strange but IE and FF seem both to fire state loading twice
            // in async mode (at least with HTML of the unit tests)
            setState(OPENED, Context.getCurrentContext());
        }

        // Create and start a thread in which to execute the request.
        final Scriptable startingScope = getWindow();
        final ContextFactory cf = client.getJavaScriptEngine().getContextFactory();
        final ContextAction action = new ContextAction() {
            @Override
            public Object run(final Context cx) {
                // KEY_STARTING_SCOPE maintains a stack of scopes
                @SuppressWarnings("unchecked")
                Stack<Scriptable> stack = (Stack<Scriptable>) cx
                        .getThreadLocal(JavaScriptEngine.KEY_STARTING_SCOPE);
                if (null == stack) {
                    stack = new Stack<>();
                    cx.putThreadLocal(JavaScriptEngine.KEY_STARTING_SCOPE, stack);
                }
                stack.push(startingScope);

                try {
                    doSend(cx);
                } finally {
                    stack.pop();
                }
                return null;
            }
        };
        final JavaScriptJob job = BackgroundJavaScriptFactory.theFactory().createJavascriptXMLHttpRequestJob(cf,
                action);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Starting XMLHttpRequest thread for asynchronous request");
        }
        jobID_ = getWindow().getWebWindow().getJobManager().addJob(job, page);
    }
}

From source file:com.blackducksoftware.integration.hub.detect.detector.pip.PipenvGraphParser.java
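
Here the stack tracks the chain of ancestor dependencies while walking the indented pipenv graph output: the parser pops back to the current indentation level, links the new dependency to whatever is left on top (or to the root), then pushes it as the potential parent of the lines that follow.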

public PipParseResult parse(final String projectName, final String projectVersionName,
        final List<String> pipFreezeOutput, final List<String> pipenvGraphOutput, final String sourcePath) {
    final MutableMapDependencyGraph dependencyGraph = new MutableMapDependencyGraph();
    final Stack<Dependency> dependencyStack = new Stack<>();

    final Map<String, String[]> pipFreezeMap = pipFreezeOutput.stream()
            .map(line -> line.split(TOP_LEVEL_SEPARATOR)).filter(splitLine -> splitLine.length == 2)
            .collect(Collectors.toMap(splitLine -> splitLine[0].trim().toLowerCase(), splitLine -> splitLine));

    int lastLevel = -1;
    for (final String line : pipenvGraphOutput) {
        final int currentLevel = getLevel(line);
        final Optional<Dependency> parsedDependency = getDependencyFromLine(pipFreezeMap, line);

        if (!parsedDependency.isPresent()) {
            continue;
        }

        final Dependency dependency = parsedDependency.get();

        if (currentLevel == lastLevel) {
            dependencyStack.pop();
        } else {
            for (; lastLevel >= currentLevel; lastLevel--) {
                dependencyStack.pop();
            }
        }

        if (dependencyStack.size() > 0) {
            dependencyGraph.addChildWithParent(dependency, dependencyStack.peek());
        } else {
            dependencyGraph.addChildrenToRoot(dependency);
        }

        lastLevel = currentLevel;
        dependencyStack.push(dependency);
    }

    if (!dependencyGraph.getRootDependencyExternalIds().isEmpty()) {
        final ExternalId projectExternalId = externalIdFactory.createNameVersionExternalId(Forge.PYPI,
                projectName, projectVersionName);
        final DetectCodeLocation codeLocation = new DetectCodeLocation.Builder(DetectCodeLocationType.PIP,
                sourcePath, projectExternalId, dependencyGraph).build();
        return new PipParseResult(projectName, projectVersionName, codeLocation);
    } else {
        return null;
    }
}

From source file:com.hippo.vectorold.drawable.VectorDrawable.java
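
The stack mirrors the nesting of group tags while inflating the drawable from XML: the root group is pushed first, each START_TAG for a group pushes the new child group, and the matching END_TAG pops it, so peek always yields the group currently being populated.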

private void inflateInternal(Resources res, XmlPullParser parser, AttributeSet attrs)
        throws XmlPullParserException, IOException {
    final VectorDrawableState state = mVectorState;
    final VPathRenderer pathRenderer = state.mVPathRenderer;
    boolean noPathTag = true;

    // Use a stack to help to build the group tree.
    // The top of the stack is always the current group.
    final Stack<VGroup> groupStack = new Stack<VGroup>();
    groupStack.push(pathRenderer.mRootGroup);

    int eventType = parser.getEventType();
    while (eventType != XmlPullParser.END_DOCUMENT) {
        if (eventType == XmlPullParser.START_TAG) {
            final String tagName = parser.getName();
            final VGroup currentGroup = groupStack.peek();

            if (SHAPE_PATH.equals(tagName)) {
                final VFullPath path = new VFullPath();
                path.inflate(res, attrs);
                currentGroup.mChildren.add(path);
                if (path.getPathName() != null) {
                    pathRenderer.mVGTargetsMap.put(path.getPathName(), path);
                }
                noPathTag = false;
                state.mChangingConfigurations |= path.mChangingConfigurations;
            } else if (SHAPE_CLIP_PATH.equals(tagName)) {
                final VClipPath path = new VClipPath();
                path.inflate(res, attrs);
                currentGroup.mChildren.add(path);
                if (path.getPathName() != null) {
                    pathRenderer.mVGTargetsMap.put(path.getPathName(), path);
                }
                state.mChangingConfigurations |= path.mChangingConfigurations;
            } else if (SHAPE_GROUP.equals(tagName)) {
                VGroup newChildGroup = new VGroup();
                newChildGroup.inflate(res, attrs);
                currentGroup.mChildren.add(newChildGroup);
                groupStack.push(newChildGroup);
                if (newChildGroup.getGroupName() != null) {
                    pathRenderer.mVGTargetsMap.put(newChildGroup.getGroupName(), newChildGroup);
                }
                state.mChangingConfigurations |= newChildGroup.mChangingConfigurations;
            }
        } else if (eventType == XmlPullParser.END_TAG) {
            final String tagName = parser.getName();
            if (SHAPE_GROUP.equals(tagName)) {
                groupStack.pop();
            }
        }
        eventType = parser.next();
    }

    // Print the tree out for debug.
    if (DBG_VECTOR_DRAWABLE) {
        printGroupTree(pathRenderer.mRootGroup, 0);
    }

    if (noPathTag) {
        throw new XmlPullParserException("no " + SHAPE_PATH + " defined");
    }
}

From source file:javalin.draw.Drawing.java
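
In this drawing interpreter, push saves an in-progress polygon when START_POLY begins a nested one; END_POLY draws the current polygon and pops the saved one back, if any.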

public void draw(final DrawingGraphics dg) {
    float[] pos = new float[] { 0, 0, 0 };
    final Stack<FloatList> polygonStack = new Stack<FloatList>();
    FloatList polygon = null;
    Map<Integer, float[][][]> patches = null;

    int argIndex = 0;
    final Iterator<DrawingOp> opIter = drawing.iterator();
    while (opIter.hasNext()) {
        final DrawingOp op = opIter.next();

        switch (op) {
        case LINE_TO:
            float x = args.get(argIndex++);
            float y = args.get(argIndex++);
            float z = args.get(argIndex++);
            dg.drawLine(pos[0], pos[1], pos[2], x, y, z);

            pos[0] = x;
            pos[1] = y;
            pos[2] = z;
            break;
        case MOVE_TO:
            pos[0] = args.get(argIndex++);
            pos[1] = args.get(argIndex++);
            pos[2] = args.get(argIndex++);
            break;
        case SETCOLOR:
            dg.setColor(args.get(argIndex++), args.get(argIndex++), args.get(argIndex++), args.get(argIndex++));
            break;
        case START_POLY:
            if (polygon != null)
                polygonStack.push(polygon);
            polygon = new FloatList(64);
            break;
        case VERTEX:
            if (polygon == null)
                throw new IllegalStateException("You can't create a vertex before a polygon is begun.");
            polygon.add(pos[0], pos[1], pos[2]);
            break;
        case END_POLY:
            if (polygon == null)
                throw new IllegalStateException("You can't end a polygon before a polygon is begun.");
            dg.drawPolygon(polygon.toArray());
            polygon = polygonStack.size() > 0 ? polygonStack.pop() : null;
            break;

        case INIT_PATCH:
            int patchIndex = (int) args.get(argIndex++);
            if (patches == null)
                patches = new HashMap<Integer, float[][][]>();
            float[][][] patch;
            if (patches.containsKey(patchIndex))
                patch = patches.get(patchIndex);
            else {
                patch = new float[4][4][3];
                patches.put(patchIndex, patch);
            }
            for (int row = 0; row < 4; row++)
                for (int col = 0; col < 4; col++)
                    for (int d = 0; d < 3; d++)
                        patch[row][col][d] = 0;
            break;
        case CONTROL_POINT:
            patchIndex = (int) args.get(argIndex++);
            patch = patches.get(patchIndex);
            if (patch == null)
                throw new IllegalStateException("Patch " + patchIndex + " has not been initialized.");
            int row = (int) args.get(argIndex++);
            int col = (int) args.get(argIndex++);
            patch[row][col][0] = args.get(argIndex++);
            patch[row][col][1] = args.get(argIndex++);
            patch[row][col][2] = args.get(argIndex++);
            break;
        case DRAW_PATCH:
            patchIndex = (int) args.get(argIndex++);
            patch = patches.get(patchIndex);
            if (patch == null)
                throw new IllegalStateException(
                        "You can't draw patch " + patchIndex + " before it is initialized.");
            final int gridSteps = (int) args.get(argIndex++);
            dg.drawPatch(patch, gridSteps);
            break;
        }
    }
    if (polygon != null || polygonStack.size() > 0)
        throw new IllegalStateException("Unfinished polygon.");
}

From source file:com.clustercontrol.performance.operator.RevercePorlishNotation.java
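
A classic reverse-Polish-notation evaluator: operands are pushed onto the stack, and each operator pops its two arguments (right first, then left) and pushes the intermediate result back.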

/**
 * Evaluates the parsed expression in reverse Polish notation.
 */
@Override
public double calc(DataTable currentTable, DataTable previousTable, String deviceName)
        throws CollectedDataNotFoundException, InvalidValueException {
    double right = 0D;
    double left = 0D;
    double result = 0D;
    Stack<Double> _stack = new Stack<Double>();

    if (this.expArray.length == 1) {
        result = getVal(expArray[0], currentTable, previousTable, deviceName);
    } else {
        for (int i = 0; i < this.expArray.length; i++) {

            try {
                if (expArray[i] instanceof OPERATOR) {
                    right = _stack.pop();
                    left = _stack.pop();
                    switch ((OPERATOR) expArray[i]) {
                    case ADDITION:
                        result = left + right;
                        break;
                    case SUBTRACTION:
                        result = left - right;
                        break;
                    case MULTIPLICATION:
                        result = left * right;
                        break;
                    case DIVISION:
                        if (right == 0) {
                            log.warn("0-devided, expression=" + expression);
                            // 0-devide???????NaN?
                            return Double.NaN;
                        }
                        result = left / right;
                        break;
                    }
                    _stack.push(result);
                } else {
                    _stack.push(getVal(expArray[i], currentTable, previousTable, deviceName));
                }
            } catch (CollectedDataNotFoundException | IllegalStateException | EmptyStackException e) {
                log.warn("calc [" + expression + "], " + e.getClass().getName() + ", " + e.getMessage());
                throw new InvalidValueException(e.getMessage());
            } catch (Exception e) {
                log.warn("calc [" + expression + "], " + e.getClass().getName() + ", " + e.getMessage(), e);
                throw new InvalidValueException(e.getMessage());
            }
        }
        if (_stack.size() > 1) {
            String messages = "expression is invalid, expression-" + expression;
            log.warn("calc : " + messages);
            throw new InvalidValueException(messages);
        }
    }
    return result;
}

From source file:com.nextep.designer.sqlgen.ui.editors.sql.SQLCompletionProcessor.java
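
While scanning tokens, a nested SELECT pushes the enclosing DMLParseResult so a fresh result can be built for the subquery; the closing parenthesis stores the subquery's segment and pops the outer result back.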

/**
 * This method parses the SQL statement defined at the current start offset. The method will
 * retrieve any SQL statement which encapsulates the start offset, parse it, and return the
 * result of this parse for completion proposals.
 *
 * @param viewer viewer of the document to parse
 * @param start start offset
 * @return a {@link DMLParseResult} which contains information about the parse of the found SQL
 *         statement, or <code>null</code> if no SQL statement has been found from the given
 *         start offset.
 */
private DMLParseResult parseSQL(ITextViewer viewer, int start) {
    // Retrieving the corresponding statement start
    IDocument doc = new Document();
    doc.set(viewer.getDocument().get() + " "); //$NON-NLS-1$

    FindReplaceDocumentAdapter finder = new FindReplaceDocumentAdapter(doc);
    try {
        IRegion lastSemicolonRegion = finder.find(start - 1, ";", false, false, false, false); //$NON-NLS-1$
        if (lastSemicolonRegion == null) {
            lastSemicolonRegion = new Region(0, 1);
        }
        IRegion selectRegion = finder.find(lastSemicolonRegion.getOffset(), "SELECT|INSERT|UPDATE|DELETE", true, //$NON-NLS-1$
                false, false, true);

        IRegion endSemicolonRegion = finder.find(start == doc.getLength() ? start - 1 : start, ";", true, false, //$NON-NLS-1$
                false, false);
        if (endSemicolonRegion == null) {
            endSemicolonRegion = new Region(doc.getLength() - 1, 0);
        }
        if (selectRegion == null || lastSemicolonRegion == null || endSemicolonRegion == null) {
            return null;
        }
        // The select must be found after the first semicolon, else it is not the
        // same SQL statement
        if (selectRegion.getOffset() >= lastSemicolonRegion.getOffset()
                && endSemicolonRegion.getOffset() >= selectRegion.getOffset()) {
            DMLScanner scanner = new DMLScanner(parser);
            scanner.setRange(doc, selectRegion.getOffset(),
                    endSemicolonRegion.getOffset() - selectRegion.getOffset());
            IToken token = scanner.nextToken();
            DMLParseResult result = new DMLParseResult();
            Stack<DMLParseResult> stack = new Stack<DMLParseResult>();
            Map<Segment, DMLParseResult> results = new HashMap<Segment, DMLParseResult>();
            while (!token.isEOF()) {
                // Counting parentheses
                if (token == DMLScanner.LEFTPAR_TOKEN) {
                    result.parCount++;
                } else if (token == DMLScanner.RIGHTPAR_TOKEN) {
                    result.parCount--;
                }

                if (token == DMLScanner.SELECT_TOKEN) { // && (result.tableSegStart>0 ||
                    // result.whereSegStart>0)) {
                    stack.push(result);
                    result = new DMLParseResult();
                    result.stackStart = scanner.getTokenOffset();
                } else if (token == DMLScanner.RIGHTPAR_TOKEN && result.parCount < 0) { // &&
                    // stack.size()>0)
                    // {
                    results.put(new Segment(result.stackStart, scanner.getTokenOffset() - result.stackStart),
                            result);
                    result = stack.pop();
                } else if (token == DMLScanner.INSERT_TOKEN) {
                    result.ignoreInto = false;
                } else if (token == DMLScanner.FROM_TOKEN || token == DMLScanner.UPDATE_TOKEN
                        || (token == DMLScanner.INTO_TOKEN && !result.ignoreInto)) {
                    result.ignoreInto = true;
                    // We have a table segment start
                    result.tableSegStart = scanner.getTokenOffset();
                    result.tableStartToken = token;
                } else if (token == DMLScanner.WORD_TOKEN && result.tableSegStart > 0) {
                    // We are in a table segment so we instantiate appropriate table references
                    // and aliases
                    // in the parse result
                    if (result.lastAlias == null) {
                        // This is a new table definition, we add it
                        result.lastAlias = new TableAlias(
                                doc.get(scanner.getTokenOffset(), scanner.getTokenLength()).toUpperCase());
                        result.lastAlias.setTable(tablesMap.get(result.lastAlias.getTableName()));
                        result.addFromTable(result.lastAlias);
                    } else if (result.lastAlias.getTableAlias() == null) {
                        // This is an alias of a defined table
                        final String alias = doc.get(scanner.getTokenOffset(), scanner.getTokenLength());
                        final List<String> reservedWords = parser.getTypedTokens().get(ISQLParser.DML);
                        if (!reservedWords.contains(alias.toUpperCase())) {
                            result.lastAlias.setAlias(alias);
                        } else {
                            result.lastAlias = null;
                        }
                    }
                } else if (token == DMLScanner.COMMA_TOKEN) {
                    // On a comma, we reset any table reference
                    result.lastAlias = null;
                } else if (token == DMLScanner.DML_TOKEN) {
                    result.lastAlias = null;
                    if (result.tableSegStart != -1) {
                        int tableSegEnd = scanner.getTokenOffset();
                        result.addTableSegment(
                                new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                        result.tableSegStart = -1;
                    }
                } else if (result.tableSegStart != -1
                        && ((result.tableStartToken == DMLScanner.FROM_TOKEN && token == DMLScanner.WHERE_TOKEN)
                                || (result.tableStartToken == DMLScanner.UPDATE_TOKEN
                                        && token == DMLScanner.SET_TOKEN)
                                || (result.tableStartToken == DMLScanner.INTO_TOKEN
                                        && token == DMLScanner.LEFTPAR_TOKEN))) {
                    // We have matched a table segment end, so we close the segment
                    // and we add it to the parse result's table segments
                    int tableSegEnd = scanner.getTokenOffset();
                    result.addTableSegment(
                            new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                    result.tableSegStart = -1;
                    if (token == DMLScanner.WHERE_TOKEN) {
                        result.whereSegStart = scanner.getTokenOffset() + scanner.getTokenLength();
                    }
                }
                token = scanner.nextToken();
            }
            // If the table segment is still opened, we close it at the end of the SQL statement
            if (result.tableSegStart > -1) {
                int tableSegEnd = endSemicolonRegion.getOffset();
                result.addTableSegment(
                        new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart + 1));
            }
            // Locating the appropriate result
            for (Segment s : results.keySet()) {
                if (s.getOffset() <= start && s.getOffset() + s.getLength() > start) {
                    return results.get(s);
                }
            }
            return result;
        }
    } catch (BadLocationException e) {
        LOGGER.debug("Problems while retrieving SQL statement");
    }
    return null;
}

From source file:org.apache.tajo.engine.planner.PhysicalPlannerImpl.java
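
Each logical node is pushed onto the stack before the planner recurses into its children and popped afterwards, so the stack always describes the path from the root to the node currently being translated.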

private PhysicalExec createPlanRecursive(TaskAttemptContext ctx, LogicalNode logicalNode,
        Stack<LogicalNode> stack) throws IOException {
    PhysicalExec leftExec;
    PhysicalExec rightExec;

    switch (logicalNode.getType()) {

    case ROOT:
        LogicalRootNode rootNode = (LogicalRootNode) logicalNode;
        stack.push(rootNode);
        leftExec = createPlanRecursive(ctx, rootNode.getChild(), stack);
        stack.pop();
        return leftExec;

    case EXPRS:
        EvalExprNode evalExpr = (EvalExprNode) logicalNode;
        return new EvalExprExec(ctx, evalExpr);

    case CREATE_TABLE:
    case INSERT:
    case STORE:
        StoreTableNode storeNode = (StoreTableNode) logicalNode;
        stack.push(storeNode);
        leftExec = createPlanRecursive(ctx, storeNode.getChild(), stack);
        stack.pop();
        return createStorePlan(ctx, storeNode, leftExec);

    case SELECTION:
        SelectionNode selNode = (SelectionNode) logicalNode;
        stack.push(selNode);
        leftExec = createPlanRecursive(ctx, selNode.getChild(), stack);
        stack.pop();

        return new SelectionExec(ctx, selNode, leftExec);

    case PROJECTION:
        ProjectionNode prjNode = (ProjectionNode) logicalNode;
        stack.push(prjNode);
        leftExec = createPlanRecursive(ctx, prjNode.getChild(), stack);
        stack.pop();

        return new ProjectionExec(ctx, prjNode, leftExec);

    case TABLE_SUBQUERY: {
        TableSubQueryNode subQueryNode = (TableSubQueryNode) logicalNode;
        stack.push(subQueryNode);
        leftExec = createPlanRecursive(ctx, subQueryNode.getSubQuery(), stack);
        stack.pop();
        return new ProjectionExec(ctx, subQueryNode, leftExec);
    }

    case PARTITIONS_SCAN:
    case SCAN:
        leftExec = createScanPlan(ctx, (ScanNode) logicalNode, stack);
        return leftExec;

    case GROUP_BY:
        GroupbyNode grpNode = (GroupbyNode) logicalNode;
        stack.push(grpNode);
        leftExec = createPlanRecursive(ctx, grpNode.getChild(), stack);
        stack.pop();
        return createGroupByPlan(ctx, grpNode, leftExec);

    case WINDOW_AGG:
        WindowAggNode windowAggNode = (WindowAggNode) logicalNode;
        stack.push(windowAggNode);
        leftExec = createPlanRecursive(ctx, windowAggNode.getChild(), stack);
        stack.pop();
        return createWindowAgg(ctx, windowAggNode, leftExec);

    case DISTINCT_GROUP_BY:
        DistinctGroupbyNode distinctNode = (DistinctGroupbyNode) logicalNode;
        stack.push(distinctNode);
        leftExec = createPlanRecursive(ctx, distinctNode.getChild(), stack);
        stack.pop();
        return createDistinctGroupByPlan(ctx, distinctNode, leftExec);

    case HAVING:
        HavingNode havingNode = (HavingNode) logicalNode;
        stack.push(havingNode);
        leftExec = createPlanRecursive(ctx, havingNode.getChild(), stack);
        stack.pop();
        return new HavingExec(ctx, havingNode, leftExec);

    case SORT:
        SortNode sortNode = (SortNode) logicalNode;
        stack.push(sortNode);
        leftExec = createPlanRecursive(ctx, sortNode.getChild(), stack);
        stack.pop();
        return createSortPlan(ctx, sortNode, leftExec);

    case JOIN:
        JoinNode joinNode = (JoinNode) logicalNode;
        stack.push(joinNode);
        leftExec = createPlanRecursive(ctx, joinNode.getLeftChild(), stack);
        rightExec = createPlanRecursive(ctx, joinNode.getRightChild(), stack);
        stack.pop();

        return createJoinPlan(ctx, joinNode, leftExec, rightExec);

    case UNION:
        UnionNode unionNode = (UnionNode) logicalNode;
        stack.push(unionNode);
        leftExec = createPlanRecursive(ctx, unionNode.getLeftChild(), stack);
        rightExec = createPlanRecursive(ctx, unionNode.getRightChild(), stack);
        stack.pop();
        return new UnionExec(ctx, leftExec, rightExec);

    case LIMIT:
        LimitNode limitNode = (LimitNode) logicalNode;
        stack.push(limitNode);
        leftExec = createPlanRecursive(ctx, limitNode.getChild(), stack);
        stack.pop();
        return new LimitExec(ctx, limitNode.getInSchema(), limitNode.getOutSchema(), leftExec, limitNode);

    case INDEX_SCAN:
        IndexScanNode indexScanNode = (IndexScanNode) logicalNode;
        leftExec = createIndexScanExec(ctx, indexScanNode);
        return leftExec;

    case CREATE_INDEX:
        CreateIndexNode createIndexNode = (CreateIndexNode) logicalNode;
        stack.push(createIndexNode);
        leftExec = createPlanRecursive(ctx, createIndexNode.getChild(), stack);
        stack.pop();
        return new StoreIndexExec(ctx, createIndexNode, leftExec);

    default:
        return null;
    }
}

From source file:com.ikanow.infinit.e.application.utils.LogstashConfigUtils.java
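
Here the stack records the nesting depth at which each if statement opens, so that closing braces can be matched to ifs (which are skipped) rather than to processing blocks.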

public static BasicDBObject parseLogstashConfig(String configFile, StringBuffer error) {

    BasicDBObject tree = new BasicDBObject();

    // Stage 0: remove escaped "s and 's (for the purpose of the validation):
    // (prevents tricksies with escaped "s and then #s)
    // (http://stackoverflow.com/questions/5082398/regex-to-replace-single-backslashes-excluding-those-followed-by-certain-chars)
    configFile = configFile.replaceAll("(?<!\\\\)(?:((\\\\\\\\)*)\\\\)[\"']", "X");
    //TESTED (by hand - using last 2 fields of success_2_1)

    // Stage 1: remove #s, and anything in quotes (for the purpose of the validation)
    configFile = configFile.replaceAll("(?m)(?:([\"'])(?:(?!\\1).)*\\1)", "VALUE").replaceAll("(?m)(?:#.*$)",
            "");/*  w  w  w . j a  v  a  2  s. c  om*/
    //TESTED (2_1 - including with a # inside the ""s - Event_Date -> Event_#Date)
    //TESTED (2_2 - various combinations of "s nested inside 's) ... yes that is a negative lookahead up there - yikes!

    // Stage 2: get a nested list of objects
    int depth = 0;
    int ifdepth = -1;
    Stack<Integer> ifStack = new Stack<Integer>();
    BasicDBObject inputOrFilter = null;
    Matcher m = _navigateLogstash.matcher(configFile);
    // State:
    String currTopLevelBlockName = null;
    String currSecondLevelBlockName = null;
    BasicDBObject currSecondLevelBlock = null;
    while (m.find()) {
        boolean simpleField = false;

        //DEBUG
        //System.out.println("--DEPTH="+depth + " GROUP=" + m.group() + " IFS" + Arrays.toString(ifStack.toArray()));
        //System.out.println("STATES: " + currTopLevelBlockName + " AND " + currSecondLevelBlockName);

        if (m.group().equals("}")) {

            if (ifdepth == depth) { // closing an if statement
                ifStack.pop();
                if (ifStack.isEmpty()) {
                    ifdepth = -1;
                } else {
                    ifdepth = ifStack.peek();
                }
            } //TESTED (1_1bc, 2_1)
            else { // closing a processing block

                depth--;
                if (depth < 0) { // {} Mismatch
                    error.append("{} Mismatch (})");
                    return null;
                } //TESTED (1_1abc)
            }
        } else { // new attribute!

            String typeName = m.group(1);
            if (null == typeName) { // it's an if statement or a string value
                typeName = m.group(4);
                if (null != typeName) {
                    simpleField = true;
                }
            } else if (typeName.equalsIgnoreCase("else")) { // It's an if statement..
                typeName = null;
            }
            if (null == typeName) { // if statement after all
                // Just keep track of ifs so we can ignore them
                ifStack.push(depth);
                ifdepth = depth;
                // (don't increment depth)
            } //TESTED (1_1bc, 2_1)
            else { // processing block
                String subTypeName = m.group(3);
                if (null != subTypeName) { // eg codec.multiline
                    typeName = typeName + "." + subTypeName;
                } //TESTED (2_1, 2_3)

                if (depth == 0) { // has to be one of input/output/filter
                    String topLevelType = typeName.toLowerCase();
                    if (topLevelType.equalsIgnoreCase("input") || topLevelType.equalsIgnoreCase("filter")) {
                        if (tree.containsField(topLevelType)) {
                            error.append("Multiple input or filter blocks: " + topLevelType);
                            return null;
                        } //TESTED (1_3ab)
                        else {
                            inputOrFilter = new BasicDBObject();
                            tree.put(topLevelType, inputOrFilter);

                            // Store state:
                            currTopLevelBlockName = topLevelType;
                        } //TESTED (*)
                    } else {
                        if (topLevelType.equalsIgnoreCase("output")) {
                            error.append(
                                    "Not allowed output blocks - these are appended automatically by the logstash harvester");
                        } else {
                            error.append("Unrecognized processing block: " + topLevelType);
                        }
                        return null;
                    } //TESTED (1_4a)
                } else if (depth == 1) { // processing blocks
                    String subElType = typeName.toLowerCase();

                    // Some validation: can't include a type called "filter" anywhere
                    if ((null != currTopLevelBlockName) && currTopLevelBlockName.equals("input")) {
                        if (subElType.equals("filter") || subElType.endsWith(".filter")) {
                            error.append("Not allowed sub-elements of input called 'filter' (1)");
                            return null;
                        }
                    } //TESTED (1_5b)

                    BasicDBList subElements = (BasicDBList) inputOrFilter.get(subElType);
                    if (null == subElements) {
                        subElements = new BasicDBList();
                        inputOrFilter.put(subElType, subElements);
                    }
                    BasicDBObject newEl = new BasicDBObject();
                    subElements.add(newEl);

                    // Store state:
                    currSecondLevelBlockName = subElType;
                    currSecondLevelBlock = newEl;
                } //TESTED (*)
                else if (depth == 2) { // attributes of processing blocks
                    // we'll just store the field names for these and do any simple validation that was too complicated for the regexes
                    String subSubElType = typeName.toLowerCase();

                    // Validation:
                    if (null != currTopLevelBlockName) {
                        // 1] sincedb path
                        if (currTopLevelBlockName.equals("input") && (null != currSecondLevelBlockName)) {
                            // (don't care what the second level block name is - no sincedb allowed)
                            if (subSubElType.equalsIgnoreCase("sincedb_path")) {
                                error.append("Not allowed sincedb_path in input.* block");
                                return null;
                            } //TESTED (1_5a)
                              // 2] no sub-(-sub etc)-elements of input called filter
                            if (subSubElType.equals("filter") || subSubElType.endsWith(".filter")) {
                                error.append("Not allowed sub-elements of input called 'filter' (2)");
                                return null;
                            } //TESTED (1_5c)
                        }
                    }

                    // Store in map:
                    if (null != currSecondLevelBlock) {
                        currSecondLevelBlock.put(subSubElType, new BasicDBObject());
                    }
                }
                // (won't go any deeper than this)
                if (!simpleField) {
                    depth++;
                }
            }

        }
    }
    if (0 != depth) {
        error.append("{} Mismatch ({)");
        return null;
    } //TESTED (1_2a)

    return tree;
}

From source file:com.cloudbees.hudson.plugins.folder.AbstractFolder.java
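
An iterative tree walk: the folder's items are pushed as a starting batch, and each nested Folder's items are pushed in turn until the stack empties.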

@Exported(name = "healthReport")
public List<HealthReport> getBuildHealthReports() {
    if (healthMetrics == null || healthMetrics.isEmpty()) {
        return Collections.<HealthReport>emptyList();
    }
    List<HealthReport> reports = healthReports;
    if (reports != null && nextHealthReportsRefreshMillis > System.currentTimeMillis()) {
        // cache is still valid
        return reports;
    }
    // ensure we refresh on average once every HEALTH_REPORT_CACHE_REFRESH_MIN but not all at once
    nextHealthReportsRefreshMillis = System.currentTimeMillis()
            + TimeUnit.MINUTES.toMillis(HEALTH_REPORT_CACHE_REFRESH_MIN * 3 / 4)
            + ENTROPY.nextInt((int) TimeUnit.MINUTES.toMillis(HEALTH_REPORT_CACHE_REFRESH_MIN / 2));
    reports = new ArrayList<HealthReport>();
    List<FolderHealthMetric.Reporter> reporters = new ArrayList<FolderHealthMetric.Reporter>(
            healthMetrics.size());
    boolean recursive = false;
    boolean topLevelOnly = true;
    for (FolderHealthMetric metric : healthMetrics) {
        recursive = recursive || metric.getType().isRecursive();
        topLevelOnly = topLevelOnly && metric.getType().isTopLevelItems();
        reporters.add(metric.reporter());
    }
    for (AbstractFolderProperty<?> p : getProperties()) {
        for (FolderHealthMetric metric : p.getHealthMetrics()) {
            recursive = recursive || metric.getType().isRecursive();
            topLevelOnly = topLevelOnly && metric.getType().isTopLevelItems();
            reporters.add(metric.reporter());
        }
    }
    if (recursive) {
        Stack<Iterable<? extends Item>> stack = new Stack<Iterable<? extends Item>>();
        stack.push(getItems());
        if (topLevelOnly) {
            while (!stack.isEmpty()) {
                for (Item item : stack.pop()) {
                    if (item instanceof TopLevelItem) {
                        for (FolderHealthMetric.Reporter reporter : reporters) {
                            reporter.observe(item);
                        }
                        if (item instanceof Folder) {
                            stack.push(((Folder) item).getItems());
                        }
                    }
                }
            }
        } else {
            while (!stack.isEmpty()) {
                for (Item item : stack.pop()) {
                    for (FolderHealthMetric.Reporter reporter : reporters) {
                        reporter.observe(item);
                    }
                    if (item instanceof Folder) {
                        stack.push(((Folder) item).getItems());
                    }
                }
            }
        }
    } else {
        for (Item item : getItems()) {
            for (FolderHealthMetric.Reporter reporter : reporters) {
                reporter.observe(item);
            }
        }
    }
    for (FolderHealthMetric.Reporter reporter : reporters) {
        reports.addAll(reporter.report());
    }
    for (AbstractFolderProperty<?> p : getProperties()) {
        reports.addAll(p.getHealthReports());
    }

    Collections.sort(reports);
    healthReports = reports; // idempotent write
    return reports;
}

From source file:org.alfresco.repo.model.filefolder.FileFolderServiceImpl.java
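
Another iterative deep traversal: the starting node is pushed, and each accepted subfolder's node ref is pushed for later searching while matching files and folders are collected into the result list.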

/**
 * A deep version of listSimple, which recursively walks down the tree from a given starting
 * point, returning the node refs of files or folders found along the way.
 * <p>/*from  w w w.j  a  v a2  s  .c om*/
 * The folder filter is called for each sub-folder to determine whether to search in that sub-folder, should a subfolder be excluded 
 * then all its chidren are excluded as well.
 * 
 * @param contextNodeRef the starting point.
 * @param files return nodes of type files.
 * @param folders return nodes of type folders.
 * @param folderFilter filter controls which folders to search.  If null then all subfolders are searched.
 * @return list of node references
 */
/* <p>
 * MER: I've added this rather than changing listSimple to minimise the risk of breaking
 * the existing code. This is a quick performance improvement over using XPath (which is
 * awful) or adding new methods to the NodeService/DB. This is also a dangerous method in
 * that it can return a lot of data and take a long time.
 */
private List<NodeRef> listSimpleDeep(NodeRef contextNodeRef, boolean files, boolean folders,
        SubFolderFilter folderFilter) {
    if (logger.isDebugEnabled()) {
        logger.debug("searchSimpleDeep contextNodeRef:" + contextNodeRef);
    }

    // To hold the results.
    List<NodeRef> result = new ArrayList<NodeRef>();

    // Build a list of folder types
    Set<QName> folderTypeQNames = buildFolderTypes();
    Set<QName> fileTypeQNames = (files ? buildFileTypes() : new HashSet<QName>(0));

    if (!folders && !files) {
        return Collections.emptyList();
    }

    // Shortcut
    if (folderTypeQNames.size() == 0) {
        return Collections.emptyList();
    }

    Stack<NodeRef> toSearch = new Stack<NodeRef>();
    toSearch.push(contextNodeRef);

    // Now we need to walk down the folders.
    while (!toSearch.empty()) {
        NodeRef currentDir = toSearch.pop();

        List<ChildAssociationRef> folderAssocRefs = nodeService.getChildAssocs(currentDir, folderTypeQNames);

        for (ChildAssociationRef folderRef : folderAssocRefs) {
            // We have some child folders
            boolean include = true;
            if (folderFilter != null) {
                include = folderFilter.isEnterSubfolder(folderRef);
                if (include) {
                    // yes search in these subfolders
                    toSearch.push(folderRef.getChildRef());
                }
            } else {
                // No filter - Add the folders in the currentDir
                toSearch.push(folderRef.getChildRef());
            }

            if (folders && include) {
                result.add(folderRef.getChildRef());
            }
        }

        if (files) {
            // Add the files in the current dir
            List<ChildAssociationRef> fileAssocRefs = nodeService.getChildAssocs(currentDir, fileTypeQNames);
            for (ChildAssociationRef fileRef : fileAssocRefs) {
                result.add(fileRef.getChildRef());
            }
        }
    }

    if (logger.isDebugEnabled()) {
        logger.debug("searchSimpleDeep finished size:" + result.size());
    }

    // Done
    return result;
}