Example usage for java.util Stack pop

List of usage examples for java.util Stack pop

Introduction

On this page you can find usage examples for java.util Stack pop.

Prototype

public synchronized E pop() 

Document

Removes the object at the top of this stack and returns that object as the value of this function.
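Before the usage examples below, here is a minimal self-contained sketch of pop() in isolation. Note that pop() throws EmptyStackException when the stack is empty, so callers usually guard with empty() or isEmpty():

import java.util.EmptyStackException;
import java.util.Stack;

public class StackPopDemo {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<String>();
        stack.push("first");
        stack.push("second");

        // pop() removes and returns the top element (LIFO order)
        while (!stack.isEmpty()) {
            System.out.println(stack.pop()); // prints "second", then "first"
        }

        // pop() on an empty stack throws EmptyStackException
        try {
            stack.pop();
        } catch (EmptyStackException e) {
            System.out.println("stack was empty");
        }
    }
}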

Usage

From source file:org.apache.jackrabbit.core.nodetype.NodeTypeRegistry.java

static void checkForCircularInheritance(Name[] supertypes, Stack<Name> inheritanceChain,
        Map<Name, QNodeTypeDefinition> ntDefCache) throws InvalidNodeTypeDefException, RepositoryException {
    for (Name nt : supertypes) {
        int pos = inheritanceChain.lastIndexOf(nt);
        if (pos >= 0) {
            StringBuilder buf = new StringBuilder();
            for (int j = 0; j < inheritanceChain.size(); j++) {
                if (j == pos) {
                    buf.append("--> ");
                }
                buf.append(inheritanceChain.get(j));
                buf.append(" extends ");
            }
            buf.append("--> ");
            buf.append(nt);
            throw new InvalidNodeTypeDefException("circular inheritance detected: " + buf.toString());
        }

        try {
            QNodeTypeDefinition ntd = ntDefCache.get(nt);
            Name[] sta = ntd.getSupertypes();
            if (sta.length > 0) {
                // check recursively
                inheritanceChain.push(nt);
                checkForCircularInheritance(sta, inheritanceChain, ntDefCache);
                inheritanceChain.pop();
            }
        } catch (NoSuchNodeTypeException nsnte) {
            String msg = "unknown supertype: " + nt;
            log.debug(msg);
            throw new InvalidNodeTypeDefException(msg, nsnte);
        }
    }
}
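The example above pairs push(nt) before the recursive call with pop() right after it, so the stack always mirrors the inheritance path currently being explored. A minimal sketch of that same push/recurse/pop discipline, with hypothetical names not taken from the Jackrabbit source:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Stack;

class CycleChecker {
    // Hypothetical helper: walks a type -> supertypes map depth-first,
    // keeping the stack in sync with the current inheritance path.
    static void check(String type, Map<String, List<String>> supertypes, Stack<String> path) {
        if (path.contains(type)) {
            throw new IllegalStateException("circular inheritance: " + path + " -> " + type);
        }
        path.push(type); // enter: record this type on the current path
        for (String s : supertypes.getOrDefault(type, Collections.<String>emptyList())) {
            check(s, supertypes, path);
        }
        path.pop(); // leave: unwind so sibling branches see a clean path
    }
}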

From source file:com.unboundid.scim.sdk.FilterParser.java

/**
 * Read a filter expression.
 *
 * @return  The SCIM filter.
 */
private SCIMFilter readFilter() {
    final Stack<Node> expressionStack = new Stack<Node>();

    // Employ the shunting-yard algorithm to parse into reverse polish notation,
    // where the operands are filter components and the operators are the
    // logical AND and OR operators. This algorithm ensures that operator
    // precedence and parentheses are respected.
    final List<Node> reversePolish = new ArrayList<Node>();
    for (String word = readWord(); word != null; word = readWord()) {
        if (word.equalsIgnoreCase("and") || word.equalsIgnoreCase("or")) {
            final OperatorNode currentOperator;
            if (word.equalsIgnoreCase("and")) {
                currentOperator = new OperatorNode(SCIMFilterType.AND, markPos);
            } else {
                currentOperator = new OperatorNode(SCIMFilterType.OR, markPos);
            }
            while (!expressionStack.empty() && (expressionStack.peek() instanceof OperatorNode)) {
                final OperatorNode previousOperator = (OperatorNode) expressionStack.peek();
                if (previousOperator.getPrecedence() < currentOperator.getPrecedence()) {
                    break;
                }
                reversePolish.add(expressionStack.pop());
            }
            expressionStack.push(currentOperator);
        } else if (word.equals("(")) {
            expressionStack.push(new LeftParenthesisNode(markPos));
        } else if (word.equals(")")) {
            while (!expressionStack.empty() && !(expressionStack.peek() instanceof LeftParenthesisNode)) {
                reversePolish.add(expressionStack.pop());
            }
            if (expressionStack.empty()) {
                final String msg = String.format(
                        "No opening parenthesis matching closing " + "parenthesis at position %d", markPos);
                throw new IllegalArgumentException(msg);
            }
            expressionStack.pop();
        } else {
            rewind();
            final int pos = currentPos;
            final SCIMFilter filterComponent = readFilterComponent();
            reversePolish.add(new FilterNode(filterComponent, pos));
        }
    }

    while (!expressionStack.empty()) {
        final Node node = expressionStack.pop();
        if (node instanceof LeftParenthesisNode) {
            final String msg = String.format(
                    "No closing parenthesis matching opening " + "parenthesis at position %d", node.getPos());
            throw new IllegalArgumentException(msg);
        }
        reversePolish.add(node);
    }

    // Evaluate the reverse polish notation to create a single complex filter.
    final Stack<FilterNode> filterStack = new Stack<FilterNode>();
    for (final Node node : reversePolish) {
        if (node instanceof OperatorNode) {
            final FilterNode rightOperand = filterStack.pop();
            final FilterNode leftOperand = filterStack.pop();

            final OperatorNode operatorNode = (OperatorNode) node;
            if (operatorNode.getFilterType().equals(SCIMFilterType.AND)) {
                final SCIMFilter filter = SCIMFilter.createAndFilter(
                        Arrays.asList(leftOperand.getFilterComponent(), rightOperand.getFilterComponent()));
                filterStack.push(new FilterNode(filter, leftOperand.getPos()));
            } else {
                final SCIMFilter filter = SCIMFilter.createOrFilter(
                        Arrays.asList(leftOperand.getFilterComponent(), rightOperand.getFilterComponent()));
                filterStack.push(new FilterNode(filter, leftOperand.getPos()));
            }
        } else {
            filterStack.push((FilterNode) node);
        }
    }

    if (filterStack.size() == 0) {
        final String msg = String.format("Empty filter expression");
        throw new IllegalArgumentException(msg);
    } else if (filterStack.size() > 1) {
        final String msg = String.format("Unexpected characters at position %d", expressionStack.get(1).pos);
        throw new IllegalArgumentException(msg);
    }

    return filterStack.get(0).filterComponent;
}
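readFilter() uses pop() on two different stacks: first to move pending operators off the shunting-yard stack, then to take operands off the evaluation stack while reducing the reverse-Polish list. The same two-phase pattern in miniature, for single-digit arithmetic with assumed operators + and *:

import java.util.ArrayList;
import java.util.List;
import java.util.Stack;

class ShuntingYardSketch {
    static int precedence(char op) {
        return op == '*' ? 2 : 1; // '*' binds tighter than '+'
    }

    // Evaluates e.g. "1+2*3" -> 7 via reverse polish notation.
    static int eval(String expr) {
        List<Character> rpn = new ArrayList<Character>();
        Stack<Character> ops = new Stack<Character>();
        for (char c : expr.toCharArray()) {
            if (Character.isDigit(c)) {
                rpn.add(c);
            } else {
                // pop operators of higher or equal precedence first
                while (!ops.empty() && precedence(ops.peek()) >= precedence(c)) {
                    rpn.add(ops.pop());
                }
                ops.push(c);
            }
        }
        while (!ops.empty()) {
            rpn.add(ops.pop());
        }
        // evaluate the reverse-polish form with an operand stack
        Stack<Integer> vals = new Stack<Integer>();
        for (char c : rpn) {
            if (Character.isDigit(c)) {
                vals.push(c - '0');
            } else {
                int right = vals.pop(), left = vals.pop();
                vals.push(c == '+' ? left + right : left * right);
            }
        }
        return vals.pop();
    }
}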

From source file:net.mojodna.searchable.AbstractBeanIndexer.java

/**
 * Create fields for each property.
 * 
 * @param doc Document to add fields to.
 * @param fieldname Field name to use.
 * @param prop Property value.
 * @param descriptor Property descriptor.
 * @param stack Stack containing parent field names.
 * @param inheritedBoost Inherited boost factor.
 * @return Document with additional fields.
 * @throws IndexingException
 */
protected Document addFields(final Document doc, final String fieldname, final Object prop,
        final PropertyDescriptor descriptor, final Stack<String> stack, final float inheritedBoost)
        throws IndexingException {
    if (prop instanceof Date) {
        // handle Dates specially
        float boost = SearchableUtils.getBoost(descriptor);

        // TODO allow resolution to be specified in annotation
        // TODO serialize as canonical date (for Solr)
        final Field field = new Field(getFieldname(fieldname, stack),
                DateTools.dateToString((Date) prop, DateTools.Resolution.SECOND), Field.Store.YES,
                Field.Index.UN_TOKENIZED);
        field.setBoost(inheritedBoost * boost);
        doc.add(field);
    } else if (prop instanceof Iterable) {
        // create multiple fields for things that can be iterated over
        float boost = SearchableUtils.getBoost(descriptor);

        for (final Object o : (Iterable) prop) {
            addFields(doc, fieldname, o, descriptor, stack, inheritedBoost * boost);
        }
    } else if (prop instanceof Object[]) {
        // create multiple fields for arrays of things
        float boost = SearchableUtils.getBoost(descriptor);

        for (final Object o : (Object[]) prop) {
            addFields(doc, fieldname, o, descriptor, stack, inheritedBoost * boost);
        }
    } else if (prop instanceof Searchable) {
        // nested Searchables
        stack.push(fieldname);

        processBean(doc, (Searchable) prop, stack, inheritedBoost * SearchableUtils.getBoost(descriptor));

        stack.pop();
    } else {
        final String value = prop.toString();
        float boost = SearchableUtils.getBoost(descriptor);

        final Field field = new Field(getFieldname(fieldname, stack), value,
                SearchableUtils.isStored(descriptor), SearchableUtils.getIndexStyle(descriptor),
                isVectorized(descriptor));
        field.setBoost(inheritedBoost * boost);
        doc.add(field);
    }

    return doc;
}

From source file:gdt.data.store.Entigrator.java

private static String[] intersect(String[] list1, String[] list2) {

    if (list2 == null || list1 == null) {
        return null;
    }
    Stack<String> s1 = new Stack<String>();
    Stack<String> s2 = new Stack<String>();
    for (String aList2 : list2)
        s2.push(aList2);
    String line$;
    boolean found;
    String member$ = null;
    while (!s2.isEmpty()) {
        try {
            found = false;
            line$ = s2.pop().toString();
            if (line$ == null)
                continue;
            for (String aList1 : list1) {
                member$ = aList1;

                if (line$.equals(member$)) {
                    found = true;
                    break;
                }
            }
            if (found)
                Support.addItem(member$, s1);
        } catch (Exception e) {
            Logger.getLogger(Entigrator.class.getName()).info(":intersect:" + e.toString());
        }
    }
    int cnt = s1.size();
    if (cnt < 1)
        return new String[0];
    String[] res = new String[cnt];
    for (int i = 0; i < cnt; i++)
        res[i] = s1.pop().toString();
    return res;
}
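Because the final loop fills the result array with pop(), the matches come out in reverse push order (LIFO). A tiny illustration of that draining pattern with placeholder values:

import java.util.Stack;

class DrainDemo {
    public static void main(String[] args) {
        Stack<String> s = new Stack<String>();
        s.push("a");
        s.push("b");
        s.push("c");

        String[] out = new String[s.size()];
        for (int i = 0; i < out.length; i++) {
            out[i] = s.pop(); // LIFO: out == {"c", "b", "a"}
        }
        System.out.println(String.join(",", out)); // prints c,b,a
    }
}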

From source file:com.clust4j.algo.DBSCAN.java

@Override
protected DBSCAN fit() {
    synchronized (fitLock) {

        if (null != labels) // Then we've already fit this...
            return this;

        // First get the dist matrix
        final LogTimer timer = new LogTimer();

        // Do the neighborhood assignments, get sample weights, find core samples..
        final LogTimer neighbTimer = new LogTimer();
        labels = new int[m]; // Initialize labels...
        sampleWeights = new double[m]; // Init sample weights...
        coreSamples = new boolean[m];

        // Fit the nearest neighbor model...
        final LogTimer rnTimer = new LogTimer();
        final RadiusNeighbors rnModel = new RadiusNeighbors(data, new RadiusNeighborsParameters(eps)
                .setSeed(getSeed()).setMetric(getSeparabilityMetric()).setVerbose(false)).fit();

        info("fit RadiusNeighbors model in " + rnTimer.toString());
        int[][] nearest = rnModel.getNeighbors().getIndices();

        int[] ptNeighbs;
        ArrayList<int[]> neighborhoods = new ArrayList<>();
        int numCorePts = 0;
        for (int i = 0; i < m; i++) {
            // Each label inits to -1 as noise
            labels[i] = NOISE_CLASS;
            ptNeighbs = nearest[i];

            // Add neighborhood...
            int pts;
            neighborhoods.add(ptNeighbs);
            sampleWeights[i] = pts = ptNeighbs.length;
            coreSamples[i] = pts >= minPts;

            if (coreSamples[i])
                numCorePts++;
        }

        // Log checkpoint
        info("completed density neighborhood calculations in " + neighbTimer.toString());
        info(numCorePts + " core point" + (numCorePts != 1 ? "s" : "") + " found");

        // Label the points...
        int nextLabel = 0, v;
        final Stack<Integer> stack = new Stack<>();
        int[] neighb;

        LogTimer stackTimer = new LogTimer();
        for (int i = 0; i < m; i++) {
            stackTimer = new LogTimer();

            // Want to look at unlabeled OR core points...
            if (labels[i] != NOISE_CLASS || !coreSamples[i])
                continue;

            // Depth-first search starting from i, ending at the non-core points.
            // This is very similar to the classic algorithm for computing connected
            // components, the difference being that we label non-core points as
            // part of a cluster (component), but don't expand their neighborhoods.
            int labelCt = 0;
            while (true) {
                if (labels[i] == NOISE_CLASS) {
                    labels[i] = nextLabel;
                    labelCt++;

                    if (coreSamples[i]) {
                        neighb = neighborhoods.get(i);

                        for (i = 0; i < neighb.length; i++) {
                            v = neighb[i];
                            if (labels[v] == NOISE_CLASS)
                                stack.push(v);
                        }
                    }
                }

                if (stack.size() == 0) {
                    fitSummary.add(new Object[] { nextLabel, labelCt, stackTimer.formatTime(),
                            stackTimer.wallTime() });

                    break;
                }

                i = stack.pop();
            }

            nextLabel++;
        }

        // Count missing
        numNoisey = 0;
        for (int lab : labels)
            if (lab == NOISE_CLASS)
                numNoisey++;

        // corner case: numNoisey == m (never gets a fit summary)
        if (numNoisey == m)
            fitSummary.add(new Object[] { Double.NaN, 0, stackTimer.formatTime(), stackTimer.wallTime() });

        info((numClusters = nextLabel) + " cluster" + (nextLabel != 1 ? "s" : "") + " identified, " + numNoisey
                + " record" + (numNoisey != 1 ? "s" : "") + " classified noise");

        // Encode to put in order
        labels = new NoiseyLabelEncoder(labels).fit().getEncodedLabels();

        sayBye(timer);
        return this;
    }

}
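The labeling loop above is an iterative depth-first expansion: newly discovered noise points are pushed, and pop() pulls them back until the stack empties. The core of that pattern in isolation, over a hypothetical adjacency-list graph:

import java.util.Stack;

class IterativeDfsSketch {
    // Labels every vertex reachable from start with the given label.
    // adj[v] lists the neighbors of vertex v; labels[v] < 0 means unlabeled.
    static void expand(int start, int[][] adj, int[] labels, int label) {
        Stack<Integer> stack = new Stack<Integer>();
        stack.push(start);
        while (!stack.isEmpty()) {
            int v = stack.pop();  // take the most recently discovered vertex
            if (labels[v] >= 0)
                continue;         // already labeled by an earlier pop
            labels[v] = label;
            for (int w : adj[v]) {
                if (labels[w] < 0)
                    stack.push(w);
            }
        }
    }
}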

From source file:edu.umn.cs.spatialHadoop.core.RTree.java

/**
 * Searches the RTree starting from the given start position. This is either
 * a node number or offset of an element. If it's a node number, it performs
 * the search in the subtree rooted at this node. If it's an offset number,
 * it searches only the object found there.
 * It is assumed that the openQuery() has been called before this function
 * and that endQuery() will be called afterwards.
 * @param query_shape the query shape; its MBR is used to test nodes and objects
 * @param output the collector that receives matching objects, or null to only count them
 * @param start - where to start searching
 * @param end - where to end searching. Only used when start is an offset of
 *   an object.
 * @return the number of matching objects found
 * @throws IOException
 */
protected int search(Shape query_shape, ResultCollector<T> output, int start, int end) throws IOException {
    Rectangle query_mbr = query_shape.getMBR();
    int resultSize = 0;
    // Special case for an empty tree
    if (height == 0)
        return 0;

    Stack<Integer> toBeSearched = new Stack<Integer>();
    // Start from the given node
    toBeSearched.push(start);
    if (start >= nodeCount) {
        toBeSearched.push(end);
    }

    Rectangle node_mbr = new Rectangle();

    // Holds one data line from tree data
    Text line = new Text2();

    while (!toBeSearched.isEmpty()) {
        int searchNumber = toBeSearched.pop();
        int mbrsToTest = searchNumber == 0 ? 1 : degree;

        if (searchNumber < nodeCount) {
            long nodeOffset = NodeSize * searchNumber;
            structure.seek(nodeOffset);
            int dataOffset = structure.readInt();

            for (int i = 0; i < mbrsToTest; i++) {
                node_mbr.readFields(structure);
                int lastOffset = (searchNumber + i) == nodeCount - 1 ? treeSize : structure.readInt();
                if (query_mbr.contains(node_mbr)) {
                    // The node is fully contained in the query range.
                    // Save time and do a full scan of this node
                    toBeSearched.push(dataOffset);
                    // Checks if this node is the last node in its level
                    // This can be easily detected because the next node in the level
                    // order traversal will be the first node in the next level
                    // which means it will have an offset less than this node
                    if (lastOffset <= dataOffset)
                        lastOffset = treeSize;
                    toBeSearched.push(lastOffset);
                } else if (query_mbr.isIntersected(node_mbr)) {
                    // Node partially overlaps with query. Go deep under this node
                    if (searchNumber < nonLeafNodeCount) {
                        // Search child nodes
                        toBeSearched.push((searchNumber + i) * degree + 1);
                    } else {
                        // Search all elements in this node
                        toBeSearched.push(dataOffset);
                        // Checks if this node is the last node in its level
                        // This can be easily detected because the next node in the level
                        // order traversal will be the first node in the next level
                        // which means it will have an offset less than this node
                        if (lastOffset <= dataOffset)
                            lastOffset = treeSize;
                        toBeSearched.push(lastOffset);
                    }
                }
                dataOffset = lastOffset;
            }
        } else {
            int firstOffset, lastOffset;
            // Search for data items (records)
            lastOffset = searchNumber;
            firstOffset = toBeSearched.pop();

            data.seek(firstOffset + treeStartOffset);
            LineReader lineReader = new LineReader(data);
            while (firstOffset < lastOffset) {
                firstOffset += lineReader.readLine(line);
                stockObject.fromText(line);
                if (stockObject.isIntersected(query_shape)) {
                    resultSize++;
                    if (output != null)
                        output.collect(stockObject);
                }
            }
        }
    }
    return resultSize;
}
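One detail worth noting: this search pushes values in pairs (a data offset followed by its end offset), and the matching pops recover them in reverse order. A tiny sketch of that two-push/two-pop convention, with illustrative offsets:

import java.util.Stack;

class RangeStackDemo {
    public static void main(String[] args) {
        Stack<Integer> toScan = new Stack<Integer>();
        // push each range as (start, end); end lands on top
        toScan.push(100); // start
        toScan.push(250); // end
        while (!toScan.isEmpty()) {
            int end = toScan.pop();   // pushed last, popped first
            int start = toScan.pop();
            System.out.println("scan bytes " + start + ".." + end);
        }
    }
}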

From source file:de.tudarmstadt.ukp.dkpro.wsd.graphconnectivity.algorithm.GraphConnectivityWSD.java

/**
 * Beginning at startVertex, conduct a depth-first search of the graph
 * siGraph looking for any of the vertices in goalSynsets. If any are found
 * within maxDepth iterations, add the path to dGraph.
 *
 * @param startVertex
 *            The vertex at which to begin the search
 * @param goalVertices
 *            A collection of vertices at which to stop the search
 *            successfully
 * @param siGraph
 *            The full ontology graph to search
 * @param dGraph
 *            The disambiguation graph to construct
 * @param vertexPath
 *            A stack of vertices visited so far
 * @param edgePath
 *            A stack of edges visited so far
 * @param maxDepth
 *            The maximum depth to recurse to
 * @return true if and only if a goal was found before maxDepth iterations
 */
protected boolean dfs(final String startVertex, final Collection<String> goalVertices,
        final Graph<String, UnorderedPair<String>> siGraph, final Graph<String, UnorderedPair<String>> dGraph,
        final Stack<String> vertexPath, final Stack<UnorderedPair<String>> edgePath, final int maxDepth) {
    // TODO: This algorithm could probably be optimized further

    logger.debug("count=" + dfsCount++ + " depth=" + (searchDepth - maxDepth) + " synset=" + startVertex);

    // We have found a goal
    if (goalVertices.contains(startVertex)) {
        logger.debug("Found goal at " + startVertex);
        for (UnorderedPair<String> p : edgePath) {
            logger.debug(p.toString());
        }
        return true;
    }

    // We have reached the maximum depth
    if (maxDepth == 0) {
        logger.debug("Reached maximum depth at " + startVertex);
        return false;
    }

    // Visit all neighbours of this vertex
    for (UnorderedPair<String> edge : siGraph.getOutEdges(startVertex)) {
        String neighbour = siGraph.getOpposite(startVertex, edge);
        if (vertexPath.contains(neighbour)) {
            // We have encountered a loop
            logger.debug("Encountered loop at " + neighbour);
            continue;
        }
        if (dGraph.containsEdge(edge)) {
            // This path is already in the disambiguation graph
            logger.debug("Path already in graph at " + edge);
            continue;
        }
        edgePath.push(edge);
        vertexPath.push(neighbour);
        logger.debug("Recursing to " + edge);
        if (dfs(neighbour, goalVertices, siGraph, dGraph, vertexPath, edgePath, maxDepth - 1) == true) {
            logger.debug("Adding " + edge);
            addPath(dGraph, edgePath);
        } else {
            logger.debug("Not adding " + edge);
        }
        edgePath.pop();
        vertexPath.pop();
    }

    // We have reached a dead end
    logger.debug("Reached dead end at " + startVertex);
    return false;
}

From source file:com.nextep.designer.sqlgen.ui.editors.sql.SQLCompletionProcessor.java

/**
 * This method parses the SQL statement defined at the current start offset. The method will
 * retrieve any SQL statement which encapsulate the start offset, parse it and return the result
 * of this parse for completion proposals.
 *
 * @param viewer viewer of the document to parse
 * @param start start offset
 * @return a {@link DMLParseResult} which contains information about the parse of the found SQL
 *         statement, or <code>null</code> if no SQL statement has been found from the given
 *         start offset.
 */
private DMLParseResult parseSQL(ITextViewer viewer, int start) {
    // Retrieving the corresponding statement start
    IDocument doc = new Document();
    doc.set(viewer.getDocument().get() + " "); //$NON-NLS-1$

    FindReplaceDocumentAdapter finder = new FindReplaceDocumentAdapter(doc);
    try {
        IRegion lastSemicolonRegion = finder.find(start - 1, ";", false, false, false, false); //$NON-NLS-1$
        if (lastSemicolonRegion == null) {
            lastSemicolonRegion = new Region(0, 1);
        }
        IRegion selectRegion = finder.find(lastSemicolonRegion.getOffset(), "SELECT|INSERT|UPDATE|DELETE", true, //$NON-NLS-1$
                false, false, true);

        IRegion endSemicolonRegion = finder.find(start == doc.getLength() ? start - 1 : start, ";", true, false, //$NON-NLS-1$
                false, false);
        if (endSemicolonRegion == null) {
            endSemicolonRegion = new Region(doc.getLength() - 1, 0);
        }
        if (selectRegion == null || lastSemicolonRegion == null || endSemicolonRegion == null) {
            return null;
        }
        // The select must be found after the first semicolon, else it is not the
        // same SQL statement
        if (selectRegion.getOffset() >= lastSemicolonRegion.getOffset()
                && endSemicolonRegion.getOffset() >= selectRegion.getOffset()) {
            DMLScanner scanner = new DMLScanner(parser);
            scanner.setRange(doc, selectRegion.getOffset(),
                    endSemicolonRegion.getOffset() - selectRegion.getOffset());
            IToken token = scanner.nextToken();
            DMLParseResult result = new DMLParseResult();
            Stack<DMLParseResult> stack = new Stack<DMLParseResult>();
            Map<Segment, DMLParseResult> results = new HashMap<Segment, DMLParseResult>();
            while (!token.isEOF()) {
                // Counting parentheses
                if (token == DMLScanner.LEFTPAR_TOKEN) {
                    result.parCount++;
                } else if (token == DMLScanner.RIGHTPAR_TOKEN) {
                    result.parCount--;
                }

                if (token == DMLScanner.SELECT_TOKEN) { // && (result.tableSegStart>0 ||
                    // result.whereSegStart>0)) {
                    stack.push(result);
                    result = new DMLParseResult();
                    result.stackStart = scanner.getTokenOffset();
                } else if (token == DMLScanner.RIGHTPAR_TOKEN && result.parCount < 0) { // &&
                    // stack.size()>0)
                    // {
                    results.put(new Segment(result.stackStart, scanner.getTokenOffset() - result.stackStart),
                            result);
                    result = stack.pop();
                } else if (token == DMLScanner.INSERT_TOKEN) {
                    result.ignoreInto = false;
                } else if (token == DMLScanner.FROM_TOKEN || token == DMLScanner.UPDATE_TOKEN
                        || (token == DMLScanner.INTO_TOKEN && !result.ignoreInto)) {
                    result.ignoreInto = true;
                    // We have a table segment start
                    result.tableSegStart = scanner.getTokenOffset();
                    result.tableStartToken = token;
                } else if (token == DMLScanner.WORD_TOKEN && result.tableSegStart > 0) {
                    // We are in a table segment so we instantiate appropriate table references
                    // and aliases
                    // in the parse result
                    if (result.lastAlias == null) {
                        // This is a new table definition, we add it
                        result.lastAlias = new TableAlias(
                                doc.get(scanner.getTokenOffset(), scanner.getTokenLength()).toUpperCase());
                        result.lastAlias.setTable(tablesMap.get(result.lastAlias.getTableName()));
                        result.addFromTable(result.lastAlias);
                    } else if (result.lastAlias.getTableAlias() == null) {
                        // This is an alias of a defined table
                        final String alias = doc.get(scanner.getTokenOffset(), scanner.getTokenLength());
                        final List<String> reservedWords = parser.getTypedTokens().get(ISQLParser.DML);
                        if (!reservedWords.contains(alias.toUpperCase())) {
                            result.lastAlias.setAlias(alias);
                        } else {
                            result.lastAlias = null;
                        }
                    }
                } else if (token == DMLScanner.COMMA_TOKEN) {
                    // On a comma, we reset any table reference
                    result.lastAlias = null;
                } else if (token == DMLScanner.DML_TOKEN) {
                    result.lastAlias = null;
                    if (result.tableSegStart != -1) {
                        int tableSegEnd = scanner.getTokenOffset();
                        result.addTableSegment(
                                new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                        result.tableSegStart = -1;
                    }
                } else if (result.tableSegStart != -1
                        && ((result.tableStartToken == DMLScanner.FROM_TOKEN && token == DMLScanner.WHERE_TOKEN)
                                || (result.tableStartToken == DMLScanner.UPDATE_TOKEN
                                        && token == DMLScanner.SET_TOKEN)
                                || (result.tableStartToken == DMLScanner.INTO_TOKEN
                                        && token == DMLScanner.LEFTPAR_TOKEN))) {
                    // We have matched a table segment end, so we close the segment
                    // and we add it to the parse result's table segments
                    int tableSegEnd = scanner.getTokenOffset();
                    result.addTableSegment(
                            new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart));
                    result.tableSegStart = -1;
                    if (token == DMLScanner.WHERE_TOKEN) {
                        result.whereSegStart = scanner.getTokenOffset() + scanner.getTokenLength();
                    }
                }
                token = scanner.nextToken();
            }
            // If the table segment is still opened, we close it at the end of the SQL statement
            if (result.tableSegStart > -1) {
                int tableSegEnd = endSemicolonRegion.getOffset();
                result.addTableSegment(
                        new Segment(result.tableSegStart, tableSegEnd - result.tableSegStart + 1));
            }
            // Locating the appropriate result
            for (Segment s : results.keySet()) {
                if (s.getOffset() <= start && s.getOffset() + s.getLength() > start) {
                    return results.get(s);
                }
            }
            return result;
        }
    } catch (BadLocationException e) {
        LOGGER.debug("Problems while retrieving SQL statement");
    }
    return null;
}
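Here the stack saves parser contexts: a nested SELECT pushes the current DMLParseResult and starts a fresh one, and the matching closing parenthesis pops the outer context back. A minimal sketch of that save/restore pattern, using a hypothetical Context class:

import java.util.Stack;

class NestedContextSketch {
    static class Context {
        final int startOffset;
        Context(int startOffset) { this.startOffset = startOffset; }
    }

    // Tracks nested scopes in a string: '(' saves the current context, ')' restores it.
    static void scan(String input) {
        Stack<Context> stack = new Stack<Context>();
        Context current = new Context(0);
        for (int i = 0; i < input.length(); i++) {
            char c = input.charAt(i);
            if (c == '(') {
                stack.push(current);        // save the enclosing context
                current = new Context(i);   // start a nested one
            } else if (c == ')' && !stack.isEmpty()) {
                System.out.println("nested scope " + current.startOffset + ".." + i);
                current = stack.pop();      // restore the enclosing context
            }
        }
    }
}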

From source file:fr.inria.oak.paxquery.common.xml.navigation.NavigationTreePatternNode.java

/**
 * Deep-copies the node at the far end of the given edge and marks the entire copied subtree as virtual.
 * @param edge
 */
public void copyVirtualChild(NavigationTreePatternEdge edge) {
    NavigationTreePatternNode childCopy = edge.n2.deepCopy();
    // marking childCopy and its subtree as virtual:
    Stack<NavigationTreePatternNode> st = new Stack<NavigationTreePatternNode>();
    st.push(childCopy);
    while (!st.empty()) {
        NavigationTreePatternNode pn = st.pop();
        // Parameters.logger.info("Set virtual node: " + pn.tag);
        pn.virtual = true;
        pn.nodeCode = NavigationTreePatternNode.globalNodeCounter.getAndIncrement();
        // virtual nodes obtained by navigation cannot store ID
        pn.storesID = false;
        Iterator<NavigationTreePatternEdge> pnChildren = pn.edges.iterator();
        while (pnChildren.hasNext()) {
            NavigationTreePatternEdge pnEdge = pnChildren.next();
            st.push(pnEdge.n2);
        }
    }
    addEdge(childCopy, edge.isParent(), edge.isNested(), edge.isOptional());
}

From source file:fr.inria.oak.paxquery.common.xml.navigation.NavigationTreePatternNode.java

/**
 * Returns true if this node is the top returning node of the tree pattern.
 * Assumes that the node returns at least an ID, so we don't check for that.
 * Also assumes that the node selects on the tag and does not return it.
 * @return true if and only if this node is the top returning node
 */
public boolean isTopReturningNode() {
    if (this.isTopReturningNode == 0) {
        return false;
    }
    if (this.isTopReturningNode == 1) {
        return true;
    }
    // otherwise, it is -1, and we need to look
    if (this.parentEdge == null) {
        return true;
    }
    NavigationTreePatternNode n2 = this.parentEdge.n1;
    Stack<NavigationTreePatternNode> sn = new Stack<NavigationTreePatternNode>();
    sn.push(n2);
    while (!sn.empty()) {
        NavigationTreePatternNode n3 = sn.pop();
        if (n3 == null) {
            this.isTopReturningNode = 1;
            return true;
        }
        if (n3.storesID || n3.storesValue || (n3.storesContent)) {
            this.isTopReturningNode = 0;
            return false;
        }
        if (n3.parentEdge != null) {
            sn.push(n3.parentEdge.n1);
        }
    }
    this.isTopReturningNode = 1;
    return true;
}