Example usage for java.util Stack push

Introduction

On this page you can find usage examples for java.util.Stack.push, drawn from real-world open-source projects.

Prototype

public E push(E item) 

Document

Pushes an item onto the top of this stack and returns the item that was pushed.
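
Below is a minimal, self-contained sketch (not from the projects listed under Usage) illustrating the prototype: push returns its argument, so the pushed value can be consumed inline.

import java.util.Stack;

public class StackPushDemo {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<String>();
        stack.push("first");                     // stack: [first]
        String returned = stack.push("second");  // push returns the item it pushed
        System.out.println(returned);            // prints "second"
        System.out.println(stack.peek());        // prints "second" (top of stack)
        System.out.println(stack.pop());         // prints "second"; stack: [first]
    }
}

Note that the java.util.Stack documentation recommends the java.util.Deque interface (e.g. ArrayDeque) for new LIFO code; the examples below nonetheless show Stack.push in real-world use.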

Usage

From source file:com.taobao.tdhs.jdbc.sqlparser.ParseSQL.java

private void analyzeUpdateSetColumns(String substring) {
    if (substring == null)
        return;

    /*String[] array_setColumn = substring.split(",");
      for (String setColumn : array_setColumn) {
      int addr = StringUtils.indexOfIgnoreCase(setColumn, "=");
      String column = setColumn.substring(0, addr).trim();
      String value = setColumn.substring(addr + 1).trim();
      this.updateEntries.add(new Entry<String, String>(column, value));
      }*/

    // push each character of the SET clause onto a stack so it can be parsed right to left
    Stack<String> updateColumnValueStack = new Stack<String>();
    for (int i = 0; i < substring.length(); i++) {
        updateColumnValueStack.push(substring.substring(i, i + 1));
    }

    String column = "";
    String value = "";
    while (!updateColumnValueStack.isEmpty()) {
        column = "";
        value = "";
        // collect the value, popping right to left, until a genuine '=' separator is reached
        while (!updateColumnValueStack.peek().equals("=")
                || !checkRealEqual(updateColumnValueStack)) {
            value = updateColumnValueStack.pop() + value;
        }
        // discard the '=' separator
        updateColumnValueStack.pop();
        // collect the column name until the preceding ',' separator
        try {
            while (!updateColumnValueStack.peek().equals(",")) {
                column = updateColumnValueStack.pop() + column;
            }
        } catch (EmptyStackException e) {
            // stack exhausted: this was the leftmost column=value pair, so record it and stop
            this.updateEntries.add(new Entry<String, String>(column, value));
            break;
        }

        // discard the ',' separator
        updateColumnValueStack.pop();
        // record the parsed column=value pair
        this.updateEntries.add(new Entry<String, String>(column, value));
    }

}

From source file:de.tudarmstadt.ukp.dkpro.wsd.graphconnectivity.algorithm.GraphConnectivityWSD.java

/**
 * Beginning at startVertex, conduct a depth-first search of the graph
 * siGraph looking for any of the vertices in goalSynsets. If any are found
 * within maxDepth iterations, add the path to dGraph.
 *
 * @param startVertex
 *            The vertex at which to begin the search
 * @param goalVertices
 *            A collection of vertices at which to stop the search
 *            successfully
 * @param siGraph
 *            The full ontology graph to search
 * @param dGraph
 *            The disambiguation graph to construct
 * @param vertexPath
 *            A stack of vertices visited so far
 * @param edgePath
 *            A stack of edges visited so far
 * @param maxDepth
 *            The maximum depth to recurse to
 * @return true if and only if a goal was found before maxDepth iterations
 */
protected boolean dfs(final String startVertex, final Collection<String> goalVertices,
        final Graph<String, UnorderedPair<String>> siGraph, final Graph<String, UnorderedPair<String>> dGraph,
        final Stack<String> vertexPath, final Stack<UnorderedPair<String>> edgePath, final int maxDepth) {
    // TODO: This algorithm could probably be optimized further

    logger.debug("count=" + dfsCount++ + " depth=" + (searchDepth - maxDepth) + " synset=" + startVertex);

    // We have found a goal
    if (goalVertices.contains(startVertex)) {
        logger.debug("Found goal at " + startVertex);
        for (UnorderedPair<String> p : edgePath) {
            logger.debug(p.toString());
        }
        return true;
    }

    // We have reached the maximum depth
    if (maxDepth == 0) {
        logger.debug("Reached maximum depth at " + startVertex);
        return false;
    }

    // Visit all neighbours of this vertex
    for (UnorderedPair<String> edge : siGraph.getOutEdges(startVertex)) {
        String neighbour = siGraph.getOpposite(startVertex, edge);
        if (vertexPath.contains(neighbour)) {
            // We have encountered a loop
            logger.debug("Encountered loop at " + neighbour);
            continue;
        }
        if (dGraph.containsEdge(edge)) {
            // This path is already in the disambiguation graph
            logger.debug("Path already in graph at " + edge);
            continue;
        }
        edgePath.push(edge);
        vertexPath.push(neighbour);
        logger.debug("Recursing to " + edge);
        if (dfs(neighbour, goalVertices, siGraph, dGraph, vertexPath, edgePath, maxDepth - 1)) {
            logger.debug("Adding " + edge);
            addPath(dGraph, edgePath);
        } else {
            logger.debug("Not adding " + edge);
        }
        edgePath.pop();
        vertexPath.pop();
    }

    // We have reached a dead end
    logger.debug("Reached dead end at " + startVertex);
    return false;
}

From source file:edu.umn.cs.spatialHadoop.indexing.RTree.java

/**
 * Searches the RTree starting from the given start position. This is either
 * a node number or offset of an element. If it's a node number, it performs
 * the search in the subtree rooted at this node. If it's an offset number,
 * it searches only the object found there.
 * It is assumed that openQuery() has been called before this function
 * and that endQuery() will be called afterwards.
 * @param query_shape
 * @param output
 * @param start where to start searching
 * @param end where to end searching. Only used when start is an offset of
 *   an object.
 * @return
 * @throws IOException
 */
protected int search(Shape query_shape, ResultCollector<T> output, int start, int end) throws IOException {
    Rectangle query_mbr = query_shape.getMBR();
    int resultSize = 0;
    // Special case for an empty tree
    if (height == 0)
        return 0;

    Stack<Integer> toBeSearched = new Stack<Integer>();
    // Start from the given node
    toBeSearched.push(start);
    if (start >= nodeCount) {
        toBeSearched.push(end);
    }

    // Holds one data line from tree data
    Text line = new Text2();

    while (!toBeSearched.isEmpty()) {
        int searchNumber = toBeSearched.pop();

        if (searchNumber < nodeCount) {
            // Searching a node
            int nodeID = searchNumber;
            if (query_mbr.isIntersected(nodes[nodeID])) {
                boolean is_leaf = nodeID >= nonLeafNodeCount;
                if (is_leaf) {
                    // Check all objects under this node
                    int start_offset = this.dataOffset[nodeID];
                    int end_offset = this.dataOffset[nodeID + 1];
                    toBeSearched.add(start_offset);
                    toBeSearched.add(end_offset);
                } else {
                    // Add all child nodes
                    for (int iChild = 0; iChild < this.degree; iChild++) {
                        toBeSearched.add(nodeID * this.degree + iChild + 1);
                    }
                }
            }
        } else {
            // searchNumber is the end offset of data search. Start offset is next
            // in stack
            int end_offset = searchNumber;
            int start_offset = toBeSearched.pop();
            // All data offsets are relative to tree start (typically 4)
            this.data.seek(start_offset + this.treeStartOffset);
            // Should not close the line reader because we do not want to close
            // the underlying data stream now, in case future searches are done
            @SuppressWarnings("resource")
            LineReader lineReader = new LineReader(data);
            while (start_offset < end_offset) {
                start_offset += lineReader.readLine(line);
                stockObject.fromText(line);
                if (stockObject.isIntersected(query_shape)) {
                    resultSize++;
                    if (output != null)
                        output.collect(stockObject);
                }
            }
        }
    }
    return resultSize;
}

From source file:gov.nih.nci.cagrid.sdk4query.processor.PublicDataCQL2ParameterizedHQL.java

/**
 * Processes CQL associations into HQL
 *
 * @param association
 *       The CQL association
 * @param hql
 *       The HQL fragment which will be edited
 * @param parameters
 *       The positional HQL query parameters
 * @param associationStack
 *       The stack of associations processed so far
 * @param sourceQueryObject
 *       The query object to which this association belongs
 * @param sourceAlias
 *       The HQL alias of the source query object
 * @throws QueryProcessingException
 */
private void processAssociation(Association association, StringBuilder hql, List<java.lang.Object> parameters,
        Stack<Association> associationStack, Object sourceQueryObject, String sourceAlias)
        throws QueryProcessingException {
    LOG.debug("Processing association " + sourceQueryObject.getName() + " to " + association.getName());

    // get the association's role name
    String roleName = roleNameResolver.getRoleName(sourceQueryObject.getName(), association);
    if (roleName == null) {
        // still null? then there is no association to the object
        // TODO: should probably be malformed query exception
        throw new QueryProcessingException("Association from type " + sourceQueryObject.getName() + " to type "
                + association.getName() + " does not exist.  Use only direct associations");
    }
    LOG.debug("Role name determined to be " + roleName);

    // determine the alias for this association
    String alias = getAssociationAlias(sourceQueryObject.getName(), association.getName(), roleName);
    LOG.debug("Association alias determined to be " + alias);

    // add this association to the stack
    associationStack.push(association);

    // flag indicates the query is only verifying the association is populated
    boolean simpleNullCheck = true;
    if (association.getAssociation() != null) {
        simpleNullCheck = false;
        // add clause to select things from this association
        hql.append(sourceAlias).append('.').append(roleName);
        hql.append(".id in (select ").append(alias).append(".id from ");
        hql.append(association.getName()).append(" as ").append(alias).append(" where ");
        processAssociation(association.getAssociation(), hql, parameters, associationStack, association, alias);
        hql.append(") ");
    }
    if (association.getAttribute() != null) {
        simpleNullCheck = false;
        processAttribute(association.getAttribute(), hql, parameters, association,
                sourceAlias + "." + roleName);
    }
    if (association.getGroup() != null) {
        simpleNullCheck = false;
        hql.append(sourceAlias).append('.').append(roleName);
        hql.append(".id in (select ").append(alias).append(".id from ");
        hql.append(association.getName()).append(" as ").append(alias).append(" where ");
        processGroup(association.getGroup(), hql, parameters, associationStack, association, alias);
        hql.append(") ");
    }

    if (simpleNullCheck) {
        // query is checking for the association to exist and be non-null
        hql.append(sourceAlias).append('.').append(roleName).append(".id is not null ");
    }

    // pop this association off the stack
    associationStack.pop();
    LOG.debug(associationStack.size() + " associations remain on the stack");
}

From source file:org.apache.flink.cep.nfa.SharedBuffer.java

/**
 * Returns all elements from the previous relation starting at the given value with the
 * given key and timestamp.
 *
 * @param key Key of the starting value
 * @param value Value of the starting element
 * @param timestamp Timestamp of the starting value
 * @param version Version of the previous relation which shall be extracted
 * @return Collection of previous relations starting with the given value
 */
public Collection<LinkedHashMultimap<K, V>> extractPatterns(final K key, final V value, final long timestamp,
        final DeweyNumber version) {
    Collection<LinkedHashMultimap<K, V>> result = new ArrayList<>();

    // stack to remember the current extraction states
    Stack<ExtractionState<K, V>> extractionStates = new Stack<>();

    // get the starting shared buffer entry for the previous relation
    SharedBufferEntry<K, V> entry = get(key, value, timestamp);

    if (entry != null) {
        extractionStates.add(new ExtractionState<K, V>(entry, version, new Stack<SharedBufferEntry<K, V>>()));

        // use a depth first search to reconstruct the previous relations
        while (!extractionStates.isEmpty()) {
            ExtractionState<K, V> extractionState = extractionStates.pop();
            DeweyNumber currentVersion = extractionState.getVersion();
            // current path of the depth first search
            Stack<SharedBufferEntry<K, V>> currentPath = extractionState.getPath();

            // termination criterion
            if (currentVersion.length() == 1) {
                LinkedHashMultimap<K, V> completePath = LinkedHashMultimap.create();

                while (!currentPath.isEmpty()) {
                    SharedBufferEntry<K, V> currentEntry = currentPath.pop();

                    completePath.put(currentEntry.getKey(), currentEntry.getValueTime().getValue());
                }

                result.add(completePath);
            } else {
                SharedBufferEntry<K, V> currentEntry = extractionState.getEntry();

                // append state to the path
                currentPath.push(currentEntry);

                boolean firstMatch = true;
                for (SharedBufferEdge<K, V> edge : currentEntry.getEdges()) {
                    // we can only proceed if the current version is compatible to the version
                    // of this previous relation
                    if (currentVersion.isCompatibleWith(edge.getVersion())) {
                        if (firstMatch) {
                            // for the first match we don't have to copy the current path
                            extractionStates.push(new ExtractionState<K, V>(edge.getTarget(), edge.getVersion(),
                                    currentPath));
                            firstMatch = false;
                        } else {
                            Stack<SharedBufferEntry<K, V>> copy = new Stack<>();
                            copy.addAll(currentPath);

                            extractionStates
                                    .push(new ExtractionState<K, V>(edge.getTarget(), edge.getVersion(), copy));
                        }
                    }
                }
            }
        }
    }

    return result;
}

From source file:org.apache.tajo.engine.planner.rewrite.ProjectionPushDownRule.java

@Override
public LogicalNode visitUnion(Context context, LogicalPlan plan, LogicalPlan.QueryBlock block, UnionNode node,
        Stack<LogicalNode> stack) throws PlanningException {

    LogicalPlan.QueryBlock leftBlock = plan.getBlock(node.getLeftChild());
    LogicalPlan.QueryBlock rightBlock = plan.getBlock(node.getRightChild());

    Context leftContext = new Context(plan,
            PlannerUtil.toQualifiedFieldNames(context.requiredSet, leftBlock.getName()));
    Context rightContext = new Context(plan,
            PlannerUtil.toQualifiedFieldNames(context.requiredSet, rightBlock.getName()));

    stack.push(node);
    visit(leftContext, plan, leftBlock, leftBlock.getRoot(), new Stack<LogicalNode>());
    visit(rightContext, plan, rightBlock, rightBlock.getRoot(), new Stack<LogicalNode>());
    stack.pop();
    return node;
}

From source file:com.grottworkshop.gwsvectorsandboxlib.VectorDrawable.java

private void inflateInternal(Resources res, XmlPullParser parser, AttributeSet attrs, Theme theme)
        throws XmlPullParserException, IOException {
    final VectorDrawableState state = mVectorState;
    final VPathRenderer pathRenderer = state.mVPathRenderer;
    boolean noPathTag = true;

    // Use a stack to help to build the group tree.
    // The top of the stack is always the current group.
    final Stack<VGroup> groupStack = new Stack<VGroup>();
    groupStack.push(pathRenderer.mRootGroup);

    int eventType = parser.getEventType();
    while (eventType != XmlPullParser.END_DOCUMENT) {
        if (eventType == XmlPullParser.START_TAG) {
            final String tagName = parser.getName();
            final VGroup currentGroup = groupStack.peek();

            if (SHAPE_PATH.equals(tagName)) {
                final VFullPath path = new VFullPath();
                path.inflate(res, attrs, theme);
                currentGroup.mChildren.add(path);
                if (path.getPathName() != null) {
                    pathRenderer.mVGTargetsMap.put(path.getPathName(), path);
                }
                noPathTag = false;
                state.mChangingConfigurations |= path.mChangingConfigurations;
            } else if (SHAPE_CLIP_PATH.equals(tagName)) {
                final VClipPath path = new VClipPath();
                path.inflate(res, attrs, theme);
                currentGroup.mChildren.add(path);
                if (path.getPathName() != null) {
                    pathRenderer.mVGTargetsMap.put(path.getPathName(), path);
                }
                state.mChangingConfigurations |= path.mChangingConfigurations;
            } else if (SHAPE_GROUP.equals(tagName)) {
                VGroup newChildGroup = new VGroup();
                newChildGroup.inflate(res, attrs, theme);
                currentGroup.mChildren.add(newChildGroup);
                groupStack.push(newChildGroup);
                if (newChildGroup.getGroupName() != null) {
                    pathRenderer.mVGTargetsMap.put(newChildGroup.getGroupName(), newChildGroup);
                }
                state.mChangingConfigurations |= newChildGroup.mChangingConfigurations;
            }
        } else if (eventType == XmlPullParser.END_TAG) {
            final String tagName = parser.getName();
            if (SHAPE_GROUP.equals(tagName)) {
                groupStack.pop();
            }
        }
        eventType = parser.next();
    }

    // Print the tree out for debug.
    if (DBG_VECTOR_DRAWABLE) {
        printGroupTree(pathRenderer.mRootGroup, 0);
    }

    if (noPathTag) {
        final StringBuffer tag = new StringBuffer();

        if (tag.length() > 0) {
            tag.append(" or ");
        }
        tag.append(SHAPE_PATH);

        throw new XmlPullParserException("no " + tag + " defined");
    }
}

From source file:com.clust4j.algo.DBSCAN.java

@Override
protected DBSCAN fit() {
    synchronized (fitLock) {

        if (null != labels) // Then we've already fit this...
            return this;

        // First get the dist matrix
        final LogTimer timer = new LogTimer();

        // Do the neighborhood assignments, get sample weights, find core samples..
        final LogTimer neighbTimer = new LogTimer();
        labels = new int[m]; // Initialize labels...
        sampleWeights = new double[m]; // Init sample weights...
        coreSamples = new boolean[m];

        // Fit the nearest neighbor model...
        final LogTimer rnTimer = new LogTimer();
        final RadiusNeighbors rnModel = new RadiusNeighbors(data, new RadiusNeighborsParameters(eps)
                .setSeed(getSeed()).setMetric(getSeparabilityMetric()).setVerbose(false)).fit();

        info("fit RadiusNeighbors model in " + rnTimer.toString());
        int[][] nearest = rnModel.getNeighbors().getIndices();

        int[] ptNeighbs;
        ArrayList<int[]> neighborhoods = new ArrayList<>();
        int numCorePts = 0;
        for (int i = 0; i < m; i++) {
            // Each label inits to -1 as noise
            labels[i] = NOISE_CLASS;
            ptNeighbs = nearest[i];

            // Add neighborhood...
            int pts;
            neighborhoods.add(ptNeighbs);
            sampleWeights[i] = pts = ptNeighbs.length;
            coreSamples[i] = pts >= minPts;

            if (coreSamples[i])
                numCorePts++;
        }

        // Log checkpoint
        info("completed density neighborhood calculations in " + neighbTimer.toString());
        info(numCorePts + " core point" + (numCorePts != 1 ? "s" : "") + " found");

        // Label the points...
        int nextLabel = 0, v;
        final Stack<Integer> stack = new Stack<>();
        int[] neighb;

        LogTimer stackTimer = new LogTimer();
        for (int i = 0; i < m; i++) {
            stackTimer = new LogTimer();

            // Want to look at unlabeled OR core points...
            if (labels[i] != NOISE_CLASS || !coreSamples[i])
                continue;

            // Depth-first search starting from i, ending at the non-core points.
            // This is very similar to the classic algorithm for computing connected
            // components, the difference being that we label non-core points as
            // part of a cluster (component), but don't expand their neighborhoods.
            int labelCt = 0;
            while (true) {
                if (labels[i] == NOISE_CLASS) {
                    labels[i] = nextLabel;
                    labelCt++;

                    if (coreSamples[i]) {
                        neighb = neighborhoods.get(i);

                        for (i = 0; i < neighb.length; i++) {
                            v = neighb[i];
                            if (labels[v] == NOISE_CLASS)
                                stack.push(v);
                        }
                    }
                }

                if (stack.size() == 0) {
                    fitSummary.add(new Object[] { nextLabel, labelCt, stackTimer.formatTime(),
                            stackTimer.wallTime() });

                    break;
                }

                i = stack.pop();
            }

            nextLabel++;
        }

        // Count missing
        numNoisey = 0;
        for (int lab : labels)
            if (lab == NOISE_CLASS)
                numNoisey++;

        // corner case: numNoisey == m (never gets a fit summary)
        if (numNoisey == m)
            fitSummary.add(new Object[] { Double.NaN, 0, stackTimer.formatTime(), stackTimer.wallTime() });

        info((numClusters = nextLabel) + " cluster" + (nextLabel != 1 ? "s" : "") + " identified, " + numNoisey
                + " record" + (numNoisey != 1 ? "s" : "") + " classified noise");

        // Encode to put in order
        labels = new NoiseyLabelEncoder(labels).fit().getEncodedLabels();

        sayBye(timer);
        return this;
    }

}

From source file:com.flexive.core.storage.genericSQL.GenericTreeStorageSpreaded.java

protected long _reorganizeSpace(Connection con, SequencerEngine seq, FxTreeMode sourceMode, FxTreeMode destMode,
        long nodeId, boolean includeNodeId, BigInteger overrideSpacing, BigInteger overrideLeft,
        FxTreeNodeInfo insertParent, int insertPosition, BigInteger insertSpace, BigInteger insertBoundaries[],
        int depthDelta, Long destinationNode, boolean createMode, boolean createKeepIds,
        boolean disableSpaceOptimization) throws FxTreeException {
    long firstCreatedNodeId = -1;
    FxTreeNodeInfoSpreaded nodeInfo;
    try {
        nodeInfo = (FxTreeNodeInfoSpreaded) getTreeNodeInfo(con, sourceMode, nodeId);
    } catch (Exception e) {
        return -1;
    }

    if (!nodeInfo.isSpaceOptimizable() && !disableSpaceOptimization) {
        // The root node cannot be optimized any further, so all we can do is fail :-/
        // This should never really happen
        if (nodeId == ROOT_NODE) {
            return -1;
        }
        //System.out.println("### UP we go, depthDelta=" + depthDelta);
        return _reorganizeSpace(con, seq, sourceMode, destMode, nodeInfo.getParentId(), includeNodeId,
                overrideSpacing, overrideLeft, insertParent, insertPosition, insertSpace, insertBoundaries,
                depthDelta, destinationNode, createMode, createKeepIds, false);
    }

    BigInteger spacing = nodeInfo.getDefaultSpacing();
    if (overrideSpacing != null && (overrideSpacing.compareTo(spacing) < 0 || overrideLeft != null)) {
        // override spacing unless it is greater OR overrideLeft is specified (in that case we
        // have to use the spacing for valid tree ranges)  
        spacing = overrideSpacing;
    } else {
        if (spacing.compareTo(GO_UP) < 0 && !createMode && !disableSpaceOptimization) {
            return _reorganizeSpace(con, seq, sourceMode, destMode, nodeInfo.getParentId(), includeNodeId,
                    overrideSpacing, overrideLeft, insertParent, insertPosition, insertSpace, insertBoundaries,
                    depthDelta, destinationNode, createMode, createKeepIds, false);
        }
    }

    if (insertBoundaries != null && insertPosition == -1) {
        insertPosition = 0; // insertPosition cannot be negative
    }

    Statement stmt = null;
    PreparedStatement ps = null;
    ResultSet rs;
    BigInteger left = overrideLeft == null ? nodeInfo.getLeft() : overrideLeft;
    BigInteger right = null;
    String includeNode = includeNodeId ? "=" : "";
    long counter = 0;
    long newId = -1;
    try {
        final long start = System.currentTimeMillis();
        String createProps = createMode ? ",PARENT,REF,NAME,TEMPLATE" : "";
        String sql = " SELECT ID," + StorageManager.getIfFunction( // compute total child count only when the node has children
                "CHILDCOUNT = 0", "0",
                "(SELECT COUNT(*) FROM " + getTable(sourceMode) + " WHERE LFT > NODE.LFT AND RGT < NODE.RGT)") +
        // 3           4             5   6
                ", CHILDCOUNT, LFT AS LFTORD,RGT,DEPTH" + createProps
                + " FROM (SELECT ID,CHILDCOUNT,LFT,RGT,DEPTH" + createProps + " FROM " + getTable(sourceMode)
                + " WHERE " + "LFT>" + includeNode + nodeInfo.getLeft() + " AND LFT<" + includeNode
                + nodeInfo.getRight() + ") NODE " + "ORDER BY LFTORD ASC";
        stmt = con.createStatement();
        rs = stmt.executeQuery(sql);
        if (createMode) {
            //                                                                 1  2      3     4     5   6        7   8
            ps = con.prepareStatement(
                    "INSERT INTO " + getTable(destMode) + " (ID,PARENT,DEPTH,DIRTY,REF,TEMPLATE,LFT,RGT," +
                    //9           10    11
                            "CHILDCOUNT,NAME,MODIFIED_AT) " + "VALUES (?,?,?,?,?,?,?,?,?,?,?)");
        } else {
            ps = con.prepareStatement("UPDATE " + getTable(sourceMode) + " SET LFT=?,RGT=?,DEPTH=? WHERE ID=?");
        }
        long id;
        int total_childs;
        int direct_childs;
        BigInteger nextLeft;
        int lastDepth = nodeInfo.getDepth() + (includeNodeId ? 0 : 1);
        int depth;
        BigInteger _rgt;
        BigInteger _lft;
        Long ref = null;
        String data = null;
        String name = "";

        Stack<Long> currentParent = null;
        if (createMode) {
            currentParent = new Stack<Long>();
            currentParent.push(destinationNode);
        }

        //System.out.println("Spacing:"+SPACING);
        while (rs.next()) {
            //System.out.println("------------------");
            id = rs.getLong(1);
            total_childs = rs.getInt(2);
            direct_childs = rs.getInt(3);
            _lft = getNodeBounds(rs, 4);
            _rgt = getNodeBounds(rs, 5);
            depth = rs.getInt(6);
            if (createMode) {
                // Reading these properties is slow, only do it when needed
                ref = rs.getLong(8);
                if (rs.wasNull())
                    ref = null;
                name = rs.getString(9);
                data = rs.getString(10);
                if (rs.wasNull())
                    data = null;
            }
            left = left.add(spacing).add(BigInteger.ONE);

            // Handle depth differences
            if (lastDepth - depth > 0) {
                BigInteger depthDifference = spacing.add(BigInteger.ONE);
                left = left.add(depthDifference.multiply(BigInteger.valueOf(lastDepth - depth)));
            }
            if (createMode) {
                if (lastDepth < depth) {
                    currentParent.push(newId);
                } else if (lastDepth > depth) {
                    for (int p = 0; p < (lastDepth - depth); p++)
                        currentParent.pop();
                }
            }

            right = left.add(spacing).add(BigInteger.ONE);

            // add child space if needed
            if (total_childs > 0) {
                BigInteger childSpace = spacing.multiply(BigInteger.valueOf(total_childs * 2));
                childSpace = childSpace.add(BigInteger.valueOf((total_childs * 2) - 1));
                right = right.add(childSpace);
                nextLeft = left;
            } else {
                nextLeft = right;
            }

            if (insertBoundaries != null) {
                // insert gap at requested position
                // If we're past the gap, keep adding the insert space to left/right because the added
                // space is never "injected" into the loop, i.e. without adding it the left/right boundaries of
                // nodes after the gap would be too far to the left.
                if (_lft.compareTo(insertBoundaries[0]) > 0) {
                    left = left.add(insertSpace);
                }
                if (_rgt.compareTo(insertBoundaries[0]) > 0) {
                    right = right.add(insertSpace);
                }
            }

            // sanity checks
            if (left.compareTo(right) >= 0) {
                throw new FxTreeException(LOG, "ex.tree.reorganize.failed", counter, left, right,
                        "left greater than right");
            }
            if (insertParent != null && right.compareTo((BigInteger) insertParent.getRight()) > 0) {
                throw new FxTreeException(LOG, "ex.tree.reorganize.failed", counter, left, right,
                        "wrote past parent node bounds");
            }

            // Update the node
            if (createMode) {
                newId = createKeepIds ? id : seq.getId(destMode.getSequencer());
                if (firstCreatedNodeId == -1)
                    firstCreatedNodeId = newId;

                // Create the main entry
                ps.setLong(1, newId);
                ps.setLong(2, currentParent.peek());
                ps.setLong(3, depth + depthDelta);
                ps.setBoolean(4, destMode != FxTreeMode.Live); //only flag non-live tree's dirty
                if (ref == null) {
                    ps.setNull(5, java.sql.Types.NUMERIC);
                } else {
                    ps.setLong(5, ref);
                }
                if (data == null) {
                    ps.setNull(6, java.sql.Types.VARCHAR);
                } else {
                    ps.setString(6, data);
                }
                //                    System.out.println("=> id:"+newId+" left:"+left+" right:"+right);
                setNodeBounds(ps, 7, left);
                setNodeBounds(ps, 8, right);
                ps.setInt(9, direct_childs);
                ps.setString(10, name);
                ps.setLong(11, System.currentTimeMillis());
                ps.addBatch();
            } else {
                setNodeBounds(ps, 1, left);
                setNodeBounds(ps, 2, right);
                ps.setInt(3, depth + depthDelta);
                ps.setLong(4, id);
                ps.addBatch();
                //                    ps.executeBatch();
                //                    ps.clearBatch();
            }

            // Prepare variables for the next node
            left = nextLeft;
            lastDepth = depth;
            counter++;

            // Execute batch every 10000 items to avoid out of memory
            if (counter % 10000 == 0) {
                ps.executeBatch();
                ps.clearBatch();
            }
        }
        rs.close();
        stmt.close();
        stmt = null;
        ps.executeBatch();

        if (LOG.isDebugEnabled()) {
            final long time = System.currentTimeMillis() - start;

            LOG.debug("Tree reorganization of " + counter + " items completed in " + time + " ms (spaceLen="
                    + spacing + ")");
        }
        return firstCreatedNodeId;
    } catch (FxApplicationException e) {
        throw e instanceof FxTreeException ? (FxTreeException) e : new FxTreeException(e);
    } catch (SQLException e) {
        String next = "";
        if (e.getNextException() != null)
            next = " next:" + e.getNextException().getMessage();
        if (StorageManager.isDuplicateKeyViolation(e))
            throw new FxTreeException(LOG, e, "ex.tree.reorganize.duplicateKey");
        throw new FxTreeException(LOG, e, "ex.tree.reorganize.failed", counter, left, right,
                e.getMessage() + next);
    } catch (Exception e) {
        throw new FxTreeException(e);
    } finally {
        try {
            if (stmt != null)
                stmt.close();
        } catch (Throwable t) {
            // ignore
        }
        try {
            if (ps != null)
                ps.close();
        } catch (Throwable t) {
            // ignore
        }
    }
}

From source file:org.apache.fop.fo.FONode.java

/**
 * Conditionally add a new delimited text range to RANGES, where new range is
 * associated with current FONode. A new text range is added unless all of the following are
 * true:
 * <ul>
 * <li>there exists a current range RCUR in RANGES</li>
 * <li>RCUR is empty</li>
 * <li>the node of the RCUR is the same node as FN or a descendent node of FN</li>
 * </ul>
 * @param ranges stack of delimited text ranges
 * @return new range (if constructed and pushed onto stack) or current range (if any) or null
 */
private DelimitedTextRange maybeNewRange(Stack ranges) {
    DelimitedTextRange rCur = null; // current range (top of range stack)
    DelimitedTextRange rNew = null; // new range to be pushed onto range stack
    if (ranges.empty()) {
        if (isBidiRangeBlockItem()) {
            rNew = new DelimitedTextRange(this);
        }
    } else {
        rCur = (DelimitedTextRange) ranges.peek();
        if (rCur != null) {
            if (!rCur.isEmpty() || !isSelfOrDescendent(rCur.getNode(), this)) {
                rNew = new DelimitedTextRange(this);
            }
        }
    }
    if (rNew != null) {
        ranges.push(rNew);
    } else {
        rNew = rCur;
    }
    return rNew;
}