List of usage examples for java.util Stack push
public E push(E item)
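Before the project examples below, here is a minimal, self-contained sketch of push in action (an illustrative snippet, not taken from any of the listed source files): push places the item on top of the stack and returns that same item, and elements come back out in last-in, first-out order via pop.

import java.util.Stack;

public class StackPushDemo {
    public static void main(String[] args) {
        Stack<String> stack = new Stack<String>();

        // push places each item on top of the stack and returns the item itself
        stack.push("first");
        stack.push("second");
        stack.push("third");

        // peek looks at the top element without removing it
        System.out.println(stack.peek());    // prints "third"

        // pop removes elements in LIFO (last-in, first-out) order
        while (!stack.isEmpty()) {
            System.out.println(stack.pop()); // prints "third", "second", "first"
        }
    }
}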
From source file:com.espertech.esper.epl.join.plan.NStreamOuterQueryPlanBuilder.java
/**
 * Recursively builds a substream-per-stream ordered tree graph using the
 * join information supplied for outer joins and from the query graph (where clause).
 * <p>
 * Required streams are considered first and their lookup is placed first in the list
 * to gain performance.
 * @param streamNum is the root stream number that supplies the incoming event to build the tree for
 * @param queryGraph contains where-clause stream relationship info
 * @param completedStreams is a temporary holder for streams already considered
 * @param substreamsPerStream is the ordered, tree-like structure to be filled
 * @param streamCallStack the query plan call stack of streams available via cursor
 * @param dependencyGraph dependencies between historical streams
 * @throws ExprValidationException if the query planning failed
 */
protected static void recursiveBuildInnerJoin(int streamNum, Stack<Integer> streamCallStack, QueryGraph queryGraph,
        Set<Integer> completedStreams, LinkedHashMap<Integer, int[]> substreamsPerStream,
        DependencyGraph dependencyGraph) throws ExprValidationException {
    // add this stream to the set of completed streams
    completedStreams.add(streamNum);

    // check if the dependencies have been satisfied
    if (dependencyGraph.hasDependency(streamNum)) {
        Set<Integer> dependencies = dependencyGraph.getDependenciesForStream(streamNum);
        for (Integer dependentStream : dependencies) {
            if (!streamCallStack.contains(dependentStream)) {
                throw new ExprValidationException("Historical stream " + streamNum
                        + " parameter dependency originating in stream " + dependentStream
                        + " cannot or may not be satisfied by the join");
            }
        }
    }

    // Determine the streams we can navigate to from this stream
    Set<Integer> navigableStreams = queryGraph.getNavigableStreams(streamNum);

    // remove streams with a dependency on other streams not yet processed
    Integer[] navigableStreamArr = navigableStreams.toArray(new Integer[navigableStreams.size()]);
    for (int navigableStream : navigableStreamArr) {
        if (dependencyGraph.hasUnsatisfiedDependency(navigableStream, completedStreams)) {
            navigableStreams.remove(navigableStream);
        }
    }

    // remove those already done
    navigableStreams.removeAll(completedStreams);

    // if we are a leaf node, we are done
    if (navigableStreams.isEmpty()) {
        substreamsPerStream.put(streamNum, new int[0]);
        return;
    }

    // First the outer (required) streams to this stream, then the inner (optional) streams
    int[] substreams = new int[navigableStreams.size()];
    substreamsPerStream.put(streamNum, substreams);
    int count = 0;
    for (int stream : navigableStreams) {
        substreams[count++] = stream;
        completedStreams.add(stream);
    }

    for (int stream : navigableStreams) {
        streamCallStack.push(stream);
        recursiveBuildInnerJoin(stream, streamCallStack, queryGraph, completedStreams, substreamsPerStream,
                dependencyGraph);
        streamCallStack.pop();
    }
}
From source file:org.apache.solr.handler.JsonLoader.java
SolrInputDocument parseDoc(int ev) throws IOException {
    Stack<Object> stack = new Stack<Object>();
    Object obj = null;
    boolean inArray = false;

    if (ev != JSONParser.OBJECT_START) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "object should already be started");
    }

    while (true) {
        //System.out.println( ev + "["+JSONParser.getEventString(ev)+"] "+parser.wasKey() ); //+ parser.getString() );
        switch (ev) {
        case JSONParser.STRING:
            if (parser.wasKey()) {
                obj = stack.peek();
                String v = parser.getString();
                if (obj instanceof SolrInputField) {
                    SolrInputField field = (SolrInputField) obj;
                    if ("boost".equals(v)) {
                        ev = parser.nextEvent();
                        if (ev != JSONParser.NUMBER && ev != JSONParser.LONG && ev != JSONParser.BIGNUMBER) {
                            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                                    "boost should have number! " + JSONParser.getEventString(ev));
                        }
                        field.setBoost((float) parser.getDouble());
                    } else if ("value".equals(v)) {
                        // nothing special...
                        stack.push(field); // so it can be popped
                    } else {
                        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                                "invalid key: " + v + " [" + parser.getPosition() + "]");
                    }
                } else if (obj instanceof SolrInputDocument) {
                    SolrInputDocument doc = (SolrInputDocument) obj;
                    SolrInputField f = doc.get(v);
                    if (f == null) {
                        f = new SolrInputField(v);
                        doc.put(f.getName(), f);
                    }
                    stack.push(f);
                } else {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                            "unexpected object on the stack [" + parser.getPosition() + "]");
                }
            } else {
                addValToField(stack, parser.getString(), inArray, parser);
            }
            break;

        case JSONParser.LONG:
        case JSONParser.NUMBER:
        case JSONParser.BIGNUMBER:
            addValToField(stack, parser.getNumberChars().toString(), inArray, parser);
            break;

        case JSONParser.BOOLEAN:
            addValToField(stack, parser.getBoolean(), inArray, parser);
            break;

        case JSONParser.NULL:
            parser.getNull();
            /***
             if we wanted to remove the field from the document now...
             if (!inArray) {
                 Object o = stack.peek();
                 // if null was only value in the field, then remove the field
                 if (o instanceof SolrInputField) {
                     SolrInputField sif = (SolrInputField) o;
                     if (sif.getValueCount() == 0) {
                         sdoc.remove(sif.getName());
                     }
                 }
             }
             ***/
            addValToField(stack, null, inArray, parser);
            break;

        case JSONParser.OBJECT_START:
            if (stack.isEmpty()) {
                stack.push(new SolrInputDocument());
            } else {
                obj = stack.peek();
                if (obj instanceof SolrInputField) {
                    // should already be pushed...
                } else {
                    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                            "should not start new object with: " + obj + " [" + parser.getPosition() + "]");
                }
            }
            break;

        case JSONParser.OBJECT_END:
            obj = stack.pop();
            if (obj instanceof SolrInputDocument) {
                return (SolrInputDocument) obj;
            } else if (obj instanceof SolrInputField) {
                // should already be pushed...
            } else {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                        "unexpected object at object end: " + obj + " [" + parser.getPosition() + "]");
            }
            break;

        case JSONParser.ARRAY_START:
            inArray = true;
            break;

        case JSONParser.ARRAY_END:
            inArray = false;
            stack.pop(); // the val should have done it...
            break;

        default:
            System.out.println("UNKNOWN_EVENT_ID:" + ev);
            break;
        }

        ev = parser.nextEvent();
        if (ev == JSONParser.EOF) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "should finish doc first!");
        }
    }
}
From source file:de.tudarmstadt.ukp.wikipedia.parser.mediawiki.ModularParser.java
/**
 * There are not many differences between links and images, so they are parsed
 * in a single step.
 */
private void parseImagesAndInternalLinks(SpanManager sm, List<Span> linkSpans, List<Link> links) {
    sm.manageList(linkSpans);

    int pos = -1;
    Stack<Integer> linkOpenTags = new Stack<Integer>();
    while ((pos = sm.indexOf("[[", pos + 1)) != -1) {
        linkOpenTags.push(pos);
    }

    Span lastLinkSpan = new Span(sm.length() + 1, sm.length() + 1);
    Link.type linkType = Link.type.INTERNAL;

    while (!linkOpenTags.empty()) {
        int linkStartTag = linkOpenTags.pop();
        int linkEndTag = sm.indexOf("]]", linkStartTag);
        if (linkEndTag == -1) {
            continue;
        }

        int linkOptionTag = sm.indexOf("|", linkStartTag, linkEndTag);
        int linkTextStart;
        String linkTarget;

        if (linkOptionTag != -1) {
            linkTextStart = linkOptionTag + 1;
            linkTarget = sm.substring(new Span(linkStartTag + 2, linkOptionTag).trim(sm));
        } else {
            linkTextStart = linkStartTag + 2;
            linkTarget = sm.substring(new Span(linkStartTag + 2, linkEndTag).trim(sm));
        }

        // is it a regular link?
        if (linkTarget.indexOf(lineSeparator) != -1) {
            continue;
        }
        linkTarget = encodeWikistyle(linkTarget);

        // so it is a link or an image
        List<String> parameters;
        String namespace = getLinkNameSpace(linkTarget);
        if (namespace != null) {
            if (imageIdentifers.indexOf(namespace) != -1) {
                if (linkOptionTag != -1) {
                    int temp;
                    while ((temp = sm.indexOf("|", linkTextStart, linkEndTag)) != -1) {
                        linkTextStart = temp + 1;
                    }
                    parameters = tokenize(sm, linkOptionTag + 1, linkEndTag, "|");

                    // maybe there is an external link at the end of the
                    // image description...
                    if (sm.charAt(linkEndTag + 2) == ']' && sm.indexOf("[", linkTextStart, linkEndTag) != -1) {
                        linkEndTag++;
                    }
                } else {
                    parameters = null;
                }
                linkType = Link.type.IMAGE;
            } else {
                // link has a namespace but is not an image
                linkType = Link.type.UNKNOWN;
                parameters = null;
            }
        } else {
            if (linkType == Link.type.INTERNAL && lastLinkSpan.hits(new Span(linkStartTag, linkEndTag + 2))) {
                continue;
            }
            parameters = null;
            linkType = Link.type.INTERNAL;
        }

        Span posSpan = new Span(linkTextStart, linkEndTag).trim(sm);
        linkSpans.add(posSpan);

        Link l = new Link(null, posSpan, linkTarget, linkType, parameters);
        links.add(l);

        if (calculateSrcSpans) {
            l.setSrcSpan(new SrcSpan(sm.getSrcPos(linkStartTag), sm.getSrcPos(linkEndTag + 2)));
        }

        sm.delete(posSpan.getEnd(), linkEndTag + 2);
        sm.delete(linkStartTag, posSpan.getStart());

        // removing line separators in link text
        int lsinlink;
        while ((lsinlink = sm.indexOf(lineSeparator, posSpan)) != -1) {
            sm.replace(lsinlink, lsinlink + lineSeparator.length(), " ");
        }

        lastLinkSpan = posSpan;
    }
}
From source file:FilenameUtils.java
/**
 * Checks a filename to see if it matches the specified wildcard matcher
 * allowing control over case-sensitivity.
 * <p>
 * The wildcard matcher uses the characters '?' and '*' to represent a
 * single or multiple wildcard characters.
 *
 * @param filename  the filename to match on
 * @param wildcardMatcher  the wildcard string to match against
 * @param caseSensitivity  what case sensitivity rule to use, null means case-sensitive
 * @return true if the filename matches the wildcard string
 * @since Commons IO 1.3
 */
public static boolean wildcardMatch(String filename, String wildcardMatcher, IOCase caseSensitivity) {
    if (filename == null && wildcardMatcher == null) {
        return true;
    }
    if (filename == null || wildcardMatcher == null) {
        return false;
    }
    if (caseSensitivity == null) {
        caseSensitivity = IOCase.SENSITIVE;
    }
    filename = caseSensitivity.convertCase(filename);
    wildcardMatcher = caseSensitivity.convertCase(wildcardMatcher);
    String[] wcs = splitOnTokens(wildcardMatcher);
    boolean anyChars = false;
    int textIdx = 0;
    int wcsIdx = 0;
    Stack backtrack = new Stack();

    // loop around a backtrack stack, to handle complex * matching
    do {
        if (backtrack.size() > 0) {
            int[] array = (int[]) backtrack.pop();
            wcsIdx = array[0];
            textIdx = array[1];
            anyChars = true;
        }

        // loop whilst tokens and text left to process
        while (wcsIdx < wcs.length) {

            if (wcs[wcsIdx].equals("?")) {
                // ? so move to next text char
                textIdx++;
                anyChars = false;

            } else if (wcs[wcsIdx].equals("*")) {
                // set any chars status
                anyChars = true;
                if (wcsIdx == wcs.length - 1) {
                    textIdx = filename.length();
                }

            } else {
                // matching text token
                if (anyChars) {
                    // any chars then try to locate text token
                    textIdx = filename.indexOf(wcs[wcsIdx], textIdx);
                    if (textIdx == -1) {
                        // token not found
                        break;
                    }
                    int repeat = filename.indexOf(wcs[wcsIdx], textIdx + 1);
                    if (repeat >= 0) {
                        backtrack.push(new int[] { wcsIdx, repeat });
                    }
                } else {
                    // matching from current position
                    if (!filename.startsWith(wcs[wcsIdx], textIdx)) {
                        // couldn't match token
                        break;
                    }
                }

                // matched text token, move text index to end of matched token
                textIdx += wcs[wcsIdx].length();
                anyChars = false;
            }

            wcsIdx++;
        }

        // full match
        if (wcsIdx == wcs.length && textIdx == filename.length()) {
            return true;
        }

    } while (backtrack.size() > 0);

    return false;
}
From source file:fr.inria.oak.paxquery.common.xml.navigation.NavigationTreePatternNode.java
/**
 * Copies the child node reached through the given edge as a virtual subtree
 * and attaches it to this node.
 *
 * @param edge
 */
public void copyVirtualChild(NavigationTreePatternEdge edge) {
    NavigationTreePatternNode childCopy = edge.n2.deepCopy();

    // marking childCopy and its subtree as virtual:
    Stack<NavigationTreePatternNode> st = new Stack<NavigationTreePatternNode>();
    st.push(childCopy);
    while (!st.empty()) {
        NavigationTreePatternNode pn = st.pop();
        // Parameters.logger.info("Set virtual node: " + pn.tag);
        pn.virtual = true;
        pn.nodeCode = NavigationTreePatternNode.globalNodeCounter.getAndIncrement();
        // virtual nodes obtained by navigation cannot store ID
        pn.storesID = false;
        Iterator<NavigationTreePatternEdge> pnChildren = pn.edges.iterator();
        while (pnChildren.hasNext()) {
            NavigationTreePatternEdge pnEdge = pnChildren.next();
            st.push(pnEdge.n2);
        }
    }

    addEdge(childCopy, edge.isParent(), edge.isNested(), edge.isOptional());
}
From source file:cn.teamlab.wg.framework.struts2.breadcrumb.TopBreadCrumbInterceptor.java
protected void beforeInvocation(ActionInvocation invocation) {
    BreadCrumb annotation = processAnnotation(invocation);

    /*
     * overrides rewind mode of this invocation if needed
     */
    if (annotation != null) {
        Crumb current = makeCrumb(invocation, annotation.name(), annotation.key());

        // get the bread crumbs trail stored in session (CRUMB_KEY)
        BreadCrumbTrail trail = getBreadCrumbTrail(invocation);

        // set default configuration
        RewindMode mode = trail.rewindMode;
        int maxCrumbs = trail.maxCrumbs;
        Comparator<Crumb> comparator = trail.comparator;

        // TODO override configuration (if needed)
        if (annotation.rewind() != RewindMode.DEFAULT)
            mode = annotation.rewind();
        if (annotation.comparator() != BreadCrumb.NULL.class) {
            comparator = createComparator(annotation.comparator());
        }

        // the comparator to use; then set the initial condition of the crumbs
        Stack<Crumb> crumbs = trail.getCrumbs();

        /*
         * synchronized region is needed to prevent
         * ConcurrentModificationException(s) for concurrent
         * requests (operating on the same session) that would
         * modify the bread crumbs trail.
         */
        synchronized (crumbs) {
            LOG.debug("acquired lock on crumbs trail");

            Crumb last = (crumbs.size() == 0) ? null : crumbs.lastElement();

            /*
             * compare current and last crumbs
             */
            /*
             * if (comparator.compare(current, last) != 0) {
             *     int dupIdx = trail.indexOf(current, comparator);
             *     if (mode == RewindMode.AUTO && dupIdx != -1) {
             *         trail.rewindAt(dupIdx - 1);
             *     }
             *     crumbs.push(current);
             *     if (crumbs.size() > maxCrumbs) crumbs.remove(0);
             * } else {
             *     if (crumbs.size() > 0) {
             *         crumbs.remove(crumbs.size() - 1);
             *         crumbs.push(current);
             *     }
             * }
             */

            if (crumbs.size() > 0) {
                // group checking (excluding the first one)
                if (last.getAction().startsWith(current.getAction().split("/")[0]) == false) {
                    // not the same group
                    trail.rewindAt(0);
                }

                // the same group, sort them
                int dupIdx = trail.indexOf(current, comparator);
                if (mode == RewindMode.AUTO && dupIdx != -1) {
                    trail.rewindAt(dupIdx - 1);
                }
                crumbs.push(current);
                if (crumbs.size() > maxCrumbs) {
                    crumbs.remove(0);
                }

                /*
                 * if (crumbs.size() > 2) {
                 *     // sort by name
                 *     trail.sort(comparator);
                 * }
                 */
            } else {
                // add directly (after the first one)
                crumbs.push(current);
            }

            LOG.debug("releasing lock on crumbs trail");
        } // synchronized
    }
}
From source file:fr.inria.oak.paxquery.common.xml.navigation.NavigationTreePatternNode.java
/**
 * Returns true if this node is the top returning node of the tree pattern.
 * Assumes that the node returns at least an ID, so we don't check for that.
 * Also assumes that the node selects on the tag and does not return it.
 *
 * @return isTopReturningNode
 */
public boolean isTopReturningNode() {
    if (this.isTopReturningNode == 0) {
        return false;
    }
    if (this.isTopReturningNode == 1) {
        return true;
    }
    // otherwise, it is -1, and we need to look
    if (this.parentEdge == null) {
        return true;
    }
    NavigationTreePatternNode n2 = this.parentEdge.n1;
    Stack<NavigationTreePatternNode> sn = new Stack<NavigationTreePatternNode>();
    sn.push(n2);
    while (!sn.empty()) {
        NavigationTreePatternNode n3 = sn.pop();
        if (n3 == null) {
            this.isTopReturningNode = 1;
            return true;
        }
        if (n3.storesID || n3.storesValue || n3.storesContent) {
            this.isTopReturningNode = 0;
            return false;
        }
        if (n3.parentEdge != null) {
            sn.push(n3.parentEdge.n1);
        }
    }
    this.isTopReturningNode = 1;
    return true;
}
From source file:com.vmware.identity.idm.server.provider.ldap.LdapProvider.java
Set<Group> getNestedGroups(ILdapConnectionEx connection, String membershipId, boolean groupNameOnly)
        throws NoSuchGroupException, InvalidPrincipalException {
    Set<Group> groups = new HashSet<Group>();

    if (ServerUtils.isNullOrEmpty(membershipId) == false) {
        final String ATTR_NAME_GROUP_CN = _ldapSchemaMapping
                .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeAccountName);
        final String ATTR_DESCRIPTION = _ldapSchemaMapping
                .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeDescription);
        final String ATTR_ENTRY_UUID = _ldapSchemaMapping
                .getGroupAttribute(IdentityStoreAttributeMapping.AttributeIds.GroupAttributeObjectId);

        ArrayList<String> attributeNames = getAttributesList(ATTR_NAME_GROUP_CN, ATTR_ENTRY_UUID,
                ATTR_DESCRIPTION, !groupNameOnly);

        HashSet<String> groupsProcessed = new HashSet<String>();
        Stack<String> groupsToProcess = new Stack<String>();
        groupsToProcess.push(membershipId);

        while (groupsToProcess.isEmpty() == false) {
            String currentMembershipId = groupsToProcess.pop();
            if (groupsProcessed.contains(currentMembershipId) == false) {
                String filter = String.format(_ldapSchemaMapping.getDirectParentGroupsQuery(),
                        LdapFilterString.encode(currentMembershipId));

                Collection<ILdapMessage> messages = null;
                try {
                    messages = ldap_search(connection, getStoreDataEx().getGroupBaseDn(),
                            LdapScope.SCOPE_SUBTREE, filter, attributeNames, DEFAULT_PAGE_SIZE, -1);

                    String groupMembershipId = null;

                    if (messages != null && messages.size() > 0) {
                        for (ILdapMessage message : messages) {
                            ILdapEntry[] entries = message.getEntries();

                            if ((entries != null) && (entries.length > 0)) {
                                for (ILdapEntry entry : entries) {
                                    Group g = buildGroupObject(entry, ATTR_NAME_GROUP_CN, ATTR_ENTRY_UUID,
                                            ATTR_DESCRIPTION, !groupNameOnly);

                                    if (this._groupGroupMembersListLinkIsDn) {
                                        groupMembershipId = entry.getDN();
                                    } else if (this._groupGroupMembersListLinkExists) {
                                        groupMembershipId = getOptionalFirstStringValue(
                                                entry.getAttributeValues(GROUP_GROUP_MEMBERS_LIST_LINK_ATTRIBUTE));
                                    }

                                    groups.add(g);

                                    if (ServerUtils.isNullOrEmpty(groupMembershipId) == false) {
                                        groupsToProcess.push(groupMembershipId);
                                    }
                                }
                            }
                        }
                    }
                } catch (NoSuchObjectLdapException e) {
                    log.error(String.format("Failed to search for group membership for [%s]",
                            currentMembershipId), e);
                    throw e;
                } finally {
                    ServerUtils.disposeLdapMessages(messages);
                } // try

                groupsProcessed.add(currentMembershipId);
            }
        }
    }
    return groups;
}
From source file:com.matteoveroni.model.copy.FilenameUtils.java
/**
 * Checks a filename to see if it matches the specified wildcard matcher
 * allowing control over case-sensitivity.
 * <p>
 * The wildcard matcher uses the characters '?' and '*' to represent a
 * single or multiple (zero or more) wildcard characters.
 * N.B. the sequence "*?" does not work properly at present in match strings.
 *
 * @param filename  the filename to match on
 * @param wildcardMatcher  the wildcard string to match against
 * @param caseSensitivity  what case sensitivity rule to use, null means case-sensitive
 * @return true if the filename matches the wildcard string
 * @since 1.3
 */
public static boolean wildcardMatch(String filename, String wildcardMatcher, IOCase caseSensitivity) {
    if (filename == null && wildcardMatcher == null) {
        return true;
    }
    if (filename == null || wildcardMatcher == null) {
        return false;
    }
    if (caseSensitivity == null) {
        caseSensitivity = IOCase.SENSITIVE;
    }
    String[] wcs = splitOnTokens(wildcardMatcher);
    boolean anyChars = false;
    int textIdx = 0;
    int wcsIdx = 0;
    Stack<int[]> backtrack = new Stack<int[]>();

    // loop around a backtrack stack, to handle complex * matching
    do {
        if (backtrack.size() > 0) {
            int[] array = backtrack.pop();
            wcsIdx = array[0];
            textIdx = array[1];
            anyChars = true;
        }

        // loop whilst tokens and text left to process
        while (wcsIdx < wcs.length) {

            if (wcs[wcsIdx].equals("?")) {
                // ? so move to next text char
                textIdx++;
                if (textIdx > filename.length()) {
                    break;
                }
                anyChars = false;

            } else if (wcs[wcsIdx].equals("*")) {
                // set any chars status
                anyChars = true;
                if (wcsIdx == wcs.length - 1) {
                    textIdx = filename.length();
                }

            } else {
                // matching text token
                if (anyChars) {
                    // any chars then try to locate text token
                    textIdx = caseSensitivity.checkIndexOf(filename, textIdx, wcs[wcsIdx]);
                    if (textIdx == -1) {
                        // token not found
                        break;
                    }
                    int repeat = caseSensitivity.checkIndexOf(filename, textIdx + 1, wcs[wcsIdx]);
                    if (repeat >= 0) {
                        backtrack.push(new int[] { wcsIdx, repeat });
                    }
                } else {
                    // matching from current position
                    if (!caseSensitivity.checkRegionMatches(filename, textIdx, wcs[wcsIdx])) {
                        // couldn't match token
                        break;
                    }
                }

                // matched text token, move text index to end of matched token
                textIdx += wcs[wcsIdx].length();
                anyChars = false;
            }

            wcsIdx++;
        }

        // full match
        if (wcsIdx == wcs.length && textIdx == filename.length()) {
            return true;
        }

    } while (backtrack.size() > 0);

    return false;
}
From source file:com.ricemap.spateDB.core.RTree.java
protected int search(Shape query_shape, ResultCollector<T> output, int start, int end) throws IOException {
    Prism query_mbr = query_shape.getMBR();
    int resultSize = 0;
    // Special case for an empty tree
    if (height == 0)
        return 0;

    Stack<Integer> toBeSearched = new Stack<Integer>();
    // Start from the given node
    toBeSearched.push(start);
    if (start >= nodeCount) {
        toBeSearched.push(end);
    }

    Prism node_mbr = new Prism();

    // Holds one data line from tree data
    Text line = new Text2();

    while (!toBeSearched.isEmpty()) {
        int searchNumber = toBeSearched.pop();
        int mbrsToTest = searchNumber == 0 ? 1 : degree;

        if (searchNumber < nodeCount) {
            long nodeOffset = NodeSize * searchNumber;
            structure.seek(nodeOffset);
            int dataOffset = structure.readInt();

            for (int i = 0; i < mbrsToTest; i++) {
                node_mbr.readFields(structure);
                int lastOffset = (searchNumber + i) == nodeCount - 1 ? treeSize : structure.readInt();
                if (query_mbr.contains(node_mbr)) {
                    // The node is fully contained in the query range.
                    // Save the time and do full scan for this node
                    toBeSearched.push(dataOffset);
                    // Checks if this node is the last node in its level.
                    // This can be easily detected because the next node in
                    // the level order traversal will be the first node in
                    // the next level, which means it will have an offset
                    // less than this node
                    if (lastOffset <= dataOffset)
                        lastOffset = treeSize;
                    toBeSearched.push(lastOffset);
                } else if (query_mbr.isIntersected(node_mbr)) {
                    // Node partially overlaps with query. Go deep under this node
                    if (searchNumber < nonLeafNodeCount) {
                        // Search child nodes
                        toBeSearched.push((searchNumber + i) * degree + 1);
                    } else {
                        // Search all elements in this node
                        toBeSearched.push(dataOffset);
                        // Checks if this node is the last node in its level.
                        // This can be easily detected because the next node
                        // in the level order traversal will be the first node
                        // in the next level, which means it will have an
                        // offset less than this node
                        if (lastOffset <= dataOffset)
                            lastOffset = treeSize;
                        toBeSearched.push(lastOffset);
                    }
                }
                dataOffset = lastOffset;
            }
        } else {
            int firstOffset, lastOffset;
            // Search for data items (records)
            lastOffset = searchNumber;
            firstOffset = toBeSearched.pop();

            data.seek(firstOffset + treeStartOffset);
            LineReader lineReader = new LineReader(data);
            while (firstOffset < lastOffset) {
                firstOffset += lineReader.readLine(line);
                stockObject.fromText(line);
                if (stockObject.isIntersected(query_shape)) {
                    resultSize++;
                    if (output != null)
                        output.collect(stockObject);
                }
            }
        }
    }
    return resultSize;
}