Usage examples for java.util.LinkedList#removeFirst()
public E removeFirst()
From source file:gallery.service.sitemap.SitemapServiceImpl.java
/**
 * Regenerates the XML sitemap files into a temporary directory and then moves
 * them into the web content directory.
 *
 * Concurrency: guarded by the {@code generating} flag only (plain boolean,
 * not synchronized) — NOTE(review): two threads could race past the guard;
 * confirm callers serialize invocations.
 */
@Override
@Transactional(readOnly = true)
public void createSitemap() {
    // Best-effort re-entrancy guard: skip if a generation run is in progress.
    if (generating) {
        logger.info("xml is allready generating ...");
        return;
    }
    try {
        logger.info("start generate xml");
        generating = true;
        long time = System.currentTimeMillis();
        File base = new File(path_tmp);
        try {
            if (base.exists()) {
                // Sitemap index with up to 10000 records per child sitemap file.
                Index index = new Index(10000, base, gallery.web.Config.SITE_NAME);
                Sitemap sitemap = index.getChild();
                // First query: top-level pages (first element of the WHERE values is null).
                List<Pages> pages = pages_service.getByPropertiesValueOrdered(SITEMAP_NAMES, SITEMAP_WHERE,
                        new Object[] { null, Boolean.TRUE }, null, null);
                // Queue of non-leaf page ids whose children still need to be fetched.
                LinkedList<Long> pages_unhandled = new LinkedList<Long>();
                int k = 0;
                while (k < pages.size()) {
                    Pages p = pages.get(k);
                    if (!p.getLast()) {
                        // Non-leaf page: record it and remember to expand its children later.
                        pages_unhandled.add(p.getId());
                        sitemap.addRecord("index.htm?id_pages_nav=" + p.getId(), "daily", "0.8");
                    } else {
                        //check type
                        if (p.getType().equals(gallery.web.controller.pages.types.WallpaperGalleryType.TYPE)) {
                            // Gallery page: emit one record per gallery page-number plus one per wallpaper.
                            Long id_pages = p.getId();
                            List ids = wallpaper_service.getSingleProperty("id", SITEMAP_WHERE,
                                    new Object[] { id_pages, Boolean.TRUE }, 0, 0, null, null);
                            int j = 0;
                            // One paging record per CATEGORY_WALLPAPERS-sized slice of the wallpaper list.
                            for (int i = 0; i < ids.size(); i = i
                                    + gallery.web.controller.pages.types.WallpaperGalleryType.CATEGORY_WALLPAPERS) {
                                sitemap.addRecord("index.htm?id_pages_nav=" + id_pages + "&page_number=" + j,
                                        "daily", "0.9");
                                j++;
                            }
                            for (Object id_wallpaper : ids) {
                                sitemap.addRecord(
                                        "index.htm?id_pages_nav=" + id_pages + "&id_photo_nav=" + id_wallpaper,
                                        "daily", "0.7");
                            }
                        } else {
                            sitemap.addRecord("index.htm?id_pages_nav=" + p.getId(), "daily", "0.9");
                        }
                    }
                    k++;
                    // Current batch exhausted: pull the next unexpanded parent id and
                    // restart the scan over its children.
                    if ((k >= pages.size()) && (!pages_unhandled.isEmpty())) {
                        pages = pages_service.getByPropertiesValueOrdered(SITEMAP_NAMES, SITEMAP_WHERE,
                                new Object[] { pages_unhandled.removeFirst(), Boolean.TRUE }, null, null);
                        k = 0;
                    }
                }
                sitemap.close();
                //moving to web content
                clearSitemap();
                File f = new File(path_tmp);
                File[] files = f.listFiles();
                // Move only generated sitemap/index XML files into the live directory.
                for (File file : files) {
                    String name = file.getName();
                    if ((name.startsWith(core.sitemap.model.Config.SITEMAP_PREFIX)
                            || name.startsWith(core.sitemap.model.Config.INDEX_PREFIX))
                            && name.endsWith(core.sitemap.model.Config.XML_SUFFIX)) {
                        File new_file = new File(path, file.getName());
                        // NOTE(review): renameTo return value is ignored; a failed move is silent.
                        file.renameTo(new_file);
                    }
                }
            }
        } catch (IOException ex) {
            logger.error("error while generating sitemap", ex);
        }
        time = System.currentTimeMillis() - time;
        logger.info("end generate xml. generated in: " + time);
    } finally {
        // Always clear the guard, even on unexpected runtime failures.
        generating = false;
    }
}
From source file:de.tudarmstadt.ukp.wikipedia.parser.mediawiki.ModularParser.java
/**
 * Builds a (possibly recursive) nested list from the leading run of list lines
 * in {@code lineSpans}.
 *
 * Consumes spans from the front of {@code lineSpans} while they keep the same
 * list type, strips one marker character from each, and then parses the
 * collected spans — recursing when a deeper list level is encountered.
 *
 * @param sm        span manager owning the underlying text
 * @param cepp      shared parsing parameters passed through to element parsing
 * @param lineSpans remaining lines of the section; consumed from the front
 * @param listType  NESTEDLIST or NESTEDLIST_NR (numbered)
 * @return the parsed nested list container
 */
private NestedListContainer buildNestedList(SpanManager sm, ContentElementParsingParameters cepp,
        LinkedList<Span> lineSpans, lineType listType) {
    boolean numbered = listType == lineType.NESTEDLIST_NR;
    NestedListContainer result = new NestedListContainer(numbered);
    if (calculateSrcSpans) {
        result.setSrcSpan(new SrcSpan(sm.getSrcPos(lineSpans.getFirst().getStart()), -1));
    }
    // Collect the contiguous run of lines that share this list type; each span
    // drops its first character (the list marker) and is trimmed.
    LinkedList<Span> nestedListSpans = new LinkedList<Span>();
    while (!lineSpans.isEmpty()) {
        Span s = lineSpans.getFirst();
        if (listType != getLineType(sm, s)) {
            break;
        }
        nestedListSpans.add(new Span(s.getStart() + 1, s.getEnd()).trim(sm));
        lineSpans.removeFirst();
    }
    sm.manageList(nestedListSpans);
    if (calculateSrcSpans) {
        // NOTE(review): getLast() throws if no line matched listType — callers
        // presumably guarantee at least one matching line; confirm.
        result.getSrcSpan().setEnd(sm.getSrcPos(nestedListSpans.getLast().getEnd()));
    }
    // Second pass: after marker-stripping, a line that is still a list line
    // belongs to a deeper nesting level — recurse; otherwise parse it as a
    // plain list element.
    while (!nestedListSpans.isEmpty()) {
        Span s = nestedListSpans.getFirst();
        lineType t = getLineType(sm, s);
        if (t == lineType.NESTEDLIST || t == lineType.NESTEDLIST_NR) {
            result.add(buildNestedList(sm, cepp, nestedListSpans, t));
        } else {
            nestedListSpans.removeFirst();
            result.add((NestedListElement) parseContentElement(sm, cepp, s, new NestedListElement()));
        }
    }
    sm.removeManagedList(nestedListSpans);
    return result;
}
From source file:gate.creole.tokeniser.SimpleTokeniser.java
/**
 * Converts the FSM from a non-deterministic to a deterministic one by
 * eliminating all the unrestricted (lambda/void) transitions.
 *
 * Classic subset construction: each DFA state ({@code DFSMState}) corresponds
 * to a lambda-closed set of NFA states ({@code Set<FSMState>});
 * {@code unmarkedDStates} is the worklist of state-sets not yet expanded.
 */
void eliminateVoidTransitions() throws TokeniserException {
    //kalina:clear() faster than init() which is called with init()
    newStates.clear();
    Set<Set<FSMState>> sdStates = new HashSet<Set<FSMState>>();
    LinkedList<Set<FSMState>> unmarkedDStates = new LinkedList<Set<FSMState>>();
    // Seed: the DFA initial state is the lambda-closure of the NFA initial state.
    DFSMState dCurrentState = new DFSMState(this);
    Set<FSMState> sdCurrentState = new HashSet<FSMState>();
    sdCurrentState.add(initialState);
    sdCurrentState = lambdaClosure(sdCurrentState);
    newStates.put(sdCurrentState, dCurrentState);
    sdStates.add(sdCurrentState);
    //find out if the new state is a final one
    Iterator<FSMState> innerStatesIter = sdCurrentState.iterator();
    String rhs;
    FSMState currentInnerState;
    Set<String> rhsClashSet = new HashSet<String>();
    boolean newRhs = false;
    while (innerStatesIter.hasNext()) {
        currentInnerState = innerStatesIter.next();
        if (currentInnerState.isFinal()) {
            // If several NFA final states land in one subset, the last rhs wins.
            rhs = currentInnerState.getRhs();
            rhsClashSet.add(rhs);
            dCurrentState.rhs = rhs;
            newRhs = true;
        }
    }
    if (rhsClashSet.size() > 1) {
        Err.println("Warning, rule clash: " + rhsClashSet + "\nSelected last definition: " + dCurrentState.rhs);
    }
    if (newRhs)
        dCurrentState.buildTokenDesc();
    rhsClashSet.clear();
    unmarkedDStates.addFirst(sdCurrentState);
    dInitialState = dCurrentState;
    Set<FSMState> nextSet;
    // Expand each unmarked subset over every input type until fixpoint.
    while (!unmarkedDStates.isEmpty()) {
        sdCurrentState = unmarkedDStates.removeFirst();
        for (int type = 0; type < maxTypeId; type++) {
            // Union of all NFA moves on 'type' from the states in this subset.
            nextSet = new HashSet<FSMState>();
            innerStatesIter = sdCurrentState.iterator();
            while (innerStatesIter.hasNext()) {
                currentInnerState = innerStatesIter.next();
                Set<FSMState> tempSet = currentInnerState.nextSet(type);
                if (null != tempSet)
                    nextSet.addAll(tempSet);
            } //while(innerStatesIter.hasNext())
            if (!nextSet.isEmpty()) {
                nextSet = lambdaClosure(nextSet);
                dCurrentState = newStates.get(nextSet);
                if (dCurrentState == null) {
                    //we have a new DFSMState
                    dCurrentState = new DFSMState(this);
                    sdStates.add(nextSet);
                    unmarkedDStates.add(nextSet);
                    //check to see whether the new state is a final one
                    innerStatesIter = nextSet.iterator();
                    newRhs = false;
                    while (innerStatesIter.hasNext()) {
                        currentInnerState = innerStatesIter.next();
                        if (currentInnerState.isFinal()) {
                            rhs = currentInnerState.getRhs();
                            rhsClashSet.add(rhs);
                            dCurrentState.rhs = rhs;
                            newRhs = true;
                        }
                    }
                    if (rhsClashSet.size() > 1) {
                        Err.println("Warning, rule clash: " + rhsClashSet + "\nSelected last definition: "
                                + dCurrentState.rhs);
                    }
                    if (newRhs)
                        dCurrentState.buildTokenDesc();
                    rhsClashSet.clear();
                    newStates.put(nextSet, dCurrentState);
                }
                // Register the deterministic transition: subset --type--> dCurrentState.
                newStates.get(sdCurrentState).put(type, dCurrentState);
            } // if(!nextSet.isEmpty())
        } // for(byte type = 0; type < 256; type++)
    } // while(!unmarkedDStates.isEmpty())
}
From source file:de.tudarmstadt.ukp.wikipedia.parser.mediawiki.ModularParser.java
/**
 * Splits the page's lines into sections and dispatches each line run to the
 * appropriate builder (paragraph, nested list, definition list, table).
 *
 * Consumes {@code lineSpans} from the front until empty; each SECTION line
 * starts a new {@code SectionContent} at the detected heading level.
 *
 * @param sm        span manager owning the underlying text
 * @param cepp      shared parsing parameters
 * @param lineSpans all line spans of the document; consumed by this call
 * @return the section hierarchy built from the collected flat section list
 */
private SectionContainer parseSections(SpanManager sm, ContentElementParsingParameters cepp,
        LinkedList<Span> lineSpans) {
    List<SectionContent> contentSections = new ArrayList<SectionContent>();
    // Implicit level-1 section for content before the first heading.
    SectionContent sc = new SectionContent(1);
    if (calculateSrcSpans) {
        sc.setSrcSpan(new SrcSpan(sm.getSrcPos(lineSpans.getFirst().getStart()), -1));
    }
    // Identify the Line Type and call the necessary Function for the
    // further handling...
    while (!lineSpans.isEmpty()) {
        Span s = lineSpans.getFirst();
        lineType t = getLineType(sm, s);
        switch (t) {
        case SECTION:
            // Close the running section and open a new one titled by the text
            // between the '=' markers (level '=' chars on each side).
            contentSections.add(sc);
            int level = getSectionLevel(sm, s);
            sc = new SectionContent(
                    parseContentElement(sm, cepp, new Span(s.getStart() + level, s.getEnd() - level).trim(sm)),
                    level);
            lineSpans.removeFirst();
            if (calculateSrcSpans) {
                sc.setSrcSpan(new SrcSpan(sm.getSrcPos(s.getStart()), -1));
            }
            break;
        case HR:
            // remove the HR (----) and handle the rest as a parapraph line
            // (intentional fall-through into the PARAGRAPH cases below)
            removeHr(sm, s);
            t = lineType.PARAGRAPH;
        case PARAGRAPH:
        case PARAGRAPH_BOXED:
        case PARAGRAPH_INDENTED:
            sc.addParagraph(buildParagraph(sm, cepp, lineSpans, t));
            break;
        case NESTEDLIST:
        case NESTEDLIST_NR:
            sc.addNestedList(buildNestedList(sm, cepp, lineSpans, t));
            break;
        case DEFINITIONLIST:
            sc.addDefinitionList(buildDefinitionList(sm, cepp, lineSpans));
            break;
        case TABLE:
            sc.addTable(buildTable(sm, cepp, lineSpans));
            break;
        case EMPTYLINE:
            lineSpans.removeFirst();
            break;
        default:
            // Unknown marker: log and drop the line so the loop always progresses.
            logger.error("unknown lineStart!: \"" + sm.substring(s) + "\"");
            lineSpans.removeFirst();
        }
    }
    // add the remaining Section to the list.
    contentSections.add(sc);
    return buildSectionStructure(contentSections);
}
From source file:org.quickconnectfamily.json.JSONParser.java
/**
 * Parse JSON text into java object from the input source.
 *
 * Token-driven state machine: {@code statusStack} mirrors the nesting of
 * containers being built, {@code valueStack} holds the partially-built
 * containers (and, transiently, pending object keys).
 * {@code firstCharType}/{@code numUnmatchedCharTypeCount} track the opening
 * bracket type so parsing can stop at the matching close even before EOF.
 *
 * @param containerFactory - Use this factory to create your own JSON object and JSON array containers.
 * @return Instance of the following:
 * 	org.json.simple.JSONObject,
 * 	org.json.simple.JSONArray,
 * 	java.lang.String,
 * 	java.lang.Number,
 * 	java.lang.Boolean,
 * 	null
 *
 * @throws IOException
 * @throws ParseException
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public Object parse(ContainerFactory containerFactory) throws IOException, ParseException {
    LinkedList statusStack = new LinkedList();
    LinkedList valueStack = new LinkedList();
    try {
        do {
            if (status != S_IN_FINISHED_VALUE) {
                nextToken();
            }
            switch (status) {
            case S_INIT:
                // Expect a bare value or the opening of a container.
                switch (token.type) {
                case Yytoken.TYPE_VALUE:
                    status = S_IN_FINISHED_VALUE;
                    statusStack.addFirst(new Integer(status));
                    valueStack.addFirst(token.value);
                    break;
                case Yytoken.TYPE_LEFT_BRACE:
                    // Remember the outermost bracket type and count unmatched opens.
                    if (firstCharType == FIRST_JSON_CHAR_TYPE_UNSET) {
                        firstCharType = Yytoken.TYPE_LEFT_BRACE;
                    }
                    if (firstCharType == Yytoken.TYPE_LEFT_BRACE) {
                        numUnmatchedCharTypeCount++;
                    }
                    status = S_IN_OBJECT;
                    statusStack.addFirst(new Integer(status));
                    valueStack.addFirst(createObjectContainer(containerFactory));
                    break;
                case Yytoken.TYPE_LEFT_SQUARE:
                    if (firstCharType == FIRST_JSON_CHAR_TYPE_UNSET) {
                        firstCharType = Yytoken.TYPE_LEFT_SQUARE;
                    }
                    if (firstCharType == Yytoken.TYPE_LEFT_SQUARE) {
                        numUnmatchedCharTypeCount++;
                    }
                    status = S_IN_ARRAY;
                    statusStack.addFirst(new Integer(status));
                    valueStack.addFirst(createArrayContainer(containerFactory));
                    break;
                default:
                    status = S_IN_ERROR;
                }//inner switch
                break;
            case S_IN_FINISHED_VALUE:
                // Done when the stream ends or the outermost bracket closed.
                if (token.type == Yytoken.TYPE_EOF || numUnmatchedCharTypeCount == 0) {
                    firstCharType = FIRST_JSON_CHAR_TYPE_UNSET;
                    status = S_INIT;
                    return valueStack.removeFirst();
                } else
                    throw new ParseException(getPosition(), ParseException.ERROR_UNEXPECTED_TOKEN, token);
            case S_IN_OBJECT:
                switch (token.type) {
                case Yytoken.TYPE_COMMA:
                    break;
                case Yytoken.TYPE_VALUE:
                    // Inside an object a value token must be a String key.
                    if (token.value instanceof String) {
                        String key = (String) token.value;
                        valueStack.addFirst(key);
                        status = S_PASSED_PAIR_KEY;
                        statusStack.addFirst(new Integer(status));
                    } else {
                        status = S_IN_ERROR;
                    }
                    break;
                case Yytoken.TYPE_RIGHT_BRACE:
                    if (firstCharType == Yytoken.TYPE_LEFT_BRACE) {
                        numUnmatchedCharTypeCount--;
                    }
                    // Pop back to the enclosing container, or finish if at top level.
                    if (valueStack.size() > 1) {
                        statusStack.removeFirst();
                        valueStack.removeFirst();
                        status = peekStatus(statusStack);
                    } else {
                        status = S_IN_FINISHED_VALUE;
                    }
                    break;
                default:
                    status = S_IN_ERROR;
                    break;
                }//inner switch
                break;
            case S_PASSED_PAIR_KEY:
                // A key has been pushed; expect ':' then the pair's value.
                switch (token.type) {
                case Yytoken.TYPE_COLON:
                    break;
                case Yytoken.TYPE_VALUE:
                    statusStack.removeFirst();
                    String key = (String) valueStack.removeFirst();
                    Map parent = (Map) valueStack.getFirst();
                    parent.put(key, token.value);
                    status = peekStatus(statusStack);
                    break;
                case Yytoken.TYPE_LEFT_SQUARE:
                    if (firstCharType == Yytoken.TYPE_LEFT_SQUARE) {
                        numUnmatchedCharTypeCount++;
                    }
                    statusStack.removeFirst();
                    key = (String) valueStack.removeFirst();
                    parent = (Map) valueStack.getFirst();
                    List newArray = createArrayContainer(containerFactory);
                    parent.put(key, newArray);
                    status = S_IN_ARRAY;
                    statusStack.addFirst(new Integer(status));
                    valueStack.addFirst(newArray);
                    break;
                case Yytoken.TYPE_LEFT_BRACE:
                    if (firstCharType == Yytoken.TYPE_LEFT_BRACE) {
                        numUnmatchedCharTypeCount++;
                    }
                    statusStack.removeFirst();
                    key = (String) valueStack.removeFirst();
                    parent = (Map) valueStack.getFirst();
                    Map newObject = createObjectContainer(containerFactory);
                    parent.put(key, newObject);
                    status = S_IN_OBJECT;
                    statusStack.addFirst(new Integer(status));
                    valueStack.addFirst(newObject);
                    break;
                default:
                    status = S_IN_ERROR;
                }
                break;
            case S_IN_ARRAY:
                switch (token.type) {
                case Yytoken.TYPE_COMMA:
                    break;
                case Yytoken.TYPE_VALUE:
                    List val = (List) valueStack.getFirst();
                    val.add(token.value);
                    break;
                case Yytoken.TYPE_RIGHT_SQUARE:
                    if (firstCharType == Yytoken.TYPE_LEFT_SQUARE) {
                        numUnmatchedCharTypeCount--;
                    }
                    if (valueStack.size() > 1) {
                        statusStack.removeFirst();
                        valueStack.removeFirst();
                        status = peekStatus(statusStack);
                    } else {
                        status = S_IN_FINISHED_VALUE;
                    }
                    break;
                case Yytoken.TYPE_LEFT_BRACE:
                    if (firstCharType == Yytoken.TYPE_LEFT_BRACE) {
                        numUnmatchedCharTypeCount++;
                    }
                    val = (List) valueStack.getFirst();
                    Map newObject = createObjectContainer(containerFactory);
                    val.add(newObject);
                    status = S_IN_OBJECT;
                    statusStack.addFirst(new Integer(status));
                    valueStack.addFirst(newObject);
                    break;
                case Yytoken.TYPE_LEFT_SQUARE:
                    if (firstCharType == Yytoken.TYPE_LEFT_SQUARE) {
                        numUnmatchedCharTypeCount++;
                    }
                    val = (List) valueStack.getFirst();
                    List newArray = createArrayContainer(containerFactory);
                    val.add(newArray);
                    status = S_IN_ARRAY;
                    statusStack.addFirst(new Integer(status));
                    valueStack.addFirst(newArray);
                    break;
                default:
                    status = S_IN_ERROR;
                }//inner switch
                break;
            case S_IN_ERROR:
                throw new ParseException(getPosition(), ParseException.ERROR_UNEXPECTED_TOKEN, token);
            }//switch
            if (status == S_IN_ERROR) {
                throw new ParseException(getPosition(), ParseException.ERROR_UNEXPECTED_TOKEN, token);
            }
        } while (token.type != Yytoken.TYPE_EOF);
    } catch (IOException ie) {
        throw ie;
    }
    // Input ended without producing a complete value.
    throw new ParseException(getPosition(), ParseException.ERROR_UNEXPECTED_TOKEN, token);
}
From source file:edu.harvard.iq.dvn.ingest.dsb.impl.DvnRJobRequest.java
/**
 * Parses a comma-separated range token string into a list of 4-element range
 * descriptors: [startOpCode, v1, endOpCode, v2].
 *
 * Operator codes (as written by this method): start "3" = inclusive '[',
 * "5" = exclusive '(', "2" = negation "!="; end "4" = inclusive ']',
 * "6" = exclusive ')'.
 *
 * A token is only treated as a range when it carries at least one explicit
 * range operator ('[', '(', ']', ')') on either end; bare dashes alone do
 * not make a range (see inline rationale from L.A., v3.6).
 *
 * @param tkn raw token string, e.g. "[3-5),7"
 * @return one [start, v1, end, v2] list per comma-separated element
 */
public List<List<String>> getValueRange(String tkn) {

    dbgLog.fine("received token=" + tkn);

    String step0 = StringUtils.strip(tkn);

    dbgLog.fine("step0=" + step0);

    // string into tokens
    String[] step1raw = step0.split(",");

    dbgLog.fine("step1raw=" + StringUtils.join(step1raw, ","));

    // remove meaningless commas if exist
    List<String> step1 = new ArrayList<String>();
    for (String el : step1raw) {
        if (!el.equals("")) {
            step1.add(el);
        }
    }

    dbgLog.fine("step1=" + StringUtils.join(step1, ","));

    List<List<String>> rangeData = new ArrayList<List<String>>();

    // for each token, check the range operator
    for (int i = 0; i < step1.size(); i++) {
        // Explode the element into single-character strings for positional checks.
        LinkedList<String> tmp = new LinkedList<String>(
                Arrays.asList(String2StringArray(String.valueOf(step1.get(i)))));

        Map<String, String> token = new HashMap<String, String>();
        boolean rangeMode = false;

        // .get(i) below CAN'T possibly be right (??) -- replacing
        // it with .get(0). -- L.A., v3.6
        //if ((!tmp.get(i).equals("[")) && (!tmp.get(i).equals("("))){
        if ((!tmp.get(0).equals("[")) && (!tmp.get(0).equals("("))) {
            // no LHS range operator
            // assume [
            token.put("start", "3");
        } else if (tmp.get(0).equals("[")) {
            rangeMode = true;
            token.put("start", "3");
            tmp.removeFirst();
        } else if (tmp.get(0).equals("(")) {
            rangeMode = true;
            token.put("start", "5");
            tmp.removeFirst();
        }

        if ((!tmp.getLast().equals("]")) && (!tmp.getLast().equals(")"))) {
            // no RHS range operator
            // assume ]
            token.put("end", "4");
        } else if (tmp.getLast().equals("]")) {
            rangeMode = true;
            tmp.removeLast();
            token.put("end", "4");
        } else if (tmp.getLast().equals(")")) {
            rangeMode = true;
            tmp.removeLast();
            token.put("end", "6");
        }

        // I'm now enforcing the following rules:
        // the "rangeMode" above - a range must have at least one range
        // operator, a square bracket or parenthesis, on one end, at
        // least; i.e., either on the left, or on the right.
        // If there are no range operators, even if there are dashes
        // inside the token, they are not going to be interpreted as
        // range definitions.
        // still TODO: (possibly?) add more validation; figure out how
        // to encode *date* ranges ("-" is not optimal, since dates already
        // contain dashes... although, since dates are (supposed to be)
        // normalized it should still be possible to parse it unambiguously)
        // -- L.A., v3.6

        if (rangeMode) {
            // after these steps, the string does not have range operators;
            // i.e., '-9--3', '--9', '-9-','-9', '-1-1', '1', '3-4', '6-'
            if ((tmp.get(0).equals("!")) && (tmp.get(1).equals("="))) {
                // != negation string is found
                token.put("start", "2");
                token.put("end", "");
                token.put("v1", StringUtils.join(tmp.subList(2, tmp.size()), ""));
                token.put("v2", "");
                dbgLog.fine("value=" + StringUtils.join(tmp.subList(2, tmp.size()), ","));
            } else if ((tmp.get(0).equals("-")) && (tmp.get(1).equals("-"))) {
                // type 2: --9  (open lower bound)
                token.put("v1", "");
                tmp.removeFirst();
                token.put("v2", StringUtils.join(tmp, ""));
            } else if ((tmp.get(0).equals("-")) && (tmp.getLast().equals("-"))) {
                // type 3: -9-  (negative value, open upper bound)
                token.put("v2", "");
                tmp.removeLast();
                token.put("v1", StringUtils.join(tmp, ""));
            } else if ((!tmp.get(0).equals("-")) && (tmp.getLast().equals("-"))) {
                // type 8: 6-  (open upper bound)
                token.put("v2", "");
                tmp.removeLast();
                token.put("v1", StringUtils.join(tmp, ""));
            } else {
                // Count interior hyphens to distinguish the remaining shapes.
                int count = 0;
                List<Integer> index = new ArrayList<Integer>();
                for (int j = 0; j < tmp.size(); j++) {
                    if (tmp.get(j).equals("-")) {
                        count++;
                        index.add(j);
                    }
                }

                if (count >= 2) {
                    // range type
                    // divide the second hyphen
                    // types 1 and 5: -9--3, -1-1
                    // token.put("v1", StringUtils.join(tmp[0..($index[1]-1)],"" ));
                    // NOTE(review): "v1" is never set in this branch (the line
                    // above was left commented out) — confirm whether that is a
                    // known gap or intended.
                    token.put("v2", StringUtils.join(tmp.subList((index.get(1) + 1), tmp.size()), ""));
                } else if (count == 1) {
                    if (tmp.get(0).equals("-")) {
                        // point negative type
                        // type 4: -9 or -inf,9
                        // do nothing
                        if ((token.get("start").equals("5"))
                                && ((token.get("end").equals("6")) || (token.get("end").equals("4")))) {
                            // Exclusive-start form "(-9...": treat as open lower bound.
                            token.put("v1", "");
                            tmp.removeFirst();
                            token.put("v2", StringUtils.join(tmp, ""));
                        } else {
                            token.put("v1", StringUtils.join(tmp, ""));
                            token.put("v2", StringUtils.join(tmp, ""));
                        }
                    } else {
                        // type 7: 3-4
                        // both positive value and range type
                        String[] vset = (StringUtils.join(tmp, "")).split("-");
                        token.put("v1", vset[0]);
                        token.put("v2", vset[1]);
                    }
                } else {
                    // type 6: 1  (single point)
                    token.put("v1", StringUtils.join(tmp, ""));
                    token.put("v2", StringUtils.join(tmp, ""));
                }
            }
        } else {
            // assume that this is NOT a range; treat the entire sequence
            // of symbols as a single token:
            // type 6: 1
            token.put("v1", StringUtils.join(tmp, ""));
            token.put("v2", StringUtils.join(tmp, ""));
        }

        dbgLog.fine(i + "-th result=" + token.get("start") + "|" + token.get("v1") + "|" + token.get("end") + "|"
                + token.get("v2"));

        List<String> rangeSet = new ArrayList<String>();
        rangeSet.add(token.get("start"));
        rangeSet.add(token.get("v1"));
        rangeSet.add(token.get("end"));
        rangeSet.add(token.get("v2"));

        rangeData.add(rangeSet);
    }

    dbgLog.fine("rangeData:\n" + rangeData);

    return rangeData;
}
From source file:com.jaspersoft.jasperserver.api.metadata.common.service.impl.hibernate.HibernateRepositoryServiceImpl.java
protected void refreshFolderPaths(RepoFolder folder) { // refreshing recursively using a queue LinkedList<RepoFolder> folders = new LinkedList<RepoFolder>(); folders.addLast(folder);//from w ww. j a v a2 s . c om while (!folders.isEmpty()) { RepoFolder aFolder = folders.removeFirst(); aFolder.refreshURI(this); Set resources = aFolder.getChildren(); if (resources != null && !resources.isEmpty()) { for (Iterator it = resources.iterator(); it.hasNext();) { RepoResource child = (RepoResource) it.next(); RepoFolder grandChildrenFolder = child.getChildrenFolder(); if (grandChildrenFolder != null) { folders.addLast(grandChildrenFolder); } } } } }
From source file:elh.eus.absa.Features.java
/**
 * Extracts word-form n-grams (lengths 1..length) from a KAF/NAF document and
 * registers each as a "wf" feature via addNgram().
 *
 * A sliding window of at most {@code length} tokens is kept per sentence; for
 * every new token, every prefix of the current window (1-gram up to the full
 * window) is emitted. At sentence end the remaining window suffixes are
 * flushed so trailing n-grams are not lost.
 *
 * @param length maximum n-gram length; 0 disables extraction
 * @param kafDoc POS-tagged KAF document to extract n-grams from
 * @param save   declared as "save ngrams to file or not", but not read in this
 *               body — NOTE(review): confirm whether it is still needed
 * @return 0 if length == 0 (nothing done), 1 otherwise
 */
private int extractWfNgramsKAF(int length, KAFDocument kafDoc, boolean save) {
    //System.err.println("ngram extraction: _"+length+"_");
    if (length == 0) {
        return 0;
    }

    for (List<WF> sent : kafDoc.getSentences()) {
        LinkedList<String> ngrams = new LinkedList<String>();
        for (WF wf : sent) {
            // Keep the window at most 'length' tokens wide.
            if (ngrams.size() >= length) {
                ngrams.removeFirst();
            }
            ngrams.add(wf.getForm());
            //ngrams.add(normalize(wf.getForm(), params.getProperty("normalization", "none")));
            // add ngrams to the feature list (every prefix of the window)
            for (int i = 0; i < ngrams.size(); i++) {
                String ng = featureFromArray(ngrams.subList(0, i + 1), "wf");
                addNgram("wf", ng);
            }
        }
        //empty ngram list and add remaining ngrams to the feature list
        while (!ngrams.isEmpty()) {
            String ng = featureFromArray(ngrams, "wf");
            addNgram("wf", ng);
            ngrams.removeFirst();
        }
    }
    return 1;
}
From source file:elh.eus.absa.Features.java
/**
 * Extracts POS-tag n-grams (lengths 1..length) from a KAF document and
 * registers each as a "pos" feature via addNgram().
 *
 * Works like the word-form variant: a sliding window of at most
 * {@code length} tags per sentence, emitting every prefix of the window for
 * each term, then flushing remaining suffixes at sentence end. Tags listed in
 * {@code discardPos} are not added to the window.
 *
 * @param length     maximum n-gram length; 0 disables extraction
 * @param kafDoc     POS-tagged KAF document to extract n-grams from
 * @param discardPos POS tags to exclude from the n-gram window
 * @param save       declared as "save ngrams to file or not", but not read in
 *                   this body — NOTE(review): confirm whether it is needed
 * @return 0 if length == 0 (nothing done), 1 otherwise
 */
public int extractPosNgrams(int length, KAFDocument kafDoc, List<String> discardPos, boolean save) {
    //System.err.println("POS ngram extraction: _"+length+"_");
    if (length == 0) {
        return 0;
    }

    int sentNum = kafDoc.getSentences().size();
    for (int s = 0; s < sentNum; s++) {
        LinkedList<String> ngrams = new LinkedList<String>();
        for (Term term : kafDoc.getTermsBySent(s)) {
            // Keep the window at most 'length' tags wide.
            if (ngrams.size() >= length) {
                ngrams.removeFirst();
            }
            if (!discardPos.contains(term.getPos())) {
                ngrams.add(term.getPos());
            }
            // add ngrams to the feature list
            // NOTE(review): this loop also runs when the term's POS was
            // discarded (window unchanged), re-emitting the same prefixes —
            // confirm whether that repeated counting is intended.
            for (int i = 0; i < ngrams.size(); i++) {
                String ng = featureFromArray(ngrams.subList(0, i + 1), "pos");
                addNgram("pos", ng);
            }
        }
        //empty ngram list and add remaining ngrams to the feature list
        while (!ngrams.isEmpty()) {
            String ng = featureFromArray(ngrams, "pos");
            addNgram("pos", ng);
            ngrams.removeFirst();
        }
    }
    return 1;
}
From source file:org.kuali.rice.krms.impl.provider.repository.SimplePropositionTypeService.java
/**
 * Translates the parameters on the given proposition definition to create an expression for evaluation.
 * The proposition parameters are defined in a reverse-polish notation so a stack is used for
 * evaluation purposes.
 *
 * CONSTANT and TERM parameters push operands; FUNCTION and OPERATOR
 * parameters pop their arguments and push the combined expression. A valid
 * definition leaves exactly one expression on the stack.
 *
 * @param propositionDefinition the proposition definition to translate
 *
 * @return the translated expression for the given proposition, this
 * expression, when evaluated, will return a Boolean.
 */
protected Expression<Boolean> translateToExpression(PropositionDefinition propositionDefinition) {
    LinkedList<Expression<? extends Object>> stack = new LinkedList<Expression<? extends Object>>();
    for (PropositionParameter parameter : propositionDefinition.getParameters()) {
        PropositionParameterType parameterType = PropositionParameterType.fromCode(parameter.getParameterType());
        if (parameterType == PropositionParameterType.CONSTANT) {
            // TODO - need some way to define data type on the prop parameter as well?  Not all constants will actually be String values!!!
            stack.addFirst(new ConstantExpression<String>(parameter.getValue()));
        } else if (parameterType == PropositionParameterType.FUNCTION) {
            // Resolve and load the custom function, then pop one argument per
            // declared parameter (arguments were pushed before the function).
            String functionId = parameter.getValue();
            FunctionDefinition functionDefinition = functionRepositoryService.getFunction(functionId);
            if (functionDefinition == null) {
                throw new RepositoryDataException("Unable to locate function with the given id: " + functionId);
            }
            FunctionTypeService functionTypeService = typeResolver.getFunctionTypeService(functionDefinition);
            Function function = functionTypeService.loadFunction(functionDefinition);
            // TODO throw an exception if function is null?
            List<FunctionParameterDefinition> parameters = functionDefinition.getParameters();
            if (stack.size() < parameters.size()) {
                throw new RepositoryDataException(
                        "Failed to initialize custom function '" + functionDefinition.getNamespace() + " "
                                + functionDefinition.getName() + "'.  There were only " + stack.size()
                                + " values on the stack but function requires at least " + parameters.size());
            }
            List<Expression<? extends Object>> arguments = new ArrayList<Expression<? extends Object>>();
            // work backward through the list to match params to the stack
            for (int index = parameters.size() - 1; index >= 0; index--) {
                FunctionParameterDefinition parameterDefinition = parameters.get(index);
                // TODO need to check types here? expression object probably needs a getType on it so that we can confirm that the types will be compatible?
                parameterDefinition.getParameterType();
                Expression<? extends Object> argument = stack.removeFirst();
                arguments.add(argument);
            }
            String[] parameterTypes = getFunctionParameterTypes(functionDefinition);
            stack.addFirst(
                    new FunctionExpression(function, parameterTypes, arguments, getComparisonOperatorService()));
        } else if (parameterType == PropositionParameterType.OPERATOR) {
            // Binary operator: pops rhs first (pushed last), then lhs.
            ComparisonOperator operator = ComparisonOperator.fromCode(parameter.getValue());
            if (stack.size() < 2) {
                throw new RepositoryDataException("Failed to initialize expression for comparison operator "
                        + operator + " because a sufficient number of arguments was not available on the stack. "
                        + "Current contents of stack: " + stack.toString());
            }
            Expression<? extends Object> rhs = stack.removeFirst();
            Expression<? extends Object> lhs = stack.removeFirst();
            stack.addFirst(new BinaryOperatorExpression(operator, lhs, rhs));
        } else if (parameterType == PropositionParameterType.TERM) {
            String termId = parameter.getValue();
            TermDefinition termDefinition = getTermRepositoryService().getTerm(termId);
            if (termDefinition == null) {
                throw new RepositoryDataException("unable to load term with id " + termId);
            }
            Term term = translateTermDefinition(termDefinition);
            stack.addFirst(new TermExpression(term));
        }
    }
    // A well-formed RPN definition reduces to exactly one expression.
    if (stack.size() != 1) {
        throw new RepositoryDataException(
                "Final contents of expression stack are incorrect, there should only be one entry but was "
                        + stack.size() + ".  Current contents of stack: " + stack.toString());
    }
    return new BooleanValidatingExpression(stack.removeFirst());
}