List of usage examples for java.io.StreamTokenizer.TT_WORD

int TT_WORD

A constant indicating that a word token has been read. When nextToken() returns TT_WORD, the text of the word is available in the tokenizer's sval field.
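Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects; the class name and input string are illustrative) of the typical TT_WORD pattern: loop over nextToken(), compare ttype with TT_WORD, and read the word from sval.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class WordTokenDemo {
    public static void main(String[] args) throws IOException {
        // Default syntax table: letters form words, numbers are parsed, '"' is a quote character.
        StreamTokenizer st = new StreamTokenizer(new StringReader("alpha beta 42 \"quoted text\""));
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_WORD) {
                System.out.println("word:   " + st.sval);   // prints alpha, then beta
            } else if (st.ttype == StreamTokenizer.TT_NUMBER) {
                System.out.println("number: " + st.nval);   // prints 42.0
            } else if (st.ttype == '"') {
                System.out.println("quoted: " + st.sval);   // prints quoted text
            }
        }
    }
}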
From source file:org.openhab.io.caldav.internal.util.ExecuteCommandJob.java
/**
 * Parses a <code>command</code>. Utilizes the {@link StreamTokenizer} which
 * takes care of quoted Strings as well.
 *
 * @param command the command to parse
 * @return the tokenized command which can be processed by the
 *         <code>ConsoleInterpreter</code>
 *
 * @see org.openhab.io.console.ConsoleInterpreter
 */
protected String[] parseCommand(String command) {
    logger.trace("going to parse command '{}'", command);

    // if the command starts with '>' it contains a script which needs no
    // further handling here ...
    if (command.startsWith(">")) {
        return new String[] { ">", command.substring(1).trim() };
    }

    StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(command));
    tokenizer.wordChars('_', '_');
    tokenizer.wordChars('-', '-');
    tokenizer.wordChars('.', '.');

    List<String> tokens = new ArrayList<String>();
    try {
        int tokenType = 0;
        while (tokenType != StreamTokenizer.TT_EOF && tokenType != StreamTokenizer.TT_EOL) {
            tokenType = tokenizer.nextToken();
            String token = "";
            switch (tokenType) {
            case StreamTokenizer.TT_WORD:
            case 34 /* quoted String */:
                token = tokenizer.sval;
                break;
            case StreamTokenizer.TT_NUMBER:
                token = String.valueOf(tokenizer.nval);
                break;
            }
            tokens.add(token);
            logger.trace("read value {} from the given command", token);
        }
    } catch (IOException ioe) {
    }

    return tokens.toArray(new String[0]);
}
From source file:org.openmrs.cohort.CohortSearchHistory.java
public PatientSearch createCompositionFilter(String description) {
    Set<String> andWords = new HashSet<String>();
    Set<String> orWords = new HashSet<String>();
    Set<String> notWords = new HashSet<String>();
    andWords.add("and");
    andWords.add("intersection");
    andWords.add("*");
    orWords.add("or");
    orWords.add("union");
    orWords.add("+");
    notWords.add("not");
    notWords.add("!");

    List<Object> currentLine = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(description));
        st.ordinaryChar('(');
        st.ordinaryChar(')');
        Stack<List<Object>> stack = new Stack<List<Object>>();
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = new Integer((int) st.nval);
                if (thisInt < 1 || thisInt > searchHistory.size()) {
                    log.error("number < 1 or > search history size");
                    return null;
                }
                currentLine.add(thisInt);
            } else if (st.ttype == '(') {
                stack.push(currentLine);
                currentLine = new ArrayList<Object>();
            } else if (st.ttype == ')') {
                List<Object> l = stack.pop();
                l.add(currentLine);
                currentLine = l;
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                String str = st.sval.toLowerCase();
                if (andWords.contains(str))
                    currentLine.add(PatientSetService.BooleanOperator.AND);
                else if (orWords.contains(str))
                    currentLine.add(PatientSetService.BooleanOperator.OR);
                else if (notWords.contains(str))
                    currentLine.add(PatientSetService.BooleanOperator.NOT);
                else
                    throw new IllegalArgumentException("Don't recognize " + st.sval);
            }
        }
    } catch (Exception ex) {
        log.error("Error in description string: " + description, ex);
        return null;
    }

    if (!testCompositionList(currentLine)) {
        log.error("Description string failed test: " + description);
        return null;
    }

    //return toPatientFilter(currentLine);
    PatientSearch ret = new PatientSearch();
    ret.setParsedComposition(currentLine);
    return ret;
}
From source file:org.openmrs.module.reporting.cohort.definition.util.CohortExpressionParser.java
/**
 * Elements in this list can be: an Integer, indicating a 1-based index into a search history;
 * a BooleanOperator (AND, OR, NOT); a CohortDefinition; a PatientSearch; or another List of the
 * same form, which indicates a parenthetical expression.
 */
public static List<Object> parseIntoTokens(String expression) {
    List<Object> tokens = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(expression));
        for (Character c : characterWords) {
            st.ordinaryChar(c);
        }
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = new Integer((int) st.nval);
                if (thisInt < 1) {
                    log.error("number < 1");
                    return null;
                }
                tokens.add(thisInt);
            } else if (openParenthesesWords.contains(Character.valueOf((char) st.ttype))) {
                tokens.add("(");
            } else if (closeParenthesesWords.contains(Character.valueOf((char) st.ttype))) {
                tokens.add(")");
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                tokens.add(st.sval);
            }
        }
        return parseIntoTokens(tokens);
    } catch (Exception ex) {
        log.error("Error in description string: " + expression, ex);
        return null;
    }
}
From source file:org.openmrs.module.reporting.query.evaluator.CompositionQueryEvaluator.java
/**
 * Elements in this list can be: an Integer, indicating a 1-based index into a search history;
 * a BooleanOperator (AND, OR, NOT); a Query; or another List of the same form, which indicates
 * a parenthetical expression.
 */
public List<Object> parseIntoTokens(String expression) throws EvaluationException {
    List<Object> tokens = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(expression));
        for (Character c : CHARACTER_WORDS) {
            st.ordinaryChar(c);
        }
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = (int) st.nval;
                if (thisInt < 1) {
                    throw new IllegalArgumentException("Invalid number < 1 found");
                }
                tokens.add(thisInt);
            } else if (OPEN_PARENTHESES_WORDS.contains(Character.valueOf((char) st.ttype))) {
                tokens.add("(");
            } else if (CLOSE_PARENTHESES_WORDS.contains(Character.valueOf((char) st.ttype))) {
                tokens.add(")");
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                tokens.add(st.sval);
            }
        }
        return parseIntoTokens(tokens);
    } catch (Exception e) {
        throw new EvaluationException("Unable to parse expression <" + expression + "> into tokens", e);
    }
}
From source file:org.openmrs.reporting.PatientSearch.java
public static PatientSearch createCompositionSearch(String description) {
    // TODO This is a rewrite of the code in CohortSearchHistory.createCompositionFilter(String).
    //      That method should probably delegate to this one in some way.
    // TODO use open/closeParenthesesWords declared above
    List<Object> tokens = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(description));
        st.ordinaryChar('(');
        st.ordinaryChar(')');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = new Integer((int) st.nval);
                if (thisInt < 1) {
                    log.error("number < 1");
                    return null;
                }
                tokens.add(thisInt);
            } else if (st.ttype == '(') {
                tokens.add("(");
            } else if (st.ttype == ')') {
                tokens.add(")");
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                String str = st.sval.toLowerCase();
                tokens.add(str);
            }
        }
        return createCompositionSearch(tokens);
    } catch (Exception ex) {
        log.error("Error in description string: " + description, ex);
        return null;
    }
}
From source file:org.osaf.cosmo.calendar.ICalValueParser.java
/**
 * Parses the text value.
 */
public void parse() throws ParseException {
    int nextToken = nextToken();
    // log.debug("starting token: " + tokenizer);
    if (nextToken != ';')
        return;

    nextToken = nextToken();
    while (nextToken != ':' && nextToken != StreamTokenizer.TT_EOF) {
        // log.debug("param name token: " + tokenizer);
        if (nextToken != StreamTokenizer.TT_WORD)
            throw new ParseException("expected word, read " + tokenizer.ttype, 1);
        String name = tokenizer.sval;

        nextToken = nextToken();
        // log.debug("param = token: " + tokenizer);
        if (nextToken != '=')
            throw new ParseException("expected =, read " + tokenizer.ttype, 1);

        nextToken = nextToken();
        // log.debug("param val token: " + tokenizer);
        if (!(nextToken == StreamTokenizer.TT_WORD || nextToken == '"'))
            throw new ParseException("expected word, read " + tokenizer.ttype, 1);
        String value = tokenizer.sval;
        // log.debug("parameter " + name + ": " + value);
        params.put(name, value);

        nextToken = nextToken();
        // log.debug("post param token: " + tokenizer);
        if (nextToken == ':')
            break;
        else if (nextToken == ';')
            nextToken = nextToken();
        else
            throw new ParseException("expected either : or ;, read " + tokenizer.ttype, 1);
    }

    nextToken = nextToken();
    // log.debug("prop val token: " + tokenizer);
    if (nextToken != StreamTokenizer.TT_WORD)
        throw new ParseException("expected " + StreamTokenizer.TT_WORD + ", read " + tokenizer.ttype, 1);
    value = tokenizer.sval;
    // log.debug("property: " + value + ", params: " + params);
}
From source file:org.structr.core.function.Functions.java
public static Object evaluate(final ActionContext actionContext, final GraphObject entity, final String expression)
        throws FrameworkException {

    final String expressionWithoutNewlines = expression.replace('\n', ' ');
    final StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(expressionWithoutNewlines));

    tokenizer.eolIsSignificant(true);
    tokenizer.ordinaryChar('.');
    tokenizer.wordChars('_', '_');
    tokenizer.wordChars('.', '.');
    tokenizer.wordChars('!', '!');

    Expression root = new RootExpression();
    Expression current = root;
    Expression next = null;
    String lastToken = null;
    int token = 0;
    int level = 0;

    while (token != StreamTokenizer.TT_EOF) {

        token = nextToken(tokenizer);

        switch (token) {

        case StreamTokenizer.TT_EOF:
            break;

        case StreamTokenizer.TT_EOL:
            break;

        case StreamTokenizer.TT_NUMBER:
            if (current == null) {
                throw new FrameworkException(422, "Invalid expression: mismatched opening bracket before NUMBER");
            }
            next = new ConstantExpression(tokenizer.nval);
            current.add(next);
            lastToken += "NUMBER";
            break;

        case StreamTokenizer.TT_WORD:
            if (current == null) {
                throw new FrameworkException(422, "Invalid expression: mismatched opening bracket before " + tokenizer.sval);
            }
            next = checkReservedWords(tokenizer.sval);
            Expression previousExpression = current.getPrevious();
            if (tokenizer.sval.startsWith(".") && previousExpression != null
                    && previousExpression instanceof FunctionExpression && next instanceof ValueExpression) {

                final FunctionExpression previousFunctionExpression = (FunctionExpression) previousExpression;
                final ValueExpression valueExpression = (ValueExpression) next;

                current.replacePrevious(new FunctionValueExpression(previousFunctionExpression, valueExpression));
            } else {
                current.add(next);
            }
            lastToken = tokenizer.sval;
            break;

        case '(':
            if (((current == null || current instanceof RootExpression) && next == null) || current == next) {
                // an additional bracket without a new function,
                // this can only be an execution group.
                next = new GroupExpression();
                current.add(next);
            }
            current = next;
            lastToken += "(";
            level++;
            break;

        case ')':
            if (current == null) {
                throw new FrameworkException(422, "Invalid expression: mismatched opening bracket before " + lastToken);
            }
            current = current.getParent();
            if (current == null) {
                throw new FrameworkException(422, "Invalid expression: mismatched closing bracket after " + lastToken);
            }
            lastToken += ")";
            level--;
            break;

        case '[':
            // bind directly to the previous expression
            next = new ArrayExpression();
            current.add(next);
            current = next;
            lastToken += "[";
            level++;
            break;

        case ']':
            if (current == null) {
                throw new FrameworkException(422, "Invalid expression: mismatched closing bracket before " + lastToken);
            }
            current = current.getParent();
            if (current == null) {
                throw new FrameworkException(422, "Invalid expression: mismatched closing bracket after " + lastToken);
            }
            lastToken += "]";
            level--;
            break;

        case ';':
            next = null;
            lastToken += ";";
            break;

        case ',':
            next = current;
            lastToken += ",";
            break;

        default:
            if (current == null) {
                throw new FrameworkException(422, "Invalid expression: mismatched opening bracket before " + tokenizer.sval);
            }
            current.add(new ConstantExpression(tokenizer.sval));
            lastToken = tokenizer.sval;
        }
    }

    if (level > 0) {
        throw new FrameworkException(422, "Invalid expression: mismatched closing bracket after " + lastToken);
    }

    return root.evaluate(actionContext, entity);
}
From source file:org.unitedinternet.cosmo.calendar.ICalValueParser.java
/**
 * Parses the text value.
 * @throws ParseException - if something is wrong this exception is thrown.
 */
public void parse() throws ParseException {
    int nextToken = nextToken();
    // log.debug("starting token: " + tokenizer);
    if (nextToken != ';') {
        return;
    }

    nextToken = nextToken();
    while (nextToken != ':' && nextToken != StreamTokenizer.TT_EOF) {
        // log.debug("param name token: " + tokenizer);
        if (nextToken != StreamTokenizer.TT_WORD) {
            throw new ParseException("expected word, read " + tokenizer.ttype, 1);
        }
        String name = tokenizer.sval;

        nextToken = nextToken();
        // log.debug("param = token: " + tokenizer);
        if (nextToken != '=') {
            throw new ParseException("expected =, read " + tokenizer.ttype, 1);
        }

        nextToken = nextToken();
        // log.debug("param val token: " + tokenizer);
        if (!(nextToken == StreamTokenizer.TT_WORD || nextToken == '"')) {
            throw new ParseException("expected word, read " + tokenizer.ttype, 1);
        }
        String value = tokenizer.sval;
        // log.debug("parameter " + name + ": " + value);
        params.put(name, value);

        nextToken = nextToken();
        // log.debug("post param token: " + tokenizer);
        if (nextToken == ':') {
            break;
        } else if (nextToken == ';') {
            nextToken = nextToken();
        } else {
            throw new ParseException("expected either : or ;, read " + tokenizer.ttype, 1);
        }
    }

    nextToken = nextToken();
    // log.debug("prop val token: " + tokenizer);
    if (nextToken != StreamTokenizer.TT_WORD) {
        throw new ParseException("expected " + StreamTokenizer.TT_WORD + ", read " + tokenizer.ttype, 1);
    }
    value = tokenizer.sval;
    // log.debug("property: " + value + ", params: " + params);
}
From source file:org.wso2.andes.server.security.access.config.PlainConfiguration.java
@Override
public RuleSet load() throws ConfigurationException {
    RuleSet ruleSet = super.load();

    try {
        _st = new StreamTokenizer(new BufferedReader(new FileReader(_file)));
        _st.resetSyntax(); // setup the tokenizer

        _st.commentChar(COMMENT); // single line comments
        _st.eolIsSignificant(true); // return EOL as a token
        _st.lowerCaseMode(true); // case insensitive tokens
        _st.ordinaryChar('='); // equals is a token
        _st.ordinaryChar(CONTINUATION); // continuation character (when followed by EOL)
        _st.quoteChar('"'); // double quote
        _st.quoteChar('\''); // single quote
        _st.whitespaceChars('\u0000', '\u0020'); // whitespace (to be ignored) TODO properly
        _st.wordChars('a', 'z'); // unquoted token characters [a-z]
        _st.wordChars('A', 'Z'); // [A-Z]
        _st.wordChars('0', '9'); // [0-9]
        _st.wordChars('_', '_'); // underscore
        _st.wordChars('-', '-'); // dash
        _st.wordChars('.', '.'); // dot
        _st.wordChars('*', '*'); // star
        _st.wordChars('@', '@'); // at
        _st.wordChars(':', ':'); // colon

        // parse the acl file lines
        Stack<String> stack = new Stack<String>();
        int current;
        do {
            current = _st.nextToken();
            switch (current) {
            case StreamTokenizer.TT_EOF:
            case StreamTokenizer.TT_EOL:
                if (stack.isEmpty()) {
                    break; // blank line
                }

                // pull out the first token from the bottom of the stack and check arguments exist
                String first = stack.firstElement();
                stack.removeElementAt(0);
                if (stack.isEmpty()) {
                    throw new ConfigurationException(String.format(NOT_ENOUGH_TOKENS_MSG, getLine()));
                }

                // check for and parse optional initial number for ACL lines
                Integer number = null;
                if (StringUtils.isNumeric(first)) {
                    // set the acl number and get the next element
                    number = Integer.valueOf(first);
                    first = stack.firstElement();
                    stack.removeElementAt(0);
                }

                if (StringUtils.equalsIgnoreCase(ACL, first)) {
                    parseAcl(number, stack);
                } else if (number == null) {
                    if (StringUtils.equalsIgnoreCase(GROUP, first)) {
                        parseGroup(stack);
                    } else if (StringUtils.equalsIgnoreCase(CONFIG, first)) {
                        parseConfig(stack);
                    } else {
                        throw new ConfigurationException(String.format(UNRECOGNISED_INITIAL_MSG, first, getLine()));
                    }
                } else {
                    throw new ConfigurationException(String.format(NUMBER_NOT_ALLOWED_MSG, first, getLine()));
                }

                // reset stack, start next line
                stack.clear();
                break;
            case StreamTokenizer.TT_NUMBER:
                stack.push(Integer.toString(Double.valueOf(_st.nval).intValue()));
                break;
            case StreamTokenizer.TT_WORD:
                stack.push(_st.sval); // token
                break;
            default:
                if (_st.ttype == CONTINUATION) {
                    int next = _st.nextToken();
                    if (next == StreamTokenizer.TT_EOL) {
                        break; // continue reading next line
                    }
                    // invalid location for continuation character (add one to line because we ate the EOL)
                    throw new ConfigurationException(String.format(PREMATURE_CONTINUATION_MSG, getLine() + 1));
                } else if (_st.ttype == '\'' || _st.ttype == '"') {
                    stack.push(_st.sval); // quoted token
                } else {
                    stack.push(Character.toString((char) _st.ttype)); // single character
                }
            }
        } while (current != StreamTokenizer.TT_EOF);

        if (!stack.isEmpty()) {
            throw new ConfigurationException(String.format(PREMATURE_EOF_MSG, getLine()));
        }
    } catch (IllegalArgumentException iae) {
        throw new ConfigurationException(String.format(PARSE_TOKEN_FAILED_MSG, getLine()), iae);
    } catch (FileNotFoundException fnfe) {
        throw new ConfigurationException(String.format(CONFIG_NOT_FOUND_MSG, getFile().getName()), fnfe);
    } catch (IOException ioe) {
        throw new ConfigurationException(String.format(CANNOT_LOAD_MSG, getFile().getName()), ioe);
    }

    return ruleSet;
}
From source file:org.xwiki.extension.internal.ExtensionUtils.java
/**
 * @param str the String to parse
 * @param trim true if the passed String should be trimmed
 * @return the collection of Strings extracted from the passed String
 * @since 8.3M1
 */
public static List<String> importPropertyStringList(String str, boolean trim) {
    try {
        String cleanedString = str;

        // Trim
        if (trim) {
            cleanedString = cleanedString.trim();
        }

        // Set up a StreamTokenizer on the characters in this String
        StreamTokenizer st = new StreamTokenizer(new StringReader(cleanedString));

        // Everything is word
        st.ordinaryChars(0, 255);
        st.wordChars(0, 255);

        // Except quote chars
        st.quoteChar('"');
        st.quoteChar('\'');

        // And delimiters
        st.whitespaceChars(',', ',');
        st.whitespaceChars(' ', ' ');
        st.whitespaceChars('\t', '\t');
        st.whitespaceChars('\n', '\n');
        st.whitespaceChars('\r', '\r');

        // Split comma-delimited tokens into a List
        List<String> collection = new ArrayList<>();
        while (true) {
            int ttype = st.nextToken();
            if (ttype == StreamTokenizer.TT_WORD || ttype > 0) {
                if (st.sval != null) {
                    collection.add(st.sval);
                }
            } else if (ttype == StreamTokenizer.TT_EOF) {
                break;
            } else {
                throw new ConversionException("Encountered token of type " + ttype + " parsing elements.");
            }
        }

        // Return the completed list
        return collection;
    } catch (Exception e) {
        // Log ?
    }

    return Collections.emptyList();
}