List of usage examples for java.io StreamTokenizer TT_NUMBER
int TT_NUMBER

A constant indicating that a number token has been read. When nextToken() returns TT_NUMBER, the numeric value is available in the tokenizer's nval field.
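Before the project examples, here is a minimal, self-contained sketch of the typical pattern: compare ttype against StreamTokenizer.TT_NUMBER and read the numeric value from nval. The class name TtNumberDemo and the sample input are illustrative only, not taken from any of the projects below.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class TtNumberDemo {
    public static void main(String[] args) throws IOException {
        // The default syntax table parses numbers, so "640" and "480" come back as TT_NUMBER tokens.
        StreamTokenizer tok = new StreamTokenizer(new StringReader("width 640 height 480"));
        while (tok.nextToken() != StreamTokenizer.TT_EOF) {
            if (tok.ttype == StreamTokenizer.TT_NUMBER) {
                // Numeric tokens deliver their value in the double field nval.
                System.out.println("number: " + tok.nval);
            } else if (tok.ttype == StreamTokenizer.TT_WORD) {
                // Word tokens deliver their text in sval.
                System.out.println("word:   " + tok.sval);
            }
        }
    }
}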
From source file: org.apache.qpid.server.security.access.config.PlainConfiguration.java

@Override
public RuleSet load() {
    RuleSet ruleSet = super.load();
    File file = getFile();
    FileReader fileReader = null;
    try {
        if (_logger.isDebugEnabled()) {
            _logger.debug("About to load ACL file " + file);
        }
        fileReader = new FileReader(file);
        _st = new StreamTokenizer(new BufferedReader(fileReader));
        _st.resetSyntax(); // setup the tokenizer

        _st.commentChar(COMMENT); // single line comments
        _st.eolIsSignificant(true); // return EOL as a token
        _st.ordinaryChar('='); // equals is a token
        _st.ordinaryChar(CONTINUATION); // continuation character (when followed by EOL)
        _st.quoteChar('"'); // double quote
        _st.quoteChar('\''); // single quote
        _st.whitespaceChars('\u0000', '\u0020'); // whitespace (to be ignored) TODO properly
        _st.wordChars('a', 'z'); // unquoted token characters [a-z]
        _st.wordChars('A', 'Z'); // [A-Z]
        _st.wordChars('0', '9'); // [0-9]
        _st.wordChars('_', '_'); // underscore
        _st.wordChars('-', '-'); // dash
        _st.wordChars('.', '.'); // dot
        _st.wordChars('*', '*'); // star
        _st.wordChars('@', '@'); // at
        _st.wordChars(':', ':'); // colon

        // parse the acl file lines
        Stack<String> stack = new Stack<String>();
        int current;
        do {
            current = _st.nextToken();
            switch (current) {
                case StreamTokenizer.TT_EOF:
                case StreamTokenizer.TT_EOL:
                    if (stack.isEmpty()) {
                        break; // blank line
                    }

                    // pull out the first token from the bottom of the stack and check arguments exist
                    String first = stack.firstElement();
                    stack.removeElementAt(0);
                    if (stack.isEmpty()) {
                        throw new IllegalConfigurationException(String.format(NOT_ENOUGH_TOKENS_MSG, getLine()));
                    }

                    // check for and parse optional initial number for ACL lines
                    Integer number = null;
                    if (StringUtils.isNumeric(first)) {
                        // set the acl number and get the next element
                        number = Integer.valueOf(first);
                        first = stack.firstElement();
                        stack.removeElementAt(0);
                    }

                    if (StringUtils.equalsIgnoreCase(ACL, first)) {
                        parseAcl(number, stack);
                    } else if (number == null) {
                        if (StringUtils.equalsIgnoreCase("GROUP", first)) {
                            throw new IllegalConfigurationException(String.format(
                                    "GROUP keyword not supported. Groups should be defined via a Group Provider, not in the ACL file.",
                                    getLine()));
                        } else if (StringUtils.equalsIgnoreCase(CONFIG, first)) {
                            parseConfig(stack);
                        } else {
                            throw new IllegalConfigurationException(
                                    String.format(UNRECOGNISED_INITIAL_MSG, first, getLine()));
                        }
                    } else {
                        throw new IllegalConfigurationException(
                                String.format(NUMBER_NOT_ALLOWED_MSG, first, getLine()));
                    }

                    // reset stack, start next line
                    stack.clear();
                    break;
                case StreamTokenizer.TT_NUMBER:
                    stack.push(Integer.toString(Double.valueOf(_st.nval).intValue()));
                    break;
                case StreamTokenizer.TT_WORD:
                    stack.push(_st.sval); // token
                    break;
                default:
                    if (_st.ttype == CONTINUATION) {
                        int next = _st.nextToken();
                        if (next == StreamTokenizer.TT_EOL) {
                            break; // continue reading next line
                        }
                        // invalid location for continuation character (add one to line because we ate the EOL)
                        throw new IllegalConfigurationException(
                                String.format(PREMATURE_CONTINUATION_MSG, getLine() + 1));
                    } else if (_st.ttype == '\'' || _st.ttype == '"') {
                        stack.push(_st.sval); // quoted token
                    } else {
                        stack.push(Character.toString((char) _st.ttype)); // single character
                    }
            }
        } while (current != StreamTokenizer.TT_EOF);

        if (!stack.isEmpty()) {
            throw new IllegalConfigurationException(String.format(PREMATURE_EOF_MSG, getLine()));
        }
    } catch (IllegalArgumentException iae) {
        throw new IllegalConfigurationException(String.format(PARSE_TOKEN_FAILED_MSG, getLine()), iae);
    } catch (FileNotFoundException fnfe) {
        throw new IllegalConfigurationException(String.format(CONFIG_NOT_FOUND_MSG, file.getName()), fnfe);
    } catch (IOException ioe) {
        throw new IllegalConfigurationException(String.format(CANNOT_LOAD_MSG, file.getName()), ioe);
    } finally {
        if (fileReader != null) {
            try {
                fileReader.close();
            } catch (IOException e) {
                throw new IllegalConfigurationException(String.format(CANNOT_CLOSE_MSG, file.getName()), e);
            }
        }
    }
    return ruleSet;
}
From source file: org.apache.wiki.plugin.DefaultPluginManager.java

/**
 * Parses plugin arguments. Handles quotes and all other kewl stuff.
 *
 * <h3>Special parameters</h3>
 * The plugin body is put into a special parameter defined by {@link #PARAM_BODY};
 * the plugin's command line into a parameter defined by {@link #PARAM_CMDLINE};
 * and the bounds of the plugin within the wiki page text by a parameter defined
 * by {@link #PARAM_BOUNDS}, whose value is stored as a two-element int[] array,
 * i.e., <tt>[start,end]</tt>.
 *
 * @param argstring The argument string to the plugin. This is
 *                  typically a list of key-value pairs, using "'" to escape
 *                  spaces in strings, followed by an empty line and then the
 *                  plugin body. In case the parameter is null, will return an
 *                  empty parameter list.
 *
 * @return A parsed list of parameters.
 *
 * @throws IOException If the parsing fails.
 */
public Map<String, String> parseArgs(String argstring) throws IOException {
    Map<String, String> arglist = new HashMap<String, String>();

    //
    //  Protection against funny users.
    //
    if (argstring == null) return arglist;

    arglist.put(PARAM_CMDLINE, argstring);

    StringReader in = new StringReader(argstring);
    StreamTokenizer tok = new StreamTokenizer(in);
    int type;

    String param = null;
    String value = null;

    tok.eolIsSignificant(true);

    boolean potentialEmptyLine = false;
    boolean quit = false;

    while (!quit) {
        String s;
        type = tok.nextToken();

        switch (type) {
            case StreamTokenizer.TT_EOF:
                quit = true;
                s = null;
                break;
            case StreamTokenizer.TT_WORD:
                s = tok.sval;
                potentialEmptyLine = false;
                break;
            case StreamTokenizer.TT_EOL:
                quit = potentialEmptyLine;
                potentialEmptyLine = true;
                s = null;
                break;
            case StreamTokenizer.TT_NUMBER:
                s = Integer.toString((int) tok.nval);
                potentialEmptyLine = false;
                break;
            case '\'':
                s = tok.sval;
                break;
            default:
                s = null;
        }

        //
        //  Assume that alternate words on the line are
        //  parameter and value, respectively.
        //
        if (s != null) {
            if (param == null) {
                param = s;
            } else {
                value = s;
                arglist.put(param, value);
                // log.debug("ARG: " + param + "=" + value);
                param = null;
            }
        }
    }

    //
    //  Now, we'll check the body.
    //
    if (potentialEmptyLine) {
        StringWriter out = new StringWriter();
        FileUtil.copyContents(in, out);

        String bodyContent = out.toString();
        if (bodyContent != null) {
            arglist.put(PARAM_BODY, bodyContent);
        }
    }

    return arglist;
}
From source file: org.gvnix.jpa.geo.hibernatespatial.util.EWKTReader.java

/**
 * Throws a formatted ParseException for the current token.
 *
 * @param expected a description of what was expected
 * @throws ParseException
 * @throws com.vividsolutions.jts.util.AssertionFailedException if an
 *         invalid token is encountered
 */
private void parseError(String expected) throws ParseException {
    // throws Asserts for tokens that should never be seen
    if (tokenizer.ttype == StreamTokenizer.TT_NUMBER)
        Assert.shouldNeverReachHere("Unexpected NUMBER token");
    if (tokenizer.ttype == StreamTokenizer.TT_EOL)
        Assert.shouldNeverReachHere("Unexpected EOL token");

    String tokenStr = tokenString();
    throw new ParseException("Expected " + expected + " but found " + tokenStr);
}
From source file: org.gvnix.jpa.geo.hibernatespatial.util.EWKTReader.java

/**
 * Gets a description of the current token.
 *
 * @return a description of the current token
 */
private String tokenString() {
    switch (tokenizer.ttype) {
        case StreamTokenizer.TT_NUMBER:
            return "<NUMBER>";
        case StreamTokenizer.TT_EOL:
            return "End-of-Line";
        case StreamTokenizer.TT_EOF:
            return "End-of-Stream";
        case StreamTokenizer.TT_WORD:
            return "'" + tokenizer.sval + "'";
    }
    return "'" + (char) tokenizer.ttype + "'";
}
From source file: org.opencms.main.CmsShell.java

/**
 * Executes all commands read from the given input stream.<p>
 *
 * @param fileInputStream a file input stream from which the commands are read
 */
private void executeCommands(FileInputStream fileInputStream) {
    try {
        LineNumberReader lnr = new LineNumberReader(new InputStreamReader(fileInputStream));
        while (!m_exitCalled) {
            printPrompt();
            String line = lnr.readLine();
            if (line == null) {
                // if null the file has been read to the end
                try {
                    Thread.sleep(500);
                } catch (Throwable t) {
                    // noop
                }
                break;
            }
            if (line.trim().startsWith("#")) {
                System.out.println(line);
                continue;
            }
            StringReader reader = new StringReader(line);
            StreamTokenizer st = new StreamTokenizer(reader);
            st.eolIsSignificant(true);

            // put all tokens into a List
            List<String> parameters = new ArrayList<String>();
            while (st.nextToken() != StreamTokenizer.TT_EOF) {
                if (st.ttype == StreamTokenizer.TT_NUMBER) {
                    parameters.add(Integer.toString(new Double(st.nval).intValue()));
                } else {
                    parameters.add(st.sval);
                }
            }
            reader.close();

            // extract command and arguments
            if (parameters.size() == 0) {
                if (m_echo) {
                    System.out.println();
                }
                continue;
            }
            String command = parameters.get(0);
            parameters = parameters.subList(1, parameters.size());

            // execute the command
            executeCommand(command, parameters);
        }
    } catch (Throwable t) {
        t.printStackTrace(System.err);
    }
}
From source file: org.openhab.io.caldav.internal.util.ExecuteCommandJob.java

/**
 * Parses a <code>command</code>. Utilizes the {@link StreamTokenizer} which
 * takes care of quoted Strings as well.
 *
 * @param command the command to parse
 * @return the tokenized command which can be processed by the
 *         <code>ConsoleInterpreter</code>
 *
 * @see org.openhab.io.console.ConsoleInterpreter
 */
protected String[] parseCommand(String command) {
    logger.trace("going to parse command '{}'", command);

    // if the command starts with '>' it contains a script which needs no
    // further handling here ...
    if (command.startsWith(">")) {
        return new String[] { ">", command.substring(1).trim() };
    }

    StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(command));
    tokenizer.wordChars('_', '_');
    tokenizer.wordChars('-', '-');
    tokenizer.wordChars('.', '.');

    List<String> tokens = new ArrayList<String>();
    try {
        int tokenType = 0;
        while (tokenType != StreamTokenizer.TT_EOF && tokenType != StreamTokenizer.TT_EOL) {
            tokenType = tokenizer.nextToken();
            String token = "";
            switch (tokenType) {
                case StreamTokenizer.TT_WORD:
                case 34 /* quoted String */:
                    token = tokenizer.sval;
                    break;
                case StreamTokenizer.TT_NUMBER:
                    token = String.valueOf(tokenizer.nval);
                    break;
            }
            tokens.add(token);
            logger.trace("read value {} from the given command", token);
        }
    } catch (IOException ioe) {
    }

    return tokens.toArray(new String[0]);
}
From source file: org.openmrs.cohort.CohortSearchHistory.java

public PatientSearch createCompositionFilter(String description) {
    Set<String> andWords = new HashSet<String>();
    Set<String> orWords = new HashSet<String>();
    Set<String> notWords = new HashSet<String>();
    andWords.add("and");
    andWords.add("intersection");
    andWords.add("*");
    orWords.add("or");
    orWords.add("union");
    orWords.add("+");
    notWords.add("not");
    notWords.add("!");

    List<Object> currentLine = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(description));
        st.ordinaryChar('(');
        st.ordinaryChar(')');
        Stack<List<Object>> stack = new Stack<List<Object>>();
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = new Integer((int) st.nval);
                if (thisInt < 1 || thisInt > searchHistory.size()) {
                    log.error("number < 1 or > search history size");
                    return null;
                }
                currentLine.add(thisInt);
            } else if (st.ttype == '(') {
                stack.push(currentLine);
                currentLine = new ArrayList<Object>();
            } else if (st.ttype == ')') {
                List<Object> l = stack.pop();
                l.add(currentLine);
                currentLine = l;
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                String str = st.sval.toLowerCase();
                if (andWords.contains(str))
                    currentLine.add(PatientSetService.BooleanOperator.AND);
                else if (orWords.contains(str))
                    currentLine.add(PatientSetService.BooleanOperator.OR);
                else if (notWords.contains(str))
                    currentLine.add(PatientSetService.BooleanOperator.NOT);
                else
                    throw new IllegalArgumentException("Don't recognize " + st.sval);
            }
        }
    } catch (Exception ex) {
        log.error("Error in description string: " + description, ex);
        return null;
    }

    if (!testCompositionList(currentLine)) {
        log.error("Description string failed test: " + description);
        return null;
    }

    //return toPatientFilter(currentLine);
    PatientSearch ret = new PatientSearch();
    ret.setParsedComposition(currentLine);
    return ret;
}
From source file: org.openmrs.module.reporting.cohort.definition.util.CohortExpressionParser.java

/**
 * Elements in this list can be: an Integer, indicating a 1-based index into a search history;
 * a BooleanOperator (AND, OR, NOT); a CohortDefinition; a PatientSearch; or another List of the
 * same form, which indicates a parenthetical expression.
 */
public static List<Object> parseIntoTokens(String expression) {
    List<Object> tokens = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(expression));
        for (Character c : characterWords) {
            st.ordinaryChar(c);
        }
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = new Integer((int) st.nval);
                if (thisInt < 1) {
                    log.error("number < 1");
                    return null;
                }
                tokens.add(thisInt);
            } else if (openParenthesesWords.contains(Character.valueOf((char) st.ttype))) {
                tokens.add("(");
            } else if (closeParenthesesWords.contains(Character.valueOf((char) st.ttype))) {
                tokens.add(")");
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                tokens.add(st.sval);
            }
        }
        return parseIntoTokens(tokens);
    } catch (Exception ex) {
        log.error("Error in description string: " + expression, ex);
        return null;
    }
}
From source file: org.openmrs.module.reporting.query.evaluator.CompositionQueryEvaluator.java

/**
 * Elements in this list can be: an Integer, indicating a 1-based index into a search history;
 * a BooleanOperator (AND, OR, NOT); a Query; or another List of the same form, which indicates
 * a parenthetical expression.
 */
public List<Object> parseIntoTokens(String expression) throws EvaluationException {
    List<Object> tokens = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(expression));
        for (Character c : CHARACTER_WORDS) {
            st.ordinaryChar(c);
        }
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = (int) st.nval;
                if (thisInt < 1) {
                    throw new IllegalArgumentException("Invalid number < 1 found");
                }
                tokens.add(thisInt);
            } else if (OPEN_PARENTHESES_WORDS.contains(Character.valueOf((char) st.ttype))) {
                tokens.add("(");
            } else if (CLOSE_PARENTHESES_WORDS.contains(Character.valueOf((char) st.ttype))) {
                tokens.add(")");
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                tokens.add(st.sval);
            }
        }
        return parseIntoTokens(tokens);
    } catch (Exception e) {
        throw new EvaluationException("Unable to parse expression <" + expression + "> into tokens", e);
    }
}
From source file: org.openmrs.reporting.PatientSearch.java

public static PatientSearch createCompositionSearch(String description) {
    // TODO This is a rewrite of the code in CohortSearchHistory.createCompositionFilter(String).
    //      That method should probably delegate to this one in some way.
    // TODO use open/closeParenthesesWords declared above
    List<Object> tokens = new ArrayList<Object>();
    try {
        StreamTokenizer st = new StreamTokenizer(new StringReader(description));
        st.ordinaryChar('(');
        st.ordinaryChar(')');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            if (st.ttype == StreamTokenizer.TT_NUMBER) {
                Integer thisInt = new Integer((int) st.nval);
                if (thisInt < 1) {
                    log.error("number < 1");
                    return null;
                }
                tokens.add(thisInt);
            } else if (st.ttype == '(') {
                tokens.add("(");
            } else if (st.ttype == ')') {
                tokens.add(")");
            } else if (st.ttype == StreamTokenizer.TT_WORD) {
                String str = st.sval.toLowerCase();
                tokens.add(str);
            }
        }
        return createCompositionSearch(tokens);
    } catch (Exception ex) {
        log.error("Error in description string: " + description, ex);
        return null;
    }
}