List of usage examples for java.io StreamTokenizer TT_WORD
public static final int TT_WORD = -3
A constant indicating that a word token has been read. When ttype == TT_WORD, the text of the word is available in the sval field.
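Before the project examples below, here is a minimal sketch of the usual TT_WORD loop: call nextToken() until it returns TT_EOF, and whenever the returned type is TT_WORD read the word's text from sval. The class name and the sample input string are illustrative only; note that with the tokenizer's default syntax a token such as 42 is reported as TT_NUMBER rather than TT_WORD.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class WordTokenDemo {
    public static void main(String[] args) throws IOException {
        // Illustrative input; any Reader (e.g. a BufferedReader over a file) works the same way.
        StreamTokenizer st = new StreamTokenizer(new StringReader("alpha beta 42 gamma"));
        List<String> words = new ArrayList<String>();
        int ttype;
        while ((ttype = st.nextToken()) != StreamTokenizer.TT_EOF) {
            if (ttype == StreamTokenizer.TT_WORD) {
                words.add(st.sval); // word tokens expose their text via sval
            }
        }
        System.out.println(words); // prints [alpha, beta, gamma] with the default syntax
    }
}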
From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java
public void writeAggregateStatisticsForOptimisationConstraints_ISARHP_ISARCEP(String a_OutputDir_String)
        throws Exception {
    HashMap a_ID_RecordID_HashMap = _ISARDataHandler.get_ID_RecordID_HashMap();
    File optimisationConstraints_SARs = new File(a_OutputDir_String, "OptimisationConstraints_SARs.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(optimisationConstraints_SARs);
    OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEPHeader(a_FileOutputStream);
    a_FileOutputStream.flush();
    Object[] fitnessCounts;
    HashMap<String, Integer> a_SARCounts = null;
    TreeSet<String> a_LADCodes_TreeSet = _CASDataHandler.getLADCodes_TreeSet();
    String s2;
    String s1;
    Iterator<String> a_Iterator_String = a_LADCodes_TreeSet.iterator();
    while (a_Iterator_String.hasNext()) {
        // Need to reorder data for each LAD as OAs not necessarily returned
        // in any order and an ordered result is wanted
        TreeMap<String, HashMap<String, Integer>> resultsForLAD = new TreeMap<String, HashMap<String, Integer>>();
        boolean setPrevious_OA_String = true;
        s1 = a_Iterator_String.next();
        s2 = s1.substring(0, 3);
        File resultsFile = new File(a_OutputDir_String + s2 + "/" + s1 + "/population.csv");
        // A few results are missing
        if (resultsFile.exists()) {
            System.out.println(resultsFile.toString() + " exists");
            String previous_OA_String = "";
            BufferedReader aBufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(resultsFile)));
            StreamTokenizer aStreamTokenizer = new StreamTokenizer(aBufferedReader);
            Generic_StaticIO.setStreamTokenizerSyntax1(aStreamTokenizer);
            String line = "";
            int tokenType = aStreamTokenizer.nextToken();
            while (tokenType != StreamTokenizer.TT_EOF) {
                switch (tokenType) {
                case StreamTokenizer.TT_EOL:
                    //System.out.println(line);
                    String[] lineFields = line.split(",");
                    String a_OA_String = lineFields[0];
                    if (previous_OA_String.equalsIgnoreCase(a_OA_String)) {
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler.getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsHP(a_ISARDataRecord, a_SARCounts, _Random);
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler.getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts, _Random);
                        }
                    } else {
                        // Store result
                        if (setPrevious_OA_String) {
                            previous_OA_String = a_OA_String;
                            setPrevious_OA_String = false;
                        } else {
                            // Store
                            resultsForLAD.put(previous_OA_String, a_SARCounts);
                        }
                        // Initialise/Re-initialise
                        CASDataRecord a_CASDataRecord = (CASDataRecord) _CASDataHandler.getDataRecord(a_OA_String);
                        fitnessCounts = GeneticAlgorithm_ISARHP_ISARCEP.getFitnessCounts(a_CASDataRecord);
                        a_SARCounts = (HashMap<String, Integer>) fitnessCounts[1];
                        // Start a new aggregation
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler.getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsHP(a_ISARDataRecord, a_SARCounts, _Random);
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler.getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts, _Random);
                            //System.out.println(a_ISARDataRecord.toString());
                        }
                        //a_OA_String = lineFields[0];
                    }
                    previous_OA_String = a_OA_String;
                    break;
                case StreamTokenizer.TT_WORD:
                    line = aStreamTokenizer.sval;
                    break;
                }
                tokenType = aStreamTokenizer.nextToken();
            }
        } else {
            System.out.println(resultsFile.toString() + " !exists");
        }
        Iterator<String> string_Iterator = resultsForLAD.keySet().iterator();
        while (string_Iterator.hasNext()) {
            String oa_Code = string_Iterator.next();
            a_SARCounts = resultsForLAD.get(oa_Code);
            //GeneticAlgorithm_ISARHP_ISARCEP.addToCountsCEP(null, a_ID_RecordID_HashMap, _Random)
            OutputDataHandler_OptimisationConstraints.writeISARHP_ISARCEP(a_SARCounts, oa_Code, a_FileOutputStream);
        }
    }
    a_FileOutputStream.close();
}
From source file:com.fluffypeople.managesieve.ManageSieveClient.java
private String tokenToString(final int c) {
    if (c > 0) {
        return new String(Character.toChars(c));
    } else {
        switch (c) {
        case StreamTokenizer.TT_EOF:
            return "EOF";
        case StreamTokenizer.TT_NUMBER:
            return "NUMBER";
        case StreamTokenizer.TT_EOL:
            return "EOL";
        case StreamTokenizer.TT_WORD:
            return ("WORD [" + in.sval + "]");
        default:
            return "UNKNOWN";
        }
    }
}
From source file:Matrix.java
/**
 * Read a matrix from a stream. The format is the same as the print method, so
 * printed matrices can be read back in (provided they were printed using US
 * Locale). Elements are separated by whitespace, all the elements for each
 * row appear on a single line, the last row is followed by a blank line.
 *
 * @param input
 *            the input stream.
 */
public static Matrix read(BufferedReader input) throws java.io.IOException {
    StreamTokenizer tokenizer = new StreamTokenizer(input);

    // Although StreamTokenizer will parse numbers, it doesn't recognize
    // scientific notation (E or D); however, Double.valueOf does.
    // The strategy here is to disable StreamTokenizer's number parsing.
    // We'll only get whitespace delimited words, EOL's and EOF's.
    // These words should all be numbers, for Double.valueOf to parse.
    tokenizer.resetSyntax();
    tokenizer.wordChars(0, 255);
    tokenizer.whitespaceChars(0, ' ');
    tokenizer.eolIsSignificant(true);
    java.util.Vector v = new java.util.Vector();

    // Ignore initial empty lines
    while (tokenizer.nextToken() == StreamTokenizer.TT_EOL)
        ;
    if (tokenizer.ttype == StreamTokenizer.TT_EOF)
        throw new java.io.IOException("Unexpected EOF on matrix read.");
    do {
        v.addElement(Double.valueOf(tokenizer.sval)); // Read & store 1st row.
    } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);

    int n = v.size(); // Now we've got the number of columns!
    double row[] = new double[n];
    for (int j = 0; j < n; j++) // extract the elements of the 1st row.
        row[j] = ((Double) v.elementAt(j)).doubleValue();
    v.removeAllElements();
    v.addElement(row); // Start storing rows instead of columns.
    while (tokenizer.nextToken() == StreamTokenizer.TT_WORD) {
        // While non-empty lines
        v.addElement(row = new double[n]);
        int j = 0;
        do {
            if (j >= n)
                throw new java.io.IOException("Row " + v.size() + " is too long.");
            row[j++] = Double.valueOf(tokenizer.sval).doubleValue();
        } while (tokenizer.nextToken() == StreamTokenizer.TT_WORD);
        if (j < n)
            throw new java.io.IOException("Row " + v.size() + " is too short.");
    }
    int m = v.size(); // Now we've got the number of rows.
    double[][] A = new double[m][];
    v.copyInto(A); // copy the rows out of the vector
    return new Matrix(A);
}
From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport_UK1.java
public void writeAggregateStatisticsForOptimisationConstraints_HSARHP_ISARCEP(String a_OutputDir_String)
        throws Exception {
    HashMap a_HID_HSARDataRecordVector_HashMap = _HSARDataHandler.get_HID_HSARDataRecordVector_HashMap();
    HashMap a_ID_RecordID_HashMap = _ISARDataHandler.get_ID_RecordID_HashMap();
    File optimisationConstraints_SARs = new File(a_OutputDir_String, "OptimisationConstraints_SARs.csv");
    FileOutputStream a_FileOutputStream = new FileOutputStream(optimisationConstraints_SARs);
    OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEPHeader(a_FileOutputStream);
    a_FileOutputStream.flush();
    HashMap<String, Integer> a_SARCounts = null;
    CASDataRecord a_CASDataRecord;
    TreeSet<String> a_LADCodes_TreeSet = _CASDataHandler.getLADCodes_TreeSet();
    String s2;
    String s1;
    Iterator<String> a_Iterator_String = a_LADCodes_TreeSet.iterator();
    while (a_Iterator_String.hasNext()) {
        // Need to reorder data for each LAD as OAs not necessarily returned
        // in any order and an ordered result is wanted
        TreeMap<String, HashMap<String, Integer>> resultsForLAD = new TreeMap<String, HashMap<String, Integer>>();
        boolean setPrevious_OA_String = true;
        s1 = a_Iterator_String.next();
        s2 = s1.substring(0, 3);
        File resultsFile = new File(a_OutputDir_String + s2 + "/" + s1 + "/population.csv");
        // A few results are missing
        if (resultsFile.exists()) {
            System.out.println(resultsFile.toString() + " exists");
            String previous_OA_String = "";
            BufferedReader aBufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(resultsFile)));
            StreamTokenizer aStreamTokenizer = new StreamTokenizer(aBufferedReader);
            Generic_StaticIO.setStreamTokenizerSyntax1(aStreamTokenizer);
            String line = "";
            int tokenType = aStreamTokenizer.nextToken();
            while (tokenType != StreamTokenizer.TT_EOF) {
                switch (tokenType) {
                case StreamTokenizer.TT_EOL:
                    //System.out.println(line);
                    String[] lineFields = line.split(",");
                    String a_OA_String = lineFields[0];
                    if (previous_OA_String.equalsIgnoreCase(a_OA_String)) {
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            // From the id of a household get a Vector
                            // of HSARDataRecords
                            Vector household = (Vector) a_HID_HSARDataRecordVector_HashMap
                                    .get(new Integer(lineFields[2]));
                            HSARDataRecord a_HSARDataRecord;
                            for (int i = 0; i < household.size(); i++) {
                                a_HSARDataRecord = (HSARDataRecord) household.elementAt(i);
                                GeneticAlgorithm_HSARHP_ISARCEP.addToCounts(a_HSARDataRecord, a_SARCounts, _Random);
                            }
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler.getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_HSARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts, _Random);
                        }
                    } else {
                        // Store result
                        if (setPrevious_OA_String) {
                            previous_OA_String = a_OA_String;
                            setPrevious_OA_String = false;
                        } else {
                            // Store
                            resultsForLAD.put(previous_OA_String, a_SARCounts);
                        }
                        // Initialise/Re-initialise
                        a_CASDataRecord = (CASDataRecord) _CASDataHandler.getDataRecord(a_OA_String);
                        Object[] fitnessCounts = GeneticAlgorithm_HSARHP_ISARCEP.getFitnessCounts(a_CASDataRecord);
                        a_SARCounts = (HashMap<String, Integer>) fitnessCounts[1];
                        // Start a new aggregation
                        if (lineFields[1].equalsIgnoreCase("HP")) {
                            //System.out.println("HP");
                            // From the id of a household get a Vector
                            // of HSARDataRecords
                            Vector household = (Vector) a_HID_HSARDataRecordVector_HashMap
                                    .get(new Integer(lineFields[2]));
                            HSARDataRecord a_HSARDataRecord;
                            for (int i = 0; i < household.size(); i++) {
                                a_HSARDataRecord = (HSARDataRecord) household.elementAt(i);
                                GeneticAlgorithm_HSARHP_ISARCEP.addToCounts(a_HSARDataRecord, a_SARCounts, _Random);
                            }
                            //System.out.println(a_HSARDataRecord.toString());
                        } else {
                            //System.out.println("CEP");
                            // From the id of the ISARDataRecord get the
                            // ISARRecordID.
                            long a_ISARRecordID = (Long) a_ID_RecordID_HashMap.get(new Long(lineFields[2]));
                            ISARDataRecord a_ISARDataRecord = _ISARDataHandler.getISARDataRecord(a_ISARRecordID);
                            GeneticAlgorithm_HSARHP_ISARCEP.addToCountsCEP(a_ISARDataRecord, a_SARCounts, _Random);
                            //System.out.println(a_ISARDataRecord.toString());
                        }
                        //a_OA_String = lineFields[0];
                    }
                    previous_OA_String = a_OA_String;
                    break;
                case StreamTokenizer.TT_WORD:
                    line = aStreamTokenizer.sval;
                    break;
                }
                tokenType = aStreamTokenizer.nextToken();
            }
        } else {
            System.out.println(resultsFile.toString() + " !exists");
        }
        Iterator<String> string_Iterator = resultsForLAD.keySet().iterator();
        while (string_Iterator.hasNext()) {
            String oa_Code = string_Iterator.next();
            OutputDataHandler_OptimisationConstraints.writeHSARHP_ISARCEP(resultsForLAD.get(oa_Code), oa_Code,
                    a_FileOutputStream);
        }
    }
    a_FileOutputStream.close();
}
From source file:com.zimbra.common.calendar.ZoneInfo2iCalendar.java
private static void readExtraData(Reader reader) throws IOException, ParseException {
    char dquote = '"';
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.resetSyntax();
    tokenizer.wordChars(32, 126);
    tokenizer.whitespaceChars(' ', ' ');
    tokenizer.whitespaceChars('\t', '\t');
    tokenizer.whitespaceChars(0, 20);
    tokenizer.commentChar('#');
    tokenizer.quoteChar(dquote);
    tokenizer.eolIsSignificant(true);
    List<String> tokenList = new ArrayList<String>();
    LineType lineType = LineType.UNKNOWN;
    boolean atLineStart = true;

    int ttype;
    int prevTtype = StreamTokenizer.TT_EOL; // used for empty line detection
    while ((ttype = tokenizer.nextToken()) != StreamTokenizer.TT_EOF) {
        int lineNum = tokenizer.lineno();
        if (ttype == StreamTokenizer.TT_WORD || ttype == dquote) {
            String token = tokenizer.sval;
            if (atLineStart) {
                lineType = LineType.lookUp(token);
                if (LineType.UNKNOWN.equals(lineType))
                    throw new ParseException("Invalid line type", lineNum);
            } else {
                tokenList.add(token);
            }
            atLineStart = false;
        } else if (ttype == StreamTokenizer.TT_EOL) {
            if (prevTtype == StreamTokenizer.TT_EOL) {
                prevTtype = ttype;
                continue;
            }
            atLineStart = true;
            switch (lineType) {
            case PRIMARYZONE:
                if (tokenList.size() < 1)
                    throw new ParseException("Not enough fields in a PrimaryZone line", lineNum);
                String primaryTZID = tokenList.get(0);
                sPrimaryTZIDs.add(primaryTZID);
                break;
            case ZONEMATCHSCORE:
                if (tokenList.size() < 2)
                    throw new ParseException("Not enough fields in a ZoneMatchScore line", lineNum);
                String zoneName = tokenList.get(0);
                String zoneMatchScoreStr = tokenList.get(1);
                int zoneMatchScore = 0;
                try {
                    zoneMatchScore = Integer.parseInt(zoneMatchScoreStr);
                } catch (NumberFormatException e) {
                    throw new ParseException("Zone match score must be an integer: " + zoneMatchScoreStr, lineNum);
                }
                sMatchScores.put(zoneName, zoneMatchScore);
                break;
            }
            if (atLineStart) {
                tokenList.clear();
                lineType = LineType.UNKNOWN;
            }
        } else if (ttype == StreamTokenizer.TT_NUMBER) {
            // shouldn't happen
            throw new ParseException("Invalid parser state: TT_NUMBER found", lineNum);
        }
        prevTtype = ttype;
    }
}
From source file:FourByFour.java
public void actionPerformed(ActionEvent event) {
    Object target = event.getSource();

    // Process the button events.
    if (target == skill_return_button) {
        skill_panel.setVisible(false);
        skill_return_button.setVisible(false);
        c_container.setVisible(true);
        b_container.setVisible(true);
        newGame();
    } else if (target == winner_return_button) {
        if (winner_flag) {
            String name = winner_name.getText();
            String tmp_name = new String("");
            int tmp_score = 0;
            boolean insert_flag = false;
            winner_flag = false;
            for (int i = 0; i < 20; i++) {
                if (insert_flag) {
                    name = names[i];
                    score = scores[i];
                    names[i] = tmp_name;
                    scores[i] = tmp_score;
                    tmp_name = name;
                    tmp_score = score;
                }
                if (!insert_flag && score > scores[i]) {
                    tmp_name = names[i];
                    tmp_score = scores[i];
                    scores[i] = score;
                    names[i] = name;
                    insert_flag = true;
                }
                high_names[i].setText(names[i]);
                high_scores[i].setText(Integer.toString(scores[i]));
            }
            scoresString = new String("");
            int place;
            for (int i = 0; i < 20; i++) {
                place = (int) places[i];
                scoresString += Integer.toString(place);
                scoresString += "\t";
                scoresString += names[i];
                scoresString += " ";
                scoresString += Integer.toString(scores[i]);
                scoresString += "\n";
            }
            if (appletFlag) {
                // Use this section of code when writing the high
                // scores file back to a server. Requires the use
                // of a daemon on the server to receive the socket
                // connection.
                //
                // Create the output stream.
                // try {
                //     Socket socket = new Socket(host, port);
                //     outStream = new BufferedOutputStream
                //         (socket.getOutputStream(), 8192);
                // }
                // catch(IOException ioe) {
                //     System.out.println("Error: " + ioe.toString());
                // }
                // System.out.println("Output stream opened");
                //
                // Write the scores to the file back on the server.
                // outText = scoresString.getBytes();
                // try {
                //     outStream.write(outText);
                //     outStream.flush();
                //     outStream.close();
                //     outStream = null;
                // }
                // catch (IOException ioe) {
                //     System.out.println("Error: " + ioe.toString());
                // }
                // System.out.println("Output stream written");
                try {
                    OutputStreamWriter outFile = new OutputStreamWriter(new FileOutputStream("scores.txt"));
                    outFile.write(scoresString);
                    outFile.flush();
                    outFile.close();
                    outFile = null;
                } catch (IOException ioe) {
                    System.out.println("Error: " + ioe.toString());
                } catch (Exception e) {
                    System.out.println("Error: " + e.toString());
                }
            } else {
                try {
                    OutputStreamWriter outFile = new OutputStreamWriter(new FileOutputStream("scores.txt"));
                    outFile.write(scoresString);
                    outFile.flush();
                    outFile.close();
                    outFile = null;
                } catch (IOException ioe) {
                    System.out.println("Error: " + ioe.toString());
                }
            }
        }
        winner_panel.setVisible(false);
        winner_return_button.setVisible(false);
        winner_label.setVisible(false);
        winner_score_label.setVisible(false);
        winner_name_label.setVisible(false);
        winner_top_label.setVisible(false);
        winner_name.setVisible(false);
        c_container.setVisible(true);
        b_container.setVisible(true);
    } else if (target == high_return_button) {
        high_return_button.setVisible(false);
        high_panel.setVisible(false);
        c_container.setVisible(true);
        b_container.setVisible(true);
    } else if (target == instruct_return_button) {
        instruct_text.setVisible(false);
        instruct_return_button.setVisible(false);
        instruct_text.repaint();
        c_container.setVisible(true);
        b_container.setVisible(true);
    } else if (target == undo_button) {
        board.undo_move();
        canvas2D.repaint();
    } else if (target == instruct_button) {
        c_container.setVisible(false);
        b_container.setVisible(false);
        instruct_text.setVisible(true);
        instruct_return_button.setVisible(true);
    } else if (target == new_button) {
        newGame();
    } else if (target == skill_button) {
        c_container.setVisible(false);
        b_container.setVisible(false);
        skill_panel.setVisible(true);
        skill_return_button.setVisible(true);
    } else if (target == high_button) {
        // Read the high scores file.
        if (appletFlag) {
            try {
                inStream = new BufferedInputStream(new URL(getCodeBase(), "scores.txt").openStream(), 8192);
                Reader read = new BufferedReader(new InputStreamReader(inStream));
                StreamTokenizer st = new StreamTokenizer(read);
                st.whitespaceChars(32, 44);
                st.eolIsSignificant(false);
                int count = 0;
                int token = st.nextToken();
                boolean scoreFlag = true;
                String string;
                while (count < 20) {
                    places[count] = (int) st.nval;
                    string = new String("");
                    token = st.nextToken();
                    while (token == StreamTokenizer.TT_WORD) {
                        string += st.sval;
                        string += " ";
                        token = st.nextToken();
                    }
                    names[count] = string;
                    scores[count] = (int) st.nval;
                    token = st.nextToken();
                    count++;
                }
                inStream.close();
            } catch (Exception ioe) {
                System.out.println("Error: " + ioe.toString());
            }
        } else {
            try {
                inStream = new BufferedInputStream(new FileInputStream("scores.txt"));
                Reader read = new BufferedReader(new InputStreamReader(inStream));
                StreamTokenizer st = new StreamTokenizer(read);
                st.whitespaceChars(32, 44);
                st.eolIsSignificant(false);
                int count = 0;
                int token = st.nextToken();
                boolean scoreFlag = true;
                String string;
                while (count < 20) {
                    places[count] = (int) st.nval;
                    string = new String("");
                    token = st.nextToken();
                    while (token == StreamTokenizer.TT_WORD) {
                        string += st.sval;
                        string += " ";
                        token = st.nextToken();
                    }
                    names[count] = string;
                    scores[count] = (int) st.nval;
                    token = st.nextToken();
                    count++;
                }
                inStream.close();
            } catch (Exception ioe) {
                System.out.println("Error: " + ioe.toString());
            }
        }
        c_container.setVisible(false);
        b_container.setVisible(false);
        high_panel.setVisible(true);
        high_return_button.setVisible(true);
    }
    Checkbox box = group.getSelectedCheckbox();
    String label = box.getLabel();
    if (label.equals("Babe in the Woods ")) {
        board.set_skill_level(0);
    } else if (label.equals("Walk and Chew Gum ")) {
        board.set_skill_level(1);
    } else if (label.equals("Jeopardy Contestant ")) {
        board.set_skill_level(2);
    } else if (label.equals("Rocket Scientist ")) {
        board.set_skill_level(3);
    } else if (label.equals("Be afraid, be very afraid")) {
        board.set_skill_level(4);
    }
}
From source file:com.rapidminer.tools.Tools.java
/** Delivers the next token and skips empty lines. */
public static void getFirstToken(StreamTokenizer tokenizer) throws IOException {
    // skip empty lines
    while (tokenizer.nextToken() == StreamTokenizer.TT_EOL) {
    }
    if (tokenizer.ttype == '\'' || tokenizer.ttype == '"') {
        tokenizer.ttype = StreamTokenizer.TT_WORD;
    } else if (tokenizer.ttype == StreamTokenizer.TT_WORD && tokenizer.sval.equals("?")) {
        tokenizer.ttype = '?';
    }
}
From source file:com.rapidminer.tools.Tools.java
/** Delivers the next token and checks for an unexpected end of line or file. */
public static void getNextToken(StreamTokenizer tokenizer) throws IOException {
    if (tokenizer.nextToken() == StreamTokenizer.TT_EOL) {
        throw new IOException("unexpected end of line " + tokenizer.lineno());
    }
    if (tokenizer.ttype == StreamTokenizer.TT_EOF) {
        throw new IOException("unexpected end of file in line " + tokenizer.lineno());
    } else if (tokenizer.ttype == '\'' || tokenizer.ttype == '"') {
        tokenizer.ttype = StreamTokenizer.TT_WORD;
    } else if (tokenizer.ttype == StreamTokenizer.TT_WORD && tokenizer.sval.equals("?")) {
        tokenizer.ttype = '?';
    }
}
From source file:org.apache.openjpa.jdbc.kernel.SQLStoreQuery.java
/**
 * Utility method to substitute '?num' for parameters in the given SQL
 * statement, and fill in the order of the parameter tokens.
 */
public static String substituteParams(String sql, List<Integer> paramOrder) throws IOException {
    // if there's no "?" parameter marker, then we don't need to
    // perform the parsing process
    if (sql.indexOf("?") == -1)
        return sql;

    paramOrder.clear();
    StreamTokenizer tok = new StreamTokenizer(new StringReader(sql));
    tok.resetSyntax();
    tok.quoteChar('\'');
    tok.wordChars('0', '9');
    tok.wordChars('?', '?');

    StringBuilder buf = new StringBuilder(sql.length());
    for (int ttype; (ttype = tok.nextToken()) != StreamTokenizer.TT_EOF;) {
        switch (ttype) {
        case StreamTokenizer.TT_WORD:
            // a token is a positional parameter if it starts with
            // a "?" and the rest of the token are all numbers
            if (tok.sval.startsWith("?")) {
                buf.append("?");
                String pIndex = tok.sval.substring(1);
                if (pIndex.length() > 0) {
                    paramOrder.add(Integer.valueOf(pIndex));
                } else { // or nothing
                    paramOrder.add(paramOrder.size() + 1);
                }
            } else
                buf.append(tok.sval);
            break;
        case '\'':
            buf.append('\'');
            if (tok.sval != null) {
                buf.append(tok.sval);
                buf.append('\'');
            }
            break;
        default:
            buf.append((char) ttype);
        }
    }
    return buf.toString();
}
From source file:org.apache.qpid.server.security.access.config.PlainConfiguration.java
@Override
public RuleSet load() {
    RuleSet ruleSet = super.load();

    File file = getFile();
    FileReader fileReader = null;

    try {
        if (_logger.isDebugEnabled()) {
            _logger.debug("About to load ACL file " + file);
        }

        fileReader = new FileReader(file);
        _st = new StreamTokenizer(new BufferedReader(fileReader));
        _st.resetSyntax(); // setup the tokenizer

        _st.commentChar(COMMENT); // single line comments
        _st.eolIsSignificant(true); // return EOL as a token
        _st.ordinaryChar('='); // equals is a token
        _st.ordinaryChar(CONTINUATION); // continuation character (when followed by EOL)
        _st.quoteChar('"'); // double quote
        _st.quoteChar('\''); // single quote
        _st.whitespaceChars('\u0000', '\u0020'); // whitespace (to be ignored) TODO properly
        _st.wordChars('a', 'z'); // unquoted token characters [a-z]
        _st.wordChars('A', 'Z'); // [A-Z]
        _st.wordChars('0', '9'); // [0-9]
        _st.wordChars('_', '_'); // underscore
        _st.wordChars('-', '-'); // dash
        _st.wordChars('.', '.'); // dot
        _st.wordChars('*', '*'); // star
        _st.wordChars('@', '@'); // at
        _st.wordChars(':', ':'); // colon

        // parse the acl file lines
        Stack<String> stack = new Stack<String>();
        int current;
        do {
            current = _st.nextToken();
            switch (current) {
            case StreamTokenizer.TT_EOF:
            case StreamTokenizer.TT_EOL:
                if (stack.isEmpty()) {
                    break; // blank line
                }

                // pull out the first token from the bottom of the stack and check arguments exist
                String first = stack.firstElement();
                stack.removeElementAt(0);
                if (stack.isEmpty()) {
                    throw new IllegalConfigurationException(String.format(NOT_ENOUGH_TOKENS_MSG, getLine()));
                }

                // check for and parse optional initial number for ACL lines
                Integer number = null;
                if (StringUtils.isNumeric(first)) {
                    // set the acl number and get the next element
                    number = Integer.valueOf(first);
                    first = stack.firstElement();
                    stack.removeElementAt(0);
                }

                if (StringUtils.equalsIgnoreCase(ACL, first)) {
                    parseAcl(number, stack);
                } else if (number == null) {
                    if (StringUtils.equalsIgnoreCase("GROUP", first)) {
                        throw new IllegalConfigurationException(String.format(
                                "GROUP keyword not supported. Groups should defined via a Group Provider, not in the ACL file.",
                                getLine()));
                    } else if (StringUtils.equalsIgnoreCase(CONFIG, first)) {
                        parseConfig(stack);
                    } else {
                        throw new IllegalConfigurationException(
                                String.format(UNRECOGNISED_INITIAL_MSG, first, getLine()));
                    }
                } else {
                    throw new IllegalConfigurationException(
                            String.format(NUMBER_NOT_ALLOWED_MSG, first, getLine()));
                }

                // reset stack, start next line
                stack.clear();
                break;
            case StreamTokenizer.TT_NUMBER:
                stack.push(Integer.toString(Double.valueOf(_st.nval).intValue()));
                break;
            case StreamTokenizer.TT_WORD:
                stack.push(_st.sval); // token
                break;
            default:
                if (_st.ttype == CONTINUATION) {
                    int next = _st.nextToken();
                    if (next == StreamTokenizer.TT_EOL) {
                        break; // continue reading next line
                    }
                    // invalid location for continuation character (add one to line because we ate the EOL)
                    throw new IllegalConfigurationException(
                            String.format(PREMATURE_CONTINUATION_MSG, getLine() + 1));
                } else if (_st.ttype == '\'' || _st.ttype == '"') {
                    stack.push(_st.sval); // quoted token
                } else {
                    stack.push(Character.toString((char) _st.ttype)); // single character
                }
            }
        } while (current != StreamTokenizer.TT_EOF);

        if (!stack.isEmpty()) {
            throw new IllegalConfigurationException(String.format(PREMATURE_EOF_MSG, getLine()));
        }
    } catch (IllegalArgumentException iae) {
        throw new IllegalConfigurationException(String.format(PARSE_TOKEN_FAILED_MSG, getLine()), iae);
    } catch (FileNotFoundException fnfe) {
        throw new IllegalConfigurationException(String.format(CONFIG_NOT_FOUND_MSG, file.getName()), fnfe);
    } catch (IOException ioe) {
        throw new IllegalConfigurationException(String.format(CANNOT_LOAD_MSG, file.getName()), ioe);
    } finally {
        if (fileReader != null) {
            try {
                fileReader.close();
            } catch (IOException e) {
                throw new IllegalConfigurationException(String.format(CANNOT_CLOSE_MSG, file.getName()), e);
            }
        }
    }

    return ruleSet;
}