List of usage examples for java.io StreamTokenizer wordChars
public void wordChars(int low, int hi)
Specifies that all characters c in the range low <= c <= high are word constituents. A word token consists of a word constituent followed by zero or more word or number constituents.
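The following is a minimal standalone sketch (not taken from the source files listed below) showing what wordChars changes: by default '_' is not a word constituent, so "max_value" would be split at the underscore; after st.wordChars('_', '_') it comes back as a single TT_WORD token.

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class WordCharsSketch {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("max_value = 42"));
        // Make '_' a word constituent so "max_value" is read as one TT_WORD token
        // instead of "max", '_', "value".
        st.wordChars('_', '_');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            switch (st.ttype) {
            case StreamTokenizer.TT_WORD:
                System.out.println("WORD:   " + st.sval);
                break;
            case StreamTokenizer.TT_NUMBER:
                System.out.println("NUMBER: " + st.nval);
                break;
            default:
                System.out.println("CHAR:   " + (char) st.ttype);
            }
        }
    }
}

Expected output: the word max_value, the ordinary character '=', and the number 42.0.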
From source file:StreamTokenizerDemo.java

public static void main(String args[]) throws Exception {
    FileReader fr = new FileReader(args[0]);
    BufferedReader br = new BufferedReader(fr);
    StreamTokenizer st = new StreamTokenizer(br);
    st.ordinaryChar('.');
    st.wordChars('\'', '\'');
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
        switch (st.ttype) {
        case StreamTokenizer.TT_WORD:
            System.out.println(st.lineno() + ") " + st.sval);
            break;
        case StreamTokenizer.TT_NUMBER:
            System.out.println(st.lineno() + ") " + st.nval);
            break;
        default:
            System.out.println(st.lineno() + ") " + (char) st.ttype);
        }
    }
    fr.close();
}
From source file:Main.java
public static void main(String args[]) {
    try {
        FileReader fr = new FileReader(args[0]);
        BufferedReader br = new BufferedReader(fr);
        StreamTokenizer st = new StreamTokenizer(br);
        st.ordinaryChar('.');
        st.wordChars('\'', '\'');
        // Read tokens until the end of the stream (TT_EOF).
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            switch (st.ttype) {
            case StreamTokenizer.TT_WORD:
                System.out.println(st.lineno() + ") " + st.sval);
                break;
            case StreamTokenizer.TT_NUMBER:
                System.out.println(st.lineno() + ") " + st.nval);
                break;
            default:
                System.out.println(st.lineno() + ") " + (char) st.ttype);
            }
        }
        fr.close();
    } catch (Exception e) {
        System.out.println("Exception: " + e);
    }
}
From source file:MainClass.java
public static void main(String args[]) {
    try {
        FileReader fr = new FileReader(args[0]);
        BufferedReader br = new BufferedReader(fr);
        StreamTokenizer st = new StreamTokenizer(br);
        st.ordinaryChar('.');
        st.wordChars('\'', '\'');
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            switch (st.ttype) {
            case StreamTokenizer.TT_WORD:
                System.out.println(st.lineno() + ") " + st.sval);
                break;
            case StreamTokenizer.TT_NUMBER:
                System.out.println(st.lineno() + ") " + st.nval);
                break;
            default:
                System.out.println(st.lineno() + ") " + (char) st.ttype);
            }
        }
        fr.close();
    } catch (Exception e) {
        System.out.println("Exception: " + e);
    }
}
From source file:Main.java
public static void main(String[] argv) throws Exception {
    FileReader rd = new FileReader("filename.java");
    StreamTokenizer st = new StreamTokenizer(rd);
    st.parseNumbers();
    st.wordChars('_', '_');
    st.eolIsSignificant(true);
    st.ordinaryChars(0, ' ');
    st.slashSlashComments(true);
    st.slashStarComments(true);
    // Fetch each token inside the loop condition so the first token is not skipped.
    int token;
    while ((token = st.nextToken()) != StreamTokenizer.TT_EOF) {
        switch (token) {
        case StreamTokenizer.TT_NUMBER:
            double num = st.nval;
            System.out.println(num);
            break;
        case StreamTokenizer.TT_WORD:
            String word = st.sval;
            System.out.println(word);
            break;
        case '"':
            String dquoteVal = st.sval;
            System.out.println(dquoteVal);
            break;
        case '\'':
            String squoteVal = st.sval;
            System.out.println(squoteVal);
            break;
        case StreamTokenizer.TT_EOL:
            break;
        default:
            char ch = (char) st.ttype;
            System.out.println(ch);
            break;
        }
    }
    rd.close();
}
From source file:com.indeed.imhotep.index.builder.util.SmartArgs.java
/**
 * Parse a String into an argument list, in the same way Java parses
 * arguments passed to main().
 */
public static String[] parseArgParams(String line) {
    StreamTokenizer tok = new StreamTokenizer(new StringReader(line));
    tok.resetSyntax();
    tok.wordChars('\u0000', '\uFFFF');
    tok.whitespaceChars(' ', ' ');
    tok.quoteChar('\"');
    ArrayList<String> output = new ArrayList<String>();
    try {
        while (tok.nextToken() != StreamTokenizer.TT_EOF) {
            output.add(tok.sval);
        }
    } catch (IOException e) {
        // ignored: reading from a StringReader should not throw
    }
    return output.toArray(new String[output.size()]);
}
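For illustration, a hypothetical call to parseArgParams (the input string below is invented) splits on unquoted spaces while keeping a double-quoted argument together:

// Hypothetical usage of SmartArgs.parseArgParams; input invented for illustration.
String[] parsed = SmartArgs.parseArgParams("-index \"my data\" --verbose");
// Expected result: ["-index", "my data", "--verbose"]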
From source file:com.xpn.xwiki.util.Util.java
/**
 * Create a Map from a string holding a space separated list of key=value pairs. If keys or values must contain
 * spaces, they can be placed inside quotes, like <code>"this key"="a larger value"</code>. To use a quote as part
 * of a key/value, use <code>%_Q_%</code>.
 *
 * @param mapString The string that must be parsed.
 * @return A Map containing the keys and values. If a key is defined more than once, the last value is used.
 */
public static Hashtable<String, String> keyValueToHashtable(String mapString) throws IOException {
    Hashtable<String, String> result = new Hashtable<String, String>();
    StreamTokenizer st = new StreamTokenizer(new BufferedReader(new StringReader(mapString)));
    st.resetSyntax();
    st.quoteChar('"');
    st.wordChars('a', 'z');
    st.wordChars('A', 'Z');
    st.whitespaceChars(' ', ' ');
    st.whitespaceChars('=', '=');
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
        String key = st.sval;
        st.nextToken();
        String value = (st.sval != null) ? st.sval : "";
        result.put(key, restoreValue(value));
    }
    return result;
}
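For illustration, a hypothetical call to keyValueToHashtable (the input string is invented, restoreValue is assumed to undo the %_Q_% escaping described in the Javadoc, and the caller must handle the declared IOException):

// Hypothetical usage of Util.keyValueToHashtable; input invented for illustration.
Hashtable<String, String> map = Util.keyValueToHashtable("color=red \"full name\"=\"John Doe\"");
// map.get("color")     -> "red"
// map.get("full name") -> "John Doe"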
From source file:edu.umd.cfar.lamp.viper.util.StringHelp.java
/**
 * Checks to see if the file begins with an xml processing directive, eg
 * <code><?xml?></code>. This method does not check to see that the
 * file is well-formed, or even if the processing directive is good, just that
 * the first non-whitespace characters are "<?xml".
 *
 * @param f The file to check for xml processing directive
 * @throws IOException if there is an error while reading the file, eg FileNotFoundException
 * @return <code>true</code> if the directive was found.
 */
public static boolean isXMLFormat(File f) throws IOException {
    StreamTokenizer st = new StreamTokenizer(new FileReader(f));
    st.wordChars('<', '<');
    st.wordChars('>', '>');
    st.wordChars('?', '?');
    st.nextToken();
    return st.toString().startsWith("Token[<?xml");
}
From source file:com.zimbra.common.calendar.ZoneInfo2iCalendar.java
private static void readExtraData(Reader reader) throws IOException, ParseException {
    char dquote = '"';
    StreamTokenizer tokenizer = new StreamTokenizer(reader);
    tokenizer.resetSyntax();
    tokenizer.wordChars(32, 126);
    tokenizer.whitespaceChars(' ', ' ');
    tokenizer.whitespaceChars('\t', '\t');
    tokenizer.whitespaceChars(0, 20);
    tokenizer.commentChar('#');
    tokenizer.quoteChar(dquote);
    tokenizer.eolIsSignificant(true);

    List<String> tokenList = new ArrayList<String>();
    LineType lineType = LineType.UNKNOWN;
    boolean atLineStart = true;

    int ttype;
    int prevTtype = StreamTokenizer.TT_EOL; // used for empty line detection
    while ((ttype = tokenizer.nextToken()) != StreamTokenizer.TT_EOF) {
        int lineNum = tokenizer.lineno();
        if (ttype == StreamTokenizer.TT_WORD || ttype == dquote) {
            String token = tokenizer.sval;
            if (atLineStart) {
                lineType = LineType.lookUp(token);
                if (LineType.UNKNOWN.equals(lineType))
                    throw new ParseException("Invalid line type", lineNum);
            } else {
                tokenList.add(token);
            }
            atLineStart = false;
        } else if (ttype == StreamTokenizer.TT_EOL) {
            if (prevTtype == StreamTokenizer.TT_EOL) {
                prevTtype = ttype;
                continue;
            }
            atLineStart = true;
            switch (lineType) {
            case PRIMARYZONE:
                if (tokenList.size() < 1)
                    throw new ParseException("Not enough fields in a PrimaryZone line", lineNum);
                String primaryTZID = tokenList.get(0);
                sPrimaryTZIDs.add(primaryTZID);
                break;
            case ZONEMATCHSCORE:
                if (tokenList.size() < 2)
                    throw new ParseException("Not enough fields in a ZoneMatchScore line", lineNum);
                String zoneName = tokenList.get(0);
                String zoneMatchScoreStr = tokenList.get(1);
                int zoneMatchScore = 0;
                try {
                    zoneMatchScore = Integer.parseInt(zoneMatchScoreStr);
                } catch (NumberFormatException e) {
                    throw new ParseException("Zone match score must be an integer: " + zoneMatchScoreStr, lineNum);
                }
                sMatchScores.put(zoneName, zoneMatchScore);
                break;
            }
            if (atLineStart) {
                tokenList.clear();
                lineType = LineType.UNKNOWN;
            }
        } else if (ttype == StreamTokenizer.TT_NUMBER) {
            // shouldn't happen
            throw new ParseException("Invalid parser state: TT_NUMBER found", lineNum);
        }
        prevTtype = ttype;
    }
}
From source file:com.github.lindenb.jvarkit.tools.biostar.Biostar103303.java
private void readGTF(String uri, SAMSequenceDictionary dict) throws IOException {
    int count_exons = 0;
    final Set<String> unknown = new HashSet<String>();
    LOG.info("Reading " + uri);
    final Pattern tab = Pattern.compile("[\t]");
    final Map<String, GTFGene> transcript2gene = new HashMap<String, GTFGene>();
    LineIterator iter = IOUtils.openURIForLineIterator(uri);
    while (iter.hasNext()) {
        String line = iter.next();
        if (line.startsWith("#"))
            continue;
        String tokens[] = tab.split(line);
        if (tokens.length < 9)
            continue;
        if (!tokens[2].equals("exon"))
            continue;
        if (dict.getSequence(tokens[0]) == null) {
            if (!unknown.contains(tokens[0])) {
                LOG.warn("chromosome in " + line + " not in SAMSequenceDictionary ");
                unknown.add(tokens[0]);
            }
            continue;
        }
        String transcript_id = null, gene_id = null, gene_name = null, exon_id = null;
        StreamTokenizer st = new StreamTokenizer(new StringReader(tokens[8]));
        st.wordChars('_', '_');
        String key = null;
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            String s = null;
            switch (st.ttype) {
            case StreamTokenizer.TT_NUMBER:
                s = String.valueOf(st.nval);
                break;
            case '"':
            case '\'':
            case StreamTokenizer.TT_WORD:
                s = st.sval;
                break;
            case ';':
                break;
            default:
                break;
            }
            if (s == null)
                continue;
            if (key == null) {
                key = s;
            } else {
                if (key.equals("transcript_id")) {
                    transcript_id = s;
                } else if (key.equals("gene_id")) {
                    gene_id = s;
                } else if (key.equals("gene_name")) {
                    gene_name = s;
                } else if (key.equals("exon_id")) {
                    exon_id = s;
                }
                key = null;
            }
        }
        if (transcript_id == null || transcript_id.isEmpty())
            continue;
        GTFGene gene = transcript2gene.get(tokens[0] + " " + transcript_id);
        if (gene == null) {
            gene = new GTFGene();
            gene.transcript_id = transcript_id;
            gene.gene_id = gene_id;
            gene.gene_name = gene_name;
            gene.chrom = tokens[0];
            transcript2gene.put(tokens[0] + " " + transcript_id, gene);
        }
        GTFGene.Exon exon = gene.createExon(Integer.parseInt(tokens[3]), Integer.parseInt(tokens[4]));
        exon.exon_id = exon_id;
    }
    CloserUtil.close(iter);

    for (GTFGene g : transcript2gene.values()) {
        Collections.sort(g.exons, new Comparator<GTFGene.Exon>() {
            @Override
            public int compare(GTFGene.Exon o1, GTFGene.Exon o2) {
                return o1.start - o2.start;
            }
        });
        for (int i = 0; i < g.exons.size(); ++i) {
            GTFGene.Exon exon = g.exons.get(i);
            exon.index = i;
            if (i > 0) {
                GTFGene.Exon prev = g.exons.get(i - 1);
                if (prev.end >= exon.start) {
                    throw new IOException("exons " + (i) + " and " + (i + 1) + " overlap in " + g);
                }
            }
            Interval interval = new Interval(g.chrom, exon.start, exon.end);
            List<GTFGene.Exon> L = exonMap.get(interval);
            if (L == null) {
                L = new ArrayList<GTFGene.Exon>(1);
                exonMap.put(interval, L);
            }
            L.add(exon);
            ++count_exons;
        }
    }
    LOG.info("End Reading " + uri + " N=" + count_exons);
}
From source file:edu.buffalo.cse.pigout.parser.PigOutMacro.java
void validate() throws IOException {
    if (rets.isEmpty()) {
        return;
    }
    HashSet<String> testSet = new HashSet<String>();
    StreamTokenizer st = new StreamTokenizer(new StringReader(body));
    st.wordChars('.', '.');
    st.wordChars('0', '9');
    st.wordChars('_', '_');
    st.wordChars('$', '$');
    st.lowerCaseMode(false);
    st.ordinaryChar('/');
    st.slashStarComments(true);
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
        if (matchWord(st, "define", false) && matchDollarAlias(st, true)) {
            testSet.add(st.sval.substring(1));
        } else if (matchDollarAlias(st, false)) {
            String prevWord = st.sval;
            if (matchWord(st, "if", true) || matchWord(st, "otherwise", true)) {
                testSet.add(prevWord.substring(1));
            } else if (matchChar(st, '=', true) && !matchChar(st, '=', true)) {
                testSet.add(prevWord.substring(1));
            } else if (matchChar(st, ',', true)) {
                // possible multi-alias inlining of a macro
                ArrayList<String> mlist = new ArrayList<String>();
                mlist.add(prevWord);
                if (isMultiValueReturn(st, mlist, true)) {
                    for (String s : mlist) {
                        testSet.add(s.substring(1));
                    }
                }
            }
        } else if (matchChar(st, '-', false) && matchChar(st, '-', true)) {
            skipSingleLineComment(st);
        }
    }
    for (String s : rets) {
        if (!testSet.contains(s)) {
            throw new IOException("Macro '" + name + "' missing return alias: " + s);
        }
    }
}