Example usage for java.io StreamTokenizer nextToken

List of usage examples for java.io StreamTokenizer nextToken

Introduction

On this page you can find usage examples for java.io StreamTokenizer nextToken.

Prototype

public int nextToken() throws IOException 

Document

Parses the next token from the input stream of this tokenizer.
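
The usual calling pattern is a loop that keeps calling nextToken() until it returns StreamTokenizer.TT_EOF and then inspects ttype, sval or nval for the current token. A minimal, self-contained sketch (class name and input string are made up):

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class NextTokenLoop {
    public static void main(String[] args) throws IOException {
        StreamTokenizer st = new StreamTokenizer(new StringReader("width 640 height 480"));
        // Keep reading until nextToken() reports end of input.
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            switch (st.ttype) {
            case StreamTokenizer.TT_WORD:
                System.out.println("word:   " + st.sval);
                break;
            case StreamTokenizer.TT_NUMBER:
                System.out.println("number: " + st.nval);
                break;
            default:
                System.out.println("char:   " + (char) st.ttype);
            }
        }
    }
}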

Usage

From source file:uk.ac.leeds.ccg.andyt.projects.moses.process.RegressionReport.java

protected static Object[] loadData(File _SARExpectedFile, File _CASObservedFile) throws IOException {
    Object[] result = new Object[3];
    BufferedReader _SARExpectedBufferedReader = new BufferedReader(
            new InputStreamReader(new FileInputStream(_SARExpectedFile)));
    StreamTokenizer _SARExpectedStreamTokenizer = new StreamTokenizer(_SARExpectedBufferedReader);
    Generic_StaticIO.setStreamTokenizerSyntax3(_SARExpectedStreamTokenizer);
    int _SARExpectedTokenType = _SARExpectedStreamTokenizer.nextToken();
    BufferedReader _CASObservedBufferedReader = new BufferedReader(
            new InputStreamReader(new FileInputStream(_CASObservedFile)));
    StreamTokenizer _CASObservedStreamTokenizer = new StreamTokenizer(_CASObservedBufferedReader);
    Generic_StaticIO.setStreamTokenizerSyntax3(_CASObservedStreamTokenizer);
    int _CASObservedTokenType = _CASObservedStreamTokenizer.nextToken();
    // Read Headers
    String a_SARExpectedLine = _SARExpectedStreamTokenizer.sval;
    String[] _SARExpectedVariables = a_SARExpectedLine.split(",");
    String a_CASObservedLine = _CASObservedStreamTokenizer.sval;
    String[] _CASObservedVariables = a_CASObservedLine.split(",");
    int _NumberNumericalVariables = 0;
    // Check variables names the same
    if (_SARExpectedVariables.length != _CASObservedVariables.length) {
        System.out.println("t_SARExpectedVariables.length != _CASObservedVariables.length");
    } else {
        _NumberNumericalVariables = _SARExpectedVariables.length - 1;
        for (int i = 0; i < _SARExpectedVariables.length; i++) {
            if (!_CASObservedVariables[i].equalsIgnoreCase(_SARExpectedVariables[i])) {
                System.out.print(_CASObservedVariables[i] + " != " + _SARExpectedVariables[i]);
            }
        }
    }
    result[0] = _SARExpectedVariables; // Variable Names
    // Read Data
    double[] a_SARExpectedRow = new double[_NumberNumericalVariables];
    ArrayList<double[]> _SARExpectedRows = new ArrayList<double[]>();
    double[] a_CASObservedRow = new double[_NumberNumericalVariables];
    ArrayList<double[]> _CASObservedRows = new ArrayList<double[]>();
    _SARExpectedTokenType = _SARExpectedStreamTokenizer.nextToken();
    _CASObservedTokenType = _CASObservedStreamTokenizer.nextToken();
    //ArrayList<String> _ZoneCodes = new ArrayList<String>();

    int _NumberOfAreas = 0;
    while (_SARExpectedTokenType != StreamTokenizer.TT_EOF && _CASObservedTokenType != StreamTokenizer.TT_EOF) {
        if (_SARExpectedTokenType != _CASObservedTokenType) {
            System.out.println("t_SARExpectedTokenType != _CASObservedTokenType");
        } else {
            switch (_SARExpectedTokenType) {
            case StreamTokenizer.TT_WORD:
                _NumberOfAreas++;
                a_SARExpectedRow = new double[_NumberNumericalVariables];
                a_SARExpectedLine = _SARExpectedStreamTokenizer.sval;
                _SARExpectedVariables = a_SARExpectedLine.split(",");
                a_CASObservedLine = _CASObservedStreamTokenizer.sval;
                a_CASObservedRow = new double[_NumberNumericalVariables];
                _CASObservedVariables = a_CASObservedLine.split(",");
                if (_SARExpectedVariables.length != _CASObservedVariables.length) {
                    System.out.println("t_SARExpectedVariables.length != _CASObservedVariables.length");
                }
                if (_NumberNumericalVariables != _SARExpectedVariables.length - 1) {
                    System.out.println("t_NumberNumericalVariables != _SARExpectedVariables.length - 1");
                }
                // if ( _CASObservedVariables[ 0 ].startsWith(
                // _SARExpectedVariables[ 0 ] ) ) {
                //_ZoneCodes.add(_CASObservedVariables[0]);
                for (int i = 0; i < _NumberNumericalVariables; i++) {
                    a_SARExpectedRow[i] = Double.valueOf(_SARExpectedVariables[i + 1]);
                    a_CASObservedRow[i] = Double.valueOf(_CASObservedVariables[i + 1]);
                    if (i == 1 && (a_SARExpectedRow[i] != a_CASObservedRow[i])) {
                        System.out.println("Warning ! constraint that allHouseholds observed ( "
                                + a_CASObservedRow[i] + ") = allHouseholds expected ( " + a_SARExpectedRow[i]
                                + " ) not met for " + _CASObservedVariables[0]);
                    }
                }
                _SARExpectedRows.add(a_SARExpectedRow);
                _CASObservedRows.add(a_CASObservedRow);
                // } else {
                // System.out.println( _CASObservedVariables[ 0 ] + " != " +
                // _SARExpectedVariables[ 0 ] );
                // }
            }
        }
        _SARExpectedTokenType = _SARExpectedStreamTokenizer.nextToken();
        _CASObservedTokenType = _CASObservedStreamTokenizer.nextToken();
    }
    if (_SARExpectedRows.size() != _CASObservedRows.size()) {
        System.out.println("t_SARExpectedRows.size() != _CASObservedRows.size()");
    }
    if (_NumberOfAreas != _SARExpectedRows.size()) {
        System.out.println("t_NumberOfAreas != _SARExpectedRows.size()");
    }
    // Format (Flip) data
    double[][] _SARExpectedData = new double[_NumberNumericalVariables][_NumberOfAreas];
    double[][] _CASObservedData = new double[_NumberNumericalVariables][_NumberOfAreas];
    for (int j = 0; j < _NumberOfAreas; j++) {
        a_SARExpectedRow = (double[]) _SARExpectedRows.get(j);
        a_CASObservedRow = (double[]) _CASObservedRows.get(j);
        for (int i = 0; i < _NumberNumericalVariables; i++) {
            _SARExpectedData[i][j] = a_SARExpectedRow[i];
            _CASObservedData[i][j] = a_CASObservedRow[i];
        }
    }
    result[1] = _SARExpectedData;
    result[2] = _CASObservedData;
    return result;
}
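
Generic_StaticIO.setStreamTokenizerSyntax3 is a project-specific helper; the loop above only works because the tokenizer is configured so that each CSV line comes back as a single TT_WORD token (which is why sval can be split on commas). A hedged guess at an equivalent configuration, not the actual implementation:

// Sketch of a "one word token per line" syntax (assumes plain ASCII/Latin-1 input);
// the real Generic_StaticIO.setStreamTokenizerSyntax3 may differ.
static void setLinePerTokenSyntax(StreamTokenizer st) {
    st.resetSyntax();
    st.wordChars(0, 255);            // every character is part of the word token...
    st.whitespaceChars('\n', '\n');  // ...except line breaks, which end the token
    st.whitespaceChars('\r', '\r');
}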

From source file:com.xpn.xwiki.util.Util.java

/**
 * Create a Map from a string holding a space separated list of key=value pairs. If keys or values must contain
 * spaces, they can be placed inside quotes, like <code>"this key"="a larger value"</code>. To use a quote as part
 * of a key/value, use <code>%_Q_%</code>.
 *
 * @param mapString The string that must be parsed.
 * @return A Map containing the keys and values. If a key is defined more than once, the last value is used.
 */
public static Hashtable<String, String> keyValueToHashtable(String mapString) throws IOException {
    Hashtable<String, String> result = new Hashtable<String, String>();
    StreamTokenizer st = new StreamTokenizer(new BufferedReader(new StringReader(mapString)));
    st.resetSyntax();
    st.quoteChar('"');
    st.wordChars('a', 'z');
    st.wordChars('A', 'Z');
    st.whitespaceChars(' ', ' ');
    st.whitespaceChars('=', '=');
    while (st.nextToken() != StreamTokenizer.TT_EOF) {
        String key = st.sval;
        st.nextToken();
        String value = (st.sval != null) ? st.sval : "";
        result.put(key, restoreValue(value));
    }
    return result;
}
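
A possible call, assuming the surrounding com.xpn.xwiki.util.Util class (restoreValue is its companion helper that turns %_Q_% back into quotes). Note that with the syntax configured above only letters and quoted strings become word tokens, so values containing digits should be quoted:

// Hypothetical usage.
Hashtable<String, String> map =
        Util.keyValueToHashtable("color=red \"this key\"=\"a larger value\"");
// map.get("color")    -> "red"
// map.get("this key") -> "a larger value"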

From source file:com.denimgroup.threadfix.framework.filefilter.ClassAnnotationBasedFileFilter.java

@Override
public boolean accept(@Nullable File file) {
    boolean returnValue = false;
    boolean hasArroba = false;

    if (file != null && file.exists() && file.isFile() && file.getName().endsWith(".java")) {
        Reader reader = null;

        try {

            reader = new InputStreamReader(new FileInputStream(file), "UTF-8");

            StreamTokenizer tokenizer = new StreamTokenizer(reader);
            tokenizer.slashSlashComments(true);
            tokenizer.slashStarComments(true);

            while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
                if (hasArroba && tokenizer.sval != null && getClassAnnotations().contains(tokenizer.sval)) {
                    returnValue = true;
                    break;
                } else if (tokenizer.sval != null && tokenizer.sval.equals("class")) {
                    // we've gone too far
                    break;
                }

                hasArroba = tokenizer.ttype == '@';
            }
        } catch (IOException e) {
            log.warn("Encountered IOException while tokenizing file.", e);
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException e) {
                    log.error("Encountered IOException while attempting to close file.", e);
                }
            }
        }
    }

    return returnValue;
}
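
The trick here is that an annotation such as @Controller is reported by nextToken() as an ordinary '@' character token followed by a word token, so the filter remembers the '@' in hasArroba and checks the next word against getClassAnnotations(). A standalone illustration of that pattern (class name and input string are made up):

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class AnnotationScan {
    public static void main(String[] args) throws IOException {
        StreamTokenizer t = new StreamTokenizer(new StringReader("@Controller\npublic class Foo {}"));
        t.slashSlashComments(true);
        t.slashStarComments(true);
        boolean hasArroba = false;
        while (t.nextToken() != StreamTokenizer.TT_EOF) {
            if (hasArroba && t.sval != null) {
                System.out.println("annotation: " + t.sval);   // prints "Controller"
            }
            hasArroba = t.ttype == '@';
        }
    }
}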

From source file:edu.umd.cfar.lamp.viper.util.StringHelp.java

/** 
 * Checks to see if the file begins with an xml processing directive, eg
 * <code>&lt;?xml?&gt;</code>. This method does not check to see that the 
 * file is well-formed, or even if the processing directive is good, just that
 * the first non-whitespace characters are "&lt;?xml".
 *
 * @param f The file to check for xml processing directive
 * @throws IOException if there is an error while reading the file, eg FileNotFoundException
 * @return <code>true</code> if the directive was found. 
 */
public static boolean isXMLFormat(File f) throws IOException {
    StreamTokenizer st = new StreamTokenizer(new FileReader(f));
    st.wordChars('<', '<');
    st.wordChars('>', '>');
    st.wordChars('?', '?');
    st.nextToken();
    return st.toString().startsWith("Token[<?xml");
}
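
This works because StreamTokenizer.toString() renders the current token as "Token[<value>], line <n>", but testing sval directly is less fragile. A sketch of an equivalent check (assuming the usual java.io imports) that also closes the reader, which the version above never does:

public static boolean isXMLFormatAlt(File f) throws IOException {
    try (Reader r = new FileReader(f)) {
        StreamTokenizer st = new StreamTokenizer(r);
        st.wordChars('<', '<');
        st.wordChars('>', '>');
        st.wordChars('?', '?');
        st.nextToken();
        return st.ttype == StreamTokenizer.TT_WORD && st.sval.startsWith("<?xml");
    }
}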

From source file:com.github.lindenb.jvarkit.tools.biostar.Biostar103303.java

private void readGTF(String uri, SAMSequenceDictionary dict) throws IOException {
    int count_exons = 0;
    final Set<String> unknown = new HashSet<String>();
    LOG.info("Reading " + uri);
    final Pattern tab = Pattern.compile("[\t]");
    final Map<String, GTFGene> transcript2gene = new HashMap<String, GTFGene>();
    LineIterator iter = IOUtils.openURIForLineIterator(uri);
    while (iter.hasNext()) {
        String line = iter.next();
        if (line.startsWith("#"))
            continue;
        String tokens[] = tab.split(line);
        if (tokens.length < 9)
            continue;
        if (!tokens[2].equals("exon"))
            continue;
        if (dict.getSequence(tokens[0]) == null) {
            if (!unknown.contains(tokens[0])) {
                LOG.warn("chromosome in " + line + " not in SAMSequenceDictionary ");
                unknown.add(tokens[0]);
            }
            continue;
        }
        String transcript_id = null, gene_id = null, gene_name = null, exon_id = null;
        StreamTokenizer st = new StreamTokenizer(new StringReader(tokens[8]));
        st.wordChars('_', '_');
        String key = null;
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            String s = null;
            switch (st.ttype) {
            case StreamTokenizer.TT_NUMBER:
                s = String.valueOf(st.nval);
                break;
            case '"':
            case '\'':
            case StreamTokenizer.TT_WORD:
                s = st.sval;
                break;
            case ';':
                break;
            default:
                break;
            }
            if (s == null)
                continue;
            if (key == null) {
                key = s;
            } else {
                if (key.equals("transcript_id")) {
                    transcript_id = s;
                } else if (key.equals("gene_id")) {
                    gene_id = s;
                } else if (key.equals("gene_name")) {
                    gene_name = s;
                } else if (key.equals("exon_id")) {
                    exon_id = s;
                }
                key = null;
            }
        }
        if (transcript_id == null || transcript_id.isEmpty())
            continue;
        GTFGene gene = transcript2gene.get(tokens[0] + " " + transcript_id);
        if (gene == null) {
            gene = new GTFGene();
            gene.transcript_id = transcript_id;
            gene.gene_id = gene_id;
            gene.gene_name = gene_name;
            gene.chrom = tokens[0];
            transcript2gene.put(tokens[0] + " " + transcript_id, gene);
        }
        GTFGene.Exon exon = gene.createExon(Integer.parseInt(tokens[3]), Integer.parseInt(tokens[4]));
        exon.exon_id = exon_id;
    }
    CloserUtil.close(iter);

    for (GTFGene g : transcript2gene.values()) {
        Collections.sort(g.exons, new Comparator<GTFGene.Exon>() {
            @Override
            public int compare(GTFGene.Exon o1, GTFGene.Exon o2) {
                return o1.start - o2.start;
            }
        });
        for (int i = 0; i < g.exons.size(); ++i) {

            GTFGene.Exon exon = g.exons.get(i);
            exon.index = i;

            if (i > 0) {
                GTFGene.Exon prev = g.exons.get(i - 1);
                if (prev.end >= exon.start) {
                    throw new IOException("exons " + (i) + " and " + (i + 1) + " overlap in " + g);
                }
            }

            Interval interval = new Interval(g.chrom, exon.start, exon.end);
            List<GTFGene.Exon> L = exonMap.get(interval);
            if (L == null) {
                L = new ArrayList<GTFGene.Exon>(1);
                exonMap.put(interval, L);
            }
            L.add(exon);
            ++count_exons;
        }
    }
    LOG.info("End Reading " + uri + " N=" + count_exons);
}
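
The StreamTokenizer here is used only on the GTF attribute column (tokens[8]), where quoted values follow attribute names such as gene_id and transcript_id; making '_' a word character keeps those names as single tokens. A standalone illustration with a made-up attribute string:

import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;

public class GtfAttributes {
    public static void main(String[] args) throws IOException {
        String attrs = "gene_id \"ENSG0001\"; transcript_id \"ENST0001\"; exon_number \"1\";";
        StreamTokenizer st = new StreamTokenizer(new StringReader(attrs));
        st.wordChars('_', '_');              // keep gene_id etc. as single word tokens
        String key = null;
        while (st.nextToken() != StreamTokenizer.TT_EOF) {
            String s = null;
            if (st.ttype == StreamTokenizer.TT_WORD || st.ttype == '"' || st.ttype == '\'') {
                s = st.sval;
            } else if (st.ttype == StreamTokenizer.TT_NUMBER) {
                s = String.valueOf(st.nval);
            }
            if (s == null) {
                continue;                    // skips the ';' separators
            }
            if (key == null) {
                key = s;                     // first token of a pair is the attribute name
            } else {
                System.out.println(key + " = " + s);
                key = null;
            }
        }
    }
}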

From source file:keel.Algorithms.Decision_Trees.C45.C45.java

/** Function to read the options from the execution file and assign the values to the parameters.
 *
 * @param options       The StreamTokenizer that reads the parameters file.
 *
 * @throws Exception   If the format of the file is not correct.
 */
protected void setOptions(StreamTokenizer options) throws Exception {
    options.nextToken();

    /* Checks that the file starts with the token algorithm */
    if (options.sval.equalsIgnoreCase("algorithm")) {
        options.nextToken();
        options.nextToken();

        //if (!options.sval.equalsIgnoreCase( "C4.5" ) )
        //   throw new Exception( "The name of the algorithm is not correct." );

        options.nextToken();
        options.nextToken();
        options.nextToken();
        options.nextToken();

        /* Reads the names of the input files*/
        if (options.sval.equalsIgnoreCase("inputData")) {
            options.nextToken();
            options.nextToken();
            modelFileName = options.sval;

            if (options.nextToken() != StreamTokenizer.TT_EOL) {
                trainFileName = options.sval;
                options.nextToken();
                testFileName = options.sval;
                if (options.nextToken() != StreamTokenizer.TT_EOL) {
                    trainFileName = modelFileName;
                    options.nextToken();
                }
            }

        } else {
            throw new Exception("No file test provided.");
        }

        /* Reads the names of the output files*/
        while (true) {
            if (options.nextToken() == StreamTokenizer.TT_EOF) {
                throw new Exception("No output file provided.");
            }

            if (options.sval == null) {
                continue;
            } else if (options.sval.equalsIgnoreCase("outputData")) {
                break;
            }
        }

        options.nextToken();
        options.nextToken();
        trainOutputFileName = options.sval;
        options.nextToken();
        testOutputFileName = options.sval;
        options.nextToken();
        resultFileName = options.sval;

        if (!getNextToken(options)) {
            return;
        }

        while (options.ttype != StreamTokenizer.TT_EOF) {
            /* Reads the prune parameter */
            if (options.sval.equalsIgnoreCase("pruned")) {
                options.nextToken();
                options.nextToken();

                if (options.sval.equalsIgnoreCase("TRUE")) {
                    prune = true;
                } else {
                    //prune = false;
                    prune = true;
                }
            }

            /* Reads the confidence parameter */
            if (options.sval.equalsIgnoreCase("confidence")) {
                if (!prune) {
                    throw new Exception("Doesn't make sense to change confidence for prune " + "tree!");
                }

                options.nextToken();
                options.nextToken();

                /* Checks that the confidence threshold is between 0 and 1. */
                float cf = Float.parseFloat(options.sval);

                if (cf <= 1 && cf >= 0) {
                    confidence = Float.parseFloat(options.sval);
                }
            }

            /* Reads the itemsets per leaf parameter */
            if (options.sval.equalsIgnoreCase("itemsetsPerLeaf")) {
                options.nextToken();
                options.nextToken();

                if (Integer.parseInt(options.sval) > 0) {
                    minItemsets = Integer.parseInt(options.sval);
                }
            }

            getNextToken(options);
        }
    }
}
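
Note that this method (and the two near-identical variants below) assumes the caller configured the tokenizer so that line ends are reported as StreamTokenizer.TT_EOL; with the default syntax, nextToken() never returns TT_EOL and the inputData branch would misbehave. A guess at how the parameters-file tokenizer might be set up before the call (the file name and surrounding code are hypothetical; the actual KEEL initialisation may differ):

BufferedReader reader = new BufferedReader(new FileReader("config.txt"));
StreamTokenizer options = new StreamTokenizer(reader);
options.eolIsSignificant(true);   // nextToken() can now return StreamTokenizer.TT_EOL
setOptions(options);              // the method shown above
reader.close();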

From source file:net.duckling.ddl.service.render.dml.ParseHtmlImg.java

public Map parseArgs(String argstring) throws IOException {
    HashMap<String, String> arglist = new HashMap<String, String>();

    //
    //  Protection against funny users.
    //
    if (argstring == null) {
        return arglist;
    }

    StringReader in = new StringReader(argstring);
    StreamTokenizer tok = new StreamTokenizer(in);
    int type;

    String param = null;
    String value = null;

    tok.eolIsSignificant(true);

    boolean potentialEmptyLine = false;
    boolean quit = false;

    while (!quit) {
        String s;

        type = tok.nextToken();

        switch (type) {
        case StreamTokenizer.TT_EOF:
            quit = true;
            s = null;
            break;

        case StreamTokenizer.TT_WORD:
            s = tok.sval;
            potentialEmptyLine = false;
            break;

        case StreamTokenizer.TT_EOL:
            quit = potentialEmptyLine;
            potentialEmptyLine = true;
            s = null;
            break;

        case StreamTokenizer.TT_NUMBER:
            s = Integer.toString(new Double(tok.nval).intValue());
            potentialEmptyLine = false;
            break;

        case '\'':
            s = tok.sval;
            break;

        default:
            s = null;
        }

        //
        //  Assume that alternate words on the line are
        //  parameter and value, respectively.
        //
        if (s != null) {
            if (param == null) {
                param = s;
            } else {
                value = s;
                arglist.put(param, value);
                param = null;
            }
        }
    }

    //
    //  Now, we'll check the body.
    //

    if (potentialEmptyLine) {
        StringWriter out = new StringWriter();
        FileUtil.copyContents(in, out);

        String bodyContent = out.toString();

        if (bodyContent != null) {
            arglist.put(PARAM_BODY, bodyContent);
        }
    }

    return arglist;
}
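
A possible call (ParseHtmlImg is instantiated here just to reach the method; the argument string is made up). Alternate tokens on a line become parameter names and values, quoted values keep their spaces, and numbers are converted back to integer strings:

// Hypothetical usage.
Map args = new ParseHtmlImg().parseArgs("src 'images/logo.png' width 120 alt 'Site logo'");
// args.get("src")   -> "images/logo.png"
// args.get("width") -> "120"
// args.get("alt")   -> "Site logo"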

From source file:keel.Algorithms.ImbalancedClassification.CSMethods.C45CS.C45CS.java

/** Function to read the options from the execution file and assign the values to the parameters.
 *
 * @param options       The StreamTokenizer that reads the parameters file.
 *
 * @throws Exception   If the format of the file is not correct.
 */
protected void setOptions(StreamTokenizer options) throws Exception {
    options.nextToken();

    /* Checks that the file starts with the token algorithm */
    if (options.sval.equalsIgnoreCase("algorithm")) {
        options.nextToken();
        options.nextToken();

        //if (!options.sval.equalsIgnoreCase( "C4.5" ) )
        //   throw new Exception( "The name of the algorithm is not correct." );

        options.nextToken();
        options.nextToken();
        options.nextToken();
        options.nextToken();

        /* Reads the names of the input files*/
        if (options.sval.equalsIgnoreCase("inputData")) {
            options.nextToken();
            options.nextToken();
            modelFileName = options.sval;

            if (options.nextToken() != StreamTokenizer.TT_EOL) {
                trainFileName = options.sval;
                options.nextToken();
                testFileName = options.sval;
                if (options.nextToken() != StreamTokenizer.TT_EOL) {
                    trainFileName = modelFileName;
                    options.nextToken();
                }
            }

        } else {
            throw new Exception("No file test provided.");
        }

        /* Reads the names of the output files*/
        while (true) {
            if (options.nextToken() == StreamTokenizer.TT_EOF) {
                throw new Exception("No output file provided.");
            }

            if (options.sval == null) {
                continue;
            } else if (options.sval.equalsIgnoreCase("outputData")) {
                break;
            }
        }

        options.nextToken();
        options.nextToken();
        trainOutputFileName = options.sval;
        options.nextToken();
        testOutputFileName = options.sval;
        options.nextToken();
        resultFileName = options.sval;

        if (!getNextToken(options)) {
            return;
        }

        while (options.ttype != StreamTokenizer.TT_EOF) {
            /* Reads the prune parameter */
            if (options.sval.equalsIgnoreCase("pruned")) {
                options.nextToken();
                options.nextToken();

                if (options.sval.equalsIgnoreCase("TRUE")) {
                    prune = true;
                } else {
                    //prune = false;
                    prune = true;
                }
            }

            /* Reads the confidence parameter */
            if (options.sval.equalsIgnoreCase("confidence")) {
                if (!prune) {
                    throw new Exception("Doesn't make sense to change confidence for prune " + "tree!");
                }

                options.nextToken();
                options.nextToken();

                /* Checks that the confidence threshold is between 0 and 1. */
                float cf = Float.parseFloat(options.sval);

                if (cf <= 1 && cf >= 0) {
                    confidence = Float.parseFloat(options.sval);
                }
            }

            /* Reads the itemsets per leaf parameter */
            if (options.sval.equalsIgnoreCase("itemsetsPerLeaf")) {
                options.nextToken();
                options.nextToken();

                if (Integer.parseInt(options.sval) > 0) {
                    minItemsets = Integer.parseInt(options.sval);
                }
            }

            /* Reads the minimum expected cost parameter */
            if (options.sval.equalsIgnoreCase("minimumExpectedCost")) {
                options.nextToken();
                options.nextToken();

                if (options.sval.equalsIgnoreCase("TRUE")) {
                    minimumExpectedCost = true;
                } else {
                    minimumExpectedCost = false;
                }
            }

            getNextToken(options);
        }
    }
}

From source file:keel.Algorithms.Genetic_Rule_Learning.PART.C45.java

/** Function to read the options from the execution file and assign the values to the parameters.
 *
 * @param options The StreamTokenizer that reads the parameters file.
 *
 * @throws Exception If the format of the file is not correct.
 */
protected void setOptions(StreamTokenizer options) throws Exception {
    options.nextToken();

    /* Checks that the file starts with the token algorithm */
    if (options.sval.equalsIgnoreCase("algorithm")) {
        options.nextToken();
        options.nextToken();

        //if (!options.sval.equalsIgnoreCase( "C4.5" ) )
        //   throw new Exception( "The name of the algorithm is not correct." );

        options.nextToken();
        System.out.println(options.sval + "\n");
        options.nextToken();
        System.out.println(options.sval + "\n");
        //options.nextToken();
        //System.out.println(options.sval+"\n");
        //options.nextToken();
        //System.out.println(options.sval+"\n");

        /* Reads the names of the input files*/
        if (options.sval.equalsIgnoreCase("inputData")) {
            options.nextToken();
            options.nextToken();
            modelFileName = options.sval;
            System.out.println("Hay inputs\n");

            if (options.nextToken() != StreamTokenizer.TT_EOL) {
                trainFileName = options.sval;
                options.nextToken();
                testFileName = options.sval;
                if (options.nextToken() != StreamTokenizer.TT_EOL) {
                    trainFileName = modelFileName;
                    options.nextToken();
                }
                System.out.println(trainFileName + "\n");
                System.out.println(testFileName + "\n");
            }

        } else
            throw new Exception("No file test provided.");

        /* Reads the names of the output files*/
        while (true) {
            if (options.nextToken() == StreamTokenizer.TT_EOF)
                throw new Exception("No output file provided.");

            if (options.sval == null)
                continue;
            else if (options.sval.equalsIgnoreCase("outputData"))
                break;
        }

        options.nextToken();
        options.nextToken();
        trainOutputFileName = options.sval;
        options.nextToken();
        testOutputFileName = options.sval;
        options.nextToken();
        resultFileName = options.sval;

        System.out.println(trainOutputFileName + "\n");
        System.out.println(testOutputFileName + "\n");
        System.out.println(resultFileName + "\n");

        if (!getNextToken(options))
            return;

        while (options.ttype != StreamTokenizer.TT_EOF) {
            /* Reads the prune parameter */
            if (options.sval.equalsIgnoreCase("pruned")) {
                options.nextToken();
                options.nextToken();

                if (options.sval.equalsIgnoreCase("TRUE"))
                    prune = true;
                else
                    prune = false;
                //prune = true;
            }

            /* Reads the confidence parameter */
            if (options.sval.equalsIgnoreCase("confidence")) {
                if (!prune)
                    throw new Exception("Doesn't make sense to change confidence for prune tree!");

                options.nextToken();
                options.nextToken();

                /* Checks that the confidence threshold is between 0 and 1. */
                float cf = Float.parseFloat(options.sval);

                if (cf <= 1 && cf >= 0)
                    confidence = Float.parseFloat(options.sval);
            }

            /* Reads the itemsets per leaf parameter */
            if (options.sval.equalsIgnoreCase("itemsetsPerLeaf")) {
                options.nextToken();
                options.nextToken();

                if (Integer.parseInt(options.sval) > 0)
                    minItemsets = Integer.parseInt(options.sval);
            }

            getNextToken(options);
        }
    }
}

From source file:com.enonic.cms.business.portal.datasource.expressionfunctions.ExpressionFunctions.java

/**
 * This method will take a freetext search string and create a valid query that can be used in the getContent* methods.  The search
 * string is split into tokens.  Using the operator, it may be specified whether the field must contain all or any of the words in the
 * search string.
 *
 * @param fieldName    The name of the field to search for the words in the search string.
 * @param searchString The words to search for.
 * @param operator     Must be either AND or OR.  Case doesn't matter.
 * @return A syntactically correct search that may be used as the query parameter in getContent* methods on the data source. With care,
 *         it may also be merged with other queries using AND or OR.
 * @throws IllegalArgumentException If any of the parameters are empty or the operator is not AND or OR.
 */
public String buildFreetextQuery(String fieldName, String searchString, String operator) {
    if (searchString == null || searchString.trim().equals("")) {
        return "";
    }
    if (fieldName == null || fieldName.trim().equals("")) {
        throw new IllegalArgumentException("fieldName can not be empty.");
    }

    String op = "";
    if (operator != null) {
        op = operator.trim().toUpperCase();
    }
    if (!(op.equals("AND") || op.equals("OR"))) {
        throw new IllegalArgumentException("Illegal operator: " + operator);
    }

    boolean first = true;
    StringBuffer queryTokens = new StringBuffer();
    Reader searchStringReader = new StringReader(searchString);
    StreamTokenizer searchStringTokens = new StreamTokenizer(searchStringReader);
    searchStringTokens.slashSlashComments(false);
    searchStringTokens.slashStarComments(false);
    searchStringTokens.eolIsSignificant(false);
    searchStringTokens.ordinaryChar('!');
    searchStringTokens.ordinaryChars('#', '}');
    searchStringTokens.wordChars('!', '!');
    searchStringTokens.wordChars('#', '}');

    try {
        while (searchStringTokens.nextToken() != StreamTokenizer.TT_EOF) {
            String token = searchStringTokens.sval;
            addQueryPart(queryTokens, first, fieldName, token, op);
            if (first) {
                first = false;
            }
        }
    } catch (IOException e) {
        throw new IllegalStateException("This should never happen, since the IO class is wrapping a string!");
    }

    return queryTokens.toString();
}
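
A possible call (expressionFunctions stands in for an ExpressionFunctions instance; the exact query text depends on addQueryPart, but each word of the search string ends up constrained on the given field and joined with the operator):

// Hypothetical usage.
String query = expressionFunctions.buildFreetextQuery("title", "enterprise content management", "AND");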