Example usage for java.util StringTokenizer hasMoreElements

List of usage examples for java.util StringTokenizer hasMoreElements

Introduction

In this page you can find the example usage for java.util StringTokenizer hasMoreElements.

Prototype

public boolean hasMoreElements() 

Source Link

Document

Returns the same value as the hasMoreTokens method.

Usage

From source file:com.irets.bl.service.SearchService.java

public SearchService() {
    try {/*from w w  w. ja va  2 s  .co  m*/
        InputStream is = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream("login_credentials.properties");
        this.loginCredentials.load(is);
        is.close();
        this.objectRequestPhoto = this.loginCredentials.getProperty("objectRequestPhoto");
        this.photoURLAvailable = this.loginCredentials.getProperty("photoURLAvailable");
        this.defaultPageSize = Integer.valueOf(this.loginCredentials.getProperty("default_page_size", "20"));
        this.maxPageSize = Integer.valueOf(this.loginCredentials.getProperty("max_page_size", "100"));
        this.hwmSearchResponses = Integer
                .valueOf(this.loginCredentials.getProperty("high_water_mark_for_search_responses", "500"));
        this.mlsNumberPrefix = this.loginCredentials.getProperty("mls_number_prefix", "");
        this.pendingStatus = this.loginCredentials.getProperty("pendingStatus", "Pending");
        this.activeStatus = this.loginCredentials.getProperty("activeStatus", "Active");
        this.server = this.loginCredentials.getProperty("server");
        this.mlsList = this.loginCredentials.getProperty("mlsList");
        if (mlsList != null) {
            //System.out.println("mlsList is  " + mlsList);
            mlsSpecData = new ArrayList<MlsSpecificData>();
            if (mlsList.indexOf(",") > -1) {
                StringTokenizer sToken = new StringTokenizer(mlsList, ",");
                while (sToken.hasMoreElements()) {
                    String key = (String) sToken.nextElement();
                    String name = (String) this.loginCredentials.get(key + "Name");
                    String logo = (String) this.loginCredentials.get(key + "Logo");
                    String disc = (String) this.loginCredentials.get(key + "Disclaimer");
                    MlsSpecificData msd = new MlsSpecificData(key, name, logo, disc);
                    mlsSpecData.add(msd);
                }

            } else {
                String name = (String) this.loginCredentials.get(mlsList + "Name");
                String logo = (String) this.loginCredentials.get(mlsList + "Logo");
                String disc = (String) this.loginCredentials.get(mlsList + "Disclaimer");
                MlsSpecificData msd = new MlsSpecificData(mlsList, name, logo, disc);
                mlsSpecData.add(msd);
            }
        }

        this.exteriorFeatures = this.loginCredentials.getProperty("exteriorFeaturesList");
        if (exteriorFeatures != null) {
            System.out.println("Ext features List ," + this.exteriorFeatures);
            this.exteriorFeaturesList = new ArrayList<String>();
            // Traversing the Exterior Features
            if (exteriorFeatures.indexOf(",") > -1) {
                StringTokenizer sToken = new StringTokenizer(exteriorFeatures, ",");
                while (sToken.hasMoreElements()) {
                    String key = (String) sToken.nextElement();
                    exteriorFeaturesList.add(key);
                }
            } else {// for just one exterior feature
                System.out.println("Only one Exterior Features... check");
                exteriorFeaturesList.add(this.exteriorFeatures);
            }

            // Number of Keys should be same as the above exterior features
            String keys = (String) this.loginCredentials.get("exteriorFeaturesListKeys");
            if (keys != null) {
                this.extFeaturesData = new ArrayList<ExteriorFeaturesData>();
                if (keys.indexOf(",") > -1) {
                    StringTokenizer sToken = new StringTokenizer(keys, ",");
                    int idx = 0;
                    while (sToken.hasMoreElements()) {
                        String key = (String) sToken.nextElement();
                        String name = this.exteriorFeaturesList.get(idx);
                        String fields = (String) this.loginCredentials.get(key);
                        ExteriorFeaturesData efd = new ExteriorFeaturesData(key, name, fields);
                        this.extFeaturesData.add(efd);
                        idx++;
                        System.out.println("Key/name/data is, " + key + "?" + name + "?" + fields);
                    }

                } else {// for just one key
                    System.out.println("Only one Exterior Features keys... check");
                    String fields = (String) this.loginCredentials.get(keys);
                    ExteriorFeaturesData efd = new ExteriorFeaturesData(keys, this.exteriorFeaturesList.get(0),
                            fields);
                    this.extFeaturesData.add(efd);
                }
            }

        }

    } catch (Exception e) {
        //ignore
        e.printStackTrace();
    }
}

From source file:com.ecofactor.qa.automation.drapi.DRAPI_Test.java

/**
 * Fetch the Event Name after creation of DR Event.
 * @param response the response//from ww  w. jav a2s .com
 * @return String.
 */
public String getDrEventName(final String response) {

    StringTokenizer st = new StringTokenizer(response, ",");
    String eventID = "";
    while (st.hasMoreElements()) {

        @SuppressWarnings("unused")
        String status = st.nextToken();
        eventID = st.nextToken();
    }
    String[] eventValues = eventID.split(":");
    final String eventName = eventValues[2];

    setLogString("DR EventName Fetched : " + eventName.substring(0, eventName.length() - 3), true);
    return eventName.substring(0, eventName.length() - 3);
}

From source file:com.ecofactor.qa.automation.drapi.DRAPI_Test.java

/**
 * Fetch the Event Id after creation of DR Event.
 * @param response the response.// w  w w  .  ja va2s .  c  om
 * @return Integer.
 */
public int getDrEventId(final String response) {

    StringTokenizer st = new StringTokenizer(response, ",");
    String eventID = "";
    while (st.hasMoreElements()) {

        String[] values = new String[(st.countTokens())];
        for (int i = 0; i < 5; i++) {
            values[i] = st.nextToken();
        }

        eventID = values[1];
    }
    String[] eventValues = eventID.split(":");
    String value = eventValues[1];
    String str = value.substring(1, value.length() - 1);
    final int eventId = Integer.parseInt(str);
    return eventId;
}

From source file:net.semanticmetadata.lire.solr.FastLireRequestHandler.java

/**
 * Actual search implementation based on (i) hash based retrieval and (ii) feature based re-ranking.
 *
 * @param rsp/*  w ww  . ja  v a2 s. c o m*/
 * @param searcher
 * @param hashFieldName the hash field name
 * @param maximumHits
 * @param terms
 * @param queryFeature
 * @throws java.io.IOException
 * @throws IllegalAccessException
 * @throws InstantiationException
 */
private void doSearch(SolrQueryRequest req, SolrQueryResponse rsp, SolrIndexSearcher searcher,
        String hashFieldName, int maximumHits, List<Term> terms, Query query, LireFeature queryFeature)
        throws IOException, IllegalAccessException, InstantiationException {
    // temp feature instance
    LireFeature tmpFeature = queryFeature.getClass().newInstance();
    // Taking the time of search for statistical purposes.
    time = System.currentTimeMillis();

    Filter filter = null;
    // if the request contains a filter:
    if (req.getParams().get("fq") != null) {
        // only filters with [<field>:<value> ]+ are supported
        StringTokenizer st = new StringTokenizer(req.getParams().get("fq"), " ");
        LinkedList<Term> filterTerms = new LinkedList<Term>();
        while (st.hasMoreElements()) {
            String[] tmpToken = st.nextToken().split(":");
            if (tmpToken.length > 1) {
                filterTerms.add(new Term(tmpToken[0], tmpToken[1]));
            }
        }
        if (filterTerms.size() > 0)
            filter = new TermsFilter(filterTerms);
    }

    TopDocs docs; // with query only.
    if (filter == null) {
        docs = searcher.search(query, numberOfCandidateResults);
    } else {
        docs = searcher.search(query, filter, numberOfCandidateResults);
    }
    //        TopDocs docs = searcher.search(query, new TermsFilter(terms), numberOfCandidateResults);   // with TermsFilter and boosting by simple query
    //        TopDocs docs = searcher.search(new ConstantScoreQuery(new TermsFilter(terms)), numberOfCandidateResults); // just with TermsFilter
    time = System.currentTimeMillis() - time;
    rsp.add("RawDocsCount", docs.scoreDocs.length + "");
    rsp.add("RawDocsSearchTime", time + "");
    // re-rank
    time = System.currentTimeMillis();
    TreeSet<SimpleResult> resultScoreDocs = new TreeSet<SimpleResult>();
    float maxDistance = -1f;
    float tmpScore;

    String featureFieldName = FeatureRegistry.getFeatureFieldName(hashFieldName);
    // iterating and re-ranking the documents.
    BinaryDocValues binaryValues = MultiDocValues.getBinaryValues(searcher.getIndexReader(), featureFieldName); // ***  #
    BytesRef bytesRef = new BytesRef();
    for (int i = 0; i < docs.scoreDocs.length; i++) {
        // using DocValues to retrieve the field values ...
        binaryValues.get(docs.scoreDocs[i].doc, bytesRef);
        tmpFeature.setByteArrayRepresentation(bytesRef.bytes, bytesRef.offset, bytesRef.length);
        // Getting the document from the index.
        // This is the slow step based on the field compression of stored fields.
        //            tmpFeature.setByteArrayRepresentation(d.getBinaryValue(name).bytes, d.getBinaryValue(name).offset, d.getBinaryValue(name).length);
        tmpScore = queryFeature.getDistance(tmpFeature);
        if (resultScoreDocs.size() < maximumHits) { // todo: There's potential here for a memory saver, think of a clever data structure that can do the trick without creating a new SimpleResult for each result.
            resultScoreDocs.add(
                    new SimpleResult(tmpScore, searcher.doc(docs.scoreDocs[i].doc), docs.scoreDocs[i].doc));
            maxDistance = resultScoreDocs.last().getDistance();
        } else if (tmpScore < maxDistance) {
            //                if it is nearer to the sample than at least one of the current set:
            //                remove the last one ...
            resultScoreDocs.remove(resultScoreDocs.last());
            //                add the new one ...
            resultScoreDocs.add(
                    new SimpleResult(tmpScore, searcher.doc(docs.scoreDocs[i].doc), docs.scoreDocs[i].doc));
            //                and set our new distance border ...
            maxDistance = resultScoreDocs.last().getDistance();
        }
    }
    //        System.out.println("** Creating response.");
    time = System.currentTimeMillis() - time;
    rsp.add("ReRankSearchTime", time + "");
    LinkedList list = new LinkedList();
    for (Iterator<SimpleResult> it = resultScoreDocs.iterator(); it.hasNext();) {
        SimpleResult result = it.next();
        HashMap m = new HashMap(2);
        m.put("d", result.getDistance());
        // add fields as requested:
        if (req.getParams().get("fl") == null) {
            m.put("id", result.getDocument().get("id"));
            if (result.getDocument().get("title") != null)
                m.put("title", result.getDocument().get("title"));
        } else {
            String fieldsRequested = req.getParams().get("fl");
            if (fieldsRequested.contains("score")) {
                m.put("score", result.getDistance());
            }
            if (fieldsRequested.contains("*")) {
                // all fields
                for (IndexableField field : result.getDocument().getFields()) {
                    String tmpField = field.name();
                    if (result.getDocument().getFields(tmpField).length > 1) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getValues(tmpField));
                    } else if (result.getDocument().getFields(tmpField).length > 0) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getFields(tmpField)[0].stringValue());
                    }
                }
            } else {
                StringTokenizer st;
                if (fieldsRequested.contains(","))
                    st = new StringTokenizer(fieldsRequested, ",");
                else
                    st = new StringTokenizer(fieldsRequested, " ");
                while (st.hasMoreElements()) {
                    String tmpField = st.nextToken();
                    if (result.getDocument().getFields(tmpField).length > 1) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getValues(tmpField));
                    } else if (result.getDocument().getFields(tmpField).length > 0) {
                        m.put(result.getDocument().getFields(tmpField)[0].name(),
                                result.getDocument().getFields(tmpField)[0].stringValue());
                    }
                }
            }
        }
        //            m.put(field, result.getDocument().get(field));
        //            m.put(field.replace("_ha", "_hi"), result.getDocument().getBinaryValue(field));
        list.add(m);
    }
    rsp.add("docs", list);
    // rsp.add("Test-name", "Test-val");
}

From source file:cc.recommenders.utils.parser.MavenVersionParser.java

@Override
public Version parse(final String version) throws IllegalArgumentException {
    int major = 0;
    int minor = 0;
    int micro = 0;
    String qualifier = "";
    try {/*from  w w w.  j a  v a  2 s  .c  om*/
        final StringTokenizer tokenizer = new StringTokenizer(version, ".-", true);
        major = parseInt(tokenizer);
        if (tokenizer.hasMoreTokens()) {
            consumeDelimiter(tokenizer, ".");
            minor = parseInt(tokenizer);
            if (tokenizer.hasMoreTokens()) {
                consumeDelimiter(tokenizer, ".");
                micro = parseInt(tokenizer);
                if (tokenizer.hasMoreTokens()) {
                    consumeDelimiter(tokenizer, "-", ".");
                    qualifier = parseString(tokenizer);
                    // everything that follows after the third separator is treated as being part of the qualifier
                    while (tokenizer.hasMoreElements()) {
                        qualifier += tokenizer.nextToken();
                    }
                }
            }
        }
    } catch (final NoSuchElementException e) {
        Throws.throwIllegalArgumentException("couldn't convert string into version: '%s'", version);
    }
    return Version.create(major, minor, micro, qualifier);
}

From source file:ch.epfl.lsir.xin.algorithm.core.BiasedMF.java

@Override
public void readModel(String file) {
    // TODO Auto-generated method stub

    try {/*from  ww w.j av  a  2s  .  co  m*/
        //read user bias
        BufferedReader ubReader = new BufferedReader(new FileReader(file + "_userBias"));
        String line = null;
        int index1 = 0;
        while ((line = ubReader.readLine()) != null) {
            this.userBias[index1++] = Double.parseDouble(line);
        }
        ubReader.close();

        //read item bias
        BufferedReader ibReader = new BufferedReader(new FileReader(file + "_itemBias"));
        int index2 = 0;
        while ((line = ibReader.readLine()) != null) {
            this.itemBias[index2++] = Double.parseDouble(line);
        }
        ibReader.close();

        //read user factors
        BufferedReader uReader = new BufferedReader(new FileReader(file + "_userFactors"));
        int index3 = 0;
        while ((line = uReader.readLine()) != null) {
            StringTokenizer tokens = new StringTokenizer(line);
            int index = 0;
            while (tokens.hasMoreElements()) {
                this.userMatrix.set(index3, index++, Double.parseDouble(tokens.nextToken()));
            }
            index3++;
        }
        uReader.close();

        //read item factors
        BufferedReader iReader = new BufferedReader(new FileReader(file + "_itemFactors"));
        int index4 = 0;
        while ((line = iReader.readLine()) != null) {
            StringTokenizer tokens = new StringTokenizer(line);
            int index = 0;
            while (tokens.hasMoreElements()) {
                this.itemMatrix.set(index4, index++, Double.parseDouble(tokens.nextToken()));
            }
            index4++;
        }
        iReader.close();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:org.apache.hadoop.hbase.client.transactional.TmDDL.java

public void getRow(final long lvTransid, StringBuilder state, ArrayList<String> createList,
        ArrayList<String> dropList, ArrayList<String> truncateList) throws IOException, Exception {
    if (LOG.isTraceEnabled())
        LOG.trace("TmDDL getRow start, TxID: " + lvTransid);
    String recordString = null;//w  ww.  j  a  v  a2s  . com
    StringTokenizer st = null;
    byte[] value = null;
    try {
        Get g = new Get(Bytes.toBytes(lvTransid));
        Result r = table.get(g);

        if (!r.isEmpty()) {
            value = r.getValue(TDDL_FAMILY, TDDL_CREATE);
            if (value != null && value.length > 0) {
                recordString = new String(Bytes.toString(value));
                st = new StringTokenizer(recordString, ",");

                while (st.hasMoreElements()) {
                    createList.add(st.nextToken());
                }
            }

            value = r.getValue(TDDL_FAMILY, TDDL_DROP);
            if (value != null && value.length > 0) {
                recordString = new String(Bytes.toString(value));
                st = new StringTokenizer(recordString, ",");
                while (st.hasMoreElements()) {
                    dropList.add(st.nextToken());
                }
            }

            value = r.getValue(TDDL_FAMILY, TDDL_TRUNCATE);
            if (value != null && value.length > 0) {
                recordString = new String(Bytes.toString(value));
                st = new StringTokenizer(recordString, ",");
                while (st.hasMoreElements()) {
                    truncateList.add(st.nextToken());
                }
            }

            value = r.getValue(TDDL_FAMILY, TDDL_STATE);
            if (value != null && value.length > 0) {
                state.append(Bytes.toString(value));
            }
        }

    } catch (Exception e) {
        LOG.error("TmDDL getRow Exception, TxId: " + lvTransid + "Exception:" + e);
        throw e;
    }
}

From source file:com.tek42.perforce.parse.AbstractPerforceTemplate.java

/**
* Parses lines of formatted text for a list of values. Tokenizes each line into columns and adds the column
* specified by index to the list.// w ww  . j av a  2  s  .c  o  m
* 
* @param response   The response from perforce to parse
* @param index      The column index to add to the list
* @return   A List of strings parsed from the response
*/
protected List<String> parseList(StringBuilder response, int index) {
    StringTokenizer lines = new StringTokenizer(response.toString(), "\n\r");
    List<String> list = new ArrayList<String>(100);
    while (lines.hasMoreElements()) {
        StringTokenizer columns = new StringTokenizer(lines.nextToken());
        for (int column = 0; column < index; column++) {
            columns.nextToken();
        }
        list.add(columns.nextToken());
    }
    return list;

}

From source file:org.hfoss.posit.android.sync.Communicator.java

/**
 * The data has the form: ["1","2", ...] or '[]'
 * @param data/*from   w  w w. ja v  a 2s . co m*/
 * the list of image ids
 * @return the last image id in the list or null
 */
private static String parseImageIds(String data) {
    Log.i(TAG, "imageIdData = " + data + " " + data.length());
    if (data.equals("[]")) {
        return null;
    }
    data = data.trim();
    data = data.substring(1, data.length() - 1); //removes brackets
    StringTokenizer st = new StringTokenizer(data, ","); //in the form "123"
    String imgId = null; //only care about one image for this version of posit
    while (st.hasMoreElements()) {
        imgId = (String) st.nextElement();
        Log.i(TAG, "Is this with quotes: " + imgId);
        imgId = imgId.substring(1, imgId.indexOf('"', 1)); // removes quotes. find the second quote in the string
        Log.i(TAG, "Is this without quotes: " + imgId);
    }
    Log.i(TAG, "Planning to fetch imageId " + imgId + " for a find");
    return imgId;
}

From source file:org.kchine.rpf.PoolUtils.java

public static Vector<String> tokenize(String command, String sep) {
    Vector<String> result = new Vector<String>();
    StringTokenizer st = new StringTokenizer(command, sep);
    while (st.hasMoreElements())
        result.add(st.nextToken());/*from   w  w w .  j  av a2  s  .  c om*/
    return result;
}