Example usage for java.util StringTokenizer hasMoreTokens

List of usage examples for java.util StringTokenizer hasMoreTokens

Introduction

On this page you can find example usage for java.util.StringTokenizer.hasMoreTokens.

Prototype

public boolean hasMoreTokens() 

Source Link

Document

Tests if there are more tokens available from this tokenizer's string.

Usage

From source file:eionet.cr.web.action.admin.harvestscripts.HarvestScriptParser.java

/**
 * Returns the part of the given string that precedes the first occurrence of the given
 * token (matched case-insensitively against whitespace-delimited tokens). When the token
 * is not found, or either argument is null/blank, the input string is returned unchanged.
 *
 * @param str the string to search in
 * @param token the token whose preceding substring is wanted
 * @return the substring before the token, or {@code str} when the token is absent
 */
private static String substringBeforeToken(String str, String token) {

    boolean strBlank = str == null || str.trim().length() == 0;
    boolean tokenBlank = token == null || token.trim().length() == 0;
    if (strBlank || tokenBlank) {
        return str;
    }

    // Tokenize on whitespace, keeping the delimiters so original spacing can be rebuilt.
    ArrayList<String> originalTokens = new ArrayList<String>();
    ArrayList<String> upperCaseTokens = new ArrayList<String>();
    for (StringTokenizer st = new StringTokenizer(str, " \t\n\r\f", true); st.hasMoreTokens();) {
        String tok = st.nextToken();
        originalTokens.add(tok);
        upperCaseTokens.add(tok.toUpperCase());
    }

    // Case-insensitive lookup of the token's position among the split tokens.
    int pos = upperCaseTokens.indexOf(token.toUpperCase());
    return pos < 0 ? str : tokensToString(originalTokens.subList(0, pos));
}

From source file:com.github.dozermapper.core.util.ReflectionUtils.java

/**
 * Resolves a deep (nested) field mapping expression into the chain of property
 * descriptors it traverses, e.g. {@code "a.b[0].c"} starting from {@code parentClass}.
 *
 * @param parentClass the class on which the deep field expression starts
 * @param field the delimiter-separated deep field expression; segments may carry a
 *        collection index such as {@code "items[2]"}
 * @param deepIndexHintContainer optional hints used to resolve element types of
 *        collections encountered along the path when generics are unavailable
 * @return one DeepHierarchyElement per path segment, in traversal order
 */
public static DeepHierarchyElement[] getDeepFieldHierarchy(Class<?> parentClass, String field,
        HintContainer deepIndexHintContainer) {
    if (!MappingUtils.isDeepMapping(field)) {
        MappingUtils.throwMappingException("Field does not contain deep field delimitor");
    }

    StringTokenizer toks = new StringTokenizer(field, DozerConstants.DEEP_FIELD_DELIMITER);
    Class<?> latestClass = parentClass;
    DeepHierarchyElement[] hierarchy = new DeepHierarchyElement[toks.countTokens()];
    int index = 0;
    int hintIndex = 0;
    while (toks.hasMoreTokens()) {
        String aFieldName = toks.nextToken();
        String theFieldName = aFieldName;
        int collectionIndex = -1;

        // A segment like "items[2]" is split into the property name and its index.
        if (aFieldName.contains("[")) {
            theFieldName = aFieldName.substring(0, aFieldName.indexOf("["));
            collectionIndex = Integer
                    .parseInt(aFieldName.substring(aFieldName.indexOf("[") + 1, aFieldName.indexOf("]")));
        }

        PropertyDescriptor propDescriptor = findPropertyDescriptor(latestClass, theFieldName,
                deepIndexHintContainer);
        DeepHierarchyElement r = new DeepHierarchyElement(propDescriptor, collectionIndex);

        // NOTE(review): the code relies on throwMappingException actually throwing;
        // otherwise propDescriptor.getPropertyType() below would NPE — confirm that contract.
        if (propDescriptor == null) {
            MappingUtils
                    .throwMappingException("Exception occurred determining deep field hierarchy for Class --> "
                            + parentClass.getName() + ", Field --> " + field
                            + ".  Unable to determine property descriptor for Class --> "
                            + latestClass.getName() + ", Field Name: " + aFieldName);
        }

        latestClass = propDescriptor.getPropertyType();
        // Only intermediate segments need their element type resolved for the next hop;
        // the last segment's type is irrelevant here.
        if (toks.hasMoreTokens()) {
            if (latestClass.isArray()) {
                latestClass = latestClass.getComponentType();
            } else if (Collection.class.isAssignableFrom(latestClass)) {
                // NOTE(review): parentClass is already a Class, so parentClass.getClass()
                // evaluates to java.lang.Class — verify determineGenericsType really wants
                // that and not parentClass itself.
                Class<?> genericType = determineGenericsType(parentClass.getClass(), propDescriptor);

                if (genericType == null && deepIndexHintContainer == null) {
                    MappingUtils.throwMappingException(
                            "Hint(s) or Generics not specified.  Hint(s) or Generics must be specified for deep mapping with indexed field(s). "
                                    + "Exception occurred determining deep field hierarchy for Class --> "
                                    + parentClass.getName() + ", Field --> " + field
                                    + ".  Unable to determine property descriptor for Class --> "
                                    + latestClass.getName() + ", Field Name: " + aFieldName);
                }

                // Prefer the generic element type; otherwise consume the next configured hint.
                if (genericType != null) {
                    latestClass = genericType;
                } else {
                    latestClass = deepIndexHintContainer.getHint(hintIndex);
                    hintIndex += 1;
                }
            }
        }
        hierarchy[index++] = r;
    }

    return hierarchy;
}

From source file:org.fao.fenix.wds.core.utils.Wrapper.java

/**
 * Parses delimiter-separated text into a table: a list of rows, each row a list of
 * cell values. Note that StringTokenizer never yields empty tokens, so empty cells
 * and empty rows are silently dropped.
 *
 * @param csv the delimited text to parse
 * @param cellDelimiter delimiter character(s) separating cells within a row
 * @param rowDelimiter delimiter character(s) separating rows
 * @return the parsed table
 * @throws WDSException if parsing fails for any reason
 */
public static List<List<String>> unwrapFromCSV(String csv, String cellDelimiter, String rowDelimiter)
        throws WDSException {
    try {
        final List<List<String>> result = new ArrayList<List<String>>();
        for (StringTokenizer rowTok = new StringTokenizer(csv, rowDelimiter); rowTok.hasMoreTokens();) {
            final String rowText = rowTok.nextToken();
            final List<String> cellValues = new ArrayList<String>();
            for (StringTokenizer cellTok = new StringTokenizer(rowText, cellDelimiter); cellTok.hasMoreTokens();) {
                cellValues.add(cellTok.nextToken());
            }
            result.add(cellValues);
        }
        return result;
    } catch (Exception e) {
        throw new WDSException(e.getMessage());
    }
}

From source file:graphene.util.StringUtils.java

/**
 * Tokenize the given String into a String array via a StringTokenizer.
 * <p>/*from  w w  w  .  j a v  a 2  s .c o  m*/
 * The given delimiters string is supposed to consist of any number of
 * delimiter characters. Each of those characters can be used to separate
 * tokens. A delimiter is always a single character; for multi-character
 * delimiters, consider using <code>delimitedListToStringArray</code>
 * 
 * <p>
 * Copied from the Spring Framework while retaining all license, copyright
 * and author information.
 * 
 * @param str
 *            the String to tokenize
 * @param delimiters
 *            the delimiter characters, assembled as String (each of those
 *            characters is individually considered as delimiter)
 * @param trimTokens
 *            trim the tokens via String's <code>trim</code>
 * @param ignoreEmptyTokens
 *            omit empty tokens from the result array (only applies to
 *            tokens that are empty after trimming; StringTokenizer will not
 *            consider subsequent delimiters as token in the first place).
 * @return an array of the tokens (<code>null</code> if the input String was
 *         <code>null</code>)
 * @see java.util.StringTokenizer
 * @see java.lang.String#trim()
 */
public static String[] tokenizeToStringArray(final String str, final String delimiters,
        final boolean trimTokens, final boolean ignoreEmptyTokens) {

    if (str == null) {
        return null;
    }
    final StringTokenizer st = new StringTokenizer(str, delimiters);
    final List<String> tokens = new ArrayList<String>();
    while (st.hasMoreTokens()) {
        String token = st.nextToken();
        if (trimTokens) {
            token = token.trim();
        }
        if (!ignoreEmptyTokens || (token.length() > 0)) {
            tokens.add(token);
        }
    }
    return toStringArray(tokens);
}

From source file:eionet.cr.web.action.admin.harvestscripts.HarvestScriptParser.java

/**
 *
 * @param str/*w w  w .  j a v  a  2  s.c  o  m*/
 * @param token
 * @return
 */
private static String substringAfterToken(String str, String token) {

    if (str == null || str.trim().length() == 0 || token == null || token.trim().length() == 0) {
        return str;
    }

    StringTokenizer st = new StringTokenizer(str, " \t\n\r\f", true);
    ArrayList<String> originalTokens = new ArrayList<String>();
    ArrayList<String> upperCaseTokens = new ArrayList<String>();
    while (st.hasMoreTokens()) {
        String nextToken = st.nextToken();
        originalTokens.add(nextToken);
        upperCaseTokens.add(nextToken.toUpperCase());
    }

    int tokenIndex = upperCaseTokens.indexOf(token.toUpperCase());
    if (tokenIndex >= 0) {
        String afterToken = "";
        int tokensSize = originalTokens.size();
        if (tokenIndex < tokensSize - 1) {
            afterToken = tokensToString(originalTokens.subList(tokenIndex + 1, tokensSize));
        }

        return afterToken;
    } else {
        return str;
    }
}

From source file:com.thoughtworks.cruise.util.command.CommandLine.java

/**
 * Splits a raw command line into individual arguments, honoring single ('') and
 * double ("") quotes: quoted runs are kept together and the quote characters are
 * dropped. Whitespace outside quotes separates arguments.
 *
 * @param toProcess the command line to split; null or empty yields an empty array
 * @return the individual arguments, in order
 * @throws CommandLineException if a quote is left unclosed
 */
public static String[] translateCommandLine(String toProcess) throws CommandLineException {
    if (toProcess == null || toProcess.length() == 0) {
        return new String[0];
    }

    // parse with a simple finite state machine
    final int normal = 0;
    final int inQuote = 1;
    final int inDoubleQuote = 2;
    int state = normal;
    // returnDelims=true so quote and space characters arrive as their own tokens.
    StringTokenizer tok = new StringTokenizer(toProcess, "\"\' ", true);
    // Generic Vector instead of raw type; StringBuilder instead of StringBuffer since
    // the buffer is purely method-local and needs no synchronization.
    Vector<String> v = new Vector<String>();
    StringBuilder current = new StringBuilder();

    while (tok.hasMoreTokens()) {
        String nextTok = tok.nextToken();
        switch (state) {
        case inQuote:
            if ("\'".equals(nextTok)) {
                state = normal;
            } else {
                current.append(nextTok);
            }
            break;
        case inDoubleQuote:
            if ("\"".equals(nextTok)) {
                state = normal;
            } else {
                current.append(nextTok);
            }
            break;
        default:
            if ("\'".equals(nextTok)) {
                state = inQuote;
            } else if ("\"".equals(nextTok)) {
                state = inDoubleQuote;
            } else if (" ".equals(nextTok)) {
                // A space outside quotes terminates the current argument, if any.
                if (current.length() != 0) {
                    v.addElement(current.toString());
                    current.setLength(0);
                }
            } else {
                current.append(nextTok);
            }
            break;
        }
    }

    // Flush the trailing argument.
    if (current.length() != 0) {
        v.addElement(current.toString());
    }

    if (state == inQuote || state == inDoubleQuote) {
        throw new CommandLineException("unbalanced quotes in " + toProcess);
    }

    String[] args = new String[v.size()];
    v.copyInto(args);
    return args;
}

From source file:graphene.util.StringUtils.java

/**
 * Tokenizes the given String into a Collection of tokens using a StringTokenizer.
 *
 * @param str the String to tokenize; may be null
 * @param delimiters the delimiter characters, assembled as String (each character is
 *        individually treated as a delimiter)
 * @param trimTokens whether each token is trimmed via String's <code>trim</code>
 * @param ignoreEmptyTokens whether tokens that are empty after trimming are omitted
 * @return the collected tokens, or <code>null</code> if the input String was null
 */
public static Collection<? extends String> tokenizeToStringCollection(final String str, final String delimiters,
        final boolean trimTokens, final boolean ignoreEmptyTokens) {
    if (str == null) {
        return null;
    }
    final Collection<String> result = new ArrayList<String>();
    for (final StringTokenizer tokenizer = new StringTokenizer(str, delimiters); tokenizer.hasMoreTokens();) {
        final String raw = tokenizer.nextToken();
        final String candidate = trimTokens ? raw.trim() : raw;
        if (!ignoreEmptyTokens || candidate.length() > 0) {
            result.add(candidate);
        }
    }
    return result;
}

From source file:eionet.cr.web.action.admin.harvestscripts.HarvestScriptParser.java

/**
 *
 * @param str/*from  ww w.  j a va  2 s  . c om*/
 * @param tokenToReplace
 * @param replacement
 * @return
 */
private static String replaceToken(String str, String tokenToReplace, String replacement) {

    if (str == null || str.trim().length() == 0 || tokenToReplace == null
            || tokenToReplace.trim().length() == 0) {
        return str;
    }

    StringTokenizer st = new StringTokenizer(str, " \t\n\r\f", true);
    ArrayList<String> originalTokens = new ArrayList<String>();
    ArrayList<String> upperCaseTokens = new ArrayList<String>();
    while (st.hasMoreTokens()) {
        String nextToken = st.nextToken();
        originalTokens.add(nextToken);
        upperCaseTokens.add(nextToken.toUpperCase());
    }

    StringBuilder buf = new StringBuilder();
    for (String token : originalTokens) {

        if (token.equalsIgnoreCase(tokenToReplace)) {
            buf.append(replacement);
        } else if (StringUtils.startsWithIgnoreCase(token, tokenToReplace)) {
            if (!Character.isLetterOrDigit(token.charAt(tokenToReplace.length()))) {
                buf.append(replacement).append(StringUtils.substringAfter(token, tokenToReplace));
            } else {
                buf.append(token);
            }
        } else {
            buf.append(token);
        }
    }
    return buf.toString();
}

From source file:com.alecgorge.minecraft.jsonapi.NanoHTTPD.java

/**
 * Decodes parameters in percent-encoded URI-format ( e.g.
 * "name=Jack%20Daniels&amp;pass=Single%20Malt" ) and adds them to the given Properties.
 * NOTE: multiple identical keys are not supported due to the simplicity of Properties —
 * if you need multiples, replace the Properties with e.g. a Hashtable of Vectors.
 */
public static void decodeParms(String parms, Properties p) {
    if (parms == null)
        return;

    // Each "key=value" pair is separated by '&'; pairs without '=' are ignored.
    StringTokenizer pairs = new StringTokenizer(parms, "&");
    while (pairs.hasMoreTokens()) {
        String pair = pairs.nextToken();
        int eq = pair.indexOf('=');
        if (eq >= 0) {
            String key = decodePercent(pair.substring(0, eq)).trim();
            String value = decodePercent(pair.substring(eq + 1));
            p.put(key, value);
        }
    }
}

From source file:com.jaspersoft.jasperserver.api.engine.common.service.impl.ActionModel.java

/**
 * Converts a "@@"-delimited string into a JSON-style array literal, e.g. {@code a@@b}
 * becomes {@code ["a","b"]}. Each element is passed through swapOutSpecialStrings
 * before being quoted.
 *
 * @param delimitedString the "@@"-delimited input
 * @return a StringBuffer holding the bracketed, comma-separated result
 */
private static StringBuffer formatDelimitedStringAsArrayString(String delimitedString) {
    StringBuffer out = new StringBuffer("[");
    StringTokenizer parts = new StringTokenizer(delimitedString, "@@");
    while (parts.hasMoreTokens()) {
        out.append("\"").append(swapOutSpecialStrings(parts.nextToken())).append("\"");
        // Comma between elements, but no trailing comma after the last one.
        if (parts.hasMoreTokens()) {
            out.append(",");
        }
    }
    return out.append("]");
}