List of usage examples for java.util.StringTokenizer#hasMoreTokens
public boolean hasMoreTokens()
From source file:Main.java
/** * <p>/*from w ww . j av a 2 s .c om*/ * Splits the provided text into a array, based on a given separator. * </p> * <p/> * <p> * The separator is not included in the returned String array. The maximum number of splits to perfom can be * controlled. A <code>null</code> separator will cause parsing to be on whitespace. * </p> * <p/> * <p> * This is useful for quickly splitting a String directly into an array of tokens, instead of an enumeration of * tokens (as <code>StringTokenizer</code> does). * </p> * * @param str The string to parse. * @param separator Characters used as the delimiters. If <code>null</code>, splits on whitespace. * @param max The maximum number of elements to parse. The rest of the string to parse will be contained in the last * array element. A zero or negative value implies no limit. * @return an array of parsed Strings */ public static String[] split(String str, String separator, int max) { StringTokenizer tok = null; if (separator == null) { // Null separator means we're using StringTokenizer's default // delimiter, which comprises all whitespace characters. tok = new StringTokenizer(str); } else { tok = new StringTokenizer(str, separator); } int listSize = tok.countTokens(); if ((max > 0) && (listSize > max)) { listSize = max; } String[] list = new String[listSize]; int i = 0; int lastTokenBegin = 0; int lastTokenEnd = 0; while (tok.hasMoreTokens()) { if ((max > 0) && (i == listSize - 1)) { // In the situation where we hit the max yet have // tokens left over in our input, the last list // element gets all remaining text. String endToken = tok.nextToken(); lastTokenBegin = str.indexOf(endToken, lastTokenEnd); list[i] = str.substring(lastTokenBegin); break; } else { list[i] = tok.nextToken(); lastTokenBegin = str.indexOf(list[i], lastTokenEnd); lastTokenEnd = lastTokenBegin + list[i].length(); } i++; } return list; }
From source file:org.jruby.rack.mock.WebUtils.java
/** * Parse the given string with matrix variables. An example string would look * like this {@code "q1=a;q1=b;q2=a,b,c"}. The resulting map would contain * keys {@code "q1"} and {@code "q2"} with values {@code ["a","b"]} and * {@code ["a","b","c"]} respectively./* w ww. j a va 2s . c o m*/ * * @param matrixVariables the unparsed matrix variables string * @return a map with matrix variable names and values, never {@code null} */ public static MultiValueMap<String, String> parseMatrixVariables(String matrixVariables) { MultiValueMap<String, String> result = new LinkedMultiValueMap<String, String>(); if (!StringUtils.hasText(matrixVariables)) { return result; } StringTokenizer pairs = new StringTokenizer(matrixVariables, ";"); while (pairs.hasMoreTokens()) { String pair = pairs.nextToken(); int index = pair.indexOf('='); if (index != -1) { String name = pair.substring(0, index); String rawValue = pair.substring(index + 1); for (String value : StringUtils.commaDelimitedListToStringArray(rawValue)) { result.add(name, value); } } else { result.add(pair, ""); } } return result; }
From source file:com.medallia.tiny.Strings.java
/** * Splits the given string into a list of tokens separated by sep. * @param trim If true, tokens are trimmed. * @return The list of tokens or an empty list if str is null. *///from w ww . j a va 2 s . c om public static List<String> split(String str, String sep, boolean trim) { List<String> l = Empty.list(); if (str != null) { StringTokenizer st = new StringTokenizer(str, sep); while (st.hasMoreTokens()) l.add(trim ? st.nextToken().trim() : st.nextToken()); } return l; }
From source file:edu.stanford.muse.index.NER.java
/**
 * Loads the bundled gzip'd suppression list ("suppress.locations.txt.gz") and adds each
 * non-comment token (lower-cased) to {@code locationsToSuppress}. Lines starting with '#'
 * are skipped; only the first whitespace-delimited token of each line is used.
 * Failures are logged and swallowed — the suppression set is best-effort.
 */
public static void readLocationNamesToSuppress() {
    String suppress_file = "suppress.locations.txt.gz";
    // try-with-resources guarantees the stream and reader are closed even when a read
    // fails part-way; the original closed the stream only on the success path and
    // leaked it on exception.
    try (InputStream is = new GZIPInputStream(
            NER.class.getClassLoader().getResourceAsStream(suppress_file));
            LineNumberReader lnr = new LineNumberReader(new InputStreamReader(is, "UTF-8"))) {
        String line;
        while ((line = lnr.readLine()) != null) {
            StringTokenizer st = new StringTokenizer(line);
            if (st.hasMoreTokens()) {
                String s = st.nextToken();
                if (!s.startsWith("#"))
                    locationsToSuppress.add(s.toLowerCase());
            }
        }
    } catch (Exception e) {
        // Best-effort: a missing or corrupt resource must not break startup.
        log.warn("Error: unable to read " + suppress_file);
        Util.print_exception(e);
    }
    log.info(locationsToSuppress.size() + " names to suppress as locations");
}
From source file:com.cisco.dvbu.ps.deploytool.dao.wsapi.VCSWSDAOImpl.java
private static boolean resolveExecCommandLineError(String prefix, String error, String vcsIgnoreMessages) { boolean throwOriginalError = true; if (prefix == null) { prefix = ""; } else {// ww w. ja va2 s . co m prefix = prefix + "::"; } StringTokenizer st = new StringTokenizer(vcsIgnoreMessages, ","); while (st.hasMoreTokens()) { String message = st.nextToken().trim(); if (error.toLowerCase().contains(message.toLowerCase())) { throwOriginalError = false; if (logger.isErrorEnabled()) { logger.info( prefix + "Warning::Error message ignored. Error Message matches VCS_IGNORE_MESSAGES=" + message); } } } return throwOriginalError; }
From source file:com.amalto.core.storage.StorageMetadataUtils.java
/**
 * Checks whether <code>value</code> is valid for the type of <code>field</code>.
 *
 * @param value The value to check.
 * @param field The field to receive the value.
 * @return <code>true</code> if correct, <code>false</code> otherwise. Since all fields can receive
 * <code>null</code>, <code>null</code> always returns <code>true</code>.
 */
public static boolean isValueAssignable(final String value, FieldMetadata field) {
    if (value == null) {
        return true; // Every field accepts null.
    }
    try {
        // Resolve the field's concrete super type(s). A visitor is used so that reference,
        // simple and enumeration fields each contribute the appropriate type: references
        // validate against the referenced field's type, the others against their own.
        List<TypeMetadata> fieldType = field.accept(new DefaultMetadataVisitor<List<TypeMetadata>>() {

            List<TypeMetadata> fieldTypes = new LinkedList<TypeMetadata>();

            @Override
            public List<TypeMetadata> visit(ReferenceFieldMetadata referenceField) {
                fieldTypes
                        .add(MetadataUtils.getSuperConcreteType(referenceField.getReferencedField().getType()));
                return fieldTypes;
            }

            @Override
            public List<TypeMetadata> visit(SimpleTypeFieldMetadata simpleField) {
                fieldTypes.add(MetadataUtils.getSuperConcreteType(simpleField.getType()));
                return fieldTypes;
            }

            @Override
            public List<TypeMetadata> visit(EnumerationFieldMetadata enumField) {
                fieldTypes.add(MetadataUtils.getSuperConcreteType(enumField.getType()));
                return fieldTypes;
            }
        });
        // Split the raw value into the individual value(s) to convert. Reference fields may
        // carry a composite "[..][..]" form; all other fields use the value as-is.
        List<String> convertValue = field.accept(new DefaultMetadataVisitor<List<String>>() {

            List<String> values = new LinkedList<String>();

            @Override
            public List<String> visit(ReferenceFieldMetadata referenceField) {
                if (value.startsWith("[")) { //$NON-NLS-1$
                    StringTokenizer tokenizer = new StringTokenizer(value, "["); //$NON-NLS-1$
                    while (tokenizer.hasMoreTokens()) {
                        String nextToken = tokenizer.nextToken();
                        // NOTE(review): with '[' as the only delimiter, each token looks like
                        // "xyz]", so substring(1, length - 1) drops the token's FIRST character
                        // as well as the trailing ']' — confirm this matches the expected
                        // encoding of composite foreign-key values.
                        values.add(nextToken.substring(1, nextToken.length() - 1));
                    }
                } else {
                    values.add(value);
                }
                return values;
            }

            @Override
            public List<String> visit(SimpleTypeFieldMetadata simpleField) {
                values.add(value);
                return values;
            }

            @Override
            public List<String> visit(EnumerationFieldMetadata enumField) {
                values.add(value);
                return values;
            }
        });
        // Assignable iff every extracted value converts to its corresponding target type.
        for (int i = 0; i < fieldType.size(); i++) {
            try {
                convert(convertValue.get(i), fieldType.get(i));
            } catch (Exception e) {
                return false;
            }
        }
        return true;
    } catch (Exception e) {
        // Any failure during type resolution means the value cannot be assigned.
        return false;
    }
}
From source file:com.kamuda.common.exception.ExceptionUtils.java
/**
 * <p>Functionality shared between the <code>getStackFrames(Throwable)</code> methods of this
 * and the {@link org.apache.commons.lang.exception.NestableDelegate} classes.</p>
 *
 * <p>Splits a printed stack trace into one entry per line, using the platform line
 * separator as the delimiter.</p>
 *
 * @param stackTrace the full stack trace text to split
 * @return the individual (non-empty) lines of the trace, in order
 */
static String[] getStackFrames(String stackTrace) {
    String linebreak = LINE_SEPARATOR;
    StringTokenizer frames = new StringTokenizer(stackTrace, linebreak);
    // Generic List<String> replaces the original raw List, removing the unchecked
    // toArray cast.
    List<String> list = new LinkedList<String>();
    while (frames.hasMoreTokens()) {
        list.add(frames.nextToken());
    }
    return list.toArray(new String[list.size()]);
}
From source file:com.sfs.DataFilter.java
/** * Tokenizer to map./*from w ww . j a v a 2 s .com*/ * * @param tokenizer the tokenizer * * @return the tree map< integer, string> */ private static TreeMap<Integer, String> tokenizerToMap(final StringTokenizer tokenizer) { TreeMap<Integer, String> parsedData = new TreeMap<Integer, String>(); int lineCounter = 1; if (tokenizer != null) { while (tokenizer.hasMoreTokens()) { String token = tokenizer.nextToken(); parsedData.put(lineCounter, token.trim()); lineCounter++; } } return parsedData; }
From source file:com.izforge.izpack.util.IoHelper.java
/**
 * Extracts a long value from a string in a special manner. The string will be broken into
 * tokens with a standard StringTokenizer. Around the assumed place (a window of up to six
 * tokens) the tokens are scanned in reverse for a token which represents a long; tokens
 * containing a '%' character are ignored. The first long found is returned.
 *
 * @param in the string which should be parsed
 * @param assumedPlace token number which should contain the value
 * @return the long that was found, or -1 when no inspected token parses as a long
 */
private static long extractLong(String in, int assumedPlace) {
    long ret = -1;
    StringTokenizer st = new StringTokenizer(in);
    int length = st.countTokens();
    int i;
    int currentRange = 0;
    // Window of 3 tokens on either side of the assumed place.
    String[] interestedEntries = new String[3 + 3];
    // Skip ahead so the window starts three tokens before the assumed place.
    // NOTE(review): the skip count mixes total length with assumedPlace — presumably
    // assumedPlace is an offset from the END of the token list; verify against callers.
    for (i = 0; i < length - 3 + assumedPlace; ++i) {
        st.nextToken(); // Forget this entries.
    }
    for (i = 0; i < 3 + 3; ++i) { // Put the interesting Strings into an intermediate array.
        if (st.hasMoreTokens()) {
            interestedEntries[i] = st.nextToken();
            currentRange++;
        }
    }
    // Scan the window backwards; skip tokens containing '%' (e.g. percentages) and
    // stop at the first token that parses as a long.
    for (i = currentRange - 1; i >= 0; --i) {
        if (interestedEntries[i].contains("%")) {
            continue;
        }
        try {
            ret = Long.parseLong(interestedEntries[i]);
        } catch (NumberFormatException nfe) {
            continue;
        }
        break;
    }
    return ret;
}
From source file:eionet.cr.web.action.admin.postHarvest.PostHarvestScriptParser.java
/**
 * Replaces every whitespace-delimited token of <code>str</code> that equals
 * <code>tokenToReplace</code> (ignoring case) with <code>replacement</code>, preserving
 * all original whitespace between tokens.
 *
 * @param str the string to process; returned unchanged when null or blank
 * @param tokenToReplace the token to look for (matched case-insensitively); when null or
 *                       blank, <code>str</code> is returned unchanged
 * @param replacement the text substituted for each matching token
 * @return the resulting string
 */
private static String replaceToken(String str, String tokenToReplace, String replacement) {
    if (str == null || str.trim().length() == 0 || tokenToReplace == null
            || tokenToReplace.trim().length() == 0) {
        return str;
    }
    // returnDelims=true keeps the whitespace runs as tokens so the output preserves
    // the input's exact spacing.
    StringTokenizer st = new StringTokenizer(str, " \t\n\r\f", true);
    StringBuilder buf = new StringBuilder();
    // Single pass: the original buffered all tokens into two lists, one of which
    // (the upper-cased copy) was never read — that dead code is removed.
    while (st.hasMoreTokens()) {
        String token = st.nextToken();
        buf.append(token.equalsIgnoreCase(tokenToReplace) ? replacement : token);
    }
    return buf.toString();
}