List of usage examples for `java.util.StringTokenizer#hasMoreTokens()`.
Method signature: `public boolean hasMoreTokens()`
From source file:ch.entwine.weblounge.common.impl.util.config.ConfigurationUtils.java
/**
 * Splits a compound option value into its individual values. The values may
 * be separated by commas, semicolons or spaces, in any mix.
 *
 * @param optionValue
 *          the raw option value, may be <code>null</code>
 * @return the individual values; never <code>null</code> (an empty array is
 *         returned for <code>null</code> input)
 */
public static String[] getMultiOptionValues(String optionValue) {
    if (optionValue == null) {
        return new String[] {};
    }
    List<String> collected = new ArrayList<String>();
    for (StringTokenizer tokens = new StringTokenizer(optionValue, " ,;"); tokens.hasMoreTokens();) {
        collected.add(tokens.nextToken());
    }
    return collected.toArray(new String[collected.size()]);
}
From source file:com.seer.datacruncher.utils.generic.CommonUtils.java
/**
 * Breaks an XPath expression into its individual path steps.
 *
 * @param arg the XPath string, e.g. {@code "/a/b/c"}
 * @return the path steps in document order (empty steps between consecutive
 *         slashes are skipped by the tokenizer)
 */
private static List<String> breakXPathInList(String arg) {
    List<String> steps = new ArrayList<String>();
    for (StringTokenizer tokenizer = new StringTokenizer(arg, "/"); tokenizer.hasMoreTokens();) {
        steps.add(tokenizer.nextToken());
    }
    return steps;
}
From source file:org.ardverk.daap.DaapUtil.java
/** * Splits a meta String ("foo,bar,alice,bob") and stores the data in an * ArrayList/*from ww w . j a v a 2s . c o m*/ * * @param meta * a meta String * @return the splitten meta String as ArrayList */ public static final List<String> parseMeta(String meta) { StringTokenizer tok = new StringTokenizer(meta, ","); List<String> list = new ArrayList<String>(tok.countTokens()); boolean flag = false; while (tok.hasMoreTokens()) { String token = tok.nextToken(); // Must be te fist! See DAAP documentation // for more info! if (!flag && token.equals("dmap.itemkind")) { list.add(0, token); flag = true; } else { list.add(token); } } return list; }
From source file:com.sfs.DataFilter.java
/** * Parses the text data./*from www. ja v a 2 s . c o m*/ * * @param text the text * * @return the tree map< integer, tree map< integer, string>> */ public static TreeMap<Integer, TreeMap<Integer, String>> parseTextData(final String text) { TreeMap<Integer, TreeMap<Integer, String>> parsedData = new TreeMap<Integer, TreeMap<Integer, String>>(); // This counter holds the maximum number of columns provided int maxNumberOfTokens = 0; if (text != null) { StringTokenizer tokenizer = new StringTokenizer(text, "\n"); int lineCounter = 1; while (tokenizer.hasMoreTokens()) { String line = tokenizer.nextToken(); TreeMap<Integer, String> parsedLine = new TreeMap<Integer, String>(); final StringTokenizer tabTokenizer = new StringTokenizer(line, "\t"); if (tabTokenizer.countTokens() > 1) { parsedLine = tokenizerToMap(tabTokenizer); } else { final StringTokenizer commaTokenizer = new StringTokenizer(line, ","); parsedLine = tokenizerToMap(commaTokenizer); } if (parsedLine.size() > maxNumberOfTokens) { maxNumberOfTokens = parsedLine.size(); } parsedData.put(lineCounter, parsedLine); lineCounter++; } } // Now cycle through all the parsed data // Ensure that each row has the same (max) number of tokens for (int rowIndex : parsedData.keySet()) { TreeMap<Integer, String> parsedLine = parsedData.get(rowIndex); // This map holds the final values TreeMap<Integer, String> columnTokens = new TreeMap<Integer, String>(); for (int i = 0; i < maxNumberOfTokens; i++) { int columnIndex = i + 1; if (parsedLine.containsKey(columnIndex)) { String value = parsedLine.get(columnIndex); columnTokens.put(columnIndex, value); } else { columnTokens.put(columnIndex, ""); } } parsedData.put(rowIndex, columnTokens); } return parsedData; }
From source file:keel.Algorithms.Neural_Networks.IRPropPlus_Clas.KEELIRPropPlusWrapperClas.java
/**
 * Reads the attribute schema from a KEEL dataset file.
 *
 * The file header is scanned for "@inputs" and "@outputs" declarations
 * (comma- and/or whitespace-separated attribute names) until the "@data"
 * marker is reached. Each attribute is then classified in the returned
 * array: 1 = input, 2 = output, -1 = unused. When the header declares no
 * inputs or no outputs, every attribute but the last is treated as an
 * input and the last one becomes the single output.
 *
 * Side effects: the output attribute and a reconstructed KEEL header
 * string are handed to {@code consoleReporter}.
 *
 * @param fileName name of the KEEL dataset file
 * @return one schema byte per attribute (1 = input, 2 = output, -1 = ignored)
 * @throws IOException if the file cannot be read
 * @throws DatasetException if the dataset cannot be opened
 */
private static byte[] readSchema(String fileName) throws IOException, DatasetException {
    KeelDataSet dataset = new KeelDataSet(fileName);
    dataset.open();
    File file = new File(fileName);
    List<String> inputIds = new ArrayList<String>();
    List<String> outputIds = new ArrayList<String>();
    // NOTE(review): the reader is never closed, and the scan below throws a
    // NullPointerException at end-of-file if "@data" is missing -- this
    // assumes well-formed KEEL files; confirm before hardening.
    Reader reader = new BufferedReader(new FileReader(file));
    String line = ((BufferedReader) reader).readLine();
    StringTokenizer elementLine = new StringTokenizer(line);
    String element = elementLine.nextToken();
    // Scan the header until "@data", collecting declared input/output ids.
    while (!element.equalsIgnoreCase("@data")) {
        if (element.equalsIgnoreCase("@inputs")) {
            while (elementLine.hasMoreTokens()) {
                StringTokenizer commaTokenizer = new StringTokenizer(elementLine.nextToken(), ",");
                while (commaTokenizer.hasMoreTokens())
                    inputIds.add(commaTokenizer.nextToken());
            }
        } else if (element.equalsIgnoreCase("@outputs")) {
            while (elementLine.hasMoreTokens()) {
                StringTokenizer commaTokenizer = new StringTokenizer(elementLine.nextToken(), ",");
                while (commaTokenizer.hasMoreTokens())
                    outputIds.add(commaTokenizer.nextToken());
            }
        }
        // Advance to the next non-comment ('%'), non-empty line of the file.
        line = ((BufferedReader) reader).readLine();
        while (line.startsWith("%") || line.equalsIgnoreCase(""))
            line = ((BufferedReader) reader).readLine();
        elementLine = new StringTokenizer(line);
        element = elementLine.nextToken();
    }
    IMetadata metadata = dataset.getMetadata();
    byte[] schema = new byte[metadata.numberOfAttributes()];
    if (inputIds.isEmpty() || outputIds.isEmpty()) {
        // No usable declarations: all attributes are inputs except the
        // last, which becomes the single output.
        for (int i = 0; i < schema.length; i++) {
            if (i != (schema.length - 1))
                schema[i] = 1;
            else {
                IAttribute outputAttribute = metadata.getAttribute(i);
                schema[i] = 2;
                consoleReporter.setOutputAttribute(outputAttribute);
            }
        }
    } else {
        // Classify each attribute against the declared id lists; names in
        // neither list are marked -1 (ignored).
        for (int i = 0; i < schema.length; i++) {
            if (inputIds.contains(metadata.getAttribute(i).getName()))
                schema[i] = 1;
            else if (outputIds.contains(metadata.getAttribute(i).getName())) {
                IAttribute outputAttribute = metadata.getAttribute(i);
                schema[i] = 2;
                consoleReporter.setOutputAttribute(outputAttribute);
            } else
                schema[i] = -1;
        }
    }
    // Rebuild a KEEL-style header ("@relation", one "@attribute" line per
    // attribute, "@data") and hand it to the console reporter.
    StringBuffer header = new StringBuffer();
    header.append("@relation " + dataset.getName() + "\n");
    for (int i = 0; i < metadata.numberOfAttributes(); i++) {
        IAttribute attribute = metadata.getAttribute(i);
        header.append("@attribute " + attribute.getName() + " ");
        if (attribute.getType() == AttributeType.Categorical) {
            CategoricalAttribute catAtt = (CategoricalAttribute) attribute;
            Interval interval = catAtt.intervalValues();
            header.append("{");
            // NOTE(review): iterating from interval.getLeft() up to
            // interval.size() + 1 looks asymmetric (left bound vs. size) --
            // confirm against the Interval/CategoricalAttribute API before
            // changing anything here.
            for (int j = (int) interval.getLeft(); j <= interval.size() + 1; j++) {
                header.append(catAtt.show(j) + (j != interval.size() + 1 ? "," : "}\n"));
            }
        } else if (attribute.getType() == AttributeType.IntegerNumerical) {
            IntegerNumericalAttribute intAtt = (IntegerNumericalAttribute) attribute;
            header.append("integer[" + (int) intAtt.intervalValues().getLeft() + ","
                    + (int) intAtt.intervalValues().getRight() + "]\n");
        } else if (attribute.getType() == AttributeType.DoubleNumerical) {
            RealNumericalAttribute doubleAtt = (RealNumericalAttribute) attribute;
            header.append("real[" + doubleAtt.intervalValues().getLeft() + ","
                    + doubleAtt.intervalValues().getRight() + "]\n");
        }
    }
    header.append("@data\n");
    consoleReporter.setHeader(header.toString());
    dataset.close();
    return schema;
}
From source file:edu.stanford.muse.index.NER.java
/**
 * Variant of {@code namesFromURL} that accepts several whitespace-separated
 * URLs and concatenates the per-URL results.
 *
 * @param urls whitespace-separated list of URLs
 * @param removeCommonNames whether common names should be filtered out
 * @return the names (with scores) collected from all URLs; a URL that fails
 *         is logged and skipped rather than aborting the whole batch
 */
public static List<Pair<String, Float>> namesFromURLs(String urls, boolean removeCommonNames) {
    List<Pair<String, Float>> collected = new ArrayList<Pair<String, Float>>();
    StringTokenizer tokenizer = new StringTokenizer(urls);
    while (tokenizer.hasMoreTokens()) {
        String url = tokenizer.nextToken();
        try {
            collected.addAll(namesFromURL(url, removeCommonNames));
        } catch (Exception e) {
            // Best effort: report and continue with the remaining URLs.
            Util.print_exception(e);
        }
    }
    return collected;
}
From source file:edu.ku.brc.ui.IconManager.java
/**
 * Loads icon definitions from an XML config file into the IconManager's
 * static registries.
 *
 * Expected document shape (from the code below): a root "/icons" node with
 * optional "type" and "subdir" attributes, containing "/icons/icon"
 * elements that carry "name", "sizes", "file" and optional "alias"
 * attributes. Icons with an alias are registered after the main pass by
 * pointing the alias name at the already-registered entry. The "sizes"
 * attribute may be empty/"all" (register as Std32), "nonstd", or a
 * comma-separated list of integer sizes.
 *
 * Side effects: mutates {@code instance.iconSets},
 * {@code instance.iconListForType}, {@code instance.defaultEntries} and the
 * static {@code subdirPath}; both of the latter are reset at the end.
 * NOTE(review): this relies on static mutable state, so it does not look
 * safe for concurrent callers -- confirm it is only invoked at startup.
 *
 * @param iconFile the XML icon definition file to load
 */
public static void loadIcons(final File iconFile) {
    try {
        Element root = XMLHelper.readFileToDOM4J(iconFile);
        if (root != null) {
            // alias name -> target icon name, resolved after the main pass.
            Hashtable<String, String> aliases = new Hashtable<String, String>();
            Element iconsNode = (Element) root.selectSingleNode("/icons");
            String type = XMLHelper.getAttr(iconsNode, "type", null);
            String subdir = XMLHelper.getAttr(iconsNode, "subdir", null);
            if (StringUtils.isNotEmpty(type)) {
                if (instance.iconSets.get(type) == null) {
                    instance.iconListForType = new Vector<String>();
                    instance.iconSets.put(type, instance.iconListForType);
                } else {
                    log.debug("Type [" + type + "] has already been loaded.");
                }
            }
            // subdirPath is consumed by register() while icons are loaded.
            if (StringUtils.isNotEmpty(subdir)) {
                subdirPath = subdir + "/";
            } else {
                subdirPath = null;
            }
            List<?> boxes = root.selectNodes("/icons/icon");
            for (Iterator<?> iter = boxes.iterator(); iter.hasNext();) {
                org.dom4j.Element iconElement = (org.dom4j.Element) iter.next();
                String name = iconElement.attributeValue("name");
                String sizes = iconElement.attributeValue("sizes");
                String file = iconElement.attributeValue("file");
                String alias = iconElement.attributeValue("alias");
                if (StringUtils.isNotEmpty(alias)) {
                    // Defer alias resolution until targets are registered.
                    aliases.put(name, alias);
                } else if (sizes == null || sizes.length() == 0 || sizes.toLowerCase().equals("all")) {
                    // Register at Std32 only; scaled variants are produced
                    // on demand rather than pre-registered here.
                    register(name, file, IconManager.IconSize.Std32);
                } else if (sizes.toLowerCase().equals("nonstd")) {
                    register(name, file, IconSize.NonStd);
                } else {
                    // Explicit comma-separated list of integer sizes.
                    StringTokenizer st = new StringTokenizer(sizes, ",");
                    while (st.hasMoreTokens()) {
                        String sz = st.nextToken();
                        register(name, file, getSizeFromInt(Integer.parseInt(sz)));
                    }
                }
            }
            // Point each alias at the entry registered under its target
            // name; unresolved targets are silently skipped.
            for (String name : aliases.keySet()) {
                IconEntry entry = instance.defaultEntries.get(aliases.get(name));
                if (entry != null) {
                    instance.defaultEntries.put(name, entry);
                }
            }
        } else {
            log.debug("Couldn't open icons.xml");
        }
    } catch (Exception ex) {
        edu.ku.brc.af.core.UsageTracker.incrHandledUsageCount();
        edu.ku.brc.exceptions.ExceptionTracker.getInstance().capture(IconManager.class, ex);
        ex.printStackTrace();
        log.error(ex);
    }
    // Reset load-scoped static state regardless of success or failure.
    subdirPath = null;
    instance.iconListForType = null;
}
From source file:com.doculibre.constellio.services.SearchServicesImpl.java
/**
 * Translates the facet selections of a SimpleSearch into Solr filter
 * queries ("fq") on the given query.
 *
 * Processing order, as implemented below:
 * 1) included values of non-cluster facets (query facets emitted as raw
 *    OR-groups, field facets as quoted {@code field:(...)} groups; when
 *    {@code notIncludedOnly} is set the filter is prefixed with the
 *    local-param {@code {!tag=dt}} so it can be excluded during faceting);
 * 2) excluded values of non-cluster facets, emitted as "NOT ..." clauses;
 * 3) the cluster facet, whose values are CONCAT_DELIM-packed document-id
 *    lists matched against the collection's unique-key field;
 * 4) cloud keyword, single-search language, and the always-on
 *    DB_EXCLUDED=false filter.
 *
 * @param simpleSearch          the search carrying the facet selections
 * @param query                 the Solr query to add filter queries to
 * @param withMultiValuedFacets process multi-valued facets
 * @param withSingleValuedFacets process single-valued facets
 * @param notIncludedOnly       tag inclusion filters with {!tag=dt}
 */
private static void addFacetsTo(SimpleSearch simpleSearch, SolrQuery query, boolean withMultiValuedFacets,
        boolean withSingleValuedFacets, boolean notIncludedOnly) {
    List<SearchedFacet> searchedFacets = simpleSearch.getSearchedFacets();
    for (SearchedFacet searchedFacet : searchedFacets) {
        SearchableFacet searchableFacet = searchedFacet.getSearchableFacet();
        // Only process the facet kinds the caller asked for.
        if ((searchableFacet.isMultiValued() && withMultiValuedFacets)
                || (!searchableFacet.isMultiValued() && withSingleValuedFacets)) {
            if (!searchableFacet.isCluster()) {
                if (searchableFacet.isQuery()) {
                    if (!searchedFacet.getIncludedValues().isEmpty()) {
                        StringBuffer sb = new StringBuffer("");
                        if (notIncludedOnly) {
                            // Tag so faceting can exclude this filter.
                            sb.append("{!tag=dt}");
                        }
                        // Query facets: values are raw query fragments,
                        // OR-ed together without escaping.
                        sb.append("(");
                        boolean first = true;
                        for (String includedValue : searchedFacet.getIncludedValues()) {
                            if (first) {
                                first = false;
                            } else {
                                sb.append(" OR ");
                            }
                            sb.append(includedValue);
                        }
                        sb.append(")");
                        query.addFilterQuery(sb.toString());
                    }
                } else {
                    String facetName = searchableFacet.getName();
                    if (!searchedFacet.getIncludedValues().isEmpty()) {
                        StringBuffer sb = new StringBuffer();
                        if (notIncludedOnly) {
                            // Tag so faceting can exclude this filter.
                            sb.append("{!tag=dt}");
                        }
                        // Field facets: quoted, corrected values OR-ed
                        // inside a field:(...) group.
                        sb.append(facetName + ":(");
                        boolean first = true;
                        for (String includedValue : searchedFacet.getIncludedValues()) {
                            if (first) {
                                first = false;
                            } else {
                                sb.append(" OR ");
                            }
                            sb.append("\"");
                            sb.append(SimpleSearch.correctFacetValue(includedValue));
                            sb.append("\"");
                        }
                        sb.append(")");
                        query.addFilterQuery(sb.toString());
                    }
                }
            }
        }
    }
    // Excluded values: one filter query per facet, built from NOT clauses.
    for (SearchedFacet searchedFacet : searchedFacets) {
        SearchableFacet searchableFacet = searchedFacet.getSearchableFacet();
        if (!searchableFacet.isCluster() && !searchedFacet.getExcludedValues().isEmpty()) {
            StringBuffer sb = new StringBuffer();
            String facetName = searchableFacet.getName();
            for (String excludedValue : searchedFacet.getExcludedValues()) {
                sb.append("NOT ");
                if (searchableFacet.isQuery()) {
                    sb.append(SimpleSearch.correctFacetValue(excludedValue));
                } else {
                    sb.append(facetName);
                    sb.append(":\"");
                    sb.append(SimpleSearch.correctFacetValue(excludedValue));
                    sb.append("\"");
                }
            }
            String sbToString = sb.toString();
            if (!sbToString.isEmpty()) {
                query.addFilterQuery(sb.toString());
            }
        }
    }
    // Cluster facet: values are packed lists of document ids, matched
    // against the collection's unique-key field.
    SearchedFacet cluster = simpleSearch.getCluster();
    if (cluster != null) {
        RecordCollectionServices collectionServices = ConstellioSpringUtils.getRecordCollectionServices();
        RecordCollection collection = collectionServices.get(simpleSearch.getCollectionName());
        IndexField uniqueKeyIndexField = collection.getUniqueKeyIndexField();
        if (!cluster.getIncludedValues().isEmpty()) {
            StringBuilder sb = new StringBuilder(uniqueKeyIndexField.getName() + ":(");
            for (String includedValue : cluster.getIncludedValues()) {
                // NOTE(review): 'first' is reset for every includedValue, so
                // ids of the second and later values are appended without a
                // separating " OR " -- looks like a bug; confirm whether
                // multiple included cluster values ever occur in practice.
                boolean first = true;
                StringTokenizer st = new StringTokenizer(includedValue, FacetValue.CONCAT_DELIM);
                while (st.hasMoreTokens()) {
                    String docId = st.nextToken();
                    if (first) {
                        first = false;
                    } else {
                        sb.append(" OR ");
                    }
                    sb.append("\"");
                    sb.append(docId);
                    sb.append("\"");
                }
            }
            sb.append(")");
            query.addFilterQuery(sb.toString());
        }
        if (!cluster.getExcludedValues().isEmpty()) {
            StringBuilder sb = new StringBuilder();
            for (String excludedValue : cluster.getExcludedValues()) {
                StringTokenizer st = new StringTokenizer(excludedValue, FacetValue.CONCAT_DELIM);
                while (st.hasMoreTokens()) {
                    String docId = st.nextToken();
                    sb.append("NOT ");
                    sb.append(uniqueKeyIndexField.getName());
                    sb.append(":\"");
                    sb.append(docId);
                    sb.append("\"");
                    if (st.hasMoreTokens()) {
                        sb.append(" ");
                    }
                }
            }
            query.addFilterQuery(sb.toString());
        }
    }
    CloudKeyword cloudKeyword = simpleSearch.getCloudKeyword();
    if (cloudKeyword != null) {
        query.addFilterQuery("keyword:\"" + cloudKeyword.getKeyword() + "\"");
    }
    Locale singleSearchLocale = simpleSearch.getSingleSearchLocale();
    if (singleSearchLocale != null && StringUtils.isNotBlank(singleSearchLocale.getLanguage())) {
        query.addFilterQuery(IndexField.LANGUAGE_FIELD + ":\"" + singleSearchLocale.getLanguage() + "\"");
    }
    // Always hide records flagged as excluded from the database.
    query.addFilterQuery(IndexField.DB_EXCLUDED_FIELD + ":\"false\"");
}
From source file:com.mg.framework.utils.StringUtils.java
/** * Splits a String on a delimiter into a List of Strings. * * @param str the String to split/*from w ww . j a v a 2s .c o m*/ * @param delim the delimiter character(s) to join on (null will split on whitespace) * @return a list of Strings */ public static List<String> split(String str, String delim) { List<String> splitList = null; StringTokenizer st = null; if (str == null) return splitList; if (delim != null) st = new StringTokenizer(str, delim); else st = new StringTokenizer(str); if (st != null && st.hasMoreTokens()) { splitList = new ArrayList<String>(); while (st.hasMoreTokens()) splitList.add(st.nextToken()); } return splitList; }
From source file:gov.nasa.ensemble.common.CommonUtils.java
/**
 * Splits a comma separated String into a list of trimmed tokens.
 *
 * @param string the String to split; must not be <code>null</code>
 * @return the trimmed tokens in order; never <code>null</code> (empty list
 *         when the input contains no tokens)
 */
public static List<String> parseAsList(String string) {
    List<String> tokens = new ArrayList<String>();
    for (StringTokenizer tokenizer = new StringTokenizer(string, ","); tokenizer.hasMoreTokens();) {
        String token = tokenizer.nextToken();
        if (token != null) {
            tokens.add(token.trim());
        }
    }
    return tokens;
}