List of usage examples for java.lang Character isDigit
public static boolean isDigit(int codePoint)
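This is the code-point overload; a char overload also exists. A minimal, self-contained sketch (class name is illustrative) showing that it recognizes any Unicode decimal digit, including supplementary-plane digits that a single char cannot represent:

public class IsDigitExamples {
    public static void main(String[] args) {
        System.out.println(Character.isDigit('7'));     // true
        System.out.println(Character.isDigit('A'));     // false
        System.out.println(Character.isDigit(0x0661));  // true: U+0661 ARABIC-INDIC DIGIT ONE
        System.out.println(Character.isDigit(0x1D7D8)); // true: U+1D7D8 MATHEMATICAL DOUBLE-STRUCK DIGIT ZERO
    }
}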
From source file:com.ery.ertc.estorm.util.ToolUtil.java
public static Double toDouble(String val) {
    Double l = 0.0;
    boolean dig = false;
    long len = 10;
    long digs = 0;
    for (char c : val.toCharArray()) {
        if (Character.isDigit(c)) {
            if (dig) {
                digs = digs * 10 + c - 48;
                len *= 10;
            } else {
                l = l * 10 + c - 48;
            }
        } else if (c == '.') {
            dig = true;
        } else {
            break;
        }
    }
    return l + (double) digs * 10 / len;
}
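A hypothetical caller, assuming the ToolUtil class above is on the classpath. Parsing stops at the first character that is neither a digit nor '.'; note also that the c - 48 arithmetic assumes ASCII digits, even though Character.isDigit would accept other Unicode digits.

import com.ery.ertc.estorm.util.ToolUtil;

public class ToDoubleDemo {
    public static void main(String[] args) {
        System.out.println(ToolUtil.toDouble("12.5km")); // 12.5 -- stops at 'k'
        System.out.println(ToolUtil.toDouble("42"));     // 42.0
        System.out.println(ToolUtil.toDouble("abc"));    // 0.0 -- no leading digits
    }
}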
From source file:net.argilo.busfollower.ocdata.OCTranspoDataFetcher.java
private void validateRouteNumber(String routeNumber) {
    if (routeNumber.length() < 1 || routeNumber.length() > 3) {
        throw new IllegalArgumentException(context.getString(R.string.invalid_route_number));
    }
    for (int i = 0; i < routeNumber.length(); i++) {
        if (!Character.isDigit(routeNumber.charAt(i))) {
            throw new IllegalArgumentException(context.getString(R.string.invalid_route_number));
        }
    }
}
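The length check runs first, so empty or over-long input is rejected before the per-character digit check; a null argument, however, throws a NullPointerException from routeNumber.length() rather than the localized IllegalArgumentException.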
From source file:com.ddubyat.develop.jhawtcode.web.InternalResourceController.java
private static boolean isNumeric(String str) {
    for (char c : str.toCharArray()) {
        if (!Character.isDigit(c)) {
            return false;
        }
    }
    return true;
}
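Two edge cases worth noting: an empty string returns true because the loop body never executes, and strings such as "-1" or "1.5" return false because '-' and '.' are not digits. Callers that need to reject empty input should add an explicit length check.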
From source file:com.fujitsu.dc.core.odata.DcExpressionParser.java
/**
 * Tokenizer. A variant of the OData4j tokenizer, extended so that words may also start with '_'.
 * @param value the expression to tokenize
 * @return the list of tokens
 */
public static List<Token> tokenize(String value) {
    List<Token> rt = new ArrayList<Token>();
    int current = 0;
    int end = 0;
    while (true) {
        if (current == value.length()) {
            return rt;
        }
        char c = value.charAt(current);
        if (Character.isWhitespace(c)) {
            end = readWhitespace(value, current);
            rt.add(new Token(TokenType.WHITESPACE, value.substring(current, end)));
            current = end;
        } else if (c == '\'') {
            end = readQuotedString(value, current + 1);
            rt.add(new Token(TokenType.QUOTED_STRING, value.substring(current, end)));
            current = end;
        } else if (Character.isLetter(c)) {
            end = readWord(value, current + 1);
            rt.add(new Token(TokenType.WORD, value.substring(current, end)));
            current = end;
        } else if (c == '_') {
            end = readWord(value, current + 1);
            rt.add(new Token(TokenType.WORD, value.substring(current, end)));
            current = end;
        } else if (Character.isDigit(c)) {
            end = readDigits(value, current + 1);
            rt.add(new Token(TokenType.NUMBER, value.substring(current, end)));
            current = end;
        } else if (c == '(') {
            rt.add(new Token(TokenType.OPENPAREN, Character.toString(c)));
            current++;
        } else if (c == ')') {
            rt.add(new Token(TokenType.CLOSEPAREN, Character.toString(c)));
            current++;
        } else if (c == '-') {
            if (Character.isDigit(value.charAt(current + 1))) {
                end = readDigits(value, current + 1);
                rt.add(new Token(TokenType.NUMBER, value.substring(current, end)));
                current = end;
            } else {
                rt.add(new Token(TokenType.SYMBOL, Character.toString(c)));
                current++;
            }
        } else if (",.+=:".indexOf(c) > -1) {
            rt.add(new Token(TokenType.SYMBOL, Character.toString(c)));
            current++;
        } else {
            dumpTokens(rt);
            throw new RuntimeException("Unable to tokenize: " + value + " current: " + current
                    + " rem: " + value.substring(current));
        }
    }
}
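Here Character.isDigit drives the NUMBER tokens, and a '-' is treated as a numeric sign only when the next character is a digit. Assuming the readWord/readDigits helpers behave as their names suggest, tokenizing "age gt -5" yields WORD("age"), WHITESPACE, WORD("gt"), WHITESPACE, NUMBER("-5"). Note that a '-' as the very last character would throw StringIndexOutOfBoundsException, because the look-ahead value.charAt(current + 1) is unguarded.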
From source file:net.metanotion.json.StreamingParser.java
private String lexExp(final Reader in) throws IOException {
    in.mark(MAX_BUFFER);
    int c = in.read();
    if (Character.toLowerCase(c) == 'e') {
        c = in.read();
        if (c == '+') {
            return "e+" + lexDigits(in);
        } else if (c == '-') {
            return "e-" + lexDigits(in);
        } else if (Character.isDigit(c)) {
            return (new String(Character.toChars(c))) + lexDigits(in);
        } else if (c == -1) {
            throw new ParserException("Unexpected end of stream");
        } else {
            throw new ParserException(
                    "Expected exponent, instead found: '" + (new String(Character.toChars(c))) + QUOTE);
        }
    } else {
        in.reset();
        return "";
    }
}
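The mark/reset pair requires a Reader that supports marking (for example a BufferedReader or StringReader). When the next character is not 'e' or 'E', the reader is rewound and an empty string is returned, so the caller can treat the exponent part of a JSON number as optional. Character.isDigit is called here with the raw int returned by Reader.read(), which is exactly the code-point overload shown at the top of this page.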
From source file:org.alfresco.textgen.TextGenerator.java
private String getSingleWord(ArrayList<String> choice, Random random) {
    String candidate;
    NEXT: for (int i = 0; i < 100000; i++) {
        candidate = choice.get(random.nextInt(choice.size()));
        for (int j = 0; j < candidate.length(); j++) {
            int cp = candidate.codePointAt(j);
            if (!Character.isAlphabetic(cp) && !Character.isDigit(cp)) {
                continue NEXT;
            }
        }
        return candidate;
    }
    return "";
}
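Because this uses codePointAt together with the int overloads of isAlphabetic and isDigit, words in any script made of letters and decimal digits are accepted. One subtlety: the inner loop advances one char at a time rather than by Character.charCount(cp), so a word containing a supplementary-plane character is still rejected when the loop lands on its low surrogate, which is neither alphabetic nor a digit.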
From source file:de.tudarmstadt.ukp.clarin.webanno.tsv.WebannoCustomTsvReader.java
/**
 * Iterate through lines and create span annotations accordingly. For multiple span annotation,
 * based on the position of the annotation in the line, update only the end position of the
 * annotation.
 */
private void setAnnotations(JCas aJcas, InputStream aIs, String aEncoding, StringBuilder text)
        throws IOException {
    // getting header information
    LineIterator lineIterator = IOUtils.lineIterator(aIs, aEncoding);
    int columns = 1; // token number + token columns (minimum required)
    int tokenStart = 0, sentenceStart = 0;
    Map<Type, Set<Feature>> spanLayers = new LinkedHashMap<Type, Set<Feature>>();
    Map<Type, Type> relationayers = new LinkedHashMap<Type, Type>();
    // an annotation for every feature in a layer
    Map<Type, Map<Integer, AnnotationFS>> annotations = new LinkedHashMap<Type, Map<Integer, AnnotationFS>>();
    // store if this is a Begin/Intermediate/End of an annotation
    Map<Type, Map<Integer, String>> beginEndAnno = new LinkedHashMap<Type, Map<Integer, String>>();
    // Store annotations of tokens so that they can be used later for relation annotations
    Map<Type, Map<String, List<AnnotationFS>>> tokenAnnotations = new LinkedHashMap<Type, Map<String, List<AnnotationFS>>>();
    // store target token ids used for a relation
    Map<Type, Map<String, List<String>>> relationTargets = new LinkedHashMap<Type, Map<String, List<String>>>();
    // store tokens indexed by the concatenation of their begin-end offsets so that lemma and POS
    // annotations can be attached later, if they exist
    indexedTokens = new HashMap<String, Token>();
    while (lineIterator.hasNext()) {
        String line = lineIterator.next().trim();
        if (line.trim().equals("") && sentenceStart == tokenStart) {
            continue;
        }
        if (line.trim().equals("")) {
            text.replace(tokenStart - 1, tokenStart, "");
            tokenStart = tokenStart - 1;
            Sentence sentence = new Sentence(aJcas, sentenceStart, tokenStart);
            sentence.addToIndexes();
            tokenStart++;
            sentenceStart = tokenStart;
            text.append("\n");
            continue;
        }
        // sentence
        if (line.startsWith("#text=")) {
            continue;
        }
        if (line.startsWith("#id=")) {
            continue; // it is a comment line
        }
        if (line.startsWith("#")) {
            columns = getLayerAndFeature(aJcas, columns, spanLayers, relationayers, line);
            continue;
        }
        // Sometimes the sentence in #text= contains a new line, which breaks this reader,
        // so skip such lines
        if (!Character.isDigit(line.split(" ")[0].charAt(0))) {
            continue;
        }
        // If we are still unlucky, the line starts with a number from the sentence but not
        // a token number; check that it is in the NUM-NUM format
        if (!Character.isDigit(line.split("-")[1].charAt(0))) {
            continue;
        }
        int count = StringUtils.countMatches(line, "\t");
        if (columns != count) {
            throw new IOException(fileName + " This is not a valid TSV File.\n check this line: " + line);
        }
        // adding tokens and sentence
        StringTokenizer lineTk = new StringTokenizer(line, "\t");
        String tokenNumberColumn = lineTk.nextToken();
        String tokenColumn = lineTk.nextToken();
        Token token = new Token(aJcas, tokenStart, tokenStart + tokenColumn.length());
        token.addToIndexes();
        Type posType = JCasUtil.getType(aJcas, POS.class);
        Type lemmaType = JCasUtil.getType(aJcas, Lemma.class);
        if (spanLayers.containsKey(posType) || spanLayers.containsKey(lemmaType)) {
            indexedTokens.put(tokenStart + "-" + tokenStart + tokenColumn.length(), token);
        }
        // adding the annotations
        createSpanAnnotation(aJcas, tokenStart, spanLayers, relationayers, annotations, beginEndAnno,
                tokenAnnotations, relationTargets, lineTk, tokenColumn, tokenNumberColumn);
        tokenStart = tokenStart + tokenColumn.length() + 1;
        text.append(tokenColumn + " ");
    }
    if (tokenStart > sentenceStart) {
        Sentence sentence = new Sentence(aJcas, sentenceStart, tokenStart);
        sentence.addToIndexes();
        text.append("\n");
    }
    createRelationLayer(aJcas, relationayers, tokenAnnotations, relationTargets);
}
From source file:de.tudarmstadt.ukp.clarin.webanno.tsv.WebannoTsv2Reader.java
/**
 * Iterate through lines and create span annotations accordingly. For multiple span annotation,
 * based on the position of the annotation in the line, update only the end position of the
 * annotation.
 */
private void setAnnotations(JCas aJcas, InputStream aIs, String aEncoding, StringBuilder text)
        throws IOException {
    // getting header information
    LineIterator lineIterator = IOUtils.lineIterator(aIs, aEncoding);
    int columns = 1; // token number + token columns (minimum required)
    int tokenStart = 0, sentenceStart = 0;
    Map<Type, Set<Feature>> spanLayers = new LinkedHashMap<Type, Set<Feature>>();
    Map<Type, Type> relationayers = new LinkedHashMap<Type, Type>();
    // an annotation for every feature in a layer
    Map<Type, Map<Integer, AnnotationFS>> annotations = new LinkedHashMap<Type, Map<Integer, AnnotationFS>>();
    // store if this is a Begin/Intermediate/End of an annotation
    Map<Type, Map<Integer, String>> beginEndAnno = new LinkedHashMap<Type, Map<Integer, String>>();
    // Store annotations of tokens so that they can be used later for relation annotations
    Map<Type, Map<String, List<AnnotationFS>>> tokenAnnotations = new LinkedHashMap<Type, Map<String, List<AnnotationFS>>>();
    // store target token ids used for a relation
    Map<Type, Map<String, List<String>>> relationTargets = new LinkedHashMap<Type, Map<String, List<String>>>();
    // store tokens indexed by the concatenation of their begin-end offsets so that lemma and POS
    // annotations can be attached later, if they exist
    indexedTokens = new HashMap<String, Token>();
    while (lineIterator.hasNext()) {
        String line = lineIterator.next().trim();
        if (line.trim().equals("") && sentenceStart == tokenStart) {
            continue;
        }
        if (line.trim().equals("")) {
            text.replace(tokenStart - 1, tokenStart, "");
            tokenStart = tokenStart - 1;
            Sentence sentence = new Sentence(aJcas, sentenceStart, tokenStart);
            sentence.addToIndexes();
            tokenStart++;
            sentenceStart = tokenStart;
            text.append("\n");
            continue;
        }
        // sentence
        if (line.startsWith("#text=")) {
            continue;
        }
        if (line.startsWith("#id=")) {
            continue; // it is a comment line
        }
        if (line.startsWith("#")) {
            columns = getLayerAndFeature(aJcas, columns, spanLayers, relationayers, line);
            continue;
        }
        // Sometimes the sentence in #text= contains a new line, which breaks this reader,
        // so skip such lines
        if (!Character.isDigit(line.split(" ")[0].charAt(0))) {
            continue;
        }
        // If we are still unlucky, the line starts with a number from the sentence but not
        // a token number; check that it is in the NUM-NUM format
        if (!Character.isDigit(line.split("-")[1].charAt(0))) {
            continue;
        }
        int count = StringUtils.countMatches(line, "\t");
        if (columns != count) {
            throw new IOException(fileName + " This is not a valid TSV File.\n check this line: " + line);
        }
        // adding tokens and sentence
        StringTokenizer lineTk = new StringTokenizer(line, "\t");
        String tokenNumberColumn = lineTk.nextToken();
        String tokenColumn = lineTk.nextToken();
        Token token = new Token(aJcas, tokenStart, tokenStart + tokenColumn.length());
        token.addToIndexes();
        Type posType = JCasUtil.getType(aJcas, POS.class);
        Type lemmaType = JCasUtil.getType(aJcas, Lemma.class);
        if (spanLayers.containsKey(posType) || spanLayers.containsKey(lemmaType)) {
            indexedTokens.put(tokenStart + "-" + tokenStart + tokenColumn.length(), token);
        }
        // adding the annotations
        createSpanAnnotation(aJcas, tokenStart, spanLayers, relationayers, annotations, beginEndAnno,
                tokenAnnotations, relationTargets, lineTk, tokenColumn, tokenNumberColumn);
        tokenStart = tokenStart + tokenColumn.length() + 1;
        text.append(tokenColumn + " ");
    }
    if (tokenStart > sentenceStart) {
        Sentence sentence = new Sentence(aJcas, sentenceStart, tokenStart);
        sentence.addToIndexes();
        text.append("\n");
    }
    createRelationLayer(aJcas, relationayers, tokenAnnotations, relationTargets);
}
From source file:com.careerly.utils.TextUtils.java
/**
 * Checks whether the character is in a-zA-Z0-9.
 *
 * @param ch the character to test
 * @return true if it is
 */
public static boolean isDigitOrEngilishChar(char ch) {
    return Character.isLowerCase(ch) || Character.isUpperCase(ch) || Character.isDigit(ch);
}
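Although the Javadoc describes a-zA-Z0-9, Character.isLowerCase, isUpperCase and isDigit are Unicode-aware, so characters such as 'é' or '٣' also return true. A strictly ASCII check would need explicit range comparisons, e.g. (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || (ch >= '0' && ch <= '9').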
From source file:io.lightlink.excel.SheetTemplateHandler.java
private String shiftR(String value, Integer forceXPos) {
    String x;
    int y;
    if (Character.isDigit(value.charAt(0))) {
        int row = Integer.parseInt(value);
        if (row > rowNumber) {
            rowNumber = row;
        }
        x = "";
    } else {
        x = value.replaceAll("[0-9]*", "");
    }
    if (forceXPos != null) {
        int col = ExcelUtils.toExcelColumnNumber(value.replaceAll("[0-9]*", ""));
        if (col < forceXPos) {
            x = ExcelUtils.toExcelColumnName(forceXPos);
        }
    }
    return x + rowNumber;
}
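Here Character.isDigit on the first character distinguishes a plain row reference such as "12" (digits only, so the column part x stays empty and rowNumber is updated) from a cell reference such as "B12", where replaceAll("[0-9]*", "") strips the digits and keeps the column letters before the row number is re-appended.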