Usage examples for java.util.regex.Matcher.start()
public int start()
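The examples below all follow the same pattern: call find() and then read start()/end(). As a quick, self-contained sketch (class and input names are mine, not from any of the projects below), start() returns the index of the first character of the previous match and end() the index one past the last, so input.substring(m.start(), m.end()) equals m.group():

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class MatcherStartDemo {
    public static void main(String[] args) {
        String input = "alpha beta gamma";
        Pattern word = Pattern.compile("\\w+");
        Matcher m = word.matcher(input);
        while (m.find()) {
            // start() is inclusive, end() is exclusive, exactly like String.substring()
            System.out.printf("'%s' at [%d, %d)%n", m.group(), m.start(), m.end());
        }
    }
}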
From source file:com.nextep.designer.sqlgen.ui.commands.MarkOccurrencesJob.java
@Override
protected IStatus run(IProgressMonitor monitor) {
    // Removing previous occurrences
    IDocument doc = editor.getDocumentProvider().getDocument(editor.getEditorInput());
    IAnnotationModel annotationModel = editor.getDocumentProvider()
            .getAnnotationModel(editor.getEditorInput());
    // We should always have an annotation model
    if (annotationModel == null) {
        log.warn("Unable to anotate SQL editor, contact neXtep Software if the problem persists.");
        return Status.CANCEL_STATUS;
    }
    // Removing
    synchronized (annotationModel) {
        Iterator<?> it = annotationModel.getAnnotationIterator();
        final Collection<Annotation> toRemove = new ArrayList<Annotation>();
        while (it.hasNext()) {
            Annotation ann = (Annotation) it.next();
            if (ANNOTATION_TYPE_OCCURRENCES.equals(ann.getType())) {
                synchronized (annotationModel) {
                    toRemove.add(ann);
                }
            }
        }
        // Now looking for the text to highlight, regexp
        String source = doc.get();
        if (selection.contains("\n")) {
            return Status.OK_STATUS;
        }
        try {
            final Pattern p = Pattern.compile("(\\W|\\s|^)"
                    + FindReplaceDocumentAdapter.escapeForRegExPattern(selection.toUpperCase())
                    + "(\\W|\\s|$)");
            final Matcher m = p.matcher(source.toUpperCase());
            Map<Annotation, Position> annotationMap = new HashMap<Annotation, Position>();
            while (m.find()) {
                // Since we may have captured enclosing characters, we localize our selection
                // string inside the found pattern to properly highlight it
                String capturedText = m.group();
                int offset = capturedText.indexOf(selection.toUpperCase());
                final Position annPosition = new Position(m.start() + offset, selection.length());
                final Annotation annotation = new Annotation(ANNOTATION_TYPE_OCCURRENCES, false, selection);
                annotationMap.put(annotation, annPosition);
            }
            ((IAnnotationModelExtension) annotationModel).replaceAnnotations(
                    toRemove.toArray(new Annotation[toRemove.size()]), annotationMap);
        } catch (RuntimeException e) {
            // Should be silent on errors as it can be really annoying
            log.debug("Problems while trying to mark occurrences", e);
        }
    }
    return Status.OK_STATUS;
}
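The example above positions each annotation at m.start() plus the offset of the selection inside the matched text, because the pattern also captures an enclosing boundary character. A minimal sketch of just that offset arithmetic, with plain strings instead of the Eclipse annotation classes and Pattern.quote standing in for FindReplaceDocumentAdapter.escapeForRegExPattern (names and input are illustrative):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class OccurrenceOffsets {
    public static void main(String[] args) {
        String source = "select id from orders where id > 10";
        String selection = "id";
        // Require a non-word boundary on each side, as in the example above
        Pattern p = Pattern.compile("(\\W|\\s|^)" + Pattern.quote(selection) + "(\\W|\\s|$)");
        Matcher m = p.matcher(source);
        while (m.find()) {
            // The match may include a leading boundary character, so locate the
            // selection inside the matched text and add that offset to m.start()
            int offset = m.group().indexOf(selection);
            int absoluteStart = m.start() + offset;
            System.out.println("occurrence at offset " + absoluteStart);
        }
    }
}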
From source file:com.joliciel.talismane.filters.RegexMarkerFilter.java
@Override
public Set<TextMarker> apply(String prevText, String text, String nextText) {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Matching " + regex + "");
    }
    String context = prevText + text + nextText;
    int textStartPos = prevText.length();
    int textEndPos = prevText.length() + text.length();
    Matcher matcher = pattern.matcher(context);
    Set<TextMarker> textMarkers = new TreeSet<TextMarker>();
    while (matcher.find()) {
        int matcherStart = 0;
        int matcherEnd = 0;
        if (groupIndex == 0) {
            matcherStart = matcher.start();
            matcherEnd = matcher.end();
        } else {
            matcherStart = matcher.start(groupIndex);
            matcherEnd = matcher.end(groupIndex);
        }
        String matchText = context.substring(matcher.start(), matcher.end());
        if (LOG.isTraceEnabled()) {
            LOG.trace("Next match: " + matchText);
            if (matcher.start() != matcherStart || matcher.end() != matcherEnd) {
                LOG.trace("But matching group: " + context.substring(matcherStart, matcherEnd));
            }
            LOG.trace("matcher.start()=" + matcher.start() + ", matcher.end()=" + matcher.end()
                    + ", matcherStart=" + matcherStart + ", matcherEnd=" + matcherEnd
                    + ", textStartPos=" + textStartPos + ", textEndPos=" + textEndPos);
        }
        if (matcherEnd - matcherStart > blockSize) {
            String errorString = "Match size (" + (matcherEnd - matcherStart) + ") bigger than block size ("
                    + blockSize + "). " + "Increase blockSize or change filter. "
                    + "Maybe you need to change a greedy quantifier (e.g. .*) to a reluctant quantifier (e.g. .*?)? "
                    + "Regex: " + regex + ". Text: " + matchText;
            throw new TalismaneException(errorString);
        }
        if (matcherStart >= textStartPos && matcherStart < textEndPos) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("Start in range: textStartPos " + textStartPos + ">= matcherStart [[" + matcherStart
                        + "]] < textEndPos " + textEndPos);
            }
            for (MarkerFilterType filterType : filterTypes) {
                switch (filterType) {
                case SKIP: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.PUSH_SKIP,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                case SENTENCE_BREAK: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.SENTENCE_BREAK,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                case SPACE: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.SPACE,
                            matcherStart - prevText.length(), this, matchText);
                    textMarker.setInsertionText(" ");
                    textMarkers.add(textMarker);
                    TextMarker textMarker2 = this.getFilterService().getTextMarker(TextMarkerType.PUSH_SKIP,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker2);
                    break;
                }
                case REPLACE: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.INSERT,
                            matcherStart - prevText.length(), this, matchText);
                    String newText = RegexUtils.getReplacement(replacement, context, matcher);
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Setting replacement to: " + newText);
                    }
                    textMarker.setInsertionText(newText);
                    textMarkers.add(textMarker);
                    TextMarker textMarker2 = this.getFilterService().getTextMarker(TextMarkerType.PUSH_SKIP,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker2);
                    break;
                }
                case OUTPUT: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.PUSH_OUTPUT,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    TextMarker textMarker2 = this.getFilterService().getTextMarker(TextMarkerType.PUSH_SKIP,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker2);
                    break;
                }
                case INCLUDE: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.PUSH_INCLUDE,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                case OUTPUT_START: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.START_OUTPUT,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                case STOP: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.STOP,
                            matcherStart - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                }
            }
        }
        if (matcherEnd >= textStartPos && matcherEnd < textEndPos) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("End in range: textStartPos " + textStartPos + ">= matcherEnd [[" + matcherEnd
                        + "]] < textEndPos " + textEndPos);
            }
            for (MarkerFilterType filterType : filterTypes) {
                switch (filterType) {
                case SKIP:
                case SPACE:
                case REPLACE: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.POP_SKIP,
                            matcherEnd - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                case OUTPUT: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.POP_OUTPUT,
                            matcherEnd - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    TextMarker textMarker2 = this.getFilterService().getTextMarker(TextMarkerType.POP_SKIP,
                            matcherEnd - prevText.length(), this, matchText);
                    textMarkers.add(textMarker2);
                    break;
                }
                case INCLUDE: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.POP_INCLUDE,
                            matcherEnd - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                case START: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.START,
                            matcherEnd - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                case OUTPUT_STOP: {
                    TextMarker textMarker = this.getFilterService().getTextMarker(TextMarkerType.STOP_OUTPUT,
                            matcherEnd - prevText.length(), this, matchText);
                    textMarkers.add(textMarker);
                    break;
                }
                }
            }
        }
    } // next match
    if (textMarkers.size() > 0)
        LOG.debug("Added markers: " + textMarkers);
    return textMarkers;
}
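The filter above switches between matcher.start()/end() and matcher.start(groupIndex)/end(groupIndex) depending on whether a capturing group is configured. A short sketch of the difference (pattern and input are made up for illustration):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class GroupStartDemo {
    public static void main(String[] args) {
        String context = "price: 42 EUR";
        // Group 1 covers the digits only; the full match also covers the label
        Pattern p = Pattern.compile("price:\\s*(\\d+)");
        Matcher m = p.matcher(context);
        if (m.find()) {
            System.out.println("full match [" + m.start() + ", " + m.end() + ") = '"
                    + context.substring(m.start(), m.end()) + "'");
            System.out.println("group 1    [" + m.start(1) + ", " + m.end(1) + ") = '"
                    + context.substring(m.start(1), m.end(1)) + "'");
        }
    }
}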
From source file:com.google.acre.script.NHttpAsyncUrlfetch.java
private Scriptable callback_result(long start_time, URL url, HttpResponse res, boolean system,
        boolean log_to_user, String response_encoding) {
    BrowserCompatSpecFactory bcsf = new BrowserCompatSpecFactory();
    CookieSpec cspec = bcsf.newInstance(null);
    String protocol = url.getProtocol();
    boolean issecure = ("https".equals(protocol));
    int port = url.getPort();
    if (port == -1)
        port = 80;
    CookieOrigin origin = new CookieOrigin(url.getHost(), port, url.getPath(), issecure);
    Object body = "";
    int status = res.getStatusLine().getStatusCode();
    Context ctx = Context.getCurrentContext();
    Scriptable out = ctx.newObject(_scope);
    Scriptable headers = ctx.newObject(_scope);
    Scriptable cookies = ctx.newObject(_scope);
    out.put("status", out, status);
    out.put("headers", out, headers);
    out.put("cookies", out, cookies);
    Header content_type_header = null;
    StringBuilder response_header_log = new StringBuilder();
    for (Header h : res.getAllHeaders()) {
        if (h.getName().equalsIgnoreCase("set-cookie")) {
            String set_cookie = h.getValue();
            Matcher m = Pattern.compile("\\s*(([^,]|(,\\s*\\d))+)").matcher(set_cookie);
            while (m.find()) {
                Header ch = new BasicHeader("Set-Cookie", set_cookie.substring(m.start(), m.end()));
                try {
                    List<Cookie> pcookies = cspec.parse(ch, origin);
                    for (Cookie c : pcookies) {
                        cookies.put(c.getName(), cookies, new AcreCookie(c).toJsObject(_scope));
                    }
                } catch (MalformedCookieException e) {
                    throw new RuntimeException(e);
                }
            }
        } else if (h.getName().equalsIgnoreCase("content-type")) {
            content_type_header = h;
        }
        response_header_log.append(h.getName() + ": " + h.getValue() + "\r\n");
        headers.put(h.getName(), headers, h.getValue());
    }
    String charset = null;
    if (content_type_header != null) {
        HeaderElement values[] = content_type_header.getElements();
        if (values.length == 1) {
            NameValuePair param = values[0].getParameterByName("charset");
            if (param != null) {
                charset = param.getValue();
            }
        }
    }
    if (charset == null)
        charset = response_encoding;
    // read body
    HttpEntity ent = res.getEntity();
    try {
        if (ent != null) {
            InputStream res_stream = ent.getContent();
            Header cenc = ent.getContentEncoding();
            if (cenc != null && res_stream != null) {
                HeaderElement[] codecs = cenc.getElements();
                for (HeaderElement codec : codecs) {
                    if (codec.getName().equalsIgnoreCase("gzip")) {
                        res_stream = new GZIPInputStream(res_stream);
                    }
                }
            }
            long first_byte_time = 0;
            long end_time = 0;
            if (content_type_header != null && (content_type_header.getValue().startsWith("image/")
                    || content_type_header.getValue().startsWith("application/octet-stream")
                    || content_type_header.getValue().startsWith("multipart/form-data"))) {
                // HttpClient's InputStream doesn't support mark/reset, so
                // wrap it with one that does.
                BufferedInputStream bufis = new BufferedInputStream(res_stream);
                bufis.mark(2);
                bufis.read();
                first_byte_time = System.currentTimeMillis();
                bufis.reset();
                byte[] data = IOUtils.toByteArray(bufis);
                end_time = System.currentTimeMillis();
                body = new JSBinary();
                ((JSBinary) body).set_data(data);
                try {
                    if (res_stream != null)
                        res_stream.close();
                } catch (IOException e) {
                    // ignore
                }
            } else if (res_stream == null || charset == null) {
                first_byte_time = end_time = System.currentTimeMillis();
                body = "";
            } else {
                StringWriter writer = new StringWriter();
                Reader reader = new InputStreamReader(res_stream, charset);
                int i = reader.read();
                first_byte_time = System.currentTimeMillis();
                writer.write(i);
                IOUtils.copy(reader, writer);
                end_time = System.currentTimeMillis();
                body = writer.toString();
                try {
                    reader.close();
                    writer.close();
                } catch (IOException e) {
                    // ignore
                }
            }
            long reading_time = end_time - first_byte_time;
            long waiting_time = first_byte_time - start_time;
            String httprephdr = response_header_log.toString();
            // XXX need to log start-time of request
            _logger.syslog4j("DEBUG", "urlfetch.response.async", "URL", url.toString(), "Status",
                    Integer.toString(status), "Headers", httprephdr, "Reading time", reading_time,
                    "Waiting time", waiting_time);
            if (system && log_to_user) {
                _response.userlog4j("DEBUG", "urlfetch.response.async", "URL", url.toString(), "Status",
                        Integer.toString(status), "Headers", httprephdr);
            }
            // XXX seems like AcreResponse should be able to use
            // the statistics object to generate x-metaweb-cost
            // given a bit of extra information
            Statistics.instance().collectUrlfetchTime(start_time, first_byte_time, end_time);
            _costCollector.collect((system) ? "asuc" : "auuc").collect((system) ? "asuw" : "auuw",
                    waiting_time);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    out.put("body", out, body);
    return out;
}
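The cookie handling above relies on start()/end() to cut one logical cookie out of a folded Set-Cookie header, using a comma-followed-by-digit exception so the comma inside an expires date does not split the value. A stripped-down sketch of that split, without the HttpClient cookie classes (the header value is invented):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SetCookieSplit {
    public static void main(String[] args) {
        // Two cookies folded into one header; the comma in the expires date must not split
        String setCookie = "a=1; expires=Thu, 01 Jan 2026 00:00:00 GMT, b=2; Path=/";
        Matcher m = Pattern.compile("\\s*(([^,]|(,\\s*\\d))+)").matcher(setCookie);
        while (m.find()) {
            // start()/end() delimit one logical cookie within the folded value
            System.out.println(setCookie.substring(m.start(), m.end()).trim());
        }
    }
}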
From source file:de.csw.linkgenerator.CSWLinksetRenderer.java
public String renderLinks(String text) {
    Matcher linksetMatcher = linksetPattern.matcher(text);
    StringBuilder newText = new StringBuilder();
    int oldEnd = 0;
    while (linksetMatcher.find()) {
        String linkset = linksetMatcher.group();
        // extract the content of the text nodes
        StringBuilder textContent = new StringBuilder();
        Matcher textContentMatcher = textContentPattern.matcher(linkset);
        while (textContentMatcher.find()) {
            textContent.append(textContentMatcher.group(1));
        }
        int start = linksetMatcher.start();
        int end = linksetMatcher.end();
        newText.append(text.substring(oldEnd, start));
        newText.append("<a href=\"#\" onclick=\"showPopup(this, new Array(");
        Matcher linkMatcher = linkPattern.matcher(linkset);
        while (linkMatcher.find()) {
            String page = linkMatcher.group(1);
            newText.append('\'');
            newText.append(page);
            newText.append("',");
        }
        newText.setCharAt(newText.length() - 1, ')');
        newText.append("); return false;\">");
        newText.append(textContent);
        newText.append("</a>");
        oldEnd = end;
    }
    // append rest of text
    newText.append(text.substring(oldEnd, text.length()));
    // end == 0 means that there are no csw:linkset elements, thus we do not need to include the popup html and javascript
    if (oldEnd != 0) {
        newText.append('\n');
        newText.append(cswLinkPopupHTML);
    }
    return newText.toString();
}
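renderLinks() rebuilds the output by copying the untouched text between oldEnd and start(), emitting a replacement, and resuming at end(). The same find/append loop in isolation, with a made-up pattern and replacement:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RewriteBetweenMatches {
    public static void main(String[] args) {
        String text = "Hello <b>world</b>, <b>again</b>!";
        Matcher m = Pattern.compile("<b>(.*?)</b>").matcher(text);
        StringBuilder out = new StringBuilder();
        int oldEnd = 0;
        while (m.find()) {
            out.append(text, oldEnd, m.start());            // untouched text before the match
            out.append("[").append(m.group(1)).append("]"); // rewritten match
            oldEnd = m.end();                               // resume after the match
        }
        out.append(text.substring(oldEnd));                 // trailing text after the last match
        System.out.println(out);                            // Hello [world], [again]!
    }
}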
From source file:com.semperos.screwdriver.js.LessSource.java
private void resolveImports() throws FileNotFoundException, IOException {
    Matcher importMatcher = IMPORT_PATTERN.matcher(normalizedContent);
    while (importMatcher.find()) {
        String importedFile = importMatcher.group(3);
        importedFile = importedFile.matches(".*\\.(le?|c)ss$") ? importedFile : importedFile + ".less";
        boolean css = importedFile.matches(".*css$");
        if (!css) {
            LessSource importedLessSource = new LessSource(new File(file.getParentFile(), importedFile));
            imports.put(importedFile, importedLessSource);
            normalizedContent = normalizedContent.substring(0, importMatcher.start())
                    + importedLessSource.getNormalizedContent()
                    + normalizedContent.substring(importMatcher.end());
            importMatcher = IMPORT_PATTERN.matcher(normalizedContent);
        }
    }
}
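resolveImports() splices the imported source over the matched directive with substring(0, start()) + ... + substring(end()), then recreates the matcher so newly inserted imports are scanned as well. A reduced sketch of that splice-and-rescan loop using an in-memory map instead of files (pattern and names are illustrative, and the map must not contain cycles):

import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SpliceAndRescan {
    private static final Pattern IMPORT = Pattern.compile("@import \"([^\"]+)\";");

    public static void main(String[] args) {
        Map<String, String> sources = Map.of(
                "colors", ".red { color: red; }",
                "layout", "@import \"colors\";\n.page { width: 960px; }");
        String content = "@import \"layout\";\nbody { margin: 0; }";
        Matcher m = IMPORT.matcher(content);
        while (m.find()) {
            String replacement = sources.getOrDefault(m.group(1), "");
            // Splice the imported source in place of the directive using start()/end(),
            // then rebuild the matcher so nested imports are resolved too
            content = content.substring(0, m.start()) + replacement + content.substring(m.end());
            m = IMPORT.matcher(content);
        }
        System.out.println(content);
    }
}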
From source file:gate.creole.kea.CorpusImporter.java
protected boolean annotateKeyPhrases(Document document, String annSetName, String keyphraseAnnotationType,
        List<String> phrases) throws Exception {
    if (phrases == null || phrases.isEmpty())
        return false;
    //create a pattern
    String patternStr = "";
    Iterator<String> phraseIter = phrases.iterator();
    while (phraseIter.hasNext()) {
        String phrase = phraseIter.next();
        patternStr += patternStr.length() == 0 ? "\\Q" + phrase + "\\E" : "|\\Q" + phrase + "\\E";
    }
    Pattern pattern = Pattern.compile(patternStr, Pattern.CASE_INSENSITIVE | Pattern.MULTILINE);
    Matcher matcher = pattern.matcher(document.getContent().toString());
    AnnotationSet outputSet = annSetName == null || annSetName.length() == 0 ? document.getAnnotations()
            : document.getAnnotations(annSetName);
    boolean result = false;
    while (matcher.find()) {
        int start = matcher.start();
        int end = matcher.end();
        outputSet.add(new Long(start), new Long(end), keyphraseAnnotationType, Factory.newFeatureMap());
        result = true;
    }
    document.getFeatures().put("Author assigned keyphrases", phrases);
    return result;
}
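annotateKeyPhrases() builds one alternation of \Q...\E-quoted literals and records each hit's start()/end() as an annotation span. A sketch of the same idea using Pattern.quote (which produces the same \Q...\E quoting) and plain offset pairs instead of GATE annotations; phrases and text are invented:

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class KeyphraseSpans {
    public static void main(String[] args) {
        List<String> phrases = List.of("machine learning", "neural network");
        String text = "Machine learning and a neural network were compared.";
        // Quote each phrase so regex metacharacters are treated literally, then alternate them
        String patternStr = phrases.stream().map(Pattern::quote).collect(Collectors.joining("|"));
        Matcher m = Pattern.compile(patternStr, Pattern.CASE_INSENSITIVE).matcher(text);
        List<long[]> spans = new ArrayList<>();
        while (m.find()) {
            spans.add(new long[] { m.start(), m.end() });   // offsets for an annotation span
            System.out.println(m.group() + " -> [" + m.start() + ", " + m.end() + ")");
        }
    }
}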
From source file:net.dv8tion.jda.core.entities.impl.MessageImpl.java
@Override
public synchronized String getStrippedContent() {
    if (strippedContent == null) {
        String tmp = getContent();
        //all the formatting keys to keep track of
        String[] keys = new String[] { "*", "_", "`", "~~" };
        //find all tokens (formatting strings described above)
        TreeSet<FormatToken> tokens = new TreeSet<>((t1, t2) -> Integer.compare(t1.start, t2.start));
        for (String key : keys) {
            Matcher matcher = Pattern.compile(Pattern.quote(key)).matcher(tmp);
            while (matcher.find()) {
                tokens.add(new FormatToken(key, matcher.start()));
            }
        }
        //iterate over all tokens, find all matching pairs, and add them to the list toRemove
        Stack<FormatToken> stack = new Stack<>();
        List<FormatToken> toRemove = new ArrayList<>();
        boolean inBlock = false;
        for (FormatToken token : tokens) {
            if (stack.empty() || !stack.peek().format.equals(token.format)
                    || stack.peek().start + token.format.length() == token.start) {
                //we are at opening tag
                if (!inBlock) {
                    //we are outside of block -> handle normally
                    if (token.format.equals("`")) {
                        //block start... invalidate all previous tags
                        stack.clear();
                        inBlock = true;
                    }
                    stack.push(token);
                } else if (token.format.equals("`")) {
                    //we are inside of a block -> handle only block tag
                    stack.push(token);
                }
            } else if (!stack.empty()) {
                //we found a matching close-tag
                toRemove.add(stack.pop());
                toRemove.add(token);
                if (token.format.equals("`") && stack.empty()) {
                    //close tag closed the block
                    inBlock = false;
                }
            }
        }
        //sort tags to remove by their start-index and iteratively build the remaining string
        Collections.sort(toRemove, (t1, t2) -> Integer.compare(t1.start, t2.start));
        StringBuilder out = new StringBuilder();
        int currIndex = 0;
        for (FormatToken formatToken : toRemove) {
            if (currIndex < formatToken.start) {
                out.append(tmp.substring(currIndex, formatToken.start));
            }
            currIndex = formatToken.start + formatToken.format.length();
        }
        if (currIndex < tmp.length()) {
            out.append(tmp.substring(currIndex));
        }
        //return the stripped text, escape all remaining formatting characters (did not have matching open/close before or were left/right of block)
        strippedContent = out.toString().replace("*", "\\*").replace("_", "\\_").replace("~", "\\~");
    }
    return strippedContent;
}
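getStrippedContent() only records matcher.start() for each formatting token and does the pairing later. A small sketch of that first step, collecting token start offsets in ascending order (the input string is invented):

import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TokenPositions {
    public static void main(String[] args) {
        String tmp = "some *bold* and `code` text";
        String[] keys = { "*", "_", "`", "~~" };
        // Collect every formatting token keyed by its start offset, ordered ascending
        TreeMap<Integer, String> tokens = new TreeMap<>();
        for (String key : keys) {
            Matcher matcher = Pattern.compile(Pattern.quote(key)).matcher(tmp);
            while (matcher.find()) {
                tokens.put(matcher.start(), key);
            }
        }
        tokens.forEach((start, key) -> System.out.println(key + " at " + start));
    }
}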
From source file:qhindex.controller.SearchAuthorWorksController.java
private String resolvePublisher(String urlCitationWork, String publisherNameIncomplete) throws IOException {
    String publisher = publisherNameIncomplete;
    if (urlCitationWork.contains(".pdf") == false) {
        // Get the header and determine if the resource is in text format (html or plain)
        // to be able to extract the publisher name
        final RequestConfig requestConfig = RequestConfig.custom()
                .setConnectTimeout(AppHelper.connectionTimeOut)
                .setConnectionRequestTimeout(AppHelper.connectionTimeOut)
                .setSocketTimeout(AppHelper.connectionTimeOut)
                .setStaleConnectionCheckEnabled(true).build();
        final CloseableHttpClient httpclient = HttpClients.custom().setDefaultRequestConfig(requestConfig)
                .build();
        HttpHead httpHead = new HttpHead(urlCitationWork);
        try {
            CloseableHttpResponse responseHead = httpclient.execute(httpHead);
            StatusLine statusLineHead = responseHead.getStatusLine();
            responseHead.close();
            String contentType = responseHead.getFirstHeader("Content-Type").toString().toLowerCase();
            if (statusLineHead.getStatusCode() < 300 && contentType.contains("text/html")
                    || contentType.contains("text/plain")) {
                HttpGet httpGet = new HttpGet(urlCitationWork);
                CloseableHttpResponse responsePost = httpclient.execute(httpGet);
                StatusLine statusLine = responsePost.getStatusLine();
                if (statusLine.getStatusCode() < 300) {
                    //AppHelper.waitBeforeNewRequest();
                    BufferedReader br = new BufferedReader(
                            new InputStreamReader((responsePost.getEntity().getContent())));
                    String content = new String();
                    String line;
                    while ((line = br.readLine()) != null) {
                        content += line;
                    }
                    int bodyStartIndex = content.indexOf("<body");
                    if (bodyStartIndex < 0)
                        bodyStartIndex = 0;
                    try {
                        publisherNameIncomplete = formatRegExSpecialCharsInString(publisherNameIncomplete);
                        Pattern pattern = Pattern.compile(publisherNameIncomplete + "(\\w|\\d|-|\\s)+");
                        Matcher matcher = pattern.matcher(content);
                        if (matcher.find(bodyStartIndex)) {
                            publisher = content.substring(matcher.start(), matcher.end());
                        } else {
                            publisher = publisherNameIncomplete;
                        }
                    } catch (Exception ex) {
                        Debug.print("Exception while resolving publisher for citing work - extrating pattern from citation web resource: "
                                + ex.toString());
                        resultsMsg += "Exception while resolving publisher for citing work - extrating pattern from citation web resource.\n";
                    }
                }
                responsePost.close();
            }
        } catch (IOException ioEx) {
            Debug.print("Exception while resolving publisher for citing work: " + ioEx.toString());
            resultsMsg += "Exception while resolving publisher for citing work.\n";
        }
    }
    publisher = publisher.trim();
    return publisher;
}
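resolvePublisher() uses the one-argument find(int) to begin the search at the <body> offset and then cuts the hit out with start()/end(). The same two calls in isolation, with a fabricated page snippet and a simplified pattern:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FindFromOffset {
    public static void main(String[] args) {
        String content = "<head>Springer</head><body>... published by Springer Nature ...</body>";
        int bodyStart = content.indexOf("<body");
        if (bodyStart < 0)
            bodyStart = 0;
        Matcher m = Pattern.compile("Springer(\\w|\\s|-)+").matcher(content);
        // find(int) resets the matcher and starts searching at the given offset,
        // so the occurrence inside <head> is skipped
        if (m.find(bodyStart)) {
            System.out.println(content.substring(m.start(), m.end()).trim());
        }
    }
}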
From source file:ninja.javafx.smartcsv.fx.validation.ValidationEditorController.java
private static StyleSpans<Collection<String>> computeHighlighting(String text) {
    Matcher matcher = PATTERN.matcher(text);
    int lastKwEnd = 0;
    StyleSpansBuilder<Collection<String>> spansBuilder = new StyleSpansBuilder<>();
    while (matcher.find()) {
        String styleClass = matcher.group("KEYWORD") != null ? "keyword"
                : matcher.group("PAREN") != null ? "paren"
                : matcher.group("BRACE") != null ? "brace"
                : matcher.group("BRACKET") != null ? "bracket"
                : matcher.group("SEMICOLON") != null ? "semicolon"
                : matcher.group("STRING") != null ? "string"
                : matcher.group("STRING2") != null ? "string"
                : matcher.group("COMMENT") != null ? "comment" : null; /* never happens */
        assert styleClass != null;
        spansBuilder.add(Collections.emptyList(), matcher.start() - lastKwEnd);
        spansBuilder.add(Collections.singleton(styleClass), matcher.end() - matcher.start());
        lastKwEnd = matcher.end();
    }
    spansBuilder.add(Collections.emptyList(), text.length() - lastKwEnd);
    return spansBuilder.create();
}
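computeHighlighting() feeds two lengths per match into the spans builder: the unstyled gap start() - lastKwEnd and the styled run end() - start(). A dependency-free sketch that prints those lengths instead of building RichTextFX StyleSpans (keyword pattern and input are illustrative):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class HighlightSpans {
    private static final Pattern KEYWORD = Pattern.compile("\\b(if|else|while|return)\\b");

    public static void main(String[] args) {
        String text = "if (x > 0) return x; else return -x;";
        Matcher m = KEYWORD.matcher(text);
        int lastEnd = 0;
        while (m.find()) {
            // Unstyled gap since the previous match, then the styled keyword itself
            System.out.println("plain   length " + (m.start() - lastEnd));
            System.out.println("keyword length " + (m.end() - m.start()) + "  (" + m.group() + ")");
            lastEnd = m.end();
        }
        System.out.println("plain   length " + (text.length() - lastEnd));
    }
}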
From source file:cn.dockerfoundry.ide.eclipse.dockerfile.validator.DockerfileDelegatingValidator.java
@SuppressWarnings("unchecked")
public Map<DockerfileValidationLevel, List<DockerfileValidationResult>> validate() {
    Map<DockerfileValidationLevel, List<DockerfileValidationResult>> result = new HashMap<DockerfileValidationLevel, List<DockerfileValidationResult>>();
    if (this.dockerfileInputStream == null)
        return result;
    ValidatorUtils validatorUtils = new ValidatorUtils();
    boolean fromCheck = false;
    int currentLine = 0;
    List<DockerfileValidationResult> errors = new ArrayList<DockerfileValidationResult>();
    List<DockerfileValidationResult> warnings = new ArrayList<DockerfileValidationResult>();
    List<DockerfileValidationResult> infos = new ArrayList<DockerfileValidationResult>();
    Map<String, Object> ruleObject = validatorUtils
            .getRules(DockerfileDelegatingValidator.class.getResourceAsStream("default.yaml"));
    List<Map<String, Object>> requiredInstructions = validatorUtils.createReqInstructionHash(ruleObject);
    Map<String, Object> general = (Map<String, Object>) ruleObject.get("general");
    List<String> valid_instructions = (List<String>) general.get("valid_instructions");
    Pattern validInstructionsRegex = validatorUtils.createValidCommandRegex(valid_instructions);
    Pattern continuationRegex = null;
    // Pattern ignoreRegex = null;
    Object multiline_regex = general.get("multiline_regex");
    if (multiline_regex != null && multiline_regex.toString().length() > 2) {
        String _multiline_regex = multiline_regex.toString().substring(1,
                multiline_regex.toString().length() - 1);
        continuationRegex = Pattern.compile(_multiline_regex, Pattern.CASE_INSENSITIVE);
    }
    Object ignore_regex = general.get("ignore_regex");
    if (ignore_regex != null && ignore_regex.toString().length() > 2) {
        String _ignore_regex = ignore_regex.toString().substring(1, ignore_regex.toString().length() - 1);
        Pattern ignoreRegex = Pattern.compile(_ignore_regex, Pattern.CASE_INSENSITIVE);
        System.out.println("ignore_regex is not used for now: " + ignoreRegex.pattern());
    }
    try {
        String dockerfile = IOUtils.toString(dockerfileInputStream);
        String[] linesArr = dockerfile.split("(\\r|\\n)");
        if (linesArr != null && linesArr.length > 0) {
            for (int i = 0; i < linesArr.length; i++) {
                currentLine++;
                String line = linesArr[i];
                int lineOffSet = 0;
                if (line == null || line.length() == 0 || line.charAt(0) == '#') {
                    continue;
                }
                while (validatorUtils.isPartialLine(line, continuationRegex)) {
                    line = continuationRegex.matcher(line).replaceAll(" ");
                    if (linesArr[currentLine + lineOffSet].charAt(0) == '#') {
                        linesArr[currentLine + lineOffSet] = null;
                        line = line + "\\";
                    } else {
                        line = line + linesArr[currentLine + lineOffSet];
                        linesArr[currentLine + lineOffSet] = null;
                    }
                    lineOffSet++;
                }
                // First instruction must be FROM
                if (!fromCheck) {
                    fromCheck = true;
                    if (line.toUpperCase().indexOf("FROM") != 0) {
                        DockerfileValidationResult error = new DockerfileValidationResult();
                        error.setLine(currentLine);
                        error.setLevel(DockerfileValidationLevel.ERROR);
                        error.setMessage("Missing or misplaced FROM");
                        error.setLineContent(line);
                        errors.add(error);
                    }
                } // end for FROM
                Matcher matcher = validInstructionsRegex.matcher(line);
                if (!matcher.find()) {
                    DockerfileValidationResult error = new DockerfileValidationResult();
                    error.setLine(currentLine);
                    error.setLevel(DockerfileValidationLevel.ERROR);
                    error.setMessage("Invalid instruction");
                    error.setLineContent(line);
                    errors.add(error);
                } else {
                    String instruction = line.substring(matcher.start(), matcher.end()).trim();
                    String params = matcher.replaceAll("");
                    validatorUtils.checkLineRules(ruleObject, instruction, params, line, currentLine, errors,
                            warnings, infos);
                    requiredInstructions.remove(instruction);
                } // end for valid instructions checking
            }
        }
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    validatorUtils.checkRequiredInstructions(requiredInstructions, errors, warnings, infos);
    result.put(DockerfileValidationLevel.ERROR, errors);
    result.put(DockerfileValidationLevel.WARNING, warnings);
    result.put(DockerfileValidationLevel.INFO, infos);
    return result;
}
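The validator extracts the instruction keyword with line.substring(matcher.start(), matcher.end()).trim() and takes the remaining arguments from matcher.replaceAll(""). A self-contained sketch of that split, with a simplified hand-written instruction pattern in place of the one generated from the rules file:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class InstructionSplit {
    public static void main(String[] args) {
        // Simplified stand-in for the generated valid-instruction pattern
        Pattern validInstructions = Pattern.compile("^\\s*(FROM|RUN|CMD|EXPOSE|ENV|COPY)\\s",
                Pattern.CASE_INSENSITIVE);
        String line = "EXPOSE 8080 8443";
        Matcher m = validInstructions.matcher(line);
        if (!m.find()) {
            System.out.println("Invalid instruction: " + line);
        } else {
            // start()/end() bracket the instruction keyword; replaceAll("") leaves the arguments
            String instruction = line.substring(m.start(), m.end()).trim();
            String params = m.replaceAll("");
            System.out.println("instruction=" + instruction + ", params=" + params);
        }
    }
}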