List of usage examples for java.io.Reader
protected Reader()
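java.io.Reader is abstract and its only constructor is the protected no-argument one, so the class is used by subclassing: a concrete Reader must implement at least the two abstract methods read(char[], int, int) and close(). Every example on this page is a variation on that pattern. A minimal illustrative sketch (StringBackedReader is a made-up name, essentially a hand-rolled StringReader, not from any of the projects below):

import java.io.IOException;
import java.io.Reader;

// Serves characters from an in-memory buffer, then signals end of stream with -1.
public class StringBackedReader extends Reader {
    private final char[] data;
    private int pos = 0;

    public StringBackedReader(String s) {
        this.data = s.toCharArray();
    }

    @Override
    public int read(char[] cbuf, int off, int len) throws IOException {
        if (pos >= data.length)
            return -1; // end of stream
        int n = Math.min(len, data.length - pos);
        System.arraycopy(data, pos, cbuf, off, n);
        pos += n;
        return n;
    }

    @Override
    public void close() {
        pos = data.length; // nothing external to release here
    }
}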
From source file:org.apache.taverna.scufl2.api.io.structure.StructureReader.java
protected void parseLine(final String nextLine) throws ReaderException {
    try (Scanner scanner = new Scanner(nextLine.trim())) {
        // In case it is the last line
        if (!scanner.hasNext())
            return;
        // allow any whitespace
        String next = scanner.next();
        if (next.isEmpty())
            return;
        switch (next) {
        case "WorkflowBundle":
            parseWorkflowBundle(scanner);
            return;
        case "MainWorkflow":
            mainWorkflow = parseName(scanner);
            return;
        case "Workflow":
            parseWorkflow(scanner);
            return;
        case "In":
        case "Out":
            parsePort(scanner, next);
            return;
        case "Links":
            level = Level.Links;
            processor = null;
            return;
        case "Controls":
            level = Level.Controls;
            return;
        case "MainProfile":
            mainProfile = parseName(scanner);
            return;
        case "Profile":
            parseProfile(scanner);
            return;
        case "Type":
            parseType(nextLine);
            return;
        case "ProcessorBinding":
            parseProcessorBinding(scanner);
            return;
        case "InputPortBindings":
            level = Level.InputPortBindings;
            return;
        case "OutputPortBindings":
            level = Level.OutputPortBindings;
            return;
        case "Configuration":
            parseConfiguration(scanner);
            return;
        case "Configures":
            parseConfigures(scanner);
            return;
        case "Activity":
            switch (level) {
            case Profile:
            case Activity:
                level = Level.Activity;
                activity = new Activity();
                activity.setName(parseName(scanner));
                profile.getActivities().add(activity);
                return;
            case ProcessorBinding:
                Activity boundActivity = profile.getActivities().getByName(parseName(scanner));
                processorBinding.setBoundActivity(boundActivity);
                return;
            default:
                break;
            }
            break;
        case "Processor":
            switch (level) {
            case Workflow:
            case Processor:
                level = Level.Processor;
                processor = new Processor();
                processor.setName(parseName(scanner));
                processor.setParent(workflow);
                workflow.getProcessors().add(processor);
                return;
            case ProcessorBinding:
                String[] wfProcName = parseName(scanner).split(":");
                Workflow wf = wb.getWorkflows().getByName(wfProcName[0]);
                Processor boundProcessor = wf.getProcessors().getByName(wfProcName[1]);
                processorBinding.setBoundProcessor(boundProcessor);
                return;
            default:
                break;
            }
            break;
        }
        if (next.equals("block")) {
            Matcher blockMatcher = blockPattern.matcher(nextLine);
            blockMatcher.find();
            String block = blockMatcher.group(1);
            String untilFinish = blockMatcher.group(2);
            Processor blockProc = workflow.getProcessors().getByName(block);
            Processor untilFinishedProc = workflow.getProcessors().getByName(untilFinish);
            new BlockingControlLink(blockProc, untilFinishedProc);
        }
        if (next.startsWith("'") && level.equals(Level.Links)) {
            Matcher linkMatcher = linkPattern.matcher(nextLine);
            linkMatcher.find();
            String firstLink = linkMatcher.group(1);
            String secondLink = linkMatcher.group(2);
            SenderPort senderPort;
            if (firstLink.contains(":")) {
                String[] procPort = firstLink.split(":");
                Processor proc = workflow.getProcessors().getByName(procPort[0]);
                senderPort = proc.getOutputPorts().getByName(procPort[1]);
            } else
                senderPort = workflow.getInputPorts().getByName(firstLink);
            ReceiverPort receiverPort;
            if (secondLink.contains(":")) {
                String[] procPort = secondLink.split(":");
                Processor proc = workflow.getProcessors().getByName(procPort[0]);
                receiverPort = proc.getInputPorts().getByName(procPort[1]);
            } else
                receiverPort = workflow.getOutputPorts().getByName(secondLink);
            new DataLink(workflow, senderPort, receiverPort);
            return;
        }
        if (next.startsWith("'") && (level == Level.InputPortBindings || level == Level.OutputPortBindings)) {
            Matcher linkMatcher = linkPattern.matcher(nextLine);
            linkMatcher.find();
            String firstLink = linkMatcher.group(1);
            String secondLink = linkMatcher.group(2);
            if (level == Level.InputPortBindings) {
                InputProcessorPort processorPort = processorBinding.getBoundProcessor().getInputPorts()
                        .getByName(firstLink);
                InputActivityPort activityPort = processorBinding.getBoundActivity().getInputPorts()
                        .getByName(secondLink);
                new ProcessorInputPortBinding(processorBinding, processorPort, activityPort);
            } else {
                OutputActivityPort activityPort = processorBinding.getBoundActivity().getOutputPorts()
                        .getByName(firstLink);
                OutputProcessorPort processorPort = processorBinding.getBoundProcessor().getOutputPorts()
                        .getByName(secondLink);
                new ProcessorOutputPortBinding(processorBinding, activityPort, processorPort);
            }
            return;
        }
        if (level == Level.JSON) {
            /*
             * A silly reader that feeds (no more than) a single line at a
             * time from our parent scanner, starting with the current line
             */
            Reader reader = new Reader() {
                char[] line = nextLine.toCharArray();
                int pos = 0;

                @Override
                public int read(char[] cbuf, int off, int len) throws IOException {
                    if (pos >= line.length) {
                        // Need to read next line to fill buffer
                        if (!StructureReader.this.scanner.hasNextLine())
                            return -1;
                        String newLine = StructureReader.this.scanner.nextLine();
                        pos = 0;
                        line = newLine.toCharArray();
                        // System.out.println("Read new line: " + newLine);
                    }
                    int length = Math.min(len, line.length - pos);
                    if (length <= 0)
                        return 0;
                    arraycopy(line, pos, cbuf, off, length);
                    pos += length;
                    return length;
                }

                @Override
                public void close() throws IOException {
                    line = null;
                }
            };
            ObjectMapper mapper = new ObjectMapper();
            try {
                JsonParser parser = mapper.getFactory().createParser(reader);
                JsonNode jsonNode = parser.readValueAs(JsonNode.class);
                // System.out.println("Parsed " + jsonNode);
                configuration.setJson(jsonNode);
            } catch (IOException e) {
                throw new ReaderException("Can't parse json", e);
            }
            level = Level.Configuration;
            return;
        }
    }
}
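The anonymous Reader above exists so Jackson can pull a JSON configuration value straight out of the enclosing Scanner, one line at a time, starting with the line already in hand. A stripped-down version of the same idea, feeding Jackson from a plain Iterator<String> (the iterator and sample JSON are placeholders, not Taverna code):

import java.io.IOException;
import java.io.Reader;
import java.util.Iterator;
import java.util.List;

import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class LineFedJson {
    public static void main(String[] args) throws IOException {
        final Iterator<String> lines = List.of("{\"a\":", "1}").iterator();
        Reader reader = new Reader() {
            char[] line = new char[0];
            int pos = 0;

            @Override
            public int read(char[] cbuf, int off, int len) throws IOException {
                while (pos >= line.length) { // current line exhausted; fetch the next
                    if (!lines.hasNext())
                        return -1;
                    line = lines.next().toCharArray();
                    pos = 0;
                }
                int n = Math.min(len, line.length - pos);
                System.arraycopy(line, pos, cbuf, off, n);
                pos += n;
                return n;
            }

            @Override
            public void close() {
                line = new char[0];
            }
        };
        JsonParser parser = new ObjectMapper().getFactory().createParser(reader);
        System.out.println(parser.readValueAs(JsonNode.class)); // {"a":1}
    }
}

One difference from the original: the while loop skips empty lines instead of returning 0, which keeps closer to the Reader contract (read should deliver input or return -1, not spin on zero-length results).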
From source file:com.github.gekoh.yagen.ddl.CreateDDL.java
private DDLGenerator.AddDDLEntry getAddDDL() {
    return new DDLGenerator.AddDDLEntry(new Reader() {
        StringReader reader;

        @Override
        public int read(char[] cbuf, int off, int len) throws IOException {
            if (reader == null) {
                StringBuilder ddl = new StringBuilder();
                if (deferredDdl.length() > 0) {
                    ddl.append(STATEMENT_SEPARATOR);
                    ddl.append("-- deferred DDL executed after creation of entities\n")
                            .append("-- DO NOT EDIT MANUALLY!\n");
                    if (deferredDdl.indexOf(LANGUAGE_VIEW_NAME) >= 0) {
                        ddl.append(STATEMENT_SEPARATOR);
                        ddl.append("create or replace view ").append(LANGUAGE_VIEW_NAME).append(" as\n")
                                .append("select 'DE' language_cd from dual union all\n")
                                .append("select 'EN' language_cd from dual;");
                    }
                    ddl.append(deferredDdl);
                }
                reader = new StringReader(ddl.toString());
            }
            return reader.read(cbuf, off, len);
        }

        @Override
        public void close() throws IOException {
        }
    });
}
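The point of the anonymous class here is that the DDL string is not assembled until something actually reads from the entry; until then the inner StringReader stays null. The same lazy-delegate shape in isolation (LazyStringReader and its Supplier are illustrative names, not part of yagen):

import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.function.Supplier;

// A Reader that materializes its content only on the first read() call.
class LazyStringReader extends Reader {
    private final Supplier<String> source;
    private StringReader delegate;

    LazyStringReader(Supplier<String> source) {
        this.source = source;
    }

    @Override
    public int read(char[] cbuf, int off, int len) throws IOException {
        if (delegate == null) {
            delegate = new StringReader(source.get()); // built lazily, exactly once
        }
        return delegate.read(cbuf, off, len);
    }

    @Override
    public void close() throws IOException {
        if (delegate != null) {
            delegate.close();
        }
    }
}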
From source file:org.alfresco.repo.search.impl.lucene.ADMLuceneIndexerImpl.java
/**
 * @param indexAtomicPropertiesOnly
 *            true to ignore all properties that must be indexed non-atomically
 * @return Returns true if the property was indexed atomically, or false if it should be done asynchronously
 */
protected boolean indexProperty(NodeRef nodeRef, QName propertyName, Serializable value, Document doc,
        boolean indexAtomicPropertiesOnly, boolean isContentIndexedForNode) {
    String attributeName = "@"
            + QName.createQName(propertyName.getNamespaceURI(), ISO9075.encode(propertyName.getLocalName()));

    boolean store = true;
    boolean index = true;
    IndexTokenisationMode tokenise = IndexTokenisationMode.TRUE;
    boolean atomic = true;
    boolean isContent = false;
    boolean isMultiLingual = false;
    boolean isText = false;
    boolean isDateTime = false;

    PropertyDefinition propertyDef = getDictionaryService().getProperty(propertyName);
    if (propertyDef != null) {
        index = propertyDef.isIndexed();
        store = propertyDef.isStoredInIndex();
        tokenise = propertyDef.getIndexTokenisationMode();
        atomic = propertyDef.isIndexedAtomically();
        isContent = propertyDef.getDataType().getName().equals(DataTypeDefinition.CONTENT);
        isMultiLingual = propertyDef.getDataType().getName().equals(DataTypeDefinition.MLTEXT);
        isText = propertyDef.getDataType().getName().equals(DataTypeDefinition.TEXT);
        if (propertyDef.getDataType().getName().equals(DataTypeDefinition.DATETIME)) {
            DataTypeDefinition dataType = propertyDef.getDataType();
            String analyserClassName = propertyDef.resolveAnalyserClassName();
            isDateTime = analyserClassName.equals(DateTimeAnalyser.class.getCanonicalName());
        }
    }
    if (value == null) {
        // the value is null
        return true;
    } else if (indexAtomicPropertiesOnly && !atomic) {
        // we are only doing atomic properties and the property is definitely non-atomic
        return false;
    }
    if (!indexAtomicPropertiesOnly) {
        doc.removeFields(propertyName.toString());
    }
    boolean wereAllAtomic = true;
    // convert value to String
    for (Serializable serializableValue : DefaultTypeConverter.INSTANCE.getCollection(Serializable.class,
            value)) {
        String strValue = null;
        try {
            strValue = DefaultTypeConverter.INSTANCE.convert(String.class, serializableValue);
        } catch (TypeConversionException e) {
            doc.add(new Field(attributeName, NOT_INDEXED_NO_TYPE_CONVERSION, Field.Store.NO,
                    Field.Index.UN_TOKENIZED, Field.TermVector.NO));
            continue;
        }
        if (strValue == null) {
            // nothing to index
            continue;
        }
        if (isContent) {
            // Content is always tokenised
            ContentData contentData = DefaultTypeConverter.INSTANCE.convert(ContentData.class,
                    serializableValue);
            if (!index || contentData == null || contentData.getMimetype() == null) {
                // no content, mimetype or property not indexed
                continue;
            }
            // store mimetype in index - even if content does not index it is useful
            // Added size and locale - size needs to be tokenised correctly
            doc.add(new Field(attributeName + ".mimetype", contentData.getMimetype(), Field.Store.NO,
                    Field.Index.UN_TOKENIZED, Field.TermVector.NO));
            doc.add(new Field(attributeName + ".size", Long.toString(contentData.getSize()), Field.Store.NO,
                    Field.Index.TOKENIZED, Field.TermVector.NO));

            // TODO: Use the node locale in preference to the system locale
            Locale locale = contentData.getLocale();
            if (locale == null) {
                Serializable localeProperty = nodeService.getProperty(nodeRef, ContentModel.PROP_LOCALE);
                if (localeProperty != null) {
                    locale = DefaultTypeConverter.INSTANCE.convert(Locale.class, localeProperty);
                }
            }
            if (locale == null) {
                locale = I18NUtil.getLocale();
            }
            doc.add(new Field(attributeName + ".locale", locale.toString().toLowerCase(), Field.Store.NO,
                    Field.Index.UN_TOKENIZED, Field.TermVector.NO));

            if (getLuceneConfig().isContentIndexingEnabled() && isContentIndexedForNode) {
                // Avoid handling (or even getting) a reader if (maxAtomicTransformationTime <= 0)
                // ALF-5677: Extremely long launch of the Alfresco server with connector V1.2
                boolean avoidReader = maxAtomicTransformationTime <= 0 && indexAtomicPropertiesOnly;
                ContentReader reader = avoidReader ? null : contentService.getReader(nodeRef, propertyName);
                if (reader != null && reader.exists()) {
                    // We have a reader, so use it
                    boolean readerReady = true;
                    // transform if necessary (it is not a UTF-8 text document)
                    String sourceMimetype = reader.getMimetype();
                    if (!EqualsHelper.nullSafeEquals(sourceMimetype, MimetypeMap.MIMETYPE_TEXT_PLAIN)
                            || !EqualsHelper.nullSafeEquals(reader.getEncoding(), "UTF-8")) {
                        try {
                            // get the transformer
                            TransformationOptions options = new TransformationOptions();
                            options.setUse("index");
                            options.setSourceNodeRef(nodeRef);
                            transformerDebug.pushAvailable(reader.getContentUrl(), sourceMimetype,
                                    MimetypeMap.MIMETYPE_TEXT_PLAIN, options);
                            long sourceSize = reader.getSize();
                            List<ContentTransformer> transformers = contentService.getActiveTransformers(
                                    sourceMimetype, sourceSize, MimetypeMap.MIMETYPE_TEXT_PLAIN, options);
                            transformerDebug.availableTransformers(transformers, sourceSize, options,
                                    "ADMLuceneIndexer");
                            if (transformers.isEmpty()) {
                                // log it
                                if (s_logger.isInfoEnabled()) {
                                    s_logger.info("Not indexed: No transformation: \n" + " source: " + reader
                                            + "\n" + " target: " + MimetypeMap.MIMETYPE_TEXT_PLAIN + " at "
                                            + nodeService.getPath(nodeRef));
                                }
                                // don't index from the reader
                                readerReady = false;
                                // not indexed: no transformation
                                // doc.add(new Field("TEXT", NOT_INDEXED_NO_TRANSFORMATION, Field.Store.NO,
                                // Field.Index.TOKENIZED, Field.TermVector.NO));
                                doc.add(new Field(attributeName, NOT_INDEXED_NO_TRANSFORMATION, Field.Store.NO,
                                        Field.Index.TOKENIZED, Field.TermVector.NO));
                            }
                            // is this transformer good enough?
                            else if (indexAtomicPropertiesOnly
                                    && transformers.get(0).getTransformationTime(sourceMimetype,
                                            MimetypeMap.MIMETYPE_TEXT_PLAIN) > maxAtomicTransformationTime) {
                                // only indexing atomic properties
                                // indexing will take too long, so push it to the background
                                wereAllAtomic = false;
                                readerReady = false;
                                if (transformerDebug.isEnabled()) {
                                    transformerDebug.debug("Run later. Transformer average ("
                                            + transformers.get(0).getTransformationTime() + " ms) > "
                                            + maxAtomicTransformationTime + " ms");
                                }
                            } else {
                                // We have a transformer that is fast enough
                                ContentTransformer transformer = transformers.get(0);
                                ContentWriter writer = contentService.getTempWriter();
                                writer.setMimetype(MimetypeMap.MIMETYPE_TEXT_PLAIN);
                                // this is what the analyzers expect on the stream
                                writer.setEncoding("UTF-8");
                                try {
                                    transformer.transform(reader, writer, options);
                                    // point the reader to the newly written content
                                    reader = writer.getReader();
                                    // Check that the reader is a view onto something concrete
                                    if (!reader.exists()) {
                                        throw new ContentIOException(
                                                "The transformation did not write any content, yet: \n"
                                                        + " transformer: " + transformer + "\n"
                                                        + " temp writer: " + writer);
                                    }
                                } catch (ContentIOException | NoTransformerException
                                        | UnsupportedTransformationException e) {
                                    // log it
                                    if (s_logger.isInfoEnabled()) {
                                        s_logger.info("Not indexed: Transformation failed at "
                                                + nodeService.getPath(nodeRef), e);
                                    }
                                    // don't index from the reader
                                    readerReady = false;
                                    doc.add(new Field(attributeName, NOT_INDEXED_TRANSFORMATION_FAILED,
                                            Field.Store.NO, Field.Index.TOKENIZED, Field.TermVector.NO));
                                }
                            }
                        } finally {
                            transformerDebug.popAvailable();
                        }
                    }
                    // add the text field using the stream from the
                    // reader, but only if the reader is valid
                    if (readerReady) {
                        // ALF-15857: We want to avoid actually opening any streams until we're writing
                        // this document to the index. Then we can 'stream through'
                        final ContentReader contentReader = reader;
                        Reader lazyReader = new Reader() {
                            private Reader isr;

                            private Reader getReader() {
                                if (isr == null) {
                                    InputStream ris = contentReader.getReader().getContentInputStream();
                                    try {
                                        isr = new InputStreamReader(ris, "UTF-8");
                                    } catch (UnsupportedEncodingException e) {
                                        isr = new InputStreamReader(ris);
                                    }
                                }
                                return isr;
                            }

                            @Override
                            public int read(java.nio.CharBuffer target) throws IOException {
                                return getReader().read(target);
                            }

                            @Override
                            public int read() throws IOException {
                                return getReader().read();
                            }

                            @Override
                            public int read(char cbuf[], int off, int len) throws IOException {
                                return getReader().read(cbuf, off, len);
                            }

                            @Override
                            public long skip(long n) throws IOException {
                                return getReader().skip(n);
                            }

                            @Override
                            public void close() throws IOException {
                                if (isr != null) {
                                    getReader().close();
                                }
                            }
                        };
                        StringBuilder builder = new StringBuilder();
                        builder.append("\u0000").append(locale.toString()).append("\u0000");
                        StringReader prefix = new StringReader(builder.toString());
                        Reader multiReader = new MultiReader(prefix, lazyReader);
                        doc.add(new Field(attributeName, multiReader, Field.TermVector.NO));
                    }
                } else if (avoidReader) {
                    // Reader was deliberately not used; process in the background
                    wereAllAtomic = false;
                } else {
                    // URL not present (null reader) or no content at the URL (file missing)
                    // log it
                    if (s_logger.isInfoEnabled()) {
                        s_logger.info("Not indexed: Content Missing \n" + " node: " + nodeRef + " at "
                                + nodeService.getPath(nodeRef) + "\n" + " reader: " + reader + "\n"
                                + " content exists: "
                                + (reader == null ? " --- " : Boolean.toString(reader.exists())));
                    }
                    // not indexed: content missing
                    doc.add(new Field(attributeName, NOT_INDEXED_CONTENT_MISSING, Field.Store.NO,
                            Field.Index.TOKENIZED, Field.TermVector.NO));
                }
            } else {
                wereAllAtomic = false;
            }
        } else {
            Field.Store fieldStore = store ? Field.Store.YES : Field.Store.NO;
            Field.Index fieldIndex;
            if (index) {
                switch (tokenise) {
                case TRUE:
                case BOTH:
                default:
                    fieldIndex = Field.Index.TOKENIZED;
                    break;
                case FALSE:
                    fieldIndex = Field.Index.UN_TOKENIZED;
                    break;
                }
            } else {
                fieldIndex = Field.Index.NO;
            }
            if ((fieldIndex != Field.Index.NO) || (fieldStore != Field.Store.NO)) {
                if (isMultiLingual) {
                    MLText mlText = DefaultTypeConverter.INSTANCE.convert(MLText.class, serializableValue);
                    for (Locale locale : mlText.getLocales()) {
                        String localeString = mlText.getValue(locale);
                        if (localeString == null) {
                            // No text for that locale
                            continue;
                        }
                        StringBuilder builder;
                        MLAnalysisMode analysisMode;
                        VerbatimAnalyser vba;
                        MLTokenDuplicator duplicator;
                        Token t;
                        switch (tokenise) {
                        case TRUE:
                            builder = new StringBuilder();
                            builder.append("\u0000").append(locale.toString()).append("\u0000")
                                    .append(localeString);
                            doc.add(new Field(attributeName, builder.toString(), fieldStore, fieldIndex,
                                    Field.TermVector.NO));
                            break;
                        case FALSE:
                            // analyse ml text
                            analysisMode = getLuceneConfig().getDefaultMLIndexAnalysisMode();
                            // Do the analysis here
                            vba = new VerbatimAnalyser(false);
                            duplicator = new MLTokenDuplicator(
                                    vba.tokenStream(attributeName, new StringReader(localeString)), locale,
                                    null, analysisMode);
                            try {
                                while ((t = duplicator.next()) != null) {
                                    String localeText = "";
                                    if (t.termText().indexOf('{') == 0) {
                                        int end = t.termText().indexOf('}', 1);
                                        if (end != -1) {
                                            localeText = t.termText().substring(1, end);
                                        }
                                    }
                                    if (localeText.length() > 0) {
                                        doc.add(new Field(attributeName + "." + localeText + ".sort",
                                                t.termText(), Field.Store.NO, Field.Index.NO_NORMS,
                                                Field.TermVector.NO));
                                    }
                                    // locale free identifiers are in the default field
                                    doc.add(new Field(attributeName, t.termText(), fieldStore,
                                            Field.Index.NO_NORMS, Field.TermVector.NO));
                                }
                            } catch (IOException e) {
                                // TODO ??
                            }
                            break;
                        case BOTH:
                            builder = new StringBuilder();
                            builder.append("\u0000").append(locale.toString()).append("\u0000")
                                    .append(localeString);
                            doc.add(new Field(attributeName, builder.toString(), fieldStore, fieldIndex,
                                    Field.TermVector.NO));
                            // analyse ml text
                            analysisMode = getLuceneConfig().getDefaultMLIndexAnalysisMode();
                            // Do the analysis here
                            vba = new VerbatimAnalyser(false);
                            duplicator = new MLTokenDuplicator(
                                    vba.tokenStream(attributeName, new StringReader(localeString)), locale,
                                    null, analysisMode);
                            try {
                                while ((t = duplicator.next()) != null) {
                                    String localeText = "";
                                    if (t.termText().indexOf('{') == 0) {
                                        int end = t.termText().indexOf('}', 1);
                                        if (end != -1) {
                                            localeText = t.termText().substring(1, end);
                                        }
                                    }
                                    if (localeText.length() > 0) {
                                        doc.add(new Field(attributeName + "." + localeText + ".sort",
                                                t.termText(), Field.Store.NO, Field.Index.NO_NORMS,
                                                Field.TermVector.NO));
                                    } else {
                                        // no locale
                                        doc.add(new Field(attributeName + ".no_locale", t.termText(),
                                                Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO));
                                    }
                                }
                            } catch (IOException e) {
                                // TODO ??
                            }
                            break;
                        }
                    }
                } else if (isText) {
                    // Temporary special case for uids and gids
                    if (propertyName.equals(ContentModel.PROP_USER_USERNAME)
                            || propertyName.equals(ContentModel.PROP_USERNAME)
                            || propertyName.equals(ContentModel.PROP_AUTHORITY_NAME)) {
                        doc.add(new Field(attributeName, strValue, fieldStore, fieldIndex,
                                Field.TermVector.NO));
                    }
                    // TODO: Use the node locale in preference to the system locale
                    Locale locale = null;
                    Serializable localeProperty = nodeService.getProperty(nodeRef, ContentModel.PROP_LOCALE);
                    if (localeProperty != null) {
                        locale = DefaultTypeConverter.INSTANCE.convert(Locale.class, localeProperty);
                    }
                    if (locale == null) {
                        locale = I18NUtil.getLocale();
                    }
                    StringBuilder builder;
                    MLAnalysisMode analysisMode;
                    VerbatimAnalyser vba;
                    MLTokenDuplicator duplicator;
                    Token t;
                    switch (tokenise) {
                    default:
                    case TRUE:
                        builder = new StringBuilder();
                        builder.append("\u0000").append(locale.toString()).append("\u0000").append(strValue);
                        doc.add(new Field(attributeName, builder.toString(), fieldStore, fieldIndex,
                                Field.TermVector.NO));
                        break;
                    case FALSE:
                        analysisMode = getLuceneConfig().getDefaultMLIndexAnalysisMode();
                        // Do the analysis here
                        vba = new VerbatimAnalyser(false);
                        duplicator = new MLTokenDuplicator(
                                vba.tokenStream(attributeName, new StringReader(strValue)), locale, null,
                                analysisMode);
                        try {
                            while ((t = duplicator.next()) != null) {
                                String localeText = "";
                                if (t.termText().indexOf('{') == 0) {
                                    int end = t.termText().indexOf('}', 1);
                                    if (end != -1) {
                                        localeText = t.termText().substring(1, end);
                                    }
                                }
                                if (localeText.length() > 0) {
                                    doc.add(new Field(attributeName + "." + localeText + ".sort", t.termText(),
                                            Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO));
                                }
                                doc.add(new Field(attributeName, t.termText(), fieldStore,
                                        Field.Index.NO_NORMS, Field.TermVector.NO));
                            }
                        } catch (IOException e) {
                            // TODO ??
                        }
                        break;
                    case BOTH:
                        builder = new StringBuilder();
                        builder.append("\u0000").append(locale.toString()).append("\u0000").append(strValue);
                        doc.add(new Field(attributeName, builder.toString(), fieldStore, fieldIndex,
                                Field.TermVector.NO));
                        analysisMode = getLuceneConfig().getDefaultMLIndexAnalysisMode();
                        // Do the analysis here
                        vba = new VerbatimAnalyser(false);
                        duplicator = new MLTokenDuplicator(
                                vba.tokenStream(attributeName, new StringReader(strValue)), locale, null,
                                analysisMode);
                        try {
                            while ((t = duplicator.next()) != null) {
                                String localeText = "";
                                if (t.termText().indexOf('{') == 0) {
                                    int end = t.termText().indexOf('}', 1);
                                    if (end != -1) {
                                        localeText = t.termText().substring(1, end);
                                    }
                                }
                                // localised sort support
                                if (localeText.length() > 0) {
                                    doc.add(new Field(attributeName + "." + localeText + ".sort", t.termText(),
                                            Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO));
                                } else {
                                    // All identifiers for cross language search as supported by false
                                    doc.add(new Field(attributeName + ".no_locale", t.termText(),
                                            Field.Store.NO, Field.Index.NO_NORMS, Field.TermVector.NO));
                                }
                            }
                        } catch (IOException e) {
                            // TODO ??
                        }
                        break;
                    }
                } else if (isDateTime) {
                    SimpleDateFormat df;
                    Date date;
                    switch (tokenise) {
                    default:
                    case TRUE:
                        doc.add(new Field(attributeName, strValue, fieldStore, fieldIndex,
                                Field.TermVector.NO));
                        break;
                    case FALSE:
                        df = CachingDateFormat.getDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", true);
                        try {
                            date = df.parse(strValue);
                            doc.add(new Field(attributeName, df.format(date), fieldStore,
                                    Field.Index.NO_NORMS, Field.TermVector.NO));
                        } catch (ParseException e) {
                            // ignore for ordering
                        }
                        break;
                    case BOTH:
                        doc.add(new Field(attributeName, strValue, fieldStore, fieldIndex,
                                Field.TermVector.NO));
                        df = CachingDateFormat.getDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS", true);
                        try {
                            date = df.parse(strValue);
                            doc.add(new Field(attributeName + ".sort", df.format(date), Field.Store.NO,
                                    Field.Index.NO_NORMS, Field.TermVector.NO));
                        } catch (ParseException e) {
                            // ignore for ordering
                        }
                        break;
                    }
                } else {
                    doc.add(new Field(attributeName, strValue, fieldStore, fieldIndex, Field.TermVector.NO));
                }
            }
        }
    }
    return wereAllAtomic;
}
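The key trick in this example is the lazyReader: per ALF-15857 it avoids opening the content stream until Lucene actually pulls characters while writing the document, and the MultiReader (which here concatenates two java.io.Readers; it is not Lucene's IndexReader of the same name) prepends the \u0000locale\u0000 marker to the lazily opened content. A minimal sketch of the deferred-open wrapper alone, with illustrative names and StandardCharsets in place of the UnsupportedEncodingException fallback:

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Callable;

// Defers opening the underlying InputStream until the first character is requested.
class LazyInputStreamReader extends Reader {
    private final Callable<InputStream> opener; // e.g. () -> contentReader.getReader().getContentInputStream()
    private Reader delegate;

    LazyInputStreamReader(Callable<InputStream> opener) {
        this.opener = opener;
    }

    private Reader delegate() throws IOException {
        if (delegate == null) {
            try {
                delegate = new InputStreamReader(opener.call(), StandardCharsets.UTF_8);
            } catch (Exception e) {
                throw new IOException("Could not open underlying stream", e);
            }
        }
        return delegate;
    }

    @Override
    public int read(char[] cbuf, int off, int len) throws IOException {
        return delegate().read(cbuf, off, len);
    }

    @Override
    public void close() throws IOException {
        if (delegate != null) {
            delegate.close(); // close only what was actually opened
        }
    }
}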
From source file:org.apache.druid.data.input.impl.FileIteratingFirehoseTest.java
@Test(expected = RuntimeException.class)
public void testClose() throws IOException {
    final LineIterator lineIterator = new LineIterator(new Reader() {
        @Override
        public int read(char[] cbuf, int off, int len) {
            System.arraycopy(LINE_CHARS, 0, cbuf, 0, LINE_CHARS.length);
            return LINE_CHARS.length;
        }

        @Override
        public void close() {
            throw new RuntimeException("close test for FileIteratingFirehose");
        }
    });

    final TestCloseable closeable = new TestCloseable();
    final FileIteratingFirehose firehose = new FileIteratingFirehose(
            ImmutableList.of(lineIterator).iterator(), parser, closeable);
    firehose.hasMore(); // initialize lineIterator
    firehose.close();

    Assert.assertTrue(closeable.closed);
}
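The stub Reader here exists only for its close(): by making close() throw, the test proves that FileIteratingFirehose.close() propagates all the way down to the LineIterator's underlying Reader. The same stub technique in a generic, reusable form (illustrative, not Druid code):

import java.io.Reader;

// A stub whose close() is observable: it records the call and raises, so a test
// can verify both that a wrapper propagates close() and how it handles failure.
class ThrowingCloseReader extends Reader {
    boolean closed = false;

    @Override
    public int read(char[] cbuf, int off, int len) {
        return -1; // immediate end of stream; this stub exists only for its close()
    }

    @Override
    public void close() {
        closed = true;
        throw new RuntimeException("close called");
    }
}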
From source file:org.lockss.extractor.GoslingHtmlLinkExtractor.java
protected void parseStyleContentsFromRing(ArchivalUnit au, LinkExtractor.Callback cb) {
    Reader cssReader = new Reader() {
        boolean closed = false;

        public void close() {
            closed = true;
        }

        public int read(char[] cbuf, int off, int len) throws IOException {
            int ix = 0;
            while (ix < len) {
                int ret = read();
                if (ret == -1)
                    break;
                cbuf[off + ix] = (char) ret;
                ++ix;
            }
            return ix == 0 ? -1 : ix;
        }

        public int read() throws IOException {
            if (!refill("</style>".length()) && !closed) {
                logger.siteWarning("Unclosed <style> section in " + srcUrl);
            }
            if (ring.size() == 0 || ring.startsWithIgnoreCase("</style>")) {
                return -1;
            }
            return ring.remove();
        }
    };
    processStyleText(au, cb, cssReader, "tag");
}
From source file:org.lockss.util.StringUtil.java
/** Return a reader that transforms platform newline sequences to standard
 * newline characters.
 * @param r a Reader
 * @return a filtered reader that transforms platform newline sequences to
 * standard newline characters.
 */
public static Reader getLineReader(final Reader r) {
    return new Reader() {
        boolean saw_CR = false;
        final char[] cb = new char[1];

        public int read(char cbuf[], int off, int len) throws IOException {
            int i;
            int n = 0;
            for (i = 0; i < len; i++) {
                if ((n = r.read(cb, 0, 1)) <= 0) {
                    break;
                }
                if (saw_CR) {
                    saw_CR = false;
                    if (cb[0] == '\n') {
                        if (r.read(cb, 0, 1) <= 0) {
                            break;
                        }
                    }
                }
                if (cb[0] == '\r') {
                    saw_CR = true;
                    cb[0] = '\n';
                }
                cbuf[off + i] = cb[0];
            }
            return (i == 0) ? n : i;
        }

        public void close() throws IOException {
            r.close();
        }
    };
}
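A quick check of the behaviour, assuming the method is called on org.lockss.util.StringUtil as defined above: CR, LF and CRLF each come out as a single '\n'.

Reader r = StringUtil.getLineReader(new StringReader("a\r\nb\rc\nd"));
StringBuilder out = new StringBuilder();
int ch;
while ((ch = r.read()) != -1) {
    out.append((char) ch);
}
System.out.println(out.toString().equals("a\nb\nc\nd")); // true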
From source file:org.lockss.util.StringUtil.java
/** Return a reader that removes backslash-newline sequences
 * (line-continuation)
 * @param r a Reader
 * @return a filtered reader that removes line-continuation sequences
 */
public static Reader getLineContinuationReader(final Reader r) {
    return new Reader() {
        boolean saw_bslash = false;
        final char[] cb = new char[1];
        int lastch = -1;

        public int read(char cbuf[], int off, int len) throws IOException {
            int n = 0;
            int endoff = off + len;
            while (off < endoff) {
                // if have a character waiting, emit it
                if (lastch >= 0) {
                    cbuf[off++] = (char) lastch;
                    lastch = -1;
                } else {
                    if ((n = r.read(cb, 0, 1)) <= 0) {
                        // end of input.  do we have a hanging backslash?
                        if (saw_bslash) {
                            cbuf[off++] = '\\';
                            saw_bslash = false;
                        }
                        break;
                    }
                    switch (cb[0]) {
                    case '\\':
                        if (saw_bslash) {
                            // if already seen a backslash, output that one
                            cbuf[off++] = '\\';
                        } else {
                            saw_bslash = true;
                        }
                        break;
                    case '\n':
                        if (saw_bslash) {
                            saw_bslash = false;
                        } else {
                            cbuf[off++] = cb[0];
                        }
                        break;
                    default:
                        if (saw_bslash) {
                            cbuf[off++] = '\\';
                            saw_bslash = false;
                            lastch = cb[0];
                        } else {
                            cbuf[off++] = cb[0];
                        }
                        break;
                    }
                }
            }
            int nread = len - (endoff - off);
            return nread == 0 ? -1 : nread;
        }

        public void close() throws IOException {
            r.close();
        }
    };
}
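And a matching check for the continuation filter, again assuming the org.lockss.util.StringUtil method above: a backslash immediately before a newline swallows both, while plain newlines and escaped backslashes pass through.

// "one\<newline>two" collapses to "onetwo"; the plain newline before "three" survives.
Reader r = StringUtil.getLineContinuationReader(new StringReader("one\\\ntwo\nthree"));
StringBuilder out = new StringBuilder();
int ch;
while ((ch = r.read()) != -1) {
    out.append((char) ch);
}
System.out.println(out.toString().equals("onetwo\nthree")); // true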