List of usage examples for java.io.Reader.reset()
public void reset() throws IOException
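reset() repositions a reader to its most recent mark() (or, for readers such as StringReader, to the beginning of the stream if no mark was ever set). Support is optional: check markSupported() or wrap the reader in a BufferedReader first, otherwise reset() throws IOException. A minimal sketch of the contract (the sample text and read-ahead limit are illustrative):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

public class ReaderResetDemo {
    public static void main(String[] args) throws IOException {
        // BufferedReader supports mark/reset; many readers do not.
        Reader reader = new BufferedReader(new StringReader("hello"));

        reader.mark(1);            // mark stays valid while at most 1 char is read
        int first = reader.read(); // consume 'h'
        reader.reset();            // rewind to the marked position

        System.out.println((char) reader.read() == (char) first); // true: 'h' again
    }
}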
From source file:com.globalsight.everest.edit.offline.upload.UploadApi.java
/**
 * Loads the upload file into an OfflinePageData object.
 *
 * @param p_reader
 *            a stream opened on the upload file.
 * @param p_keepIssues
 *            when an OfflinePageData object is called *twice* to load data,
 *            this parameter allows to keep issues read in the first run
 *            (the second run normally clears the entire object). This is
 *            necessary for RTF list view which first parses the RTF, then
 *            loads the textual content as list view text file.
 * @return if there are no errors, null is returned. If there are errors, a
 *         fully formed HTML error report page is returned.
 */
public String loadListViewTextFile(Reader p_reader, String p_fileName, boolean p_keepIssues) {
    if (m_uploadPageData == null) {
        m_uploadPageData = new OfflinePageData();
        m_referencePageDatas = new ArrayList<PageData>();
    }
    try {
        p_reader.mark(0);
    } catch (IOException e1) {
        e1.printStackTrace();
    }
    String errPage = null;
    // Set the linefeed normalization sequence.
    if ((errPage = getLFNormalizationSequence()) != null) {
        return errPage;
    }
    // Filter out header/metadata lines before parsing.
    Reader new_reader = null;
    try {
        StringBuffer content = new StringBuffer();
        BufferedReader br = new BufferedReader(p_reader);
        String line = br.readLine();
        String previousLine = null;
        while (line != null) {
            boolean ignoreThisLine = line.startsWith(SEGMENT_PAGE_NAME_KEY)
                    || line.startsWith(SEGMENT_FILE_PATH_KEY)
                    || line.startsWith(HEADER_JOB_NAME)
                    || line.startsWith(HEADER_JOB_ID)
                    || line.startsWith(GS_TOOLKIT_FORMAT)
                    || line.startsWith(SEGMENT_SID_KEY)
                    || line.startsWith(SEGMENT_XLF_TARGET_STATE_KEY)
                    || line.startsWith(SEGMENT_INCONTEXT_MATCH_KEY)
                    || line.startsWith(SEGMENT_TM_PROFILE_KEY)
                    || line.startsWith(SEGMENT_TERMBASE_KEY)
                    || line.startsWith(HEADER_POPULATE_100_SEGMENTS);
            if (!ignoreThisLine) {
                content.append(line).append("\r\n");
            }
            // Check if it is OmegaT format.
            if (ignoreThisLine && line.startsWith(GS_TOOLKIT_FORMAT)) {
                int index = line.indexOf(":");
                String f = index > 0 ? line.substring(index + 1).trim() : "xliff";
                m_uploadPageData.setIsOmegaT("omegat".equalsIgnoreCase(f));
                m_uploadPageData.setIsXliff("xliff".equalsIgnoreCase(f));
            }
            // (GBS-3711) Store "state" attribute value of XLF target section.
            if (ignoreThisLine && line.startsWith(SEGMENT_XLF_TARGET_STATE_KEY)) {
                int index = line.indexOf(":");
                if (index > 0) {
                    String state = line.substring(index + 1).trim();
                    String tuId = previousLine.substring(2);
                    m_uploadPageData.addXlfTargetState(tuId, state);
                }
            }
            // GBS-3825
            if (ignoreThisLine && line.startsWith(HEADER_POPULATE_100_SEGMENTS)) {
                int index = line.indexOf(":");
                if (index > 0) {
                    String isPopulate100 = line.substring(index + 1).trim();
                    m_uploadPageData.setPopulate100("yes".equalsIgnoreCase(isPopulate100));
                }
            }
            previousLine = line;
            line = br.readLine();
        }
        new_reader = new StringReader(content.toString());
        br.close();
    } catch (Exception e) {
        new_reader = p_reader;
    }
    // Read the upload file into an OfflinePageData object.
    try {
        m_errWriter.setFileName(p_fileName);
        m_uploadPageData.setLoadConversionLineBreak(m_normalizedLB);
        m_uploadPageData.loadOfflineTextFile(new_reader, false);
        Vector<OfflineSegmentData> list = m_uploadPageData.getSegmentList();
        for (OfflineSegmentData object : list) {
            String targetText = object.getDisplayTargetText();
            targetText = StringUtil.replace(targetText, OfflineConstants.PONUD_SIGN, "#");
            object.setDisplayTargetText(targetText);
        }
        // Set err writer's page, task and job ids.
        m_errWriter.processOfflinePageData(m_uploadPageData);
    } catch (Throwable ex) {
        // Rewind so the error report can re-read the original upload.
        try {
            p_reader.reset();
        } catch (IOException e) {
            e.printStackTrace();
        }
        String errMsg = null;
        boolean noSegments = false;
        if (ex instanceof ParseException) {
            ParseException pe = (ParseException) ex;
            int[][] expected = pe.expectedTokenSequences;
            if (expected != null && expected.length == 2 && expected[0].length == 1
                    && expected[1].length == 1 && expected[0][0] == 8 && expected[1][0] == 9) {
                Token current = pe.currentToken;
                if (current != null && current.next != null && current.next.kind == 17) {
                    noSegments = true;
                }
            }
        }
        // Check if the file is empty (contains no segments).
        if (noSegments) {
            errMsg = m_messages.getString("NoSegmentsInFile");
        } else {
            String exMsg = ex.getMessage();
            String args[] = { EditUtil.encodeHtmlEntities(exMsg) };
            bindErrMsg(args, p_reader);
            errMsg = MessageFormat.format(m_messages.getString("FormatTwoLoadError"), (Object[]) args);
            CATEGORY.error(errMsg);
        }
        m_errWriter.addFileErrorMsg(errMsg);
        m_errWriter.processOfflinePageData(m_uploadPageData);
        return m_errWriter.buildPage().toString();
    }
    return null;
}
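In the GlobalSight method above, the reader is marked up front so that the catch block can reset() and re-read the upload while building the error report. Note the read-ahead limit passed to mark(): it bounds how much may be read before reset() is still guaranteed to work, so mark(0) quietly relies on the underlying reader's buffering. A compact, self-contained sketch of the same mark-then-reset recovery pattern (the input and limit are illustrative):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;

public class RetrySketch {
    public static void main(String[] args) throws IOException {
        // The limit must cover everything read before a possible reset();
        // mark(0), as in the snippet above, only works while nothing has
        // been read past the mark (or the buffer happens to retain it).
        int limit = 1 << 20; // generous read-ahead budget (assumption)
        BufferedReader in = new BufferedReader(new StringReader("not-a-number"));
        in.mark(limit);
        try {
            System.out.println(Integer.parseInt(in.readLine())); // first attempt
        } catch (NumberFormatException e) {
            in.reset();                                          // rewind and recover
            System.out.println("fallback: " + in.readLine());
        }
    }
}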
From source file:net.sourceforge.pmd.util.IOUtil.java
public static Reader skipBOM(Reader source) {
    Reader in = new BufferedReader(source);
    try {
        in.mark(1);
        int firstCharacter = in.read();
        if (firstCharacter != '\ufeff') {
            in.reset();
        }
    } catch (IOException e) {
        throw new RuntimeException("Error while trying to skip BOM marker", e);
    }
    return in;
}
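A usage sketch for the PMD helper above (assuming PMD's IOUtil is on the classpath; the file path and charset wiring are illustrative): after skipBOM(), the caller never sees the BOM, and the one-character mark() is enough because reset() is only called when exactly one character has been read.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import net.sourceforge.pmd.util.IOUtil;

public class SkipBomExample {
    public static void main(String[] args) throws IOException {
        // "data.csv" is a placeholder path.
        try (Reader in = IOUtil.skipBOM(new InputStreamReader(
                new FileInputStream("data.csv"), StandardCharsets.UTF_8))) {
            // The first character read here is real content, never a BOM.
            System.out.println((char) in.read());
        }
    }
}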
From source file:org.apache.tika.parser.csv.CSVSniffer.java
List<CSVResult> sniff(Reader reader) throws IOException {
    if (!reader.markSupported()) {
        reader = new BufferedReader(reader);
    }
    List<CSVResult> ret = new ArrayList<>();
    for (char delimiter : delimiters) {
        reader.mark(markLimit);
        try {
            CSVResult result = new Snifflet(delimiter).sniff(reader);
            ret.add(result);
        } finally {
            reader.reset();
        }
    }
    Collections.sort(ret);
    return ret;
}
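The Tika sniffer re-reads one stream once per candidate delimiter: each pass calls mark(markLimit), consumes at most markLimit characters, and the finally block resets for the next pass. A stripped-down, self-contained sketch of that multi-pass pattern, with simple delimiter counting standing in for Tika's Snifflet scoring:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.LinkedHashMap;
import java.util.Map;

public class MultiPassSketch {
    public static void main(String[] args) throws IOException {
        Reader reader = new BufferedReader(new StringReader("a,b,c\n1,2,3\nx;y\n"));
        int markLimit = 8192; // reset() stays valid while we read at most this many chars

        Map<Character, Integer> counts = new LinkedHashMap<>();
        for (char delimiter : new char[] { ',', ';', '\t' }) {
            reader.mark(markLimit);
            try {
                counts.put(delimiter, count(reader, delimiter, markLimit));
            } finally {
                reader.reset(); // rewind so the next pass sees the same characters
            }
        }
        System.out.println(counts); // ',' wins with 4 occurrences
    }

    static int count(Reader in, char target, int budget) throws IOException {
        int n = 0, c;
        while (budget-- > 0 && (c = in.read()) != -1) {
            if (c == target) n++;
        }
        return n;
    }
}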
From source file:org.exist.collections.Collection.java
/**
 * Stores an XML document in the database. {@link #validateXMLResourceInternal(org.exist.storage.txn.Txn,
 * org.exist.storage.DBBroker, org.exist.xmldb.XmldbURI, CollectionConfiguration, org.exist.collections.Collection.ValidateBlock)}
 * should have been called previously in order to acquire a write lock for the document. Launches the finish trigger.
 *
 * @param transaction
 * @param broker
 * @param info
 * @param source
 * @param privileged
 *
 * @throws EXistException
 * @throws PermissionDeniedException
 * @throws TriggerException
 * @throws SAXException
 * @throws LockException
 */
public void store(final Txn transaction, final DBBroker broker, final IndexInfo info,
        final InputSource source, boolean privileged)
        throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException {
    storeXMLInternal(transaction, broker, info, privileged, new StoreBlock() {
        @Override
        public void run() throws EXistException, SAXException {
            try {
                final InputStream is = source.getByteStream();
                if (is != null && is.markSupported()) {
                    is.reset();
                } else {
                    final Reader cs = source.getCharacterStream();
                    if (cs != null && cs.markSupported()) {
                        cs.reset();
                    }
                }
            } catch (final IOException e) {
                // mark is not supported: exception is expected, do nothing
                LOG.debug("InputStream or CharacterStream underlying the InputSource does not support marking and therefore cannot be re-read.");
            }
            final XMLReader reader = getReader(broker, false, info.getCollectionConfig());
            info.setReader(reader, null);
            try {
                reader.parse(source);
            } catch (final IOException e) {
                throw new EXistException(e);
            } finally {
                releaseReader(broker, info, reader);
            }
        }
    });
}
From source file:org.exist.collections.MutableCollection.java
@Override
public void store(final Txn transaction, final DBBroker broker, final IndexInfo info, final InputSource source)
        throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException {
    storeXMLInternal(transaction, broker, info, storeInfo -> {
        try {
            final InputStream is = source.getByteStream();
            if (is != null && is.markSupported()) {
                is.reset();
            } else {
                final Reader cs = source.getCharacterStream();
                if (cs != null && cs.markSupported()) {
                    cs.reset();
                }
            }
        } catch (final IOException e) {
            // mark is not supported: exception is expected, do nothing
            LOG.debug("InputStream or CharacterStream underlying the InputSource does not support marking and therefore cannot be re-read.");
        }
        final XMLReader reader = getReader(broker, false, storeInfo.getCollectionConfig());
        storeInfo.setReader(reader, null);
        try {
            reader.parse(source);
        } catch (final IOException e) {
            throw new EXistException(e);
        } finally {
            releaseReader(broker, storeInfo, reader);
        }
    });
}
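Both eXist variants guard reset() behind markSupported(), so a re-parse only rewinds sources that can actually be rewound and treats the IOException as expected otherwise. A minimal sketch of that guard in isolation (the sample readers are illustrative):

import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.StringReader;

public class GuardedResetSketch {
    // Rewind the reader if it can be rewound; otherwise leave it untouched.
    // Mirrors the guard used in the eXist store() methods above.
    static void rewindIfPossible(Reader reader) {
        if (reader != null && reader.markSupported()) {
            try {
                reader.reset(); // StringReader/BufferedReader land here
            } catch (IOException e) {
                // reset() can still fail (e.g. mark invalidated); treat as non-rewindable
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Reader rewindable = new StringReader("abc");   // markSupported() == true
        rewindable.read();
        rewindIfPossible(rewindable);
        System.out.println((char) rewindable.read());  // 'a' again

        Reader oneShot = new InputStreamReader(System.in); // markSupported() == false
        rewindIfPossible(oneShot); // no-op, no exception
    }
}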
From source file:org.mule.util.ClassUtils.java
private static String classNameHelper(Reader encodedName) {
    // I did consider separating this data from the code, but I could not find a
    // solution that was as clear to read, or clearly motivated (these data are not
    // used elsewhere).
    try {
        encodedName.mark(1);
        switch (encodedName.read()) {
        case -1:
            return "null";
        case 'Z':
            return "boolean";
        case 'B':
            return "byte";
        case 'C':
            return "char";
        case 'D':
            return "double";
        case 'F':
            return "float";
        case 'I':
            return "int";
        case 'J':
            return "long";
        case 'S':
            return "short";
        case '[':
            return classNameHelper(encodedName) + "[]";
        case 'L':
            return shorten(new BufferedReader(encodedName).readLine());
        default:
            encodedName.reset();
            return shorten(new BufferedReader(encodedName).readLine());
        }
    } catch (IOException e) {
        return "unknown type: " + e.getMessage();
    }
}
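The Mule helper peeks a single character with mark(1) and pushes it back via reset() when the input turns out to be a plain class name rather than a JVM type descriptor. A self-contained replica for experimenting with that branch logic (shorten() and most primitive cases are dropped for brevity; this is an illustration, not the Mule code):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

public class DescriptorDemo {
    public static void main(String[] args) {
        System.out.println(className(new StringReader("[[I")));  // int[][]
        System.out.println(className(new StringReader("Z")));    // boolean
        System.out.println(className(new StringReader("Foo")));  // default branch: reset() + readLine()
    }

    static String className(Reader encodedName) {
        try {
            encodedName.mark(1);
            switch (encodedName.read()) {
            case -1:  return "null";
            case 'Z': return "boolean";
            case 'I': return "int";
            case '[': return className(encodedName) + "[]";
            default:
                encodedName.reset(); // push the peeked character back
                return new BufferedReader(encodedName).readLine();
            }
        } catch (IOException e) {
            return "unknown type: " + e.getMessage();
        }
    }
}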
From source file:org.structr.common.geo.BingGeoCodingProvider.java
@Override
public GeoCodingResult geocode(String street, String house, String postalCode, String city, String state,
        String country, String language) throws IOException {

    if (apiKey != null && !apiKey.isEmpty()) {

        StringBuilder urlBuffer = new StringBuilder("http://dev.virtualearth.net/REST/v1/Locations");

        // api key
        urlBuffer.append("?key=").append(apiKey);

        // culture for language-specific formatting
        urlBuffer.append("&c=").append(language);

        // output format: XML
        urlBuffer.append("&o=xml");

        // address line string
        urlBuffer.append("&query=");

        // house
        if (house != null && !house.isEmpty()) {
            urlBuffer.append(encodeURL(house)).append("+");
        }

        // street
        if (street != null && !street.isEmpty()) {
            urlBuffer.append(encodeURL(street)).append("+");
        }

        // city
        if (city != null && !city.isEmpty()) {
            urlBuffer.append(encodeURL(city)).append("+");
        }

        // postalCode
        if (postalCode != null && !postalCode.isEmpty()) {
            urlBuffer.append("&postalCode=").append(encodeURL(postalCode));
        }

        /* disabled because the ISO country code is required here which we don't have
        // countryRegion
        if (country != null && !country.isEmpty()) {
            urlBuffer.append("&countryRegion=").append(encodeURL(country));
        }
        */

        // max results
        urlBuffer.append("&maxResults=1");

        String url = urlBuffer.toString();

        try {
            logger.log(Level.INFO, "Using url {0}", url);

            URL mapsUrl = new URL(urlBuffer.toString());
            HttpURLConnection connection = (HttpURLConnection) mapsUrl.openConnection();
            connection.connect();

            // BufferedReader + mark(1) so that reset() is legal when the first
            // character is not a BOM (a bare InputStreamReader does not support mark/reset)
            Reader reader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
            SAXReader saxReader = new SAXReader();

            // skip leading 0xFEFF (BOM) character if present
            reader.mark(1);
            if (reader.read() != 65279) {
                reader.reset();
            }

            Document xmlDoc = saxReader.read(reader);

            connection.disconnect();
            reader.close();

            if (xmlDoc != null) {

                Map<String, String> data = new LinkedHashMap<>();
                Element root = xmlDoc.getRootElement();

                try { data.put("lat", root.element("ResourceSets").element("ResourceSet").element("Resources").element("Location").element("Point").element("Latitude").getTextTrim()); } catch (Throwable t) {}
                try { data.put("lon", root.element("ResourceSets").element("ResourceSet").element("Resources").element("Location").element("Point").element("Longitude").getTextTrim()); } catch (Throwable t) {}
                try { data.put("postalCode", root.element("ResourceSets").element("ResourceSet").element("Resources").element("Location").element("Address").element("PostalCode").getTextTrim()); } catch (Throwable t) {}
                try { data.put("adminDistrict", root.element("ResourceSets").element("ResourceSet").element("Resources").element("Location").element("Address").element("AdminDistrict").getTextTrim()); } catch (Throwable t) {}
                try { data.put("adminDistrict2", root.element("ResourceSets").element("ResourceSet").element("Resources").element("Location").element("Address").element("AdminDistrict2").getTextTrim()); } catch (Throwable t) {}
                try { data.put("locality", root.element("ResourceSets").element("ResourceSet").element("Resources").element("Location").element("Address").element("Locality").getTextTrim()); } catch (Throwable t) {}
                try { data.put("countryRegion", root.element("ResourceSets").element("ResourceSet").element("Resources").element("Location").element("Address").element("CountryRegion").getTextTrim()); } catch (Throwable t) {}

                if (data.containsKey("lat") && data.containsKey("lon")) {

                    String address =
                        (StringUtils.isNotBlank(street) ? street : "") + " " +
                        (StringUtils.isNotBlank(house) ? house : "") + " " +
                        (StringUtils.isNotBlank(postalCode) ? postalCode : "") + " " +
                        (StringUtils.isNotBlank(city) ? city : "") + " " +
                        (StringUtils.isNotBlank(state) ? state : "") + " " +
                        (StringUtils.isNotBlank(country) ? country : "") + " ";

                    return new BingGeoCodingResult(address, data);

                } else {
                    logger.log(Level.WARNING, "Geocoding result did not contain location information:\n{0}", xmlDoc.asXML());
                }
            }

        } catch (DocumentException dex) {
            logger.log(Level.WARNING, "Unable to use Bing geocoding provider: {0}", dex.getMessage());
            // maybe not a permanent error => wrap in IOException so the request is retried later
            throw new IOException(dex);
        }

    } else {
        logger.log(Level.WARNING,
                "Unable to use Bing geocoding provider, missing API key. Please supply API key in structr.conf using the key geocoding.apikey.");
    }

    return null;
}
From source file:org.transitime.utils.csv.CsvBaseReader.java
/**
 * Parse the CSV file. Reads in the header info and then each line. Calls
 * the abstract handleRecord() method for each record. Adds each resulting
 * CSV object to the gtfsObjects array.
 */
private void parse() {
    CSVRecord record = null;
    try {
        IntervalTimer timer = new IntervalTimer();

        logger.debug("Parsing CSV file {} ...", fileName);

        // Open the file for reading. Use UTF-8 format since that will work
        // for both regular ASCII format and UTF-8 extended format files
        // since UTF-8 was designed to be backwards compatible with ASCII.
        // This way will work for Chinese and other character sets. Use
        // InputStreamReader so can specify that using UTF-8 format. Use
        // BufferedReader so that can determine if first character is an
        // optional BOM (Byte Order Mark) character used to indicate that
        // file is in UTF-8 format. BufferedReader allows us to read in the
        // first character and then discard it if it is a BOM character, or
        // reset the reader back to the beginning if it is not. This
        // way the CSV parser will process the file starting with the first
        // true character.
        Reader in = new BufferedReader(new InputStreamReader(new FileInputStream(fileName), "UTF-8"));

        // Deal with the possible BOM character at the beginning of the file
        in.mark(1);
        int firstRead = in.read();
        final int BOM_CHARACTER = 0xFEFF;
        if (firstRead != BOM_CHARACTER)
            in.reset();

        // Get ready to parse the CSV file.
        // Allow lines to be comments if they start with "-" so that one can
        // easily comment out problems and also test what happens when
        // certain data is missing. Using the '-' character so one can
        // comment out a line that starts with "--", which is what is
        // used for SQL.
        CSVFormat formatter = CSVFormat.DEFAULT.withHeader().withCommentMarker('-');

        // Parse the file
        Iterable<CSVRecord> records = formatter.parse(in);

        logger.debug("Finished CSV parsing of file {}. Took {} msec.", fileName, timer.elapsedMsec());

        int lineNumberWhenLogged = 0;
        timer = new IntervalTimer();
        IntervalTimer loggingTimer = new IntervalTimer();

        Iterator<CSVRecord> iterator = records.iterator();
        while (iterator.hasNext()) {
            // Determine the record to process
            record = iterator.next();

            // If blank line then skip it. This way we avoid error messages
            // since the expected data column won't exist
            if (record.size() == 0)
                continue;

            // Process the record using the appropriate handler
            // and create the corresponding CSV object
            T gtfsObject;
            try {
                gtfsObject = handleRecord(record, supplemental);
            } catch (ParseException e) {
                logger.error("ParseException occurred for record {} "
                        + "(comment lines not included when determining record #) for "
                        + "filename {} . {}", record.getRecordNumber(), fileName, e.getMessage());

                // Continue even though there was an error so that all errors
                // are logged at once.
                continue;
            } catch (NumberFormatException e) {
                logger.error("NumberFormatException occurred for record {} "
                        + "(comment lines not included when determining record #) "
                        + "for filename {} . {}", record.getRecordNumber(), fileName, e.getMessage());

                // Continue even though there was an error so that all errors
                // are logged at once.
                continue;
            }

            // Add the newly created CSV object to the object list
            if (gtfsObject != null)
                gtfsObjects.add(gtfsObject);

            // Log info if it has been a while. Check only every 20,000
            // lines to see if the logging interval has gone by. If so, log
            // the number of lines processed. By only looking at the timer
            // every 20,000 lines we avoid slowing things down with a system
            // call for the current time on every line.
            final int LINES_TO_PROCESS_BEFORE_CHECKING_IF_SHOULD_LOG = 20000;
            final long SECONDS_ELAPSED_UNTIL_SHOULD_LOG = 5;
            if (record.getRecordNumber() >= lineNumberWhenLogged + LINES_TO_PROCESS_BEFORE_CHECKING_IF_SHOULD_LOG) {
                lineNumberWhenLogged = (int) record.getRecordNumber();
                if (loggingTimer.elapsedMsec() > SECONDS_ELAPSED_UNTIL_SHOULD_LOG * Time.MS_PER_SEC) {
                    logger.info("  Processed {} lines. Took {} msec...", lineNumberWhenLogged, timer.elapsedMsec());
                    loggingTimer = new IntervalTimer();
                }
            }
        } // End of while iterating over records

        // Close up the file reader
        in.close();

        // Determine number of records for logging message
        long numberRecords = 0;
        if (record != null)
            numberRecords = record.getRecordNumber();

        logger.info("Finished parsing {} records from file {} . Took {} msec.",
                numberRecords, fileName, timer.elapsedMsec());
    } catch (FileNotFoundException e) {
        if (required)
            logger.error("Required CSV file {} not found.", fileName);
        else
            logger.info("CSV file {} not found but OK because this file is not required.", fileName);
    } catch (IOException e) {
        logger.error("IOException occurred when reading in filename {}.", fileName, e);
    }
}