List of usage examples for java.io InputStream mark
public synchronized void mark(int readlimit)
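java.io.InputStream.mark(int readlimit) remembers the current position in the stream so that a later call to reset() repositions the stream and the same bytes can be read again. The readlimit argument is the maximum number of bytes that may be read after marking before the mark becomes invalid. The base InputStream implementation does nothing, so callers normally check markSupported() or wrap the stream in a BufferedInputStream, as most of the examples below do. A minimal, self-contained sketch of the contract (not taken from any of the projects listed below):

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkResetSketch {
    public static void main(String[] args) throws IOException {
        InputStream in = new BufferedInputStream(
                new ByteArrayInputStream(new byte[] { 1, 2, 3, 4, 5 }));
        if (in.markSupported()) {
            in.mark(4);             // up to 4 bytes may be read before the mark expires
            int first = in.read();  // consumes the byte 1
            in.reset();             // rewind to the marked position
            int again = in.read();  // reads the byte 1 again
            System.out.println(first == again); // true
        }
    }
}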
From source file:com.graphhopper.reader.OSMInputFile.java
@SuppressWarnings("unchecked") private InputStream decode(File file) throws IOException { final String name = file.getName(); InputStream ips = null; try {//from w w w . j a v a2s. c o m ips = new BufferedInputStream(new FileInputStream(file), 50000); } catch (FileNotFoundException e) { throw new RuntimeException(e); } ips.mark(10); // check file header byte header[] = new byte[6]; ips.read(header); /* can parse bz2 directly with additional lib if (header[0] == 'B' && header[1] == 'Z') { return new CBZip2InputStream(ips); } */ if (header[0] == 31 && header[1] == -117) { ips.reset(); return new GZIPInputStream(ips, 50000); } else if (header[0] == 0 && header[1] == 0 && header[2] == 0 && header[4] == 10 && header[5] == 9 && (header[3] == 13 || header[3] == 14)) { ips.reset(); binary = true; return ips; } else if (header[0] == 'P' && header[1] == 'K') { ips.reset(); ZipInputStream zip = new ZipInputStream(ips); zip.getNextEntry(); return zip; } else if (name.endsWith(".osm") || name.endsWith(".xml")) { ips.reset(); return ips; } else if (name.endsWith(".bz2") || name.endsWith(".bzip2")) { String clName = "org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream"; try { Class clazz = Class.forName(clName); ips.reset(); Constructor<InputStream> ctor = clazz.getConstructor(InputStream.class, boolean.class); return ctor.newInstance(ips, true); } catch (Exception e) { throw new IllegalArgumentException("Cannot instantiate " + clName, e); } } else { throw new IllegalArgumentException("Input file is not of valid type " + file.getPath()); } }
From source file:com.graphhopper.reader.osm.OSMInputFile.java
@SuppressWarnings("unchecked") private InputStream decode(File file) throws IOException { final String name = file.getName(); InputStream ips = null; try {//from w w w . j a v a 2s.c o m ips = new BufferedInputStream(new FileInputStream(file), 50000); } catch (FileNotFoundException e) { throw new RuntimeException(e); } ips.mark(10); // check file header byte header[] = new byte[6]; if (ips.read(header) < 0) throw new IllegalArgumentException("Input file is not of valid type " + file.getPath()); /* can parse bz2 directly with additional lib if (header[0] == 'B' && header[1] == 'Z') { return new CBZip2InputStream(ips); } */ if (header[0] == 31 && header[1] == -117) { ips.reset(); return new GZIPInputStream(ips, 50000); } else if (header[0] == 0 && header[1] == 0 && header[2] == 0 && header[4] == 10 && header[5] == 9 && (header[3] == 13 || header[3] == 14)) { ips.reset(); binary = true; return ips; } else if (header[0] == 'P' && header[1] == 'K') { ips.reset(); ZipInputStream zip = new ZipInputStream(ips); zip.getNextEntry(); return zip; } else if (name.endsWith(".osm") || name.endsWith(".xml")) { ips.reset(); return ips; } else if (name.endsWith(".bz2") || name.endsWith(".bzip2")) { String clName = "org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream"; try { Class clazz = Class.forName(clName); ips.reset(); Constructor<InputStream> ctor = clazz.getConstructor(InputStream.class, boolean.class); return ctor.newInstance(ips, true); } catch (Exception e) { throw new IllegalArgumentException("Cannot instantiate " + clName, e); } } else { throw new IllegalArgumentException("Input file is not of valid type " + file.getPath()); } }
From source file:pt.lunacloud.auth.AbstractAWSSigner.java
/**
 * Returns the request's payload contents as binary data, without processing
 * any query string params (i.e. no form encoding for query params).
 *
 * @param request
 *            The request
 * @return The request's payload contents as binary data, not including any
 *         form encoding of query string params.
 */
protected byte[] getBinaryRequestPayloadWithoutQueryParams(Request<?> request) {
    InputStream content = getBinaryRequestPayloadStreamWithoutQueryParams(request);
    try {
        content.mark(-1);
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024 * 5];
        while (true) {
            int bytesRead = content.read(buffer);
            if (bytesRead == -1)
                break;
            byteArrayOutputStream.write(buffer, 0, bytesRead);
        }
        byteArrayOutputStream.close();
        content.reset();
        return byteArrayOutputStream.toByteArray();
    } catch (Exception e) {
        throw new LunacloudClientException(
                "Unable to read request payload to sign request: " + e.getMessage(), e);
    }
}
From source file:com.sina.auth.AbstractAWSSigner.java
/**
 * Returns the request's payload contents as binary data, without processing
 * any query string params (i.e. no form encoding for query params).
 *
 * @param request
 *            The request
 * @return The request's payload contents as binary data, not including any
 *         form encoding of query string params.
 */
protected byte[] getBinaryRequestPayloadWithoutQueryParams(Request<?> request) {
    InputStream content = getBinaryRequestPayloadStreamWithoutQueryParams(request);
    try {
        content.mark(-1);
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        byte[] buffer = new byte[1024 * 5];
        while (true) {
            int bytesRead = content.read(buffer);
            if (bytesRead == -1)
                break;
            byteArrayOutputStream.write(buffer, 0, bytesRead);
        }
        byteArrayOutputStream.close();
        content.reset();
        return byteArrayOutputStream.toByteArray();
    } catch (Exception e) {
        throw new SCSClientException(
                "Unable to read request payload to sign request: " + e.getMessage(), e);
    }
}
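Both signer examples above call content.mark(-1). A negative readlimit only round-trips safely when the underlying stream effectively ignores the limit; java.io.ByteArrayInputStream, for example, documents that its read-ahead limit has no meaning, so the whole payload can be consumed and reset regardless of the value passed to mark. A small standalone sketch of that behaviour (the demo class is hypothetical, not part of either SDK):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkLimitDemo {
    public static void main(String[] args) throws IOException {
        // ByteArrayInputStream ignores the readlimit argument, so mark(-1)
        // followed by draining the stream and reset() still works.
        InputStream in = new ByteArrayInputStream("payload".getBytes("UTF-8"));
        in.mark(-1);
        while (in.read() != -1) {
            // drain the stream completely
        }
        in.reset();                      // back to the marked position
        System.out.println(in.read());   // prints 112, the byte value of 'p'
    }
}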
From source file:org.apache.ivory.resource.AbstractEntityManager.java
protected Entity deserializeEntity(HttpServletRequest request, EntityType entityType)
        throws IOException, IvoryException {
    EntityParser<?> entityParser = EntityParserFactory.getParser(entityType);
    InputStream xmlStream = request.getInputStream();
    if (xmlStream.markSupported()) {
        xmlStream.mark(XML_DEBUG_LEN); // mark up to debug len
    }
    try {
        return entityParser.parse(xmlStream);
    } catch (IvoryException e) {
        if (LOG.isDebugEnabled() && xmlStream.markSupported()) {
            try {
                xmlStream.reset();
                String xmlData = getAsString(xmlStream);
                LOG.debug("XML DUMP for (" + entityType + "): " + xmlData, e);
            } catch (IOException ignore) {
            }
        }
        throw e;
    }
}
From source file:eu.europa.ec.markt.dss.validation.SignedDocumentValidator.java
/**
 * Guess the document format and return an appropriate document
 *
 * @param document
 * @return
 */
public static SignedDocumentValidator fromDocument(Document document) throws IOException {
    InputStream input = null;
    try {
        if (document.getName() != null && document.getName().toLowerCase().endsWith(".xml")) {
            try {
                return new XMLDocumentValidator(document);
            } catch (ParserConfigurationException e) {
                throw new IOException("Not a valid XML");
            } catch (SAXException e) {
                throw new IOException("Not a valid XML");
            }
        }

        input = new BufferedInputStream(document.openStream());
        input.mark(5);
        byte[] preamble = new byte[5];
        int read = input.read(preamble);
        input.reset();
        if (read < 5) {
            throw new RuntimeException("Not a signed document");
        }
        String preambleString = new String(preamble);
        byte[] xmlPreable = new byte[] { '<', '?', 'x', 'm', 'l' };
        byte[] xmlUtf8 = new byte[] { -17, -69, -65, '<', '?' };
        if (Arrays.equals(preamble, xmlPreable) || Arrays.equals(preamble, xmlUtf8)) {
            try {
                return new XMLDocumentValidator(document);
            } catch (ParserConfigurationException e) {
                throw new IOException("Not a valid XML");
            } catch (SAXException e) {
                throw new IOException("Not a valid XML");
            }
        } else if (preambleString.equals("%PDF-")) {
            return new PDFDocumentValidator(document);
        } else if (preamble[0] == 'P' && preamble[1] == 'K') {
            try {
                input.close();
            } catch (IOException e) {
            }
            input = null;
            return getInstanceForAsics(document);
        } else if (preambleString.getBytes()[0] == 0x30) {
            try {
                return new CMSDocumentValidator(document);
            } catch (CMSException e) {
                throw new IOException("Not a valid CAdES file");
            }
        } else {
            throw new RuntimeException("Document format not recognized/handled");
        }
    } finally {
        if (input != null) {
            try {
                input.close();
            } catch (IOException e) {
            }
        }
    }
}
From source file:net.yacy.document.TextParser.java
private static Document[] parseSource(final DigestURL location, String mimeType, final String charset,
        final Set<String> ignore_class_name, final VocabularyScraper scraper, final int timezoneOffset,
        final int depth, final long contentLength, final InputStream sourceStream, final int maxLinks,
        final long maxBytes) throws Parser.Failure {
    if (AbstractParser.log.isFine())
        AbstractParser.log.fine("Parsing '" + location + "' from stream");
    mimeType = normalizeMimeType(mimeType);
    Set<Parser> idioms = null;
    try {
        idioms = parsers(location, mimeType);
    } catch (final Parser.Failure e) {
        final String errorMsg = "Parser Failure for extension '"
                + MultiProtocolURL.getFileExtension(location.getFileName()) + "' or mimetype '" + mimeType
                + "': " + e.getMessage();
        AbstractParser.log.warn(errorMsg);
        throw new Parser.Failure(errorMsg, location);
    }
    assert !idioms.isEmpty() : "no parsers applied for url " + location.toNormalform(true);

    boolean canStream = false;
    if (idioms.size() == 1) {
        canStream = true;
    } else if (idioms.size() == 2) {
        /* When there are only 2 available parsers, stream oriented parsing can still be applied
         * when one of the 2 parsers is the generic one */
        for (Parser idiom : idioms) {
            if (idiom instanceof genericParser) {
                canStream = true;
            }
        }
    } else if (sourceStream instanceof ByteArrayInputStream) {
        /* Also check if we have a ByteArrayInputStream as source to prevent useless bytes duplication in a new byte array */
        canStream = true;
    }

    // if we do not have more than one non generic parser, or the content size is over MaxInt (2GB), or is over the totally available memory,
    // or stream is already in memory as a ByteArrayInputStream
    // then we use only stream-oriented parser.
    if (canStream || contentLength > Integer.MAX_VALUE || contentLength > MemoryControl.available()) {
        try {
            /* The size of the buffer on the stream must be large enough to allow parser implementations to start parsing the resource
             * and eventually fail, but must also be larger than eventual parsers internal buffers such as
             * BufferedInputStream.DEFAULT_BUFFER_SIZE (8192 bytes) */
            int rewindSize = 10 * 1024;
            final InputStream markableStream;
            if (sourceStream instanceof ByteArrayInputStream) {
                /* No nead to use a wrapping buffered stream when the source is already entirely in memory.
                 * What's more, ByteArrayInputStream has no read limit when marking.*/
                markableStream = sourceStream;
            } else {
                markableStream = new BufferedInputStream(sourceStream, rewindSize);
            }
            /* Mark now to allow resetting the buffered stream to the beginning of the stream */
            markableStream.mark(rewindSize);

            /* Loop on parser : they are supposed to be sorted in order to start with the most specific and end with the most generic */
            for (Parser parser : idioms) {
                /* Wrap in a CloseShieldInputStream to prevent SAX parsers closing the sourceStream
                 * and so let us eventually reuse the same opened stream with other parsers on parser failure */
                CloseShieldInputStream nonCloseInputStream = new CloseShieldInputStream(markableStream);
                try {
                    return parseSource(location, mimeType, parser, charset, ignore_class_name, scraper,
                            timezoneOffset, nonCloseInputStream, maxLinks, maxBytes);
                } catch (Parser.Failure e) {
                    /* Try to reset the marked stream. If the failed parser has consumed too many bytes :
                     * too bad, the marks is invalid and process fails now with an IOException */
                    markableStream.reset();

                    if (parser instanceof gzipParser && e.getCause() instanceof GZIPOpeningStreamException
                            && (idioms.size() == 1 || (idioms.size() == 2 && idioms.contains(genericIdiom)))) {
                        /* The gzip parser failed directly when opening the content stream : before falling back to the generic parser,
                         * let's have a chance to parse the stream as uncompressed. */
                        /* Indeed, this can be a case of misconfigured web server, providing both headers "Content-Encoding" with value "gzip",
                         * and "Content-type" with value such as "application/gzip".
                         * In that case our HTTP client (see GzipResponseInterceptor) is already uncompressing the stream on the fly,
                         * that's why the gzipparser fails opening the stream.
                         * (see RFC 7231 section 3.1.2.2 for "Content-Encoding" header specification https://tools.ietf.org/html/rfc7231#section-3.1.2.2)*/
                        gzipParser gzParser = (gzipParser) parser;

                        nonCloseInputStream = new CloseShieldInputStream(markableStream);

                        Document maindoc = gzipParser.createMainDocument(location, mimeType, charset, gzParser);

                        try {
                            Document[] docs = gzParser.parseCompressedInputStream(location, charset, timezoneOffset,
                                    depth, nonCloseInputStream, maxLinks, maxBytes);
                            if (docs != null) {
                                maindoc.addSubDocuments(docs);
                            }
                            return new Document[] { maindoc };
                        } catch (Exception e1) {
                            /* Try again to reset the marked stream if the failed parser has not consumed too many bytes */
                            markableStream.reset();
                        }
                    }
                }
            }
        } catch (IOException e) {
            throw new Parser.Failure("Error reading source", location);
        }
    }

    // in case that we know more parsers we first transform the content into a byte[] and use that as base
    // for a number of different parse attempts.

    int maxBytesToRead = -1;
    if (maxBytes < Integer.MAX_VALUE) {
        /* Load at most maxBytes + 1 :
         * - to let parsers not supporting Parser.parseWithLimits detect the maxBytes size is exceeded and end with a Parser.Failure
         * - but let parsers supporting Parser.parseWithLimits perform partial parsing of maxBytes content */
        maxBytesToRead = (int) maxBytes + 1;
    }
    if (contentLength >= 0 && contentLength < maxBytesToRead) {
        maxBytesToRead = (int) contentLength;
    }

    byte[] b = null;
    try {
        b = FileUtils.read(sourceStream, maxBytesToRead);
    } catch (final IOException e) {
        throw new Parser.Failure(e.getMessage(), location);
    }
    Document[] docs = parseSource(location, mimeType, idioms, charset, ignore_class_name, scraper,
            timezoneOffset, depth, b, maxLinks, maxBytes);

    return docs;
}
From source file:org.infoscoop.web.MultiRssServlet.java
private int getStreamLength(InputStream is) throws IOException {
    is.mark(1);
    byte[] b = new byte[1024];
    int i = 0;
    int length = 0;
    while ((i = is.read(b)) != -1) {
        length += i;
    }
    is.reset();
    return length;
}
From source file:com.soma.daemin.fragment.NewPicUploadTaskFragment.java
public Bitmap decodeSampledBitmapFromPath(String filePath, int reqWidth, int reqHeight) throws IOException {
    File file = new File(filePath);
    InputStream stream = new BufferedInputStream(new FileInputStream(file));
    /*InputStream stream = new BufferedInputStream(
            mApplicationContext.getContentResolver().openInputStream(fileUri));*/
    stream.mark(stream.available());

    BitmapFactory.Options options = new BitmapFactory.Options();
    // First decode with inJustDecodeBounds=true to check dimensions
    options.inJustDecodeBounds = true;
    BitmapFactory.decodeStream(stream, null, options);
    stream.reset();

    options.inSampleSize = calculateInSampleSize(options, reqWidth, reqHeight);
    options.inJustDecodeBounds = false;
    BitmapFactory.decodeStream(stream, null, options);

    // Decode bitmap with inSampleSize set
    stream.reset();
    return BitmapFactory.decodeStream(stream, null, options);
}
From source file:dk.dr.radio.net.Diverse.java
public static String læsStreng(InputStream is) throws IOException, UnsupportedEncodingException {
    // It may be necessary to skip a BOM mark - see http://android.forums.wordpress.org/topic/xml-pull-error?replies=2
    //is.read(); is.read(); is.read(); // - this only works if there is ALWAYS a BOM
    // Skip the BOM - if it is there!
    is = new BufferedInputStream(is); // FileInputStream, among others, does not support mark, so use a BufferedInputStream
    is.mark(1); // we actually only need to seek one byte back
    if (is.read() == 0xef) {
        is.read();
        is.read();
    } // There was a BOM! Read the last 2 bytes
    else
        is.reset(); // There was no BOM - jump back to the start

    final char[] buffer = new char[0x3000];
    StringBuilder out = new StringBuilder();
    Reader in = new InputStreamReader(is, "UTF-8");
    int read;
    do {
        read = in.read(buffer, 0, buffer.length);
        if (read > 0) {
            out.append(buffer, 0, read);
        }
    } while (read >= 0);
    in.close();
    return out.toString();
}