Example usage for java.io InputStream reset

List of usage examples for java.io InputStream reset

Introduction

On this page you can find example usages of java.io.InputStream.reset().

Prototype

public synchronized void reset() throws IOException 

Source Link

Document

Repositions this stream to the position at the time the mark method was last called on this input stream.

Usage

From source file:org.apache.any23.mime.TikaMIMETypeDetector.java

/**
 * Extracts a data sample from the input stream, reading from the current
 * position up to (and including) the first <i>breakChar</i> found outside a
 * block, or until {@code MAX_SIZE} characters have been buffered.
 *
 * @param is the input stream to sample; it is reset via its mark when done.
 * @param breakChar the character that terminates the sample (when outside a block).
 * @param insideBlockCharacters characters that open a block.
 * @param lineCommentChars characters that start a line comment; the remainder
 *        of such a line, including the comment character itself, is excluded
 *        from the sample.
 * @param outsideBlockCharacters characters that close a block.
 * @param switchBlockCharacters characters that toggle the block state.
 * @return the sample string.
 * @throws IOException if an error occurs during sampling.
 */
private static StringBuilder extractDataSample(InputStream is, char breakChar, char[] insideBlockCharacters,
        char[] lineCommentChars, char[] outsideBlockCharacters, char[] switchBlockCharacters)
        throws IOException {
    BufferedReader br = new BufferedReader(new InputStreamReader(is));
    StringBuilder sb = new StringBuilder();
    // TODO: Make this configurable
    final int MAX_SIZE = 1024 * 2;
    int c;
    boolean insideBlock = false;
    br.mark(MAX_SIZE);
    try {
        sampling: while ((c = br.read()) != -1) {
            if (sb.length() > MAX_SIZE) {
                break;
            }

            if (!insideBlock) {
                for (char nextLineCommentChar : lineCommentChars) {
                    // If we hit a comment character that signals the rest of the line
                    // is a comment, skip to the end of the line and resume sampling on
                    // the next line, appending nothing — not even the comment character.
                    // NOTE: a plain `continue` here would only continue this inner
                    // for-loop and the comment character would still be appended
                    // below; the labeled continue is required for the documented
                    // behavior.
                    if (c == nextLineCommentChar) {
                        br.readLine();
                        continue sampling;
                    }
                }
            }

            for (char nextInsideChar : insideBlockCharacters) {
                if (c == nextInsideChar)
                    insideBlock = true;
            }

            for (char nextOutsideChar : outsideBlockCharacters) {
                if (c == nextOutsideChar)
                    insideBlock = false;
            }

            for (char nextSwitchChar : switchBlockCharacters) {
                if (c == nextSwitchChar)
                    insideBlock = !insideBlock;
            }
            sb.append((char) c);
            if (!insideBlock && breakChar == c) {
                break;
            }
        }
    } finally {
        is.reset();
        br.reset();
    }
    return sb;
}

From source file:org.apache.bval.jsr.xml.ValidationMappingParser.java

/**
 * Unmarshals the given XML stream into a {@link ConstraintMappingsType},
 * validating it against the validation-mapping-1.0.xsd schema.
 *
 * @param in XML stream to parse using the validation-mapping-1.0.xsd
 */
private ConstraintMappingsType parseXmlMappings(final InputStream in) {
    try {
        final JAXBContext context = JAXBContext.newInstance(ConstraintMappingsType.class);
        final Unmarshaller unmarshaller = context.createUnmarshaller();
        unmarshaller.setSchema(getSchema());
        final JAXBElement<ConstraintMappingsType> element = unmarshaller.unmarshal(new StreamSource(in),
                ConstraintMappingsType.class);
        return element.getValue();
    } catch (final JAXBException e) {
        throw new ValidationException("Failed to parse XML deployment descriptor file.", e);
    } finally {
        IOs.closeQuietly(in);
        try {
            in.reset(); // can be read several times + we ensured it was re-readable in addMapping()
        } catch (final IOException e) {
            // no-op: the stream simply does not support reset
        }
    }
}

From source file:com.aliyun.odps.mapred.bridge.MetaExplorerImpl.java

/**
 * Uploads the given stream as a temporary resource, retrying on failure with
 * a distinct resource name per attempt (trials 0 through 3). Retrying requires
 * the stream to support {@link InputStream#reset()}; if it does not, retrying
 * is abandoned after the first failure.
 *
 * @param in the resource content; must be reset-able for retries to work.
 * @param prefix the resource name prefix; the trial number and an optional
 *        type extension are appended.
 * @param type the resource type; only FILE and JAR are supported.
 * @return the name under which the resource was created.
 * @throws OdpsException if the type is unsupported or all attempts fail.
 */
@Override
public String addTempResourceWithRetry(InputStream in, String prefix, Resource.Type type) throws OdpsException {
    FileResource res;
    String extension = "";
    switch (type) {
    case FILE:
        res = new FileResource();
        break;
    case JAR:
        res = new JarResource();
        extension = "jar";
        break;
    default:
        throw new OdpsException("Unsupported resource type:" + type);
    }

    res.setIsTempResource(true);
    String resourceName = null;
    // Up to 4 attempts, each with a distinct name so a partially created
    // resource from a failed attempt cannot collide with the next one.
    for (int trial = 0; trial <= 3; trial++) {
        try {
            resourceName = prefix + "_" + trial;
            // extension is initialized to "" and never null, so only emptiness matters.
            if (!extension.isEmpty()) {
                resourceName += "." + extension;
            }
            res.setName(resourceName);
            odps.resources().create(res, in);
            return resourceName;
        } catch (OdpsException e) {
            LOG.error(
                    "Upload resource " + resourceName + " failed:" + e.getMessage() + ", retry count=" + trial);
            try {
                Thread.sleep(60 * 1000);
            } catch (InterruptedException e1) {
                // Restore the interrupt status instead of silently swallowing it,
                // so callers can still observe that the thread was interrupted.
                Thread.currentThread().interrupt();
            }
            try {
                in.reset();
            } catch (IOException e1) {
                // If input stream is not reset-able, drop coming retry.
                break;
            }
        }
    }
    throw new OdpsException("Upload resource failed.");
}

From source file:org.codice.ddf.endpoints.rest.RESTEndpoint.java

/**
 * Parses a single multipart attachment into a {@code CreateInfo}: extracts the
 * content stream, determines the content type from the part's headers, and
 * resolves a filename — generating a default one from the content type when
 * the Content-Disposition does not supply it, or refining the content type
 * from the filename's extension when the supplied type is missing or generic.
 *
 * @param contentPart the multipart attachment to parse.
 * @return a CreateInfo carrying the stream, content type, and filename.
 */
CreateInfo parseAttachment(Attachment contentPart) {
    CreateInfo createInfo = new CreateInfo();

    InputStream stream = null;
    String filename = null;
    String contentType = null;

    // Get the file contents as an InputStream and ensure the stream is positioned
    // at the beginning
    try {
        stream = contentPart.getDataHandler().getInputStream();
        // If the stream appears already consumed (nothing left to read), try to
        // rewind it so the content can be read again downstream.
        if (stream != null && stream.available() == 0) {
            stream.reset();
        }
        createInfo.setStream(stream);
    } catch (IOException e) {
        LOGGER.warn("IOException reading stream from file attachment in multipart body", e);
    }

    // Example Content-Type header:
    // Content-Type: application/json;id=geojson
    if (contentPart.getContentType() != null) {
        contentType = contentPart.getContentType().toString();
    }

    if (contentPart.getContentDisposition() != null) {
        filename = contentPart.getContentDisposition()
                .getParameter(FILENAME_CONTENT_DISPOSITION_PARAMETER_NAME);
    }

    // Only interested in attachments for file uploads. Any others should be covered by
    // the FormParam arguments.
    // If the filename was not specified, then generate a default filename based on the
    // specified content type.
    if (StringUtils.isEmpty(filename)) {
        LOGGER.debug("No filename parameter provided - generating default filename");
        String fileExtension = DEFAULT_FILE_EXTENSION;
        try {
            fileExtension = mimeTypeMapper.getFileExtensionForMimeType(contentType); // DDF-2307
            if (StringUtils.isEmpty(fileExtension)) {
                fileExtension = DEFAULT_FILE_EXTENSION;
            }
        } catch (MimeTypeResolutionException e) {
            LOGGER.debug("Exception getting file extension for contentType = {}", contentType);
        }
        filename = DEFAULT_FILE_NAME + "." + fileExtension; // DDF-2263
        LOGGER.debug("No filename parameter provided - default to {}", filename);
    } else {
        // Strip any path components the client may have included.
        filename = FilenameUtils.getName(filename);

        // DDF-908: filename with extension was specified by the client. If the
        // contentType is null or the browser default, try to refine the contentType
        // by determining the mime type based on the filename's extension.
        if (StringUtils.isEmpty(contentType) || REFINEABLE_MIME_TYPES.contains(contentType)) {
            String fileExtension = FilenameUtils.getExtension(filename);
            LOGGER.debug("fileExtension = {}, contentType before refinement = {}", fileExtension, contentType);
            try {
                contentType = mimeTypeMapper.getMimeTypeForFileExtension(fileExtension);
            } catch (MimeTypeResolutionException e) {
                LOGGER.debug("Unable to refine contentType {} based on filename extension {}", contentType,
                        fileExtension);
            }
            LOGGER.debug("Refined contentType = {}", contentType);
        }
    }

    createInfo.setContentType(contentType);
    createInfo.setFilename(filename);

    return createInfo;
}

From source file:org.infoscoop.request.filter.DetectTypeFilter.java

/**
 * Post-processes the proxied response: buffers the whole body in memory,
 * detects the widget type (attempting an XML parse first, then falling back
 * to HTML parsing via NekoHTML), and replaces the response body with a JSON
 * array describing the detection result. The Content-Type and Content-Length
 * response headers are rewritten to match the new body.
 *
 * @param request the proxy request whose response headers are read and rewritten.
 * @param responseStream the original response body stream.
 * @return a stream over the UTF-8 JSON result bytes, or null when detection fails.
 * @throws IOException if reading the response stream fails.
 */
protected InputStream postProcess(ProxyRequest request, InputStream responseStream) throws IOException {
    JSONArray result = new JSONArray();
    //BufferedInputStream responseStream = new BufferedInputStream(_responseStream);

    String contentType = request.getResponseHeader("Content-Type");

    String encoding = getContentTypeCharset(contentType);
    // Buffer the entire body in memory so the stream can be reset and re-parsed below.
    responseStream = new ByteArrayInputStream(ProxyRequest.stream2Bytes(responseStream)); //FIXME
    XMLFilter.skipEmptyLine(responseStream);

    try { // try a strict XML parse first; fall back to lenient HTML parsing below
        DetectManager handler = null;
        if (isXml(contentType, responseStream)) {
            try {
                DetectManager h = new DetectManager();
                XMLReader reader = factory.newSAXParser().getXMLReader();
                reader.setEntityResolver(NoOpEntityResolver.getInstance());
                reader.setContentHandler(h);
                reader.parse(new InputSource(responseStream));

                handler = h;
            } catch (SAXException e) {
                log.warn("parse error", e);

                // Rewind so the HTML fallback below re-reads from the beginning.
                responseStream.reset();
            }
        }

        // handler is still null when the content was not XML or the XML parse
        // failed: parse it as HTML with NekoHTML instead.
        if (handler == null) {
            handler = new DetectManager();

            if (encoding == null) {
                // Sniff the encoding from the content itself, then rewind for parsing.
                encoding = findEncoding(responseStream);//TODO:
                responseStream.reset();
            }

            org.cyberneko.html.parsers.SAXParser nekoParser = new org.cyberneko.html.parsers.SAXParser();
            // Normalize element and attribute names to lower case.
            nekoParser.setProperty("http://cyberneko.org/html/properties/names/elems", "lower");
            nekoParser.setProperty("http://cyberneko.org/html/properties/names/attrs", "lower");

            if (encoding != null)
                nekoParser.setProperty("http://cyberneko.org/html/properties/default-encoding", encoding);

            Transformer transformer = TransformerFactory.newInstance().newTransformer();
            SAXSource source = new SAXSource(nekoParser, new InputSource(responseStream));
            SAXResult saxResult = new SAXResult(handler);
            transformer.transform(source, saxResult);
        }

        result = handler.getResult(request);
    } catch (Exception e) {
        log.error("Url: [" + request.getTargetURL() + "] detect widget type failed.", e);
        //return 500;TODO:
        return null;
    }
    byte[] resultBytes = result.toString().getBytes("UTF-8");
    request.putResponseHeader("Content-Type", "text/plain; charset=UTF-8");
    request.putResponseHeader("Content-Length", String.valueOf(resultBytes.length));
    //request.setResponseBody(new ByteArrayInputStream( resultBytes ));
    return new ByteArrayInputStream(resultBytes);
}

From source file:eu.medsea.mimeutil.TextMimeDetector.java

/**
 * Reads up to {@code TextMimeDetector.BUFFER_SIZE} bytes from the stream,
 * resets the stream to its prior position, and delegates mime detection to
 * {@link #getMimeTypesByteArray(byte[])} on the sampled bytes.
 *
 * @see MimeDetector.getMimeTypesInputStream(InputStream in)
 */
public Collection getMimeTypesInputStream(InputStream in) throws UnsupportedOperationException {

    final int limit = TextMimeDetector.BUFFER_SIZE;
    final byte[] buffer = new byte[limit];
    byte[] sample = null;
    int filled = 0;

    // Remember the current position so the stream can be rewound afterwards.
    in.mark(limit);

    try {
        // A single read() may deliver fewer bytes than requested, so keep
        // reading until the buffer is full or EOF is reached.
        int remaining = limit;
        while (remaining > 0) {
            final int n = in.read(buffer, filled, remaining);
            if (n < 0) {
                break; // EOF
            }
            filled += n;
            remaining -= n;
        }
        // Shrink the sample to the number of bytes actually read.
        if (filled < limit) {
            sample = new byte[filled];
            System.arraycopy(buffer, 0, sample, 0, filled);
        } else {
            sample = buffer;
        }
    } catch (IOException ioe) {
        throw new MimeException(ioe);
    } finally {
        try {
            // Reset the input stream to where it was marked.
            in.reset();
        } catch (Exception e) {
            throw new MimeException(e);
        }
    }
    return getMimeTypesByteArray(sample);
}

From source file:net.yacy.document.TextParser.java

/**
 * Parses the content of the given stream with the parsers matching the
 * location's file extension and the (normalized) mime type. When stream
 * parsing is possible — a single candidate parser, two candidates of which
 * one is the generic parser, an in-memory source, or content too large to
 * buffer — the candidates are tried directly against a markable stream,
 * resetting between attempts. Otherwise the content is first loaded into a
 * byte array and parsed from memory.
 *
 * @param location the URL of the resource being parsed.
 * @param mimeType the declared mime type; normalized before use.
 * @param charset the character set name forwarded to the parsers, or null.
 * @param ignore_class_name set of class names forwarded to the parsers
 *        (presumably CSS classes whose content is ignored — confirm).
 * @param scraper the vocabulary scraper forwarded to the parsers.
 * @param timezoneOffset time zone offset forwarded to the parsers.
 * @param depth crawl depth forwarded to the parsers.
 * @param contentLength the content length in bytes; may be negative when unknown.
 * @param sourceStream the stream delivering the content.
 * @param maxLinks maximum number of links forwarded to the parsers.
 * @param maxBytes maximum number of bytes to parse.
 * @return the parsed documents.
 * @throws Parser.Failure when no parser accepts the content or reading fails.
 */
private static Document[] parseSource(final DigestURL location, String mimeType, final String charset,
        final Set<String> ignore_class_name, final VocabularyScraper scraper, final int timezoneOffset,
        final int depth, final long contentLength, final InputStream sourceStream, final int maxLinks,
        final long maxBytes) throws Parser.Failure {
    if (AbstractParser.log.isFine())
        AbstractParser.log.fine("Parsing '" + location + "' from stream");
    mimeType = normalizeMimeType(mimeType);
    Set<Parser> idioms = null;
    try {
        idioms = parsers(location, mimeType);
    } catch (final Parser.Failure e) {
        final String errorMsg = "Parser Failure for extension '"
                + MultiProtocolURL.getFileExtension(location.getFileName()) + "' or mimetype '" + mimeType
                + "': " + e.getMessage();
        AbstractParser.log.warn(errorMsg);
        throw new Parser.Failure(errorMsg, location);
    }
    assert !idioms.isEmpty() : "no parsers applied for url " + location.toNormalform(true);

    boolean canStream = false;
    if (idioms.size() == 1) {
        canStream = true;
    } else if (idioms.size() == 2) {
        /* When there are only 2 available parsers, stream oriented parsing can still be applied when one of the 2 parsers is the generic one */
        for (Parser idiom : idioms) {
            if (idiom instanceof genericParser) {
                canStream = true;
            }
        }
    } else if (sourceStream instanceof ByteArrayInputStream) {
        /* Also check if we have a ByteArrayInputStream as source to prevent useless bytes duplication in a new byte array */
        canStream = true;
    }

    // if we do not have more than one non generic parser, or the content size is over MaxInt (2GB), or is over the totally available memory,
    // or stream is already in memory as a ByteArrayInputStream
    // then we use only stream-oriented parser.
    if (canStream || contentLength > Integer.MAX_VALUE || contentLength > MemoryControl.available()) {
        try {
            /* The size of the buffer on the stream must be large enough to allow parser implementations to start parsing the resource
             * and eventually fail, but must also be larger than eventual parsers internal buffers such as BufferedInputStream.DEFAULT_BUFFER_SIZE (8192 bytes) */
            int rewindSize = 10 * 1024;
            final InputStream markableStream;
            if (sourceStream instanceof ByteArrayInputStream) {
                /* No nead to use a wrapping buffered stream when the source is already entirely in memory. 
                 * What's more, ByteArrayInputStream has no read limit when marking.*/
                markableStream = sourceStream;
            } else {
                markableStream = new BufferedInputStream(sourceStream, rewindSize);
            }
            /* Mark now to allow resetting the buffered stream to the beginning of the stream */
            markableStream.mark(rewindSize);

            /* Loop on parser : they are supposed to be sorted in order to start with the most specific and end with the most generic */
            for (Parser parser : idioms) {
                /* Wrap in a CloseShieldInputStream to prevent SAX parsers closing the sourceStream 
                 * and so let us eventually reuse the same opened stream with other parsers on parser failure */
                CloseShieldInputStream nonCloseInputStream = new CloseShieldInputStream(markableStream);

                try {
                    return parseSource(location, mimeType, parser, charset, ignore_class_name, scraper,
                            timezoneOffset, nonCloseInputStream, maxLinks, maxBytes);
                } catch (Parser.Failure e) {
                    /* Try to reset the marked stream. If the failed parser has consumed too many bytes : 
                     * too bad, the marks is invalid and process fails now with an IOException */
                    markableStream.reset();

                    if (parser instanceof gzipParser && e.getCause() instanceof GZIPOpeningStreamException
                            && (idioms.size() == 1 || (idioms.size() == 2 && idioms.contains(genericIdiom)))) {
                        /* The gzip parser failed directly when opening the content stream : before falling back to the generic parser,
                         * let's have a chance to parse the stream as uncompressed. */
                        /* Indeed, this can be a case of misconfigured web server, providing both headers "Content-Encoding" with value "gzip", 
                         * and "Content-type" with value such as "application/gzip".
                        * In that case our HTTP client (see GzipResponseInterceptor) is already uncompressing the stream on the fly,
                        * that's why the gzipparser fails opening the stream. 
                        * (see RFC 7231 section 3.1.2.2 for "Content-Encoding" header specification https://tools.ietf.org/html/rfc7231#section-3.1.2.2)*/
                        gzipParser gzParser = (gzipParser) parser;

                        nonCloseInputStream = new CloseShieldInputStream(markableStream);

                        Document maindoc = gzipParser.createMainDocument(location, mimeType, charset, gzParser);

                        try {
                            Document[] docs = gzParser.parseCompressedInputStream(location, charset,
                                    timezoneOffset, depth, nonCloseInputStream, maxLinks, maxBytes);
                            if (docs != null) {
                                maindoc.addSubDocuments(docs);
                            }
                            return new Document[] { maindoc };
                        } catch (Exception e1) {
                            /* Try again to reset the marked stream if the failed parser has not consumed too many bytes */
                            markableStream.reset();
                        }
                    }
                }
            }
        } catch (IOException e) {
            throw new Parser.Failure("Error reading source", location);
        }
    }

    // in case that we know more parsers we first transform the content into a byte[] and use that as base
    // for a number of different parse attempts.

    int maxBytesToRead = -1;
    if (maxBytes < Integer.MAX_VALUE) {
        /* Load at most maxBytes + 1 :
            - to let parsers not supporting Parser.parseWithLimits detect the maxBytes size is exceeded and end with a Parser.Failure
            - but let parsers supporting Parser.parseWithLimits perform partial parsing of maxBytes content */
        maxBytesToRead = (int) maxBytes + 1;
    }
    if (contentLength >= 0 && contentLength < maxBytesToRead) {
        maxBytesToRead = (int) contentLength;
    }

    byte[] b = null;
    try {
        b = FileUtils.read(sourceStream, maxBytesToRead);
    } catch (final IOException e) {
        throw new Parser.Failure(e.getMessage(), location);
    }
    Document[] docs = parseSource(location, mimeType, idioms, charset, ignore_class_name, scraper,
            timezoneOffset, depth, b, maxLinks, maxBytes);

    return docs;
}

From source file:org.gudy.azureus2.pluginsimpl.local.utils.xml.simpleparser.SimpleXMLParserDocumentImpl.java

/**
 * Builds the document from the given stream. If the initial parse fails with
 * an entity-reference error, the stream is rewound to its mark and re-parsed
 * through an EntityFudger as a workaround for feeds containing HTML entities.
 * The stream is always closed when done.
 */
private void create(InputStream _input_stream)

        throws SimpleXMLParserDocumentException {
    // make sure we can mark the stream to permit later recovery if needed
    if (!_input_stream.markSupported()) {
        _input_stream = new BufferedInputStream(_input_stream);
    }

    _input_stream.mark(100 * 1024);

    // prevent the parser from screwing with our stream by closing it
    UncloseableInputStream shielded = new UncloseableInputStream(_input_stream);

    try {
        createSupport(shielded);

    } catch (SimpleXMLParserDocumentException e) {

        String msg = Debug.getNestedExceptionMessage(e);

        boolean entityProblem = (msg.contains("entity") && msg.contains("was referenced"))
                || msg.contains("entity reference");

        if (entityProblem) {

            // nasty hack to try and handle HTML entities that some annoying feeds include :(
            try {
                _input_stream.reset();

                createSupport(new EntityFudger(_input_stream));

                return;

            } catch (Throwable f) {
                // recovery attempt failed: fall through and rethrow the original error
            }
        }

        //Debug.out( e );

        throw (e);

    } finally {

        try {
            _input_stream.close();

        } catch (Throwable e) {
            // best-effort close; nothing useful to do here
        }
    }
}

From source file:no.nordicsemi.android.nrftoolbox.dfu.HexInputStream.java

/**
 * Scans the whole Intel HEX stream and returns the total size, in bytes, of
 * the binary data it describes (the sum of the data lengths of all type-0x00
 * records, up to the type-0x01 end-of-file record). The stream is marked
 * before scanning and reset afterwards so the caller's read position is
 * preserved; if the reset fails, the content is re-fetched over HTTP.
 *
 * NOTE(review): in.available() is used as the mark read-limit — this assumes
 * available() reports the full remaining length, which holds for in-memory
 * streams but not for arbitrary InputStreams; confirm for the streams used here.
 *
 * @return the accumulated binary size in bytes.
 * @throws IOException if reading the stream fails.
 */
private int calculateBinSize() throws IOException {
    int binSize = 0;
    final InputStream in = this.in;
    in.mark(in.available()); // remember position so the caller can keep reading from here

    int b, lineSize, type;
    try {
        b = in.read();
        while (true) {
            // b holds the first character of the current record; presumably
            // checkComma() validates the ':' record mark — confirm.
            checkComma(b);

            lineSize = readByte(in); // reading the length of the data in this line
            in.skip(4); // skipping address part
            type = readByte(in); // reading the line type
            switch (type) {
            case 0x00:
                // data type line
                binSize += lineSize;
                break;
            case 0x01:
                // end of file
                return binSize;
            case 0x02:
                // extended segment address record
            case 0x04:
                // extended linear address record
            default:
                break;
            }
            in.skip(lineSize * 2 /* 2 hex per one byte */ + 2 /* check sum */);
            // skip end of line
            while (true) {
                b = in.read();

                // stop at the first non-EOL character: it is the start of the
                // next record and is validated by checkComma() at the loop top
                if (b != '\n' && b != '\r') {
                    break;
                }
            }
        }
    } finally {
        try {
            in.reset();
        } catch (IOException ex) {
            // reset not possible (mark invalidated): re-fetch the content over
            // HTTP so this.in is positioned at the beginning again
            this.in = new BufferedInputStream(
                    new DefaultHttpClient().execute(this.httpGet).getEntity().getContent());
        }
    }
}