List of usage examples for java.io.InputStream.reset()

Signature: public synchronized void reset() throws IOException

Repositions this stream to the position at the time the mark method was last called on this input stream.

From source file: com.android.kalite27.ScriptActivity.java
private void copyResourcesToLocal() { String name, sFileName;/*from w w w .j av a2 s . c o m*/ InputStream content; R.raw a = new R.raw(); java.lang.reflect.Field[] t = R.raw.class.getFields(); Resources resources = getResources(); boolean succeed = true; for (int i = 0; i < t.length; i++) { try { name = resources.getText(t[i].getInt(a)).toString(); sFileName = name.substring(name.lastIndexOf('/') + 1, name.length()); content = getResources().openRawResource(t[i].getInt(a)); content.reset(); // python project if (sFileName.endsWith(GlobalConstants.PYTHON_PROJECT_ZIP_NAME)) { succeed &= Utils.unzip(content, this.getFilesDir().getAbsolutePath() + "/", true); } // python -> /data/data/com.android.python27/files/python else if (sFileName.endsWith(GlobalConstants.PYTHON_ZIP_NAME)) { succeed &= Utils.unzip(content, this.getFilesDir().getAbsolutePath() + "/", true); FileUtils.chmod(new File(this.getFilesDir().getAbsolutePath() + "/python/bin/python"), 0755); } // python extras -> /sdcard/com.android.python27/extras/python else if (sFileName.endsWith(GlobalConstants.PYTHON_EXTRAS_ZIP_NAME)) { Utils.createDirectoryOnExternalStorage(this.getPackageName() + "/" + "extras"); Utils.createDirectoryOnExternalStorage(this.getPackageName() + "/" + "extras" + "/" + "tmp"); succeed &= Utils.unzip(content, Environment.getExternalStorageDirectory().getAbsolutePath() + "/" + this.getPackageName() + "/extras/", true); } } catch (Exception e) { Log.e(GlobalConstants.LOG_TAG, "Failed to copyResourcesToLocal", e); succeed = false; } } // end for all files in res/raw }
From source file:org.pentaho.platform.dataaccess.datasource.api.AnalysisService.java
private String getSchemaName(String encoding, InputStream inputStream) throws XMLStreamException, IOException { String domainId = null;//from w w w. j av a 2 s. com XMLStreamReader reader = null; try { XMLInputFactory factory = XMLInputFactory.newInstance(); factory.setProperty(XMLInputFactory.IS_COALESCING, Boolean.TRUE); if (StringUtils.isEmpty(encoding)) { reader = factory.createXMLStreamReader(inputStream); } else { reader = factory.createXMLStreamReader(inputStream, encoding); } while (reader.next() != XMLStreamReader.END_DOCUMENT) { if (reader.getEventType() == XMLStreamReader.START_ELEMENT && reader.getLocalName().equalsIgnoreCase("Schema")) { domainId = reader.getAttributeValue("", "name"); return domainId; } } } finally { if (reader != null) { reader.close(); } inputStream.reset(); } return domainId; }
From source file:com.amazonaws.client.service.AmazonHttpClient.java
/** * Internal method to execute the HTTP method given. * * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler) * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext) *//*w w w .j a va 2 s.c o m*/ private <T> Response<T> executeHelper(final Request<?> request, HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler, HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext) throws AmazonClientException, AmazonServiceException { /* * Depending on which response handler we end up choosing to handle the * HTTP response, it might require us to leave the underlying HTTP * connection open, depending on whether or not it reads the complete * HTTP response stream from the HTTP connection, or if delays reading * any of the content until after a response is returned to the caller. */ boolean leaveHttpConnectionOpen = false; /* add the service endpoint to the logs. You can infer service name from service endpoint */ AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics() .addPropertyWith(ServiceName, request.getServiceName()) .addPropertyWith(ServiceEndpoint, request.getEndpoint()); // Apply whatever request options we know how to handle, such as user-agent. setUserAgent(request); int requestCount = 0; URI redirectedURI = null; HttpEntity entity = null; AmazonClientException retriedException = null; // Make a copy of the original request params and headers so that we can // permute it in this loop and start over with the original every time. 
Map<String, String> originalParameters = new LinkedHashMap<String, String>(); originalParameters.putAll(request.getParameters()); Map<String, String> originalHeaders = new HashMap<String, String>(); originalHeaders.putAll(request.getHeaders()); final AWSCredentials credentials = executionContext.getCredentials(); AmazonWebServiceRequest awsreq = request.getOriginalRequest(); ProgressListener listener = awsreq.getGeneralProgressListener(); Signer signer = null; while (true) { ++requestCount; awsRequestMetrics.setCounter(RequestCount, requestCount); if (requestCount > 1) { // retry request.setParameters(originalParameters); request.setHeaders(originalHeaders); } HttpRequestBase httpRequest = null; org.apache.http.HttpResponse apacheResponse = null; try { // Sign the request if a signer was provided if (signer == null) signer = executionContext.getSignerByURI(request.getEndpoint()); if (signer != null && credentials != null) { awsRequestMetrics.startEvent(RequestSigningTime); try { signer.sign(request, credentials); } finally { awsRequestMetrics.endEvent(RequestSigningTime); } } if (requestLog.isDebugEnabled()) { requestLog.debug("Sending Request: " + request.toString()); } httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext); if (httpRequest instanceof HttpEntityEnclosingRequest) { entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity(); } if (redirectedURI != null) { httpRequest.setURI(redirectedURI); } if (requestCount > 1) { // retry // Notify the progress listener of the retry publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT); awsRequestMetrics.startEvent(RetryPauseTime); try { pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount, config.getRetryPolicy()); } finally { awsRequestMetrics.endEvent(RetryPauseTime); } } if (entity != null) { InputStream content = entity.getContent(); if (requestCount > 1) { // retry if (content.markSupported()) { content.reset(); 
content.mark(-1); } } else { if (content.markSupported()) { content.mark(-1); } } } captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics); HttpContext httpContext = new BasicHttpContext(); httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics); retriedException = null; publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT); awsRequestMetrics.startEvent(HttpRequestTime); try { apacheResponse = httpClient.execute(httpRequest, httpContext); } finally { awsRequestMetrics.endEvent(HttpRequestTime); } publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT); final StatusLine statusLine = apacheResponse.getStatusLine(); final int statusCode = statusLine == null ? -1 : statusLine.getStatusCode(); if (isRequestSuccessful(apacheResponse)) { awsRequestMetrics.addProperty(StatusCode, statusCode); /* * If we get back any 2xx status code, then we know we should * treat the service call as successful. */ leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen(); HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse); T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse, executionContext); return new Response<T>(response, httpResponse); } if (isTemporaryRedirect(apacheResponse)) { /* * S3 sends 307 Temporary Redirects if you try to delete an * EU bucket from the US endpoint. If we get a 307, we'll * point the HTTP method to the redirected location, and let * the next retry deliver the request to the right location. 
*/ Header[] locationHeaders = apacheResponse.getHeaders("location"); String redirectedLocation = locationHeaders[0].getValue(); if (log.isDebugEnabled()) log.debug("Redirecting to: " + redirectedLocation); redirectedURI = URI.create(redirectedLocation); httpRequest.setURI(redirectedURI); awsRequestMetrics.addPropertyWith(StatusCode, statusCode) .addPropertyWith(RedirectLocation, redirectedLocation) .addPropertyWith(AWSRequestID, null); continue; } leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen(); final AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest, apacheResponse); awsRequestMetrics.addPropertyWith(AWSRequestID, ase.getRequestId()) .addPropertyWith(AWSErrorCode, ase.getErrorCode()) .addPropertyWith(StatusCode, ase.getStatusCode()); if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount, config.getRetryPolicy())) { throw ase; } // Comment out for now. Ref: CR2662349 // Preserve the cause of retry before retrying // awsRequestMetrics.addProperty(RetryCause, ase); if (RetryUtils.isThrottlingException(ase)) { awsRequestMetrics.incrementCounterWith(ThrottleException).addProperty(ThrottleException, ase); } // Cache the retryable exception retriedException = ase; /* * Checking for clock skew error again because we don't want to set the * global time offset for every service exception. 
*/ if (RetryUtils.isClockSkewError(ase)) { int timeOffset = parseClockSkewOffset(apacheResponse, ase); SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset); } resetRequestAfterError(request, ase); } catch (IOException ioe) { if (log.isInfoEnabled()) { log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe); } captureExceptionMetrics(ioe, awsRequestMetrics); awsRequestMetrics.addProperty(AWSRequestID, null); AmazonClientException ace = new AmazonClientException( "Unable to execute HTTP request: " + ioe.getMessage(), ioe); if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount, config.getRetryPolicy())) { throw ace; } // Cache the retryable exception retriedException = ace; resetRequestAfterError(request, ioe); } catch (RuntimeException e) { throw captureExceptionMetrics(e, awsRequestMetrics); } catch (Error e) { throw captureExceptionMetrics(e, awsRequestMetrics); } finally { /* * Some response handlers need to manually manage the HTTP * connection and will take care of releasing the connection on * their own, but if this response handler doesn't need the * connection left open, we go ahead and release the it to free * up resources. */ if (!leaveHttpConnectionOpen) { try { if (apacheResponse != null && apacheResponse.getEntity() != null && apacheResponse.getEntity().getContent() != null) { apacheResponse.getEntity().getContent().close(); } } catch (IOException e) { log.warn("Cannot close the response content.", e); } } } } /* end while (true) */ }
From source file:uk.ac.kcl.tika.parsers.PDFPreprocessorParser.java
@Override public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { ImageMagickConfig config = context.get(ImageMagickConfig.class, DEFAULT_IMAGEMAGICK_CONFIG); // If ImageMagick is not on the path with the current config, do not try to run OCR // getSupportedTypes shouldn't have listed us as handling it, so this should only // occur if someone directly calls this parser, not via DefaultParser or similar // TemporaryResources tmp = new TemporaryResources(); //TikaInputStream pdfStream = TikaInputStream.get(stream); PDFParser pdfParser = new PDFParser(); //create temp handlers to investigate object BodyContentHandler body = new BodyContentHandler(); Metadata pdfMetadata = new Metadata(); //needed to reset stream if (stream.markSupported()) { stream.mark(Integer.MAX_VALUE); }/*from w ww .jav a2 s . co m*/ //first do initial parse to see if there's subsantial content in pdf metadata already pdfParser.parse(stream, body, pdfMetadata, context); stream.reset(); //if there's content - reparse with official handlers/metadata. What else can you do? 
Also check imagemagick is available if (body.toString().length() > 100 || !hasImageMagick(config)) { pdfParser.parse(stream, handler, metadata, context); return; } else { //add the PDF metadata to the official metadata object Arrays.asList(pdfMetadata.names()).stream().forEach(name -> { metadata.add(name, pdfMetadata.get(name)); }); } //objects to hold file references for manipulation outside of Java File tiffFileOfPDF = null; File pdfFileFromStream = File.createTempFile("tempPDF", ".pdf"); try { FileUtils.copyInputStreamToFile(stream, pdfFileFromStream); tiffFileOfPDF = File.createTempFile("tempTIFF", ".tiff"); makeTiffFromPDF(pdfFileFromStream, tiffFileOfPDF, config); if (tiffFileOfPDF.exists()) { TesseractOCRParser tesseract = new TesseractOCRParser(); tesseract.parse(FileUtils.openInputStream(tiffFileOfPDF), handler, metadata, context); } } finally { if (tiffFileOfPDF.exists()) { tiffFileOfPDF.delete(); } if (pdfFileFromStream.exists()) { pdfFileFromStream.delete(); } } }
From source file:org.obm.push.backend.obm22.mail.EmailManager.java
public void sendEmail(BackendSession bs, String from, Set<Address> setTo, Set<Address> setCc, Set<Address> setCci, InputStream mimeMail, Boolean saveInSent) { try {//from w ww . j ava2 s .c o m logger.info("Send mail to " + setTo); if (!mimeMail.markSupported()) { ByteArrayOutputStream outPut = new ByteArrayOutputStream(); FileUtils.transfer(mimeMail, outPut, true); mimeMail = new ByteArrayInputStream(outPut.toByteArray()); } SMTPProtocol smtp = getSmtpClient(bs); smtp.openPort(); smtp.ehlo(InetAddress.getLocalHost()); Address addrFrom = new Address(from); smtp.mail(addrFrom); Address[] recipients = getAllRistrettoRecipients(setTo, setCc, setCci); for (Address to : recipients) { Address cleaned = new Address(to.getMailAddress()); smtp.rcpt(cleaned); } smtp.data(mimeMail); smtp.quit(); if (saveInSent) { mimeMail.reset(); storeInSent(bs, mimeMail); } } catch (Throwable e) { logger.error(e.getMessage(), e); } }
From source file:org.springframework.extensions.webscripts.connector.AuthenticatingConnector.java
public Response call(String uri, ConnectorContext context, InputStream in, OutputStream out) { Response response = null;/*from ww w . ja v a2 s .c o m*/ boolean handshake = false; boolean firstcall = true; if (isAuthenticated()) { // try to call into the connector to see if we can successfully do this context.setCommitResponseOnAuthenticationError(false); response = this.connector.call(uri, context, in, out); firstcall = false; if (logger.isDebugEnabled()) logger.debug("Received " + response.getStatus().getCode() + " on first call to: " + uri); // if there was an authentication challenge, handle here if (response.getStatus().getCode() == ResponseStatus.STATUS_UNAUTHORIZED) { handshake = true; } } else { handshake = true; } if (handshake) { handshake(); // ignore result // now that we've authenticated, try again if (in.markSupported()) { try { in.reset(); } catch (IOException ioErr) { // if we cannot reset the stream - there's nothing else we can do } } context.setCommitResponseOnAuthenticationError(true); response = this.connector.call(uri, context, in, out); if (logger.isDebugEnabled()) logger.debug("Received " + response.getStatus().getCode() + " on " + (firstcall ? "first" : "second") + " call to: " + uri); } return response; }
From source file:org.guvnor.m2repo.backend.server.helpers.HttpPostHelper.java
private String uploadJar(final FormData formData) throws IOException { GAV gav = formData.getGav();/* ww w . j av a 2 s .c om*/ InputStream jarStream = null; try { jarStream = formData.getFile().getInputStream(); if (gav == null) { if (!jarStream.markSupported()) { jarStream = new BufferedInputStream(jarStream); } // is available() safe? jarStream.mark(jarStream.available()); PomModel pomModel = PomModelResolver.resolveFromJar(jarStream); //If we were able to get a POM model we can get the GAV if (pomModel != null) { String groupId = pomModel.getReleaseId().getGroupId(); String artifactId = pomModel.getReleaseId().getArtifactId(); String version = pomModel.getReleaseId().getVersion(); if (isNullOrEmpty(groupId) || isNullOrEmpty(artifactId) || isNullOrEmpty(version)) { return UPLOAD_MISSING_POM; } else { gav = new GAV(groupId, artifactId, version); } } else { return UPLOAD_MISSING_POM; } jarStream.reset(); } m2RepoService.deployJar(jarStream, gav); return UPLOAD_OK; } catch (IOException ioe) { log.error(ioe.getMessage(), ioe); throw ExceptionUtilities.handleException(ioe); } finally { if (jarStream != null) { jarStream.close(); } } }
From source file:org.alfresco.encoding.AbstractCharactersetFinder.java
/** * {@inheritDoc}//from w ww. j a v a 2s . co m * <p> * The input stream is checked to ensure that it supports marks, after which * a buffer is extracted, leaving the stream in its original state. */ public final Charset detectCharset(InputStream is) { // Only support marking streams if (!is.markSupported()) { throw new IllegalArgumentException( "The InputStream must support marks. Wrap the stream in a BufferedInputStream."); } try { int bufferSize = getBufferSize(); if (bufferSize < 0) { throw new RuntimeException("The required buffer size may not be negative: " + bufferSize); } // Mark the stream for just a few more than we actually will need is.mark(bufferSize); // Create a buffer to hold the data byte[] buffer = new byte[bufferSize]; // Fill it int read = is.read(buffer); // Create an appropriately sized buffer if (read > -1 && read < buffer.length) { byte[] copyBuffer = new byte[read]; System.arraycopy(buffer, 0, copyBuffer, 0, read); buffer = copyBuffer; } // Detect return detectCharset(buffer); } catch (IOException e) { // Attempt a reset throw new AlfrescoRuntimeException("IOException while attempting to detect charset encoding.", e); } finally { try { is.reset(); } catch (Throwable ee) { } } }
From source file:org.mule.transport.http.multipart.MultiPartInputStream.java
private byte[] readLine(InputStream in) throws IOException { byte[] buf = new byte[256]; int i = 0;/*from w w w .j a va 2 s . co m*/ int loops = 0; int ch = 0; while (true) { ch = in.read(); if (ch < 0) break; loops++; // skip a leading LF's if (loops == 1 && ch == LF) continue; if (ch == CR || ch == LF) break; if (i >= buf.length) { byte[] old_buf = buf; buf = new byte[old_buf.length + 256]; System.arraycopy(old_buf, 0, buf, 0, old_buf.length); } buf[i++] = (byte) ch; } if (ch == -1 && i == 0) return null; // skip a trailing LF if it exists if (ch == CR && in.available() >= 1 && in.markSupported()) { in.mark(1); ch = in.read(); if (ch != LF) in.reset(); } byte[] old_buf = buf; buf = new byte[i]; System.arraycopy(old_buf, 0, buf, 0, i); return buf; }
From source file:org.apache.falcon.resource.AbstractEntityManager.java
protected Entity deserializeEntity(HttpServletRequest request, EntityType entityType) throws IOException, FalconException { EntityParser<?> entityParser = EntityParserFactory.getParser(entityType); InputStream xmlStream = request.getInputStream(); if (xmlStream.markSupported()) { xmlStream.mark(XML_DEBUG_LEN); // mark up to debug len }//from w ww . j ava2s. co m try { return entityParser.parse(xmlStream); } catch (FalconException e) { if (LOG.isDebugEnabled() && xmlStream.markSupported()) { try { xmlStream.reset(); String xmlData = getAsString(xmlStream); LOG.debug("XML DUMP for ({}): {}", entityType, xmlData, e); } catch (IOException ignore) { // ignore } } throw e; } }