List of usage examples for the java.io.InputStream method markSupported:
public boolean markSupported()
Tests whether this input stream supports the mark and reset methods.
From source file: com.microsoft.tfs.core.clients.versioncontrol.internal.httpclient.CancellableChunkPart.java
/**
 * Creates a file part representing one chunk of an upload.
 *
 * @param file        the file whose content backs this part
 * @param stream      the chunk's content stream; MUST support mark/reset so the
 *                    chunk can be re-sent on a retry
 * @param contentType the MIME type to declare for the part
 * @param chunkSize   the size of this chunk in bytes
 * @throws FileNotFoundException if the backing file cannot be opened
 */
public CancellableChunkPart(final File file, final InputStream stream, final String contentType, final long chunkSize) throws FileNotFoundException {
    /*
     * NOTE We construct the file part in a special way so the character set
     * is never sent to the server (TFS can't handle that header, and it
     * causes an internal server error). If we construct the
     * CancellableFilePart object with a null charset, the header is still
     * included, and its value is the default charset. If we invoke
     * setCharSet() with null, the header is never supplied (which is what
     * we desire).
     *
     * Also, we use the file name "item" to match Visual Studio's
     * implementation. Sending the actual file name doesn't seem to hurt,
     * but appears to be ignored by the server.
     */
    super("content", "item", file, contentType, null); //$NON-NLS-1$ //$NON-NLS-2$
    setCharSet(null);

    // The chunk may be re-sent after a transient failure, so the stream must
    // be rewindable; fail fast here rather than mid-upload.
    Check.isTrue(stream.markSupported(), "The stream does not support retry."); //$NON-NLS-1$
    this.stream = stream;
    this.chunkSize = chunkSize;
}
From source file:org.springframework.remoting.caucho.HessianExporter.java
/**
 * Actually invoke the skeleton with the given streams.
 * <p>Sniffs the first byte of the request to decide between the Hessian 2.0
 * stream format ('H'), the Hessian 2.0 call format ('C'), and the Hessian 1.0
 * call format ('c'), then dispatches to the skeleton with matching
 * input/output implementations.
 * @param skeleton the skeleton to invoke
 * @param inputStream the request stream
 * @param outputStream the response stream
 * @throws Throwable if invocation failed
 */
protected void doInvoke(HessianSkeleton skeleton, InputStream inputStream, OutputStream outputStream)
        throws Throwable {
    // The serializer may load classes; run under this exporter's class loader.
    ClassLoader originalClassLoader = overrideThreadContextClassLoader();
    try {
        InputStream isToUse = inputStream;
        OutputStream osToUse = outputStream;

        // Optionally wrap both streams in Hessian debug decorators that echo
        // the protocol traffic to the debug log.
        if (this.debugLogger != null && this.debugLogger.isDebugEnabled()) {
            // NOTE(review): the PrintWriter is closed by try-with-resources
            // before dis/dos are used; presumably CommonsLogWriter tolerates
            // use-after-close or the debug streams only need the writer during
            // startTop2() — confirm against CommonsLogWriter semantics.
            try (PrintWriter debugWriter = new PrintWriter(new CommonsLogWriter(this.debugLogger))) {
                @SuppressWarnings("resource")
                HessianDebugInputStream dis = new HessianDebugInputStream(inputStream, debugWriter);
                @SuppressWarnings("resource")
                HessianDebugOutputStream dos = new HessianDebugOutputStream(outputStream, debugWriter);
                dis.startTop2();
                dos.startTop2();
                isToUse = dis;
                osToUse = dos;
            }
        }

        // The format-detection byte read below may need to be "unread" (the
        // 'C' branch resets), so guarantee mark/reset support.
        if (!isToUse.markSupported()) {
            isToUse = new BufferedInputStream(isToUse);
            isToUse.mark(1);
        }

        // First byte selects the protocol variant.
        int code = isToUse.read();
        int major;
        int minor;
        AbstractHessianInput in;
        AbstractHessianOutput out;

        if (code == 'H') {
            // Hessian 2.0 stream: 'H' is followed by a major/minor version pair.
            major = isToUse.read();
            minor = isToUse.read();
            if (major != 0x02) {
                throw new IOException("Version " + major + '.' + minor + " is not understood");
            }
            in = new Hessian2Input(isToUse);
            out = new Hessian2Output(osToUse);
            in.readCall();
        }
        else if (code == 'C') {
            // Hessian 2.0 call... for some reason not handled in HessianServlet!
            // Push the 'C' back so Hessian2Input can consume it itself.
            // NOTE(review): relies on the mark(1) above; if the stream already
            // supported mark, no mark was set before this reset — verify the
            // stream types in play.
            isToUse.reset();
            in = new Hessian2Input(isToUse);
            out = new Hessian2Output(osToUse);
            in.readCall();
        }
        else if (code == 'c') {
            // Hessian 1.0 call: version pair follows; replies may still use 2.0
            // framing when major >= 2.
            major = isToUse.read();
            minor = isToUse.read();
            in = new HessianInput(isToUse);
            if (major >= 2) {
                out = new Hessian2Output(osToUse);
            }
            else {
                out = new HessianOutput(osToUse);
            }
        }
        else {
            throw new IOException(
                    "Expected 'H'/'C' (Hessian 2.0) or 'c' (Hessian 1.0) in hessian input at " + code);
        }

        in.setSerializerFactory(this.serializerFactory);
        out.setSerializerFactory(this.serializerFactory);
        if (this.remoteResolver != null) {
            in.setRemoteResolver(this.remoteResolver);
        }

        try {
            skeleton.invoke(in, out);
        }
        finally {
            // Close the Hessian wrappers and the (possibly decorated) raw
            // streams; failures here must not mask an invocation exception.
            try {
                in.close();
                isToUse.close();
            }
            catch (IOException ex) {
                // ignore
            }
            try {
                out.close();
                osToUse.close();
            }
            catch (IOException ex) {
                // ignore
            }
        }
    }
    finally {
        resetThreadContextClassLoader(originalClassLoader);
    }
}
From source file:com.amazonaws.client.service.AmazonHttpClient.java
/** * Resets the specified request, so that it can be sent again, after * receiving the specified error. If a problem is encountered with resetting * the request, then an AmazonClientException is thrown with the original * error as the cause (not an error about being unable to reset the stream). * * @param request/* w w w. j ava 2s . c o m*/ * The request being executed that failed and needs to be reset. * @param cause * The original error that caused the request to fail. * * @throws AmazonClientException * If the request can't be reset. */ private void resetRequestAfterError(Request<?> request, Exception cause) throws AmazonClientException { InputStream is = request.getContent(); if (is == null) { return; // no reset needed } if (!is.markSupported()) { throw new AmazonClientException("Encountered an exception and stream is not resettable", cause); } try { is.reset(); } catch (IOException e) { // This exception comes from being unable to reset the input stream, // so throw the original, more meaningful exception if (log.isDebugEnabled()) log.debug("Failed to reset the input stream", e); throw new AmazonClientException("Encountered an exception and couldn't reset the stream to retry", cause); } }
From source file:org.apache.falcon.resource.AbstractEntityManager.java
protected Entity deserializeEntity(HttpServletRequest request, EntityType entityType) throws IOException, FalconException { EntityParser<?> entityParser = EntityParserFactory.getParser(entityType); InputStream xmlStream = request.getInputStream(); if (xmlStream.markSupported()) { xmlStream.mark(XML_DEBUG_LEN); // mark up to debug len }//from w w w.ja v a2s.c o m try { return entityParser.parse(xmlStream); } catch (FalconException e) { if (LOG.isDebugEnabled() && xmlStream.markSupported()) { try { xmlStream.reset(); String xmlData = getAsString(xmlStream); LOG.debug("XML DUMP for ({}): {}", entityType, xmlData, e); } catch (IOException ignore) { // ignore } } throw e; } }
From source file:uk.ac.kcl.tika.parsers.PDFPreprocessorParser.java
@Override public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { ImageMagickConfig config = context.get(ImageMagickConfig.class, DEFAULT_IMAGEMAGICK_CONFIG); // If ImageMagick is not on the path with the current config, do not try to run OCR // getSupportedTypes shouldn't have listed us as handling it, so this should only // occur if someone directly calls this parser, not via DefaultParser or similar // TemporaryResources tmp = new TemporaryResources(); //TikaInputStream pdfStream = TikaInputStream.get(stream); PDFParser pdfParser = new PDFParser(); //create temp handlers to investigate object BodyContentHandler body = new BodyContentHandler(); Metadata pdfMetadata = new Metadata(); //needed to reset stream if (stream.markSupported()) { stream.mark(Integer.MAX_VALUE); }/*from w ww . java 2s .co m*/ //first do initial parse to see if there's subsantial content in pdf metadata already pdfParser.parse(stream, body, pdfMetadata, context); stream.reset(); //if there's content - reparse with official handlers/metadata. What else can you do? 
Also check imagemagick is available if (body.toString().length() > 100 || !hasImageMagick(config)) { pdfParser.parse(stream, handler, metadata, context); return; } else { //add the PDF metadata to the official metadata object Arrays.asList(pdfMetadata.names()).stream().forEach(name -> { metadata.add(name, pdfMetadata.get(name)); }); } //objects to hold file references for manipulation outside of Java File tiffFileOfPDF = null; File pdfFileFromStream = File.createTempFile("tempPDF", ".pdf"); try { FileUtils.copyInputStreamToFile(stream, pdfFileFromStream); tiffFileOfPDF = File.createTempFile("tempTIFF", ".tiff"); makeTiffFromPDF(pdfFileFromStream, tiffFileOfPDF, config); if (tiffFileOfPDF.exists()) { TesseractOCRParser tesseract = new TesseractOCRParser(); tesseract.parse(FileUtils.openInputStream(tiffFileOfPDF), handler, metadata, context); } } finally { if (tiffFileOfPDF.exists()) { tiffFileOfPDF.delete(); } if (pdfFileFromStream.exists()) { pdfFileFromStream.delete(); } } }
From source file:edu.harvard.hmdc.dvnplugin.DVNOAIUrlCacher.java
/**
 * Runs the plugin's login-page checker (if any) against the fetched content.
 * The stream is made rewindable, probed by the checker, and then rewound (or
 * re-fetched) so callers can read it from the beginning.
 *
 * @param input        the fetched content stream
 * @param headers      the response headers handed to the checker
 * @param lastModified last-modified value used when the stream must be re-fetched
 * @return a stream positioned at the start of the content
 * @throws IOException if the content cannot be read or rewound
 */
private InputStream checkLoginPage(InputStream input, Properties headers, String lastModified)
        throws IOException {
    LoginPageChecker loginChecker = au.getCrawlSpec().getLoginPageChecker();
    if (loginChecker == null) {
        logger.debug3("Didn't find a login page checker");
        return input;
    }

    logger.debug3("Found a login page checker");

    // The checker consumes the stream, so it must support rewinding.
    InputStream markable = input.markSupported() ? input : new BufferedInputStream(input);
    markable.mark(LOGIN_BUFFER_MAX);
    Reader pageReader = new InputStreamReader(markable, Constants.DEFAULT_ENCODING);
    try {
        if (loginChecker.isLoginPage(headers, pageReader)) {
            throw new CacheException.PermissionException("Found a login page");
        }
        // Not a login page: rewind (or re-fetch) so the caller reads from the start.
        return resetInputStream(markable, fetchUrl, lastModified);
    } catch (PluginException e) {
        throw new RuntimeException(e);
    }
}
From source file:edu.harvard.iq.dvn.lockss.plugin.DVNOAIUrlCacher.java
/**
 * Runs the plugin's login-page checker (if any) against the fetched content.
 * The stream is made rewindable, probed by the checker, and then rewound (or
 * re-fetched) so callers can read it from the beginning.
 *
 * @param input        the fetched content stream
 * @param headers      the response headers handed to the checker
 * @param lastModified last-modified value used when the stream must be re-fetched
 * @return a stream positioned at the start of the content
 * @throws IOException if the content cannot be read or rewound
 */
private InputStream checkLoginPage(InputStream input, Properties headers, String lastModified)
        throws IOException {
    LoginPageChecker checker = au.getCrawlSpec().getLoginPageChecker();
    if (checker != null) {
        logger.debug3("Found a login page checker");
        // The checker consumes the stream, so it must support rewinding.
        if (!input.markSupported()) {
            input = new BufferedInputStream(input);
        }
        // Mark limit is configurable so very large login pages can still be rewound.
        input.mark(CurrentConfig.getIntParam(PARAM_LOGIN_CHECKER_MARK_LIMIT,
                DEFAULT_LOGIN_CHECKER_MARK_LIMIT));
        Reader reader = new InputStreamReader(input, Constants.DEFAULT_ENCODING);
        try {
            if (checker.isLoginPage(headers, reader)) {
                throw new CacheException.PermissionException("Found a login page");
            } else {
                // Not a login page: rewind (or re-fetch) so callers read from the start.
                input = resetInputStream(input, fetchUrl, lastModified);
            }
        } catch (PluginException e) {
            throw new RuntimeException(e);
        }
    } else {
        logger.debug3("Didn't find a login page checker");
    }
    return input;
}
From source file:org.springframework.extensions.webscripts.connector.AuthenticatingConnector.java
public Response call(String uri, ConnectorContext context, InputStream in) { Response response = null;/* ww w.j a v a 2 s. c o m*/ boolean handshake = false; boolean firstcall = true; if (isAuthenticated()) { // try to call into the connector to see if we can successfully do this response = this.connector.call(uri, context, in); firstcall = false; if (logger.isDebugEnabled()) logger.debug("Received " + response.getStatus().getCode() + " on first call to: " + uri); // if there was an authentication challenge, handle here if (response.getStatus().getCode() == ResponseStatus.STATUS_UNAUTHORIZED) { handshake = true; } } else { handshake = true; } if (handshake) { handshake(); // ignore result // now that we've authenticated, try again if (in.markSupported()) { try { in.reset(); } catch (IOException ioErr) { // if we cannot reset the stream - there's nothing else we can do } } response = this.connector.call(uri, context, in); if (logger.isDebugEnabled()) logger.debug("Received " + response.getStatus().getCode() + " on " + (firstcall ? "first" : "second") + " call to: " + uri); } return response; }
From source file:com.amazonaws.http.AmazonHttpClient.java
/**
 * Internal method to execute the HTTP method given.
 * <p>Runs the sign/send/handle cycle in a retry loop: each iteration restores
 * the original parameters/headers, signs, sends, and either returns a
 * successful response, follows a 307 redirect, or records a retryable
 * exception and loops again (subject to the retry policy).
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    awsRequestMetrics.addProperty(Field.ServiceName, request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(Field.RequestCount, requestCount);
        if (requestCount > 1) { // retry: restore pristine params/headers
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }
        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;
        try {
            // Sign the request if a signer was provided (signer is resolved
            // once and cached across retries).
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(Field.RequestSigningTime);
                }
            }
            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }
            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);
            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }
            if (redirectedURI != null) {
                // A previous iteration saw a 307; deliver to the new location.
                httpRequest.setURI(redirectedURI);
            }
            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry, then back off.
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);
                awsRequestMetrics.startEvent(Field.RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(Field.RetryPauseTime);
                }
            }
            if (entity != null) {
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry: rewind body, then re-mark
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    // First attempt: mark so the body can be rewound on retry.
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }
            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(Field.HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(Field.HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);
            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse,
                        executionContext);
                return new Response<T>(response, httpResponse);
            } else if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode, apacheResponse.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation, redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID, null);
            } else {
                // Service error: parse it and decide whether to retry.
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                        apacheResponse);
                awsRequestMetrics.addProperty(Field.AWSRequestID, ase.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode, ase.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode, ase.getStatusCode());
                if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                        config.getRetryPolicy())) {
                    throw ase;
                }
                // Cache the retryable exception
                retriedException = ase;
                /*
                 * Checking for clock skew error again because we don't want to set the
                 * global time offset for every service exception.
                 */
                if (RetryUtils.isClockSkewError(ase)) {
                    int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                    SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
                }
                resetRequestAfterError(request, ase);
            }
        } catch (IOException ioe) {
            // Transport failure: retryable subject to the retry policy.
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            awsRequestMetrics.incrementCounter(Field.Exception);
            awsRequestMetrics.addProperty(Field.Exception, ioe);
            awsRequestMetrics.addProperty(Field.AWSRequestID, null);
            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }
            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } catch (Error e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release the it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}
From source file:com.amazonaws.client.service.AmazonHttpClient.java
/**
 * Internal method to execute the HTTP method given.
 * <p>Variant of the retry loop using the fluent metrics API: each iteration
 * restores the original parameters/headers, signs, sends, and either returns
 * a successful response, follows a 307 redirect ({@code continue}), or
 * records a retryable exception and loops again (subject to the retry
 * policy).
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics()
            .addPropertyWith(ServiceName, request.getServiceName())
            .addPropertyWith(ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(RequestCount, requestCount);
        if (requestCount > 1) { // retry: restore pristine params/headers
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }
        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;
        try {
            // Sign the request if a signer was provided (signer is resolved
            // once and cached across retries).
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(RequestSigningTime);
                }
            }
            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }
            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);
            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }
            if (redirectedURI != null) {
                // A previous iteration saw a 307; deliver to the new location.
                httpRequest.setURI(redirectedURI);
            }
            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry, then back off.
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);
                awsRequestMetrics.startEvent(RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(RetryPauseTime);
                }
            }
            if (entity != null) {
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry: rewind body, then re-mark
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    // First attempt: mark so the body can be rewound on retry.
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }
            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);
            final StatusLine statusLine = apacheResponse.getStatusLine();
            final int statusCode = statusLine == null ? -1 : statusLine.getStatusCode();
            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(StatusCode, statusCode);
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse, apacheResponse,
                        executionContext);
                return new Response<T>(response, httpResponse);
            }
            if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                if (log.isDebugEnabled())
                    log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addPropertyWith(StatusCode, statusCode)
                        .addPropertyWith(RedirectLocation, redirectedLocation)
                        .addPropertyWith(AWSRequestID, null);
                continue;
            }
            // Service error: parse it and decide whether to retry.
            leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
            final AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                    apacheResponse);
            awsRequestMetrics.addPropertyWith(AWSRequestID, ase.getRequestId())
                    .addPropertyWith(AWSErrorCode, ase.getErrorCode())
                    .addPropertyWith(StatusCode, ase.getStatusCode());
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                    config.getRetryPolicy())) {
                throw ase;
            }
            // Comment out for now. Ref: CR2662349
            // Preserve the cause of retry before retrying
            // awsRequestMetrics.addProperty(RetryCause, ase);
            if (RetryUtils.isThrottlingException(ase)) {
                awsRequestMetrics.incrementCounterWith(ThrottleException).addProperty(ThrottleException, ase);
            }
            // Cache the retryable exception
            retriedException = ase;
            /*
             * Checking for clock skew error again because we don't want to set the
             * global time offset for every service exception.
             */
            if (RetryUtils.isClockSkewError(ase)) {
                int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
            }
            resetRequestAfterError(request, ase);
        } catch (IOException ioe) {
            // Transport failure: retryable subject to the retry policy.
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            captureExceptionMetrics(ioe, awsRequestMetrics);
            awsRequestMetrics.addProperty(AWSRequestID, null);
            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }
            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw captureExceptionMetrics(e, awsRequestMetrics);
        } catch (Error e) {
            throw captureExceptionMetrics(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release the it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}