List of usage examples for java.io.InputStream.reset()
public synchronized void reset() throws IOException
Repositions this stream to the position at the time the mark method was last called on this input stream.
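A minimal sketch of that contract, for orientation only (it is not taken from any of the source files below; the class name, read limit, and sample bytes are invented): mark() records the current position and reset() rewinds to it, provided the stream supports marking and no more than the declared read limit has been consumed in between.

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkResetSketch {
    public static void main(String[] args) throws IOException {
        // BufferedInputStream supports mark/reset; many raw streams (e.g. socket streams) do not.
        InputStream in = new BufferedInputStream(new ByteArrayInputStream("hello".getBytes("US-ASCII")));
        if (in.markSupported()) {
            in.mark(16);            // remember the current position, valid while at most 16 bytes are read
            int first = in.read();  // consume one byte ('h')
            in.reset();             // rewind to the marked position
            int again = in.read();  // reads the same byte again
            System.out.println(first == again); // prints true
        }
        in.close();
    }
}

Most of the examples that follow use the same pattern: mark a bounded prefix, peek at it, then reset before handing the stream to its real consumer.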
From source file:org.codice.ddf.admin.application.rest.ApplicationUploadEndpoint.java
/**
 * Copies the attachment to a system file location and returns the copied file.
 *
 * @param attachment the attachment to copy and extract.
 * @param response the response object to manipulate if anything goes wrong.
 * @return The file of the copied attachment.
 */
private File createFileFromAttachement(Attachment attachment, Response response) {
    InputStream inputStream = null;
    String filename = null;
    File newFile = null;
    if (attachment.getContentDisposition() != null) {
        filename = attachment.getContentDisposition()
                .getParameter(FILENAME_CONTENT_DISPOSITION_PARAMETER_NAME);
    }
    if (StringUtils.isEmpty(filename)) {
        LOGGER.debug("Filename not found, using default.");
        filename = DEFAULT_FILE_NAME;
    } else {
        filename = FilenameUtils.getName(filename);
        LOGGER.debug("Filename: {}", filename);
    }
    try {
        inputStream = attachment.getDataHandler().getInputStream();
        if (inputStream != null && inputStream.available() == 0) {
            inputStream.reset();
        }
    } catch (IOException e) {
        LOGGER.warn("IOException reading stream from file attachment in multipart body", e);
        IOUtils.closeQuietly(inputStream);
    }
    if (filename.endsWith(JAR_EXT) || filename.endsWith(KAR_EXT)) {
        if (inputStream != null) {
            try {
                File uploadDir = new File(defaultFileLocation);
                if (!uploadDir.exists()) {
                    // mkdirs() returns false when the directory could not be created
                    if (!uploadDir.mkdirs()) {
                        LOGGER.warn("Unable to make directory");
                    }
                }
                newFile = new File(uploadDir, filename);
                FileUtils.copyInputStreamToFile(inputStream, newFile);
            } catch (IOException e) {
                LOGGER.warn("Unable to write file.", e);
                newFile = null;
            } finally {
                IOUtils.closeQuietly(inputStream);
            }
        } else {
            LOGGER.debug("No file attachment found");
        }
    } else {
        LOGGER.debug("Wrong file type.");
        Response.ResponseBuilder responseBuilder = Response.serverError();
        responseBuilder.status(HttpStatus.UNSUPPORTED_MEDIA_TYPE_415);
        IOUtils.closeQuietly(inputStream);
    }
    return newFile;
}
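Note that the snippet above calls reset() without a prior mark(), so it only works when the attachment's concrete stream type supports rewinding on its own; the base InputStream.reset() simply throws IOException. A defensive variant, shown here only as a sketch (the helper name and read limit are assumptions, not part of the DDF source), wraps the stream and marks it before the first read:

import java.io.BufferedInputStream;
import java.io.InputStream;

// Hypothetical helper, not part of the DDF code base: guarantee that a later reset()
// can rewind to the start of the stream for up to readLimit bytes.
final class MarkableStreams {
    static InputStream markable(InputStream raw, int readLimit) {
        InputStream in = raw.markSupported() ? raw : new BufferedInputStream(raw);
        in.mark(readLimit); // mark before any read so reset() returns to the beginning
        return in;
    }
}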
From source file:org.apache.fop.fonts.type1.PFMFile.java
/**
 * Parses a PFM file
 *
 * @param inStream The stream from which to read the PFM file.
 * @throws IOException In case of an I/O problem
 */
public void load(InputStream inStream) throws IOException {
    byte[] pfmBytes = IOUtils.toByteArray(inStream);
    InputStream bufin = inStream;
    bufin = new ByteArrayInputStream(pfmBytes);
    PFMInputStream in = new PFMInputStream(bufin);
    bufin.mark(512);
    short sh1 = in.readByte();
    short sh2 = in.readByte();
    if (sh1 == 128 && sh2 == 1) {
        //Found the first section header of a PFB file!
        throw new IOException("Cannot parse PFM file. You probably specified the PFB file"
                + " of a Type 1 font as parameter instead of the PFM.");
    }
    bufin.reset();
    byte[] b = new byte[16];
    bufin.read(b);
    if (new String(b, "US-ASCII").equalsIgnoreCase("StartFontMetrics")) {
        //Found the header of a AFM file!
        throw new IOException("Cannot parse PFM file. You probably specified the AFM file"
                + " of a Type 1 font as parameter instead of the PFM.");
    }
    bufin.reset();
    final int version = in.readShort();
    if (version != 256) {
        log.warn("PFM version expected to be '256' but got '" + version + "'."
                + " Please make sure you specify the PFM as parameter"
                + " and not the PFB or the AFM.");
    }
    //final long filesize = in.readInt();
    bufin.reset();
    loadHeader(in);
    loadExtension(in);
}
From source file:com.amazon.s3.http.AmazonHttpClient.java
/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T extends Object> T executeHelper(Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /*
     * add the service endpoint to the logs. You can infer service name from
     * service endpoint
     */
    awsRequestMetrics.addProperty(Field.ServiceName.name(), request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint.name(), request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    applyRequestData(request);

    int retryCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonServiceException exception = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new HashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    while (true) {
        awsRequestMetrics.setCounter(Field.AttemptCount.name(), retryCount + 1);
        if (retryCount > 0) {
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }

        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse response = null;

        try {
            // Sign the request if a signer was provided
            if (executionContext.getSigner() != null && executionContext.getCredentials() != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime.name());
                executionContext.getSigner().sign(request, executionContext.getCredentials());
                awsRequestMetrics.endEvent(Field.RequestSigningTime.name());
            }

            Log.d(TAG, "Sending Request: " + request.toString());

            httpRequest = httpRequestFactory.createHttpRequest(request, config, entity, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (retryCount > 0) {
                awsRequestMetrics.startEvent(Field.RetryPauseTime.name());
                pauseExponentially(retryCount, exception, executionContext.getCustomBackoffStrategy());
                awsRequestMetrics.endEvent(Field.RetryPauseTime.name());
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (retryCount > 0) {
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            exception = null;

            awsRequestMetrics.startEvent(Field.HttpRequestTime.name());
            response = httpClient.execute(httpRequest);
            awsRequestMetrics.endEvent(Field.HttpRequestTime.name());

            if (isRequestSuccessful(response)) {
                awsRequestMetrics.addProperty(Field.StatusCode.name(), response.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we
                 * should treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                return handleResponse(request, responseHandler, httpRequest, response, executionContext);
            } else if (isTemporaryRedirect(response)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = response.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                Log.d(TAG, "Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode.name(), response.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation.name(), redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID.name(), null);
            } else {
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                exception = handleErrorResponse(request, errorResponseHandler, httpRequest, response);
                awsRequestMetrics.addProperty(Field.AWSRequestID.name(), exception.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode.name(), exception.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode.name(), exception.getStatusCode());
                if (!shouldRetry(httpRequest, exception, retryCount)) {
                    throw exception;
                }
                resetRequestAfterError(request, exception);
            }
        } catch (IOException ioe) {
            Log.i(TAG, "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            awsRequestMetrics.addProperty(Field.Exception.name(), ioe.toString());
            awsRequestMetrics.addProperty(Field.AWSRequestID.name(), null);
            if (!shouldRetry(httpRequest, ioe, retryCount)) {
                throw new AmazonClientException("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            resetRequestAfterError(request, ioe);
        } finally {
            retryCount++;
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release the it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    response.getEntity().getContent().close();
                } catch (Throwable t) {
                }
            }
        }
    } /* end while (true) */
}
From source file:eu.europa.ec.markt.dss.validation102853.SignedDocumentValidator.java
/**
 * Guess the document format and return an appropriate document
 *
 * @param document The instance of DSSDocument to be validated
 * @return returns the specific instance of SignedDocumentValidator in terms of the document type
 */
public static SignedDocumentValidator fromDocument(final DSSDocument document) throws IOException {
    InputStream input = null;
    try {
        if (document.getName() != null && document.getName().toLowerCase().endsWith(".xml")) {
            try {
                return new XMLDocumentValidator(document);
            } catch (ParserConfigurationException e) {
                throw new IOException("Not a valid XML", e);
            } catch (SAXException e) {
                throw new IOException("Not a valid XML", e);
            }
        }
        input = new BufferedInputStream(document.openStream());
        input.mark(5);
        byte[] preamble = new byte[5];
        int read = input.read(preamble);
        input.reset();
        if (read < 5) {
            throw new RuntimeException("Not a signed document");
        }
        String preambleString = new String(preamble);
        byte[] xmlPreamble = new byte[] { '<', '?', 'x', 'm', 'l' };
        byte[] xmlUtf8 = new byte[] { -17, -69, -65, '<', '?' };
        if (Arrays.equals(preamble, xmlPreamble) || Arrays.equals(preamble, xmlUtf8)) {
            try {
                return new XMLDocumentValidator(document);
            } catch (ParserConfigurationException e) {
                throw new IOException("Not a valid XML", e);
            } catch (SAXException e) {
                throw new IOException("Not a valid XML", e);
            }
        } else if (preambleString.equals("%PDF-")) {
            return new PDFDocumentValidator(document);
        } else if (preamble[0] == 'P' && preamble[1] == 'K') {
            try {
                input.close();
            } catch (IOException e) {
            }
            input = null;
            return getInstanceForAsics(document);
        } else if (preambleString.getBytes()[0] == 0x30) {
            try {
                return new CMSDocumentValidator(document);
            } catch (CMSException e) {
                throw new IOException("Not a valid CAdES file", e);
            }
        } else {
            throw new RuntimeException("Document format not recognized/handled");
        }
    } finally {
        if (input != null) {
            try {
                input.close();
            } catch (IOException e) {
            }
        }
    }
}
From source file:org.infoscoop.request.filter.DetectTypeFilter.java
private boolean isXml(String contentType, InputStream is) throws IOException {
    if (contentType != null && (contentType.indexOf("text/xml") >= 0 || contentType.indexOf("application/xml") >= 0
            || contentType.indexOf("application/rss+xml") >= 0 || contentType.indexOf("application/rdf+xml") >= 0
            || contentType.indexOf("application/atom+xml") >= 0)) {
        return true;
    }
    is.mark(1);
    byte[] xmldec = new byte[500];
    is.read(xmldec);
    String xmlDecStr = new String(xmldec);
    is.reset();
    if (xmlDecStr.indexOf("<?xml") >= 0) {
        return true;
    }
    return false;
}
From source file:pt.lunacloud.http.AmazonHttpClient.java
/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T extends Object> T executeHelper(Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<LunacloudServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws LunacloudClientException, LunacloudServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    awsRequestMetrics.addProperty(Field.ServiceName.name(), request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint.name(), request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    applyRequestData(request);

    int retryCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    LunacloudServiceException exception = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new HashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    while (true) {
        awsRequestMetrics.setCounter(Field.AttemptCount.name(), retryCount + 1);
        if (retryCount > 0) {
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }

        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse response = null;

        try {
            // Sign the request if a signer was provided
            if (executionContext.getSigner() != null && executionContext.getCredentials() != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime.name());
                executionContext.getSigner().sign(request, executionContext.getCredentials());
                awsRequestMetrics.endEvent(Field.RequestSigningTime.name());
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, entity, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (retryCount > 0) {
                awsRequestMetrics.startEvent(Field.RetryPauseTime.name());
                pauseExponentially(retryCount, exception, executionContext.getCustomBackoffStrategy());
                awsRequestMetrics.endEvent(Field.RetryPauseTime.name());
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (retryCount > 0) {
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            exception = null;

            awsRequestMetrics.startEvent(Field.HttpRequestTime.name());
            response = httpClient.execute(httpRequest);
            awsRequestMetrics.endEvent(Field.HttpRequestTime.name());

            if (isRequestSuccessful(response)) {
                awsRequestMetrics.addProperty(Field.StatusCode.name(), response.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                return handleResponse(request, responseHandler, httpRequest, response, executionContext);
            } else if (isTemporaryRedirect(response)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = response.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode.name(), response.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation.name(), redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID.name(), null);
            } else {
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                exception = handleErrorResponse(request, errorResponseHandler, httpRequest, response);
                awsRequestMetrics.addProperty(Field.AWSRequestID.name(), exception.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode.name(), exception.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode.name(), exception.getStatusCode());
                if (!shouldRetry(httpRequest, exception, retryCount)) {
                    throw exception;
                }
                resetRequestAfterError(request, exception);
            }
        } catch (IOException ioe) {
            log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            awsRequestMetrics.addProperty(Field.Exception.name(), ioe.toString());
            awsRequestMetrics.addProperty(Field.AWSRequestID.name(), null);
            if (!shouldRetry(httpRequest, ioe, retryCount)) {
                throw new LunacloudClientException("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            resetRequestAfterError(request, ioe);
        } finally {
            retryCount++;
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release the it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    response.getEntity().getContent().close();
                } catch (Throwable t) {
                }
            }
        }
    } /* end while (true) */
}
From source file:org.jahia.test.services.acl.AclExtendedNodesPerformanceTest.java
private void createFoldersAndFiles(JCRNodeWrapper parent, int numLevelsToCreate, int numFilesPerFolder,
        InputStream fileContentStream, List<String> filePaths, Collection<Long> createFolderSamples,
        Collection<Long> uploadFileSamples) {
    for (int i = 0; i < numFilesPerFolder; i++) {
        String fileName = String.format(FOLDER_FILE_NAME, System.currentTimeMillis());
        try {
            fileContentStream.reset();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        try {
            long start = System.currentTimeMillis();
            parent.uploadFile(fileName, fileContentStream, FILE_CONTENT_TYPE);
            parent.getSession().save();
            uploadFileSamples.add(System.currentTimeMillis() - start);
            filePaths.add(parent.getNode(fileName).getPath());
        } catch (RepositoryException e) {
            throw new RuntimeException(e);
        }
    }
    if (numLevelsToCreate == 0) {
        return;
    }
    for (int i = 0; i < NUM_USER_FOLDERS_PER_LEVEL; i++) {
        String folderName = String.format(FOLDER_FILE_NAME, System.currentTimeMillis());
        JCRNodeWrapper folder;
        long start = System.currentTimeMillis();
        try {
            folder = parent.addNode(folderName, Constants.JAHIANT_FOLDER);
            parent.getSession().save();
        } catch (RepositoryException e) {
            throw new RuntimeException(e);
        }
        createFolderSamples.add(System.currentTimeMillis() - start);
        createFoldersAndFiles(folder, numLevelsToCreate - 1, numFilesPerFolder, fileContentStream, filePaths,
                createFolderSamples, uploadFileSamples);
    }
}
From source file:edu.utsa.sifter.FileInfo.java
Document makeDoc(final AbstractParser tika, final Analyzer analyzer, final long id, final InputStream data,
        final Map<String, Object> metadata, final String ext, final boolean noTikaAndTest) throws IOException {
    Document doc = new Document();
    DocMaker.addField(doc, "ID", Long.toString(id)); // makes querying easier if this is a string, counter-intuitively
    if (ext != null && !ext.isEmpty()) {
        DocMaker.addField(doc, "extension", ext);
    }
    DocMaker.addMetadata(doc, Metadata, "");
    final String fp = fullPath();
    data.mark((int) FileSize);
    try {
        if (noTikaAndTest) {
            return rawDoc(analyzer, doc, data, fp, noTikaAndTest);
        } else {
            DocMaker.addBody(doc, basename(), data, tika, analyzer, false);
        }
    } catch (IOException ex) {
        data.reset();
        // System.err.println("Could not extract body from " + fullPath() + ". " + ex.toString());
        return rawDoc(analyzer, doc, data, fp, noTikaAndTest);
    } catch (SAXException ex) {
        data.reset();
        // System.err.println("Had SAXException on body of " + fp + ". " + ex.toString());
        return rawDoc(analyzer, doc, data, fp, noTikaAndTest);
    } catch (TikaException ex) {
        data.reset();
        // System.err.println("Extracting text raw. Had TikaException on body of " + fp + ". " + ex.toString());
        return rawDoc(analyzer, doc, data, fp, noTikaAndTest);
    }
    return doc;
}
From source file:cn.ctyun.amazonaws.http.AmazonHttpClient.java
/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T extends Object> T executeHelper(Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    awsRequestMetrics.addProperty(Field.ServiceName.name(), request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint.name(), request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);

    int retryCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonServiceException exception = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new HashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    while (true) {
        awsRequestMetrics.setCounter(Field.AttemptCount.name(), retryCount + 1);
        if (retryCount > 0) {
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }

        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse response = null;

        try {
            // Sign the request if a signer was provided
            if (executionContext.getSigner() != null && executionContext.getCredentials() != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime.name());
                executionContext.getSigner().sign(request, executionContext.getCredentials());
                awsRequestMetrics.endEvent(Field.RequestSigningTime.name());
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, entity, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (retryCount > 0) {
                awsRequestMetrics.startEvent(Field.RetryPauseTime.name());
                pauseExponentially(retryCount, exception, executionContext.getCustomBackoffStrategy());
                awsRequestMetrics.endEvent(Field.RetryPauseTime.name());
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (retryCount > 0) {
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            exception = null;

            awsRequestMetrics.startEvent(Field.HttpRequestTime.name());
            response = httpClient.execute(httpRequest);
            awsRequestMetrics.endEvent(Field.HttpRequestTime.name());

            if (isRequestSuccessful(response)) {
                awsRequestMetrics.addProperty(Field.StatusCode.name(), response.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                return handleResponse(request, responseHandler, httpRequest, response, executionContext);
            } else if (isTemporaryRedirect(response)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = response.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode.name(), response.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation.name(), redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID.name(), null);
            } else {
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                exception = handleErrorResponse(request, errorResponseHandler, httpRequest, response);
                awsRequestMetrics.addProperty(Field.AWSRequestID.name(), exception.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode.name(), exception.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode.name(), exception.getStatusCode());
                if (!shouldRetry(httpRequest, exception, retryCount)) {
                    throw exception;
                }
                resetRequestAfterError(request, exception);
            }
        } catch (IOException ioe) {
            log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            awsRequestMetrics.addProperty(Field.Exception.name(), ioe.toString());
            awsRequestMetrics.addProperty(Field.AWSRequestID.name(), null);
            if (!shouldRetry(httpRequest, ioe, retryCount)) {
                throw new AmazonClientException("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            resetRequestAfterError(request, ioe);
        } finally {
            retryCount++;
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release the it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    response.getEntity().getContent().close();
                } catch (Throwable t) {
                }
            }
        }
    } /* end while (true) */
}
From source file:org.geotools.data.wfs.internal.v1_1.TinyOwsTest.java
@Test
public void testGetFeatureByIncludeAndOperatorAndInclude() throws Exception {
    WFSDataStore wfs = getWFSDataStore(new TinyOwsMockHttpClient() {
        @Override
        public HTTPResponse post(URL url, InputStream postContent, String postContentType) throws IOException {
            String request = new String(IOUtils.toByteArray(postContent), "UTF-8");
            if (isResultsRequest(request, "<wfs:GetFeature", "maxFeatures=\"20\"", "resultType=\"results\"",
                    "<ogc:PropertyIsGreaterThan")) {
                assertXMLEqual("tinyows/GetFeatureIncludeAndPropertyGreaterThanAndIncludeRequest.xml", request);
                return new TestHttpResponse(TestData.getResource(this, "tinyows/GetFirstFeatures.xml"), "text/xml");
            } else {
                postContent.reset();
                return super.post(url, postContent, postContentType);
            }
        }
    });
    SimpleFeatureSource source = wfs.getFeatureSource(typeName);
    FilterFactory2 ff = CommonFactoryFinder.getFilterFactory2();
    Filter and = ff
            .and(Arrays.asList(Filter.INCLUDE, ff.greater(ff.property("gid"), ff.literal(0)), Filter.INCLUDE));
    Query query = new Query(typeName.getLocalPart(), and, 20, Query.ALL_NAMES, "my query");
    iterate(source.getFeatures(query), 20, false);
}