List of usage examples for java.io InputStream reset
public synchronized void reset() throws IOException
Repositions this stream to the position at the time the mark method was last called on this input stream.
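A minimal, self-contained sketch of that contract before the real-world examples: reset() rewinds to wherever mark(readlimit) was called, provided no more than readlimit bytes have been read in between and the stream reports markSupported() (BufferedInputStream and ByteArrayInputStream both do):

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkResetDemo {
    public static void main(String[] args) throws IOException {
        InputStream in = new BufferedInputStream(
                new ByteArrayInputStream("hello world".getBytes("UTF-8")));
        in.mark(16);                  // mark stays valid while at most 16 bytes are read
        byte[] peek = new byte[5];
        int n = in.read(peek);        // consumes "hello"
        System.out.println(new String(peek, 0, n, "UTF-8"));
        in.reset();                   // rewind to the mark
        n = in.read(peek);            // reads "hello" a second time
        System.out.println(new String(peek, 0, n, "UTF-8"));
    }
}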
From source file:com.amazonaws.services.glacier.transfer.ArchiveTransferManager.java

private UploadResult uploadInMultipleParts(final String accountId, final String vaultName,
        final String archiveDescription, final File file, ProgressListener progressListener) {
    long partSize = calculatePartSize(file.length());
    String partSizeString = Long.toString(partSize);

    publishProgress(progressListener, ProgressEventType.TRANSFER_PREPARING_EVENT);
    String uploadId = null;
    try {
        InitiateMultipartUploadResult initiateResult = glacier.initiateMultipartUpload(
                new InitiateMultipartUploadRequest().withAccountId(accountId)
                        .withArchiveDescription(archiveDescription).withVaultName(vaultName)
                        .withPartSize(partSizeString));
        uploadId = initiateResult.getUploadId();
    } catch (Throwable t) {
        publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw failure(t);
    }
    publishProgress(progressListener, ProgressEventType.TRANSFER_STARTED_EVENT);

    try {
        List<byte[]> binaryChecksums = new LinkedList<byte[]>();

        long currentPosition = 0;
        while (currentPosition < file.length()) {
            long length = partSize;
            if (currentPosition + partSize > file.length()) {
                length = file.length() - currentPosition;
            }

            Exception failedException = null;
            boolean completed = false;
            int tries = 0;
            while (!completed && tries < 5) {
                tries++;
                InputStream inputSubStream = newInputSubstream(file, currentPosition, length);
                inputSubStream.mark(-1);
                String checksum = TreeHashGenerator.calculateTreeHash(inputSubStream);
                byte[] binaryChecksum = BinaryUtils.fromHex(checksum);
                inputSubStream.reset();
                UploadMultipartPartRequest req = new UploadMultipartPartRequest()
                        .withAccountId(accountId).withChecksum(checksum).withBody(inputSubStream)
                        .withRange("bytes " + currentPosition + "-" + (currentPosition + length - 1) + "/*")
                        .withUploadId(uploadId).withVaultName(vaultName)
                        .withGeneralProgressListener(progressListener);

                try {
                    glacier.uploadMultipartPart(req);
                    completed = true;
                    binaryChecksums.add(binaryChecksum);
                } catch (Exception e) {
                    failedException = e;
                } finally {
                    closeQuietly(inputSubStream, log);
                }
            }
            if (!completed && failedException != null)
                throw failedException;
            currentPosition += partSize;
        }

        String checksum = TreeHashGenerator.calculateTreeHash(binaryChecksums);
        String archiveSize = Long.toString(file.length());
        CompleteMultipartUploadResult completeMultipartUploadResult = glacier.completeMultipartUpload(
                new CompleteMultipartUploadRequest().withAccountId(accountId).withArchiveSize(archiveSize)
                        .withVaultName(vaultName).withChecksum(checksum).withUploadId(uploadId));

        String artifactId = completeMultipartUploadResult.getArchiveId();
        publishProgress(progressListener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
        return new UploadResult(artifactId);
    } catch (Throwable t) {
        publishProgress(progressListener, ProgressEventType.TRANSFER_FAILED_EVENT);
        glacier.abortMultipartUpload(new AbortMultipartUploadRequest(accountId, vaultName, uploadId));
        throw failure(t, "Unable to finish the upload");
    }
}
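The Glacier uploader reads each part twice: once to compute the tree hash and once to send the body, with mark()/reset() bracketing the first pass. The SDK's file-backed substream evidently tolerates mark(-1); with a general-purpose InputStream the readlimit has to cover every byte read before reset(). A minimal sketch of the same two-pass idiom, assuming a markable stream and using SHA-256 in place of Glacier's tree hash:

import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public final class TwoPassUpload {
    /**
     * Hashes the next partLength bytes, then rewinds so the same bytes
     * can be consumed again (e.g. by an upload call).
     */
    static byte[] hashThenRewind(InputStream part, int partLength)
            throws IOException, NoSuchAlgorithmException {
        if (!part.markSupported()) {
            throw new IllegalArgumentException("stream must support mark/reset");
        }
        part.mark(partLength);                     // readlimit covers the whole first pass
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        byte[] buf = new byte[8192];
        long remaining = partLength;
        int n;
        while (remaining > 0
                && (n = part.read(buf, 0, (int) Math.min(buf.length, remaining))) != -1) {
            md.update(buf, 0, n);
            remaining -= n;
        }
        part.reset();                              // second pass starts back at the mark
        return md.digest();
    }
}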
From source file:org.lockss.util.CharsetUtil.java
/**
 * This will guess the charset of an input stream. If the input stream does
 * not support mark/reset, an IllegalArgumentException is thrown.
 * @param in an input stream which we will be checking
 * @return the charset name; defaults to UTF-8 if nothing could be determined
 * with greater than 50% accuracy
 * @throws IOException if the read fails
 */
public static String guessCharsetName(InputStream in) throws IOException {
    if (!in.markSupported())
        throw new IllegalArgumentException("InputStream must support mark.");
    ByteArrayOutputStream buffered = new ByteArrayOutputStream();
    byte[] buf = new byte[1024];
    in.mark(2048);
    int len = in.read(buf);
    if (len <= 0) {
        return UTF8; // this is just a default for 0 len stream
    }
    // If the charset is specified in the document, use that.
    String charset = findCharsetInText(buf, len);
    if (charset == null) { // we didn't find it, check BOM
        if (hasUtf8BOM(buf, len)) {
            charset = UTF8;
            // Check UTF32 before UTF16 since a little endian UTF16 BOM is a prefix of
            // a little endian UTF32 BOM.
        } else if (hasUtf32BEBOM(buf, len)) {
            charset = UTF32BE;
        } else if (hasUtf32LEBOM(buf, len)) {
            charset = UTF32LE;
        } else if (hasUtf16BEBOM(buf, len)) {
            charset = UTF16BE;
        } else if (hasUtf16LEBOM(buf, len)) {
            charset = UTF16LE;
        } else if (hasUtf7BOM(buf, len)) {
            charset = UTF7;
        } else if (hasUtf1BOM(buf, len)) {
            charset = UTF1;
        } else {
            // Use icu4j to guess an encoding.
            charset = guessCharsetFromBytes(buf);
        }
    }
    if (charset != null) {
        charset = supportedCharsetName(charset);
    }
    if (charset == null) {
        charset = UTF8;
    }
    in.reset();
    return charset;
}
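guessCharsetName() refuses streams without mark support, so a caller holding something like a raw FileInputStream (which does not support mark/reset) has to wrap it first. A small usage sketch of that wrapping:

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class CharsetSniffDemo {
    public static void main(String[] args) throws IOException {
        // FileInputStream.markSupported() is false; BufferedInputStream adds
        // the buffering that makes mark()/reset() work.
        try (InputStream in = new BufferedInputStream(new FileInputStream(args[0]))) {
            String charset = org.lockss.util.CharsetUtil.guessCharsetName(in);
            // guessCharsetName() resets the stream before returning, so 'in'
            // still delivers the document from its first byte, e.g. to
            // new InputStreamReader(in, charset).
            System.out.println("guessed: " + charset);
        }
    }
}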
From source file:org.sipfoundry.sipxconfig.phonebook.PhonebookManagerImpl.java
public String getEncoding(InputStream is) throws IOException {
    byte[] buffer = new byte[4096];
    is.mark(0);
    is.read(buffer);
    is.reset();

    File tempFile = File.createTempFile("PhonebookFileEntryTemp", null);
    FileOutputStream out = new FileOutputStream(tempFile);
    out.write(buffer);
    out.flush();
    out.close();
    String encoding = CharsetToolkit.guessEncoding(tempFile, buffer.length).displayName();
    tempFile.delete();
    return encoding;
}
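Note the mark(0) above: the readlimit argument is a promise about how many bytes will be read before reset(). ByteArrayInputStream ignores it, but a BufferedInputStream handed to this method may invalidate the mark once more than readlimit bytes pass it, making reset() throw. A defensive variant of the probe, with the readlimit sized to the buffer (ProbeUtil is a hypothetical helper, not part of the class above):

import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

public final class ProbeUtil {
    /** Reads up to probeSize bytes and rewinds; the stream must support mark/reset. */
    public static byte[] probe(InputStream is, int probeSize) throws IOException {
        if (!is.markSupported()) {
            throw new IllegalArgumentException("mark/reset not supported");
        }
        byte[] buffer = new byte[probeSize];
        is.mark(probeSize);            // readlimit covers the probe; mark(0) would not
        int len = is.read(buffer);     // a single read() may return fewer bytes than asked
        is.reset();
        return len <= 0 ? new byte[0] : Arrays.copyOf(buffer, len);
    }
}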
From source file:edu.umd.cs.marmoset.modelClasses.ZipFileAggregator.java
/**
 * Adds a zipfile from an inputStream to the aggregate zipfile.
 *
 * @param dirName name of the top-level directory that will be created in
 *        the aggregate zip file
 * @param inputStream the inputStream to the zipfile
 * @throws IOException
 * @throws BadInputZipFileException
 */
private void addZipFileFromInputStream(String dirName, long time, InputStream inputStream)
        throws IOException, BadInputZipFileException {
    // First pass: just scan through the contents of the
    // input file to make sure it's really valid.
    ZipInputStream zipInput = null;
    try {
        zipInput = new ZipInputStream(new BufferedInputStream(inputStream));
        ZipEntry entry;
        while ((entry = zipInput.getNextEntry()) != null) {
            zipInput.closeEntry();
        }
    } catch (IOException e) {
        throw new BadInputZipFileException("Input zip file seems to be invalid", e);
    } finally {
        if (zipInput != null)
            zipInput.close();
    }

    // FIXME: It is probably wrong to call reset() on an arbitrary input stream;
    // for this application the inputStream will only be a ByteArrayInputStream
    // or a FileInputStream.
    inputStream.reset();

    // Second pass: read each entry from the input zip file,
    // writing it to the output file.
    zipInput = null;
    try {
        // add the root directory with the correct timestamp
        if (time > 0L) {
            // Create output entry
            ZipEntry outputEntry = new ZipEntry(dirName + "/");
            outputEntry.setTime(time);
            zipOutput.putNextEntry(outputEntry); // write the entry before closing it
            zipOutput.closeEntry();
        }
        zipInput = new ZipInputStream(new BufferedInputStream(inputStream));
        ZipEntry entry;
        while ((entry = zipInput.getNextEntry()) != null) {
            try {
                String name = entry.getName();
                // Convert absolute paths to relative
                if (name.startsWith("/")) {
                    name = name.substring(1);
                }
                // Prepend directory name
                name = dirName + "/" + name;
                // Create output entry
                ZipEntry outputEntry = new ZipEntry(name);
                if (time > 0L)
                    outputEntry.setTime(time);
                zipOutput.putNextEntry(outputEntry);
                // Copy zip input to output
                CopyUtils.copy(zipInput, zipOutput);
            } catch (Exception zex) {
                // ignore it
            } finally {
                zipInput.closeEntry();
                zipOutput.closeEntry();
            }
        }
    } finally {
        if (zipInput != null) {
            try {
                zipInput.close();
            } catch (IOException ignore) {
                // Ignore
            }
        }
    }
}
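The FIXME is warranted: InputStream.reset() is unsupported by default, and FileInputStream in particular inherits that default, so of the two types the comment names only a ByteArrayInputStream would actually survive the second pass. When the source cannot be reset, copying it once into memory yields a stream that can be replayed any number of times; a sketch, memory permitting:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

public final class ReplayableStreams {
    /**
     * Copies the remaining bytes so the content can be read any number of
     * times, suitable for the two-pass validation/copy done above when the
     * source stream cannot be reset.
     */
    public static ByteArrayInputStream toReplayable(InputStream in) throws IOException {
        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        byte[] buf = new byte[8192];
        int n;
        while ((n = in.read(buf)) != -1) {
            sink.write(buf, 0, n);
        }
        // ByteArrayInputStream honors reset() regardless of any readlimit.
        return new ByteArrayInputStream(sink.toByteArray());
    }
}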
From source file:org.jclouds.kinetic.strategy.internal.KineticStorageStrategyImpl.java
@Override
public String putBlob(final String containerName, final Blob blob) throws IOException {
    String blobKey = blob.getMetadata().getName();
    Payload payload = blob.getPayload();

    InputStream payloadStream = payload.openStream();
    HashingInputStream his = new HashingInputStream(Hashing.md5(), payloadStream);
    // Reset input stream back to beginning
    payloadStream.reset();

    kineticContainerNameValidator.validate(containerName);
    kineticBlobKeyValidator.validate(blobKey);
    if (getDirectoryBlobSuffix(blobKey) != null) {
        return putDirectoryBlob(containerName, blob);
    }

    long fileLength = payload.getContentMetadata().getContentLength();
    long chunksRequired = numberOfChunksForSize(fileLength);
    int chunkDataLength = KineticConstants.PROPERTY_CHUNK_SIZE_BYTES
            - KineticConstants.PROPERTY_CHUNK_FULL_HEADER_SIZE_BYTES;
    int currentChunk = 0;
    long fileId = -1;
    try {
        fileId = KineticDatabaseUtils.getInstance().getFileIdFromDatabase(containerName + "/" + blobKey);
    } catch (SQLException sqle) {
        sqle.printStackTrace();
    }
    while (currentChunk < chunksRequired) {
        Chunk chunk = new Chunk(this, fileId, currentChunk);
        byte[] chunkData = new byte[KineticConstants.PROPERTY_CHUNK_SIZE_BYTES];

        // Get header type values
        Map<String, String> headers = getChunkHeaders(containerName, blobKey, currentChunk);
        String chunkKey = getChunkKey(containerName, blobKey, currentChunk);

        // Set header values into the actual data of the chunk
        byte[] headerBytes = chunkKey.getBytes("UTF-8");
        for (int i = 0; i < headerBytes.length; i++) {
            chunkData[i] = headerBytes[i];
        }

        // Read data from blob into chunk
        payload.openStream().read(chunkData, headerBytes.length, chunkDataLength);
        chunk.setData(chunkData);

        // Send data to KDCC
        try {
            KineticDatabaseUtils.getInstance().addChunkToDatabase(chunkKey, chunkData);
        } catch (SQLException sqle) {
            return null;
        }
        currentChunk++; // advance to the next chunk
    }
    try {
        KineticDatabaseUtils.getInstance().addFileToDatabase(containerName + "/" + blobKey, fileLength);
    } catch (SQLException e) {
        e.printStackTrace();
    }
    if (payload != null) {
        payload.release();
    }
    return base16().lowerCase().encode(his.hash().asBytes());
}
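Two details here are worth flagging. First, reset() is called on the raw payload stream without a prior mark(), which only works for stream types that rewind to the start by default (ByteArrayInputStream-style sources again). Second, Guava's HashingInputStream only hashes bytes that are read through it, and this method reads chunk data from fresh payload.openStream() calls instead, so the final his.hash() may not cover the blob. The intended pattern reads through the wrapper; a minimal sketch:

import com.google.common.hash.Hashing;
import com.google.common.hash.HashingInputStream;
import com.google.common.io.ByteStreams;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public final class HashWhileCopying {
    /** Copies in to out and returns the MD5 of exactly the bytes that were copied. */
    public static byte[] copyAndHash(InputStream in, OutputStream out) throws IOException {
        HashingInputStream his = new HashingInputStream(Hashing.md5(), in);
        ByteStreams.copy(his, out);    // every byte is hashed as it is read
        return his.hash().asBytes();
    }
}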
From source file:com.temenos.interaction.media.odata.xml.atom.AtomXMLProvider.java
/**
 * Method to verify whether the received stream has content or is empty.
 * @param stream Stream to check
 * @return verified stream
 * @throws IOException
 */
private InputStream verifyContentReceieved(InputStream stream) throws IOException {

    if (stream == null) { // Check if it is null
        LOGGER.debug("Request stream received as null");
        return null;
    } else if (stream.markSupported()) { // Check stream supports mark/reset
        // mark() and read the first byte just to check
        stream.mark(1);
        final int bytesRead = stream.read(new byte[1]);
        if (bytesRead != -1) { // stream not empty
            stream.reset(); // reset the stream as if untouched
            return stream;
        } else { // stream empty
            LOGGER.debug("Request received with empty body");
            return null;
        }
    } else {
        // Panic! this stream does not support mark/reset, try with
        // PushbackInputStream as a last resort
        int bytesRead;
        PushbackInputStream pbs = new PushbackInputStream(stream);
        if ((bytesRead = pbs.read()) != -1) {
            // Contents detected, unread and return
            pbs.unread(bytesRead);
            return pbs;
        } else {
            // Empty stream detected
            LOGGER.debug("Request received with empty body!");
            return null;
        }
    }
}
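The last branch is a generally useful fallback: PushbackInputStream makes any stream peekable by one byte (or more, with the constructor that takes a buffer size) without requiring mark support. The same check, distilled into a helper (nullIfEmpty is a hypothetical name):

import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;

public final class PeekUtil {
    /**
     * Returns null for an empty stream, otherwise a stream that still yields
     * every byte, even when the original does not support mark/reset.
     */
    public static InputStream nullIfEmpty(InputStream stream) throws IOException {
        PushbackInputStream pbs = new PushbackInputStream(stream);
        int first = pbs.read();
        if (first == -1) {
            return null;              // nothing to read
        }
        pbs.unread(first);            // push the byte back; the next read() sees it again
        return pbs;
    }
}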
From source file:org.dataconservancy.packaging.tool.impl.AnnotationDrivenPackageStateSerializer.java
boolean isArchiveStream(InputStream in) {
    if (in == null) {
        throw new IllegalArgumentException("Stream must not be null.");
    }

    if (!in.markSupported()) {
        throw new IllegalArgumentException("Mark is not supported.");
    }

    final byte[] signature = new byte[12];
    in.mark(signature.length);
    int signatureLength;
    try {
        signatureLength = IOUtils.readFully(in, signature);
        in.reset();
    } catch (IOException e) {
        throw new RuntimeException(String.format(ERR_UNMARSHALLING_STREAM, "<unknown>", e.getMessage()), e);
    }

    return ZipArchiveInputStream.matches(signature, signatureLength);
}
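The shape above (mark, read a fixed-length signature fully, reset, compare) carries over to any magic-number check. A sketch for the plain ZIP local-file-header signature PK\x03\x04, using a manual read loop in place of the IOUtils.readFully helper used above:

import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;

public final class ZipSniffer {
    private static final byte[] ZIP_MAGIC = {0x50, 0x4B, 0x03, 0x04}; // "PK\3\4"

    /** Peeks at the first four bytes without consuming them. */
    public static boolean looksLikeZip(InputStream in) throws IOException {
        if (!in.markSupported()) {
            throw new IllegalArgumentException("Mark is not supported.");
        }
        byte[] sig = new byte[ZIP_MAGIC.length];
        in.mark(sig.length);
        int off = 0, n;
        while (off < sig.length && (n = in.read(sig, off, sig.length - off)) != -1) {
            off += n;                  // loop because read() may return fewer bytes than asked
        }
        in.reset();
        return off == sig.length && Arrays.equals(sig, ZIP_MAGIC);
    }
}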
From source file:com.amazonaws.client.service.AmazonHttpClient.java
/**
 * Resets the specified request, so that it can be sent again, after
 * receiving the specified error. If a problem is encountered with resetting
 * the request, then an AmazonClientException is thrown with the original
 * error as the cause (not an error about being unable to reset the stream).
 *
 * @param request
 *            The request being executed that failed and needs to be reset.
 * @param cause
 *            The original error that caused the request to fail.
 *
 * @throws AmazonClientException
 *             If the request can't be reset.
 */
private void resetRequestAfterError(Request<?> request, Exception cause) throws AmazonClientException {
    InputStream is = request.getContent();
    if (is == null) {
        return; // no reset needed
    }
    if (!is.markSupported()) {
        throw new AmazonClientException("Encountered an exception and stream is not resettable", cause);
    }
    try {
        is.reset();
    } catch (IOException e) {
        // This exception comes from being unable to reset the input stream,
        // so throw the original, more meaningful exception
        if (log.isDebugEnabled())
            log.debug("Failed to reset the input stream", e);
        throw new AmazonClientException("Encountered an exception and couldn't reset the stream to retry",
                cause);
    }
}
From source file:org.springframework.extensions.webscripts.connector.AuthenticatingConnector.java
public Response call(String uri, ConnectorContext context, InputStream in) {
    Response response = null;
    boolean handshake = false;
    boolean firstcall = true;

    if (isAuthenticated()) {
        // try to call into the connector to see if we can successfully do this
        response = this.connector.call(uri, context, in);
        firstcall = false;

        if (logger.isDebugEnabled())
            logger.debug("Received " + response.getStatus().getCode() + " on first call to: " + uri);

        // if there was an authentication challenge, handle here
        if (response.getStatus().getCode() == ResponseStatus.STATUS_UNAUTHORIZED) {
            handshake = true;
        }
    } else {
        handshake = true;
    }

    if (handshake) {
        handshake(); // ignore result

        // now that we've authenticated, try again
        if (in.markSupported()) {
            try {
                in.reset();
            } catch (IOException ioErr) {
                // if we cannot reset the stream - there's nothing else we can do
            }
        }
        response = this.connector.call(uri, context, in);

        if (logger.isDebugEnabled())
            logger.debug("Received " + response.getStatus().getCode() + " on "
                    + (firstcall ? "first" : "second") + " call to: " + uri);
    }

    return response;
}
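The connector retries the call after the authentication handshake and only rewinds the body when the stream allows it; a second attempt with a half-consumed, non-resettable body would otherwise send a truncated request. The same guard, distilled into a sketch (Sender and sendOnce are hypothetical stand-ins for the connector call):

import java.io.IOException;
import java.io.InputStream;

public final class RetryWithReset {
    interface Sender { int sendOnce(InputStream body) throws IOException; } // hypothetical

    /** Retries once after an auth challenge, but only when the body can be rewound. */
    static int callWithOneRetry(Sender sender, InputStream body) throws IOException {
        if (body.markSupported()) {
            body.mark(Integer.MAX_VALUE);   // assumes the body fits within the mark buffer
        }
        int status = sender.sendOnce(body);
        if (status == 401 && body.markSupported()) {
            body.reset();                   // rewind, then try again after re-authenticating
            status = sender.sendOnce(body);
        }
        return status;
    }
}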
From source file:com.amazonaws.http.AmazonHttpClient.java
/**
 * Internal method to execute the HTTP method given.
 *
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler)
 * @see AmazonHttpClient#execute(Request, HttpResponseHandler, HttpResponseHandler, ExecutionContext)
 */
private <T> Response<T> executeHelper(final Request<?> request,
        HttpResponseHandler<AmazonWebServiceResponse<T>> responseHandler,
        HttpResponseHandler<AmazonServiceException> errorResponseHandler, ExecutionContext executionContext)
        throws AmazonClientException, AmazonServiceException {
    /*
     * Depending on which response handler we end up choosing to handle the
     * HTTP response, it might require us to leave the underlying HTTP
     * connection open, depending on whether or not it reads the complete
     * HTTP response stream from the HTTP connection, or if it delays reading
     * any of the content until after a response is returned to the caller.
     */
    boolean leaveHttpConnectionOpen = false;
    AWSRequestMetrics awsRequestMetrics = executionContext.getAwsRequestMetrics();
    /* add the service endpoint to the logs. You can infer service name from service endpoint */
    awsRequestMetrics.addProperty(Field.ServiceName, request.getServiceName());
    awsRequestMetrics.addProperty(Field.ServiceEndpoint, request.getEndpoint());

    // Apply whatever request options we know how to handle, such as user-agent.
    setUserAgent(request);
    int requestCount = 0;
    URI redirectedURI = null;
    HttpEntity entity = null;
    AmazonClientException retriedException = null;

    // Make a copy of the original request params and headers so that we can
    // permute it in this loop and start over with the original every time.
    Map<String, String> originalParameters = new LinkedHashMap<String, String>();
    originalParameters.putAll(request.getParameters());
    Map<String, String> originalHeaders = new HashMap<String, String>();
    originalHeaders.putAll(request.getHeaders());

    final AWSCredentials credentials = executionContext.getCredentials();
    AmazonWebServiceRequest awsreq = request.getOriginalRequest();
    ProgressListener listener = awsreq.getGeneralProgressListener();
    Signer signer = null;

    while (true) {
        ++requestCount;
        awsRequestMetrics.setCounter(Field.RequestCount, requestCount);
        if (requestCount > 1) { // retry
            request.setParameters(originalParameters);
            request.setHeaders(originalHeaders);
        }

        HttpRequestBase httpRequest = null;
        org.apache.http.HttpResponse apacheResponse = null;

        try {
            // Sign the request if a signer was provided
            if (signer == null)
                signer = executionContext.getSignerByURI(request.getEndpoint());
            if (signer != null && credentials != null) {
                awsRequestMetrics.startEvent(Field.RequestSigningTime);
                try {
                    signer.sign(request, credentials);
                } finally {
                    awsRequestMetrics.endEvent(Field.RequestSigningTime);
                }
            }

            if (requestLog.isDebugEnabled()) {
                requestLog.debug("Sending Request: " + request.toString());
            }

            httpRequest = httpRequestFactory.createHttpRequest(request, config, executionContext);

            if (httpRequest instanceof HttpEntityEnclosingRequest) {
                entity = ((HttpEntityEnclosingRequest) httpRequest).getEntity();
            }

            if (redirectedURI != null) {
                httpRequest.setURI(redirectedURI);
            }

            if (requestCount > 1) { // retry
                // Notify the progress listener of the retry
                publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);

                awsRequestMetrics.startEvent(Field.RetryPauseTime);
                try {
                    pauseBeforeNextRetry(request.getOriginalRequest(), retriedException, requestCount,
                            config.getRetryPolicy());
                } finally {
                    awsRequestMetrics.endEvent(Field.RetryPauseTime);
                }
            }

            if (entity != null) {
                InputStream content = entity.getContent();
                if (requestCount > 1) { // retry
                    if (content.markSupported()) {
                        content.reset();
                        content.mark(-1);
                    }
                } else {
                    if (content.markSupported()) {
                        content.mark(-1);
                    }
                }
            }

            captureConnectionPoolMetrics(httpClient.getConnectionManager(), awsRequestMetrics);
            HttpContext httpContext = new BasicHttpContext();
            httpContext.setAttribute(AWSRequestMetrics.class.getSimpleName(), awsRequestMetrics);
            retriedException = null;
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_STARTED_EVENT);
            awsRequestMetrics.startEvent(Field.HttpRequestTime);
            try {
                apacheResponse = httpClient.execute(httpRequest, httpContext);
            } finally {
                awsRequestMetrics.endEvent(Field.HttpRequestTime);
            }
            publishProgress(listener, ProgressEventType.HTTP_REQUEST_COMPLETED_EVENT);

            if (isRequestSuccessful(apacheResponse)) {
                awsRequestMetrics.addProperty(Field.StatusCode,
                        apacheResponse.getStatusLine().getStatusCode());
                /*
                 * If we get back any 2xx status code, then we know we should
                 * treat the service call as successful.
                 */
                leaveHttpConnectionOpen = responseHandler.needsConnectionLeftOpen();
                HttpResponse httpResponse = createResponse(httpRequest, request, apacheResponse);
                T response = handleResponse(request, responseHandler, httpRequest, httpResponse,
                        apacheResponse, executionContext);
                return new Response<T>(response, httpResponse);
            } else if (isTemporaryRedirect(apacheResponse)) {
                /*
                 * S3 sends 307 Temporary Redirects if you try to delete an
                 * EU bucket from the US endpoint. If we get a 307, we'll
                 * point the HTTP method to the redirected location, and let
                 * the next retry deliver the request to the right location.
                 */
                Header[] locationHeaders = apacheResponse.getHeaders("location");
                String redirectedLocation = locationHeaders[0].getValue();
                log.debug("Redirecting to: " + redirectedLocation);
                redirectedURI = URI.create(redirectedLocation);
                httpRequest.setURI(redirectedURI);
                awsRequestMetrics.addProperty(Field.StatusCode,
                        apacheResponse.getStatusLine().getStatusCode());
                awsRequestMetrics.addProperty(Field.RedirectLocation, redirectedLocation);
                awsRequestMetrics.addProperty(Field.AWSRequestID, null);
            } else {
                leaveHttpConnectionOpen = errorResponseHandler.needsConnectionLeftOpen();
                AmazonServiceException ase = handleErrorResponse(request, errorResponseHandler, httpRequest,
                        apacheResponse);
                awsRequestMetrics.addProperty(Field.AWSRequestID, ase.getRequestId());
                awsRequestMetrics.addProperty(Field.AWSErrorCode, ase.getErrorCode());
                awsRequestMetrics.addProperty(Field.StatusCode, ase.getStatusCode());

                if (!shouldRetry(request.getOriginalRequest(), httpRequest, ase, requestCount,
                        config.getRetryPolicy())) {
                    throw ase;
                }

                // Cache the retryable exception
                retriedException = ase;
                /*
                 * Checking for clock skew error again because we don't want to set the
                 * global time offset for every service exception.
                 */
                if (RetryUtils.isClockSkewError(ase)) {
                    int timeOffset = parseClockSkewOffset(apacheResponse, ase);
                    SDKGlobalConfiguration.setGlobalTimeOffset(timeOffset);
                }
                resetRequestAfterError(request, ase);
            }
        } catch (IOException ioe) {
            if (log.isInfoEnabled()) {
                log.info("Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            }
            awsRequestMetrics.incrementCounter(Field.Exception);
            awsRequestMetrics.addProperty(Field.Exception, ioe);
            awsRequestMetrics.addProperty(Field.AWSRequestID, null);

            AmazonClientException ace = new AmazonClientException(
                    "Unable to execute HTTP request: " + ioe.getMessage(), ioe);
            if (!shouldRetry(request.getOriginalRequest(), httpRequest, ace, requestCount,
                    config.getRetryPolicy())) {
                throw ace;
            }

            // Cache the retryable exception
            retriedException = ace;
            resetRequestAfterError(request, ioe);
        } catch (RuntimeException e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } catch (Error e) {
            throw handleUnexpectedFailure(e, awsRequestMetrics);
        } finally {
            /*
             * Some response handlers need to manually manage the HTTP
             * connection and will take care of releasing the connection on
             * their own, but if this response handler doesn't need the
             * connection left open, we go ahead and release it to free
             * up resources.
             */
            if (!leaveHttpConnectionOpen) {
                try {
                    if (apacheResponse != null && apacheResponse.getEntity() != null
                            && apacheResponse.getEntity().getContent() != null) {
                        apacheResponse.getEntity().getContent().close();
                    }
                } catch (IOException e) {
                    log.warn("Cannot close the response content.", e);
                }
            }
        }
    } /* end while (true) */
}