List of usage examples for java.io InputStream markSupported

public boolean markSupported()

Tests if this input stream supports the mark and reset methods.
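Before the library examples, a minimal sketch of the contract (the file name data.bin is illustrative): a plain FileInputStream reports markSupported() == false, while wrapping it in a BufferedInputStream makes mark/reset usable.

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkSupportedDemo {
    public static void main(String[] args) throws IOException {
        // FileInputStream does not support mark/reset; BufferedInputStream
        // adds that capability on top of any stream.
        try (InputStream in = new BufferedInputStream(new FileInputStream("data.bin"))) {
            if (in.markSupported()) {
                in.mark(16);              // remember this position; valid while <= 16 bytes are read
                int first = in.read();    // consume one byte
                in.reset();               // rewind to the marked position
                int again = in.read();    // reads the same byte again
                System.out.println(first == again); // true
            }
        }
    }
}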
From source file: org.exist.collections.MutableCollection.java
@Override
public void store(final Txn transaction, final DBBroker broker, final IndexInfo info,
        final InputSource source)
        throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException {
    storeXMLInternal(transaction, broker, info, storeInfo -> {
        try {
            final InputStream is = source.getByteStream();
            if (is != null && is.markSupported()) {
                is.reset();
            } else {
                final Reader cs = source.getCharacterStream();
                if (cs != null && cs.markSupported()) {
                    cs.reset();
                }
            }
        } catch (final IOException e) {
            // mark is not supported: exception is expected, do nothing
            LOG.debug("InputStream or CharacterStream underlying the InputSource does not support marking "
                    + "and therefore cannot be re-read.");
        }

        final XMLReader reader = getReader(broker, false, storeInfo.getCollectionConfig());
        storeInfo.setReader(reader, null);
        try {
            reader.parse(source);
        } catch (final IOException e) {
            throw new EXistException(e);
        } finally {
            releaseReader(broker, storeInfo, reader);
        }
    });
}
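The method only resets streams that already support marking, because an earlier validation pass has consumed the InputSource once before store() re-parses it. A hedged sketch of how a caller might prepare such a re-readable InputSource (the helper name and file path are illustrative; only JDK and SAX types are used):

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.xml.sax.InputSource;

public class ReReadableSource {
    public static InputSource open(String path) throws IOException {
        // BufferedInputStream guarantees markSupported() == true.
        InputStream is = new BufferedInputStream(new FileInputStream(path));
        // Mark before anyone reads, with a read limit large enough to cover
        // however much the first pass (e.g. validation) will consume.
        is.mark(Integer.MAX_VALUE);
        return new InputSource(is);
    }
}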
From source file: org.docx4j.XmlUtils.java
public static Object unmarshal(InputStream is, JAXBContext jc) throws JAXBException {
    // Guard against XXE
    XMLInputFactory xif = XMLInputFactory.newInstance();
    xif.setProperty(XMLInputFactory.IS_SUPPORTING_EXTERNAL_ENTITIES, false);
    xif.setProperty(XMLInputFactory.SUPPORT_DTD, false); // a DTD is merely ignored, its presence doesn't cause an exception

    XMLStreamReader xsr = null;
    try {
        xsr = xif.createXMLStreamReader(is);
    } catch (XMLStreamException e) {
        throw new JAXBException(e);
    }

    Object o = null;
    Unmarshaller u = jc.createUnmarshaller();
    JaxbValidationEventHandler eventHandler = new JaxbValidationEventHandler();
    // if (is.markSupported()) {
    //     // Only fail hard if we know we can restart
    //     eventHandler.setContinue(false);
    // }
    u.setEventHandler(eventHandler);

    try {
        o = u.unmarshal(xsr);
        return o;
    } catch (UnmarshalException ue) {
        if (ue.getLinkedException() != null && ue.getLinkedException().getMessage().contains("entity")) {
            /*
             * Caused by: javax.xml.stream.XMLStreamException: ParseError at [row,col]:[10,19]
             * Message: The entity "xxe" was referenced, but not declared.
             *   at com.sun.org.apache.xerces.internal.impl.XMLStreamReaderImpl.next(Unknown Source)
             *   at com.sun.xml.internal.bind.v2.runtime.unmarshaller.StAXStreamConnector.bridge(Unknown Source)
             */
            log.error(ue.getMessage(), ue);
            throw ue;
        }

        if (is.markSupported()) {
            // When reading from zip, we use a ByteArrayInputStream,
            // which does support this.
            log.info("encountered unexpected content; pre-processing");
            eventHandler.setContinue(true);
            try {
                Templates mcPreprocessorXslt = JaxbValidationEventHandler.getMcPreprocessor();
                is.reset();
                JAXBResult result = XmlUtils.prepareJAXBResult(jc);
                XmlUtils.transform(new StreamSource(is), mcPreprocessorXslt, null, result);
                return //XmlUtils.unwrap(
                        result.getResult();
            } catch (Exception e) {
                throw new JAXBException("Preprocessing exception", e);
            }
        } else {
            log.error(ue.getMessage(), ue);
            log.error(".. and mark not supported");
            throw ue;
        }
    }
}
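The recovery branch calls is.reset() without a preceding mark(), which works for ByteArrayInputStream because its reset() rewinds to the start (or the last mark) by default. A hedged sketch of a caller that guarantees this precondition; the helper name and path are illustrative, and jc is assumed to be a JAXBContext for the relevant schema:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.JAXBException;
import org.docx4j.XmlUtils;

public class UnmarshalHelper {
    // Buffers the whole part in memory so the stream handed to unmarshal
    // supports mark/reset, enabling the pre-processing retry shown above.
    public static Object unmarshalResettable(Path part, JAXBContext jc)
            throws IOException, JAXBException {
        byte[] bytes = Files.readAllBytes(part);
        InputStream is = new ByteArrayInputStream(bytes); // markSupported() == true
        return XmlUtils.unmarshal(is, jc); // the method shown above
    }
}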
From source file: org.exist.collections.Collection.java
/**
 * Stores an XML document in the database.
 * {@link #validateXMLResourceInternal(org.exist.storage.txn.Txn, org.exist.storage.DBBroker,
 * org.exist.xmldb.XmldbURI, CollectionConfiguration, org.exist.collections.Collection.ValidateBlock)}
 * should have been called previously in order to acquire a write lock for the document.
 * Launches the finish trigger.
 *
 * @param transaction
 * @param broker
 * @param info
 * @param source
 * @param privileged
 *
 * @throws EXistException
 * @throws PermissionDeniedException
 * @throws TriggerException
 * @throws SAXException
 * @throws LockException
 */
public void store(final Txn transaction, final DBBroker broker, final IndexInfo info,
        final InputSource source, boolean privileged)
        throws EXistException, PermissionDeniedException, TriggerException, SAXException, LockException {
    storeXMLInternal(transaction, broker, info, privileged, new StoreBlock() {
        @Override
        public void run() throws EXistException, SAXException {
            try {
                final InputStream is = source.getByteStream();
                if (is != null && is.markSupported()) {
                    is.reset();
                } else {
                    final Reader cs = source.getCharacterStream();
                    if (cs != null && cs.markSupported()) {
                        cs.reset();
                    }
                }
            } catch (final IOException e) {
                // mark is not supported: exception is expected, do nothing
                LOG.debug("InputStream or CharacterStream underlying the InputSource does not support marking "
                        + "and therefore cannot be re-read.");
            }

            final XMLReader reader = getReader(broker, false, info.getCollectionConfig());
            info.setReader(reader, null);
            try {
                reader.parse(source);
            } catch (final IOException e) {
                throw new EXistException(e);
            } finally {
                releaseReader(broker, info, reader);
            }
        }
    });
}
From source file: ee.sk.digidoc.factory.SAXDigiDocFactory.java
/**
 * Checks if this stream could be a bdoc input stream
 *
 * @param is input stream, must support mark() and reset() operations!
 * @return true if bdoc
 */
private boolean isBdocFile(InputStream is) throws DigiDocException {
    try {
        if (is.markSupported())
            is.mark(10);
        byte[] tdata = new byte[10];
        int n = is.read(tdata);
        if (is.markSupported())
            is.reset();
        if (n >= 2 && tdata[0] == (byte) 'P' && tdata[1] == (byte) 'K')
            return true; // probably a zip file
        if (n >= 5 && tdata[0] == (byte) '<' && tdata[1] == (byte) '?' && tdata[2] == (byte) 'x'
                && tdata[3] == (byte) 'm' && tdata[4] == (byte) 'l')
            return false; // an xml file - probably ddoc format?
    } catch (Exception ex) {
        m_logger.error("Error determining file type: " + ex);
    }
    return false;
}
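Because the method rewinds via reset(), the caller can still parse the same stream from the beginning, but only when markSupported() is true. A minimal self-contained sketch of the same peek-and-rewind pattern (class, method, and file names are illustrative, not part of the library):

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MagicSniffer {
    // Peeks at the first two bytes without consuming them.
    static boolean looksLikeZip(InputStream is) throws IOException {
        if (!is.markSupported()) {
            throw new IllegalArgumentException("stream must support mark/reset");
        }
        is.mark(2);                      // remember position; valid for 2 bytes
        int b0 = is.read();
        int b1 = is.read();
        is.reset();                      // rewind so callers see the full stream
        return b0 == 'P' && b1 == 'K';   // zip (and hence bdoc) magic bytes
    }

    public static void main(String[] args) throws IOException {
        try (InputStream is = new BufferedInputStream(new FileInputStream("container.bdoc"))) {
            System.out.println(looksLikeZip(is));
        }
    }
}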
From source file: com.emc.vipr.services.s3.ViPRS3Client.java
/**
 * Executes a (subclass of) PutObjectRequest. In particular, we check for subclasses
 * of the UpdateObjectRequest and inject the value of the Range header. This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 *
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data. "
                + "Stream contents will be buffered in memory and could result in "
                + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        com.amazonaws.event.ProgressReportingInputStream progressReportingInputStream =
                new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        input = progressReportingInputStream; // wire the wrapper in so progress is actually reported
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available. Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload. "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3. "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor,
            com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
From source file: org.regenstrief.util.Util.java
/**
 * Retrieves an InputStream that can be marked and marks it
 *
 * @param in the InputStream
 * @return the markable InputStream
 **/
public static final InputStream getMarkableInputStream(InputStream in) {
    in = in.markSupported() ? in : new BufferedInputStream(in);
    in.mark(MAX_MARK_BUFFER);
    return in;
}
From source file: org.regenstrief.util.Util.java
/**
 * Retrieves an InputStream that can be marked and marks it
 *
 * @param in the InputStream
 * @param buffer the buffer size
 * @return the markable InputStream
 **/
public static final InputStream getMarkableInputStream(InputStream in, final int buffer) {
    in = in.markSupported() ? in : new BufferedInputStream(in);
    in.mark(buffer);
    return in;
}
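A short usage sketch of these helpers, assuming the Util class above is on the classpath (the file name is illustrative): normalize any stream first, then read ahead and reset without caring whether the original source was markable.

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.regenstrief.util.Util;

public class SniffThenParse {
    public static void main(String[] args) throws IOException {
        InputStream raw = new FileInputStream("input.xml"); // markSupported() == false
        // Wraps in BufferedInputStream only when needed, and marks up front.
        InputStream in = Util.getMarkableInputStream(raw, 8192);
        int first = in.read(); // peek at the first byte
        in.reset();            // rewind to the mark set by the helper
        // ... hand 'in' to a parser that needs the full content ...
        in.close();
    }
}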
From source file: com.sina.scs.SCSClient.java
@SuppressWarnings("resource") public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws SCSClientException, SCSServiceException { assertParameterNotNull(putObjectRequest, "The PutObjectRequest parameter must be specified when uploading an object"); String bucketName = putObjectRequest.getBucketName(); String key = putObjectRequest.getKey(); ObjectMetadata metadata = putObjectRequest.getMetadata(); InputStream input = putObjectRequest.getInputStream(); /*// www .java 2 s .c o m * This is compatible with progress listener set by either the legacy * method PutObjectRequest#setProgressListener or the new method * PutObjectRequest#setGeneralProgressListener. */ ProgressListener progressListener = putObjectRequest.getGeneralProgressListener(); ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor .wrapListener(progressListener); if (metadata == null) metadata = new ObjectMetadata(); assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object"); assertParameterNotNull(key, "The key parameter must be specified when uploading an object"); // If a file is specified for upload, we need to pull some additional // information from it to auto-configure a few options if (putObjectRequest.getFile() != null) { File file = putObjectRequest.getFile(); // Always set the content length, even if it's already set metadata.setContentLength(file.length()); // Only set the content type if it hasn't already been set if (metadata.getContentType() == null) { metadata.setContentType(Mimetypes.getInstance().getMimetype(file)); } FileInputStream fileInputStream = null; try { fileInputStream = new FileInputStream(file); byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream); metadata.setContentMD5(BinaryUtils.toBase64(md5Hash)); } catch (Exception e) { throw new SCSClientException("Unable to calculate MD5 hash: " + e.getMessage(), e); } finally { try { fileInputStream.close(); } catch (Exception e) { } } try { input = new RepeatableFileInputStream(file); } catch (FileNotFoundException fnfe) { throw new SCSClientException("Unable to find file to upload", fnfe); } } Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT); if (putObjectRequest.getAccessControlList() != null) { addAclHeaders(request, putObjectRequest.getAccessControlList()); } else if (putObjectRequest.getCannedAcl() != null) { request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString()); } if (putObjectRequest.getStorageClass() != null) { request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass()); } if (putObjectRequest.getRedirectLocation() != null) { request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation()); if (input == null) { input = new ByteArrayInputStream(new byte[0]); } } // Use internal interface to differentiate 0 from unset. if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) { /* * There's nothing we can do except for let the HTTP client buffer * the input stream contents if the caller doesn't tell us how much * data to expect in a stream since we have to explicitly tell * Amazon S3 how much we're sending before we start sending any of * it. */ log.warn("No content length specified for stream data. 
" + "Stream contents will be buffered in memory and could result in " + "out of memory errors."); } if (progressListenerCallbackExecutor != null) { input = new ProgressReportingInputStream(input, progressListenerCallbackExecutor); fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE); } if (!input.markSupported()) { int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE; String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize"); if (bufferSizeOverride != null) { try { streamBufferSize = Integer.parseInt(bufferSizeOverride); } catch (Exception e) { log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride); } } input = new RepeatableInputStream(input, streamBufferSize); } MD5DigestCalculatingInputStream md5DigestStream = null; if (metadata.getContentMD5() == null) { /* * If the user hasn't set the content MD5, then we don't want to * buffer the whole stream in memory just to calculate it. Instead, * we can calculate it on the fly and validate it with the returned * ETag from the object upload. */ try { md5DigestStream = new MD5DigestCalculatingInputStream(input); input = md5DigestStream; } catch (NoSuchAlgorithmException e) { log.warn("No MD5 digest algorithm available. Unable to calculate " + "checksum and verify data integrity.", e); } } if (metadata.getContentType() == null) { /* * Default to the "application/octet-stream" if the user hasn't * specified a content type. */ metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM); } populateRequestMetadata(request, metadata); request.setContent(input); ObjectMetadata returnedMetadata = null; try { returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key); } catch (SCSClientException ace) { fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE); throw ace; } finally { try { input.close(); } catch (Exception e) { log.warn("Unable to cleanly close input stream: " + e.getMessage(), e); } } String contentMd5 = metadata.getContentMD5(); if (md5DigestStream != null) { contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest()); } //?ETag? if (returnedMetadata != null && contentMd5 != null && returnedMetadata.getETag() != null) { byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5); byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag()); if (!Arrays.equals(clientSideHash, serverSideHash)) { fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE); throw new SCSClientException("Unable to verify integrity of data upload. " + "Client calculated content hash didn't match hash calculated by Amazon S3. " + "You may need to delete the data stored in Amazon S3."); } } fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.COMPLETED_EVENT_CODE); PutObjectResult result = new PutObjectResult(); result.setETag(returnedMetadata.getETag()); result.setVersionId(returnedMetadata.getVersionId()); result.setServerSideEncryption(returnedMetadata.getServerSideEncryption()); result.setExpirationTime(returnedMetadata.getExpirationTime()); result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId()); result.setContentMd5(contentMd5); result.setServiceSideKey(returnedMetadata.getServersideKey()); return result; }
From source file: com.sinacloud.scs.services.scs.SCSClient.java
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
        throws SCSClientException, SCSServiceException {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();

    /*
     * This is compatible with progress listener set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new SCSClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new SCSClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    // Storage class and redirect location headers are not set by this client:
    // if (putObjectRequest.getStorageClass() != null) {
    //     request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    // }
    //
    // if (putObjectRequest.getRedirectLocation() != null) {
    //     request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
    //     if (input == null) {
    //         input = new ByteArrayInputStream(new byte[0]);
    //     }
    // }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data. "
                + "Stream contents will be buffered in memory and could result in "
                + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        input = new ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available. Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (SCSClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Verify the returned ETag against the client-side hash.
    if (returnedMetadata != null && contentMd5 != null && returnedMetadata.getETag() != null) {
        byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
        byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

        if (!Arrays.equals(clientSideHash, serverSideHash)) {
            fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.FAILED_EVENT_CODE);
            throw new SCSClientException("Unable to verify integrity of data upload. "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3. "
                    + "You may need to delete the data stored in Amazon S3.");
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, ProgressEvent.COMPLETED_EVENT_CODE);

    PutObjectResult result = new PutObjectResult();
    result.setETag(returnedMetadata.getETag());
    result.setVersionId(returnedMetadata.getVersionId());
    result.setServerSideEncryption(returnedMetadata.getServerSideEncryption());
    result.setExpirationTime(returnedMetadata.getExpirationTime());
    result.setExpirationTimeRuleId(returnedMetadata.getExpirationTimeRuleId());
    result.setContentMd5(contentMd5);
    result.setServiceSideKey(returnedMetadata.getServersideKey());
    return result;
}