List of usage examples for com.amazonaws.util BinaryUtils toBase64
public static String toBase64(byte[] data)
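Before the examples from real projects, a minimal self-contained sketch of the call itself may help: toBase64 turns a raw byte array (here an MD5 digest, the typical input in both examples below) into the Base64 string that S3 expects in the Content-MD5 header. The input string and class name are illustrative only, not from either project.

import java.nio.charset.StandardCharsets;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.Md5Utils;

public class ToBase64Example {
    public static void main(String[] args) throws Exception {
        // Any raw bytes work; an MD5 digest is the typical input, since
        // S3's Content-MD5 header wants the digest Base64-encoded.
        byte[] md5Hash = Md5Utils.computeMD5Hash("hello world".getBytes(StandardCharsets.UTF_8));
        String contentMd5 = BinaryUtils.toBase64(md5Hash);
        System.out.println("Content-MD5: " + contentMd5);
    }
}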
From source file:com.emc.vipr.services.s3.ViPRS3Client.java
License:Open Source License
/**
 * Executes a (subclass of) PutObjectRequest. In particular, we check for subclasses
 * of UpdateObjectRequest and inject the value of the Range header. This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 *
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with a progress listener set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                if (fileInputStream != null)
                    fileInputStream.close();
            } catch (Exception e) {
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream, since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of it.
         */
        log.warn("No content length specified for stream data. "
                + "Stream contents will be buffered in memory and could result in "
                + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        com.amazonaws.event.ProgressReportingInputStream progressReportingInputStream =
                new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        input = progressReportingInputStream;
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }
        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it against the
         * ETag returned from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available. Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/updates (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload. "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3. "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor,
            com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
From source file:org.springframework.integration.aws.s3.core.AbstractAmazonS3Operations.java
License:Apache License
/**
 * The implementation of the core common operations that are performed
 * before and after the subclass does the actual work of putting the object
 * into the given bucket.
 *
 * @param bucketName The bucket to which the object is to be put
 * @param folder The folder to which the object is to be uploaded
 * @param objectName The name of the object in the given bucket
 * @param s3Object The {@link AmazonS3Object} instance that represents the object
 * to be uploaded.
 */
@Override
public final void putObject(String bucketName, String folder, String objectName, AmazonS3Object s3Object) {
    Assert.hasText(bucketName, "null or empty bucketName provided");
    Assert.hasText(objectName, "null or empty object name provided");
    Assert.notNull(s3Object, "null s3 object provided for upload");

    File file = s3Object.getFileSource();
    InputStream in = s3Object.getInputStream();
    Assert.isTrue(file != null ^ in != null,
            "Exactly one of file or input stream needed in the provided s3 object");

    boolean isTempFile = false;
    if (in != null) {
        // We don't know the source of the stream, hence we read the content
        // and write it to a temporary file.
        file = getTempFile(in, bucketName, objectName);
        isTempFile = true;
    }

    String key = getKeyFromFolder(folder, objectName);

    // If the size of the file is greater than the threshold for multipart upload,
    // set the Content-MD5 header for this upload. This header also comes in handy
    // later in the inbound-channel-adapter, where we can't derive the MD5 sum of a
    // multipart-uploaded file from its ETag.
    String stringContentMD5 = BinaryUtils.toBase64(AWSCommonUtils.getContentsMD5AsBytes(file));

    try {
        doPut(bucketName, key, file, s3Object.getObjectACL(), s3Object.getUserMetaData(), stringContentMD5);
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, key,
                "Encountered exception while putting an object, see root cause for more details", e);
    }

    if (isTempFile) {
        // Delete the temp file
        if (logger.isDebugEnabled()) {
            logger.debug("Deleting temp file: " + file.getName());
        }
        boolean deleteSuccessful = file.delete();
        if (!deleteSuccessful) {
            logger.warn("Unable to delete file '" + file.getName() + "'");
        }
    }
}
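For comparison with the Spring Integration adapter above, which hands the Base64 digest to its own doPut, here is a minimal sketch (assuming plain AWS SDK for Java v1 with default credentials) of attaching a toBase64-encoded digest directly to an upload via ObjectMetadata.setContentMD5, so S3 rejects a corrupted upload instead of silently storing bad bytes. The bucket and key names are placeholders.

import java.io.ByteArrayInputStream;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.util.BinaryUtils;
import com.amazonaws.util.Md5Utils;

public class ContentMd5UploadSketch {
    public static void main(String[] args) {
        byte[] payload = "example payload".getBytes();

        // Pre-compute Content-MD5: raw MD5 digest, Base64-encoded.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        metadata.setContentMD5(BinaryUtils.toBase64(Md5Utils.computeMD5Hash(payload)));

        // Hypothetical bucket and key; the client uses the default credential chain.
        AmazonS3 s3 = new AmazonS3Client();
        s3.putObject(new PutObjectRequest("my-bucket", "my-key",
                new ByteArrayInputStream(payload), metadata));
    }
}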