Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

On this page you can find example usage of the PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata) constructor from com.amazonaws.services.s3.model.

Prototype

public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata) 

Document

Constructs a new PutObjectRequest object to upload a stream of data to the specified bucket and key.
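
Before the project examples below, here is a minimal sketch of this constructor in use. The bucket name, key, and payload are placeholder values for illustration, and the client is assumed to resolve credentials from the default provider chain. Setting the content length on the ObjectMetadata lets the SDK stream the upload instead of buffering the whole stream in memory to determine its size.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class PutObjectRequestSketch {
    public static void main(String[] args) {
        // Placeholder bucket and key; replace with real values.
        String bucketName = "my-example-bucket";
        String key = "path/to/object.txt";

        byte[] payload = "hello, s3".getBytes(StandardCharsets.UTF_8);
        InputStream input = new ByteArrayInputStream(payload);

        // Declare the content length up front so the SDK can stream the
        // upload rather than buffering the stream to compute the length.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        metadata.setContentType("text/plain");

        // Credentials come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        PutObjectResult result = s3.putObject(
                new PutObjectRequest(bucketName, key, input, metadata));
        System.out.println("ETag: " + result.getETag());
    }
}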

Usage

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then upload the content, either as a single PUT or as a multipart upload
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw e;
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw e;
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw e;
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw new ProcessException(e);
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw e;
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw e;
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}

From source file: org.apache.storm.s3.output.PutRequestUploader.java

License: Apache License

@Override
public void upload(String bucketName, String name, InputStream input, ObjectMetadata meta) throws IOException {
    client.putObject(new PutObjectRequest(bucketName, name, input, meta));
}

From source file: org.apache.usergrid.apm.util.CrashLogUploadToS3Simulator.java

License: Apache License

public static boolean uploadSimulatedCrashLogsToS3(String appId, String fileName) {
    DeploymentConfig config = DeploymentConfig.geDeploymentConfig();
    AWSCredentials credentials = new BasicAWSCredentials(config.getAccessKey(), config.getSecretKey());
    AmazonS3Client s3Client = new AmazonS3Client(credentials);
    PutObjectRequest putObjectRequest;

    String s3FullFileName = config.getEnvironment() + "/crashlog/" + appId + "/" + fileName;

    try {

        ObjectMetadata metaData = new ObjectMetadata();

        metaData.setHeader(Headers.S3_CANNED_ACL, CannedAccessControlList.AuthenticatedRead);

        putObjectRequest = new PutObjectRequest(config.getS3LogBucket(), s3FullFileName,
                new ByteArrayInputStream(fileName.getBytes("UTF-8")), metaData);
        PutObjectResult result = s3Client.putObject(putObjectRequest);
        return true;

    } catch (UnsupportedEncodingException e) {
        e.printStackTrace();
    }
    return false;
}

From source file: org.apache.usergrid.apm.util.GenerateSimulatedMobileData.java

License: Apache License

public static boolean uploadSimulatedCrashLogsToS3(App app, String fileName) {
    DeploymentConfig config = DeploymentConfig.geDeploymentConfig();
    AWSCredentials credentials = new BasicAWSCredentials(config.getAccessKey(), config.getSecretKey());
    AmazonS3Client s3Client = new AmazonS3Client(credentials);
    PutObjectRequest putObjectRequest;

    String s3FullFileName = config.getEnvironment() + "/" + config.getS3CrashLogFolder() + "/"
            + app.getFullAppName() + "/" + fileName;
    String sampleCrashFileName = null;

    if (fileName.endsWith(".crash")) //it's an iOS crash file
        sampleCrashFileName = "ios-crash-log-example.txt";
    else if (fileName.endsWith(".stacktrace"))
        sampleCrashFileName = "android-crash-log-example.txt";
    try {

        ObjectMetadata metaData = new ObjectMetadata();

        metaData.setHeader(Headers.S3_CANNED_ACL, CannedAccessControlList.AuthenticatedRead);

        InputStream is = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream(sampleCrashFileName);

        putObjectRequest = new PutObjectRequest(config.getS3LogBucket(), s3FullFileName, is, metaData);
        PutObjectResult result = s3Client.putObject(putObjectRequest);
        return true;

    } catch (Exception e) {
        e.printStackTrace();
    }
    return false;
}

From source file: org.apereo.portal.portlets.dynamicskin.storage.s3.AwsS3DynamicSkinService.java

License: Apache License

private PutObjectRequest createPutObjectRequest(final String objectKey, final InputStream inputStream,
        final ObjectMetadata objectMetadata) {
    return new PutObjectRequest(this.awsS3BucketConfig.getBucketName(), objectKey, inputStream, objectMetadata);
}

From source file: org.broadleafcommerce.vendor.amazon.s3.S3FileServiceProvider.java

License: Apache License

protected void addOrUpdateResourcesInternalStreamVersion(S3Configuration s3config, AmazonS3Client s3,
        InputStream inputStream, String fileName, long fileSizeInBytes) {
    final String bucketName = s3config.getDefaultBucketName();

    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(fileSizeInBytes);
    final String resourceName = buildResourceName(s3config, fileName);
    final PutObjectRequest objToUpload = new PutObjectRequest(bucketName, resourceName, inputStream, metadata);

    if ((s3config.getStaticAssetFileExtensionPattern() != null)
            && s3config.getStaticAssetFileExtensionPattern().matcher(getExtension(fileName)).matches()) {
        objToUpload.setCannedAcl(CannedAccessControlList.PublicRead);
    }

    s3.putObject(objToUpload);

    if (LOG.isTraceEnabled()) {
        final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
        final String msg = String.format("%s copied/updated to %s", fileName, s3Uri);

        LOG.trace(msg);
    }
}

From source file: org.clothocad.phagebook.adaptors.S3Adapter.java

private static void createS3Folder(String bucketName, String folderName, AmazonS3 client) {
    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    // create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, folderName + "/", emptyContent,
            metadata); //folder name should be clothoID
    // send request to S3 to create folder
    client.putObject(putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead));

}

From source file: org.dspace.globus.S3Client.java

License: Open Source License

public String createObject(String metadata) {
    UUID key = UUID.randomUUID();
    try {
        byte[] metadataBytes = metadata.getBytes("UTF-8");
        InputStream stream = new ByteArrayInputStream(metadataBytes);
        ObjectMetadata s3ObjectMeta = new ObjectMetadata();
        s3ObjectMeta.setContentLength(metadataBytes.length);
        s3Client.putObject(new PutObjectRequest(bucketName, key.toString(), stream, s3ObjectMeta));
        return key.toString();
    } catch (AmazonServiceException ase) {
        logger.error("AmazonServiceException: " + ase.getMessage());
    } catch (AmazonClientException ace) {
        logger.error("AmazonClientException: " + ace.getMessage());
    } catch (Exception e) {
        logger.error("Exception: " + e.getMessage());
    }
    return null;
}

From source file: org.duracloud.s3storage.S3StorageProvider.java

License: Apache License

/**
 * Adds content to a hidden space.
 *
 * @param spaceId         hidden spaceId
 * @param contentId
 * @param contentMimeType
 * @param content
 * @return
 */
public String addHiddenContent(String spaceId, String contentId, String contentMimeType, InputStream content) {
    log.debug("addHiddenContent(" + spaceId + ", " + contentId + ", " + contentMimeType + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    if (contentMimeType == null || contentMimeType.equals("")) {
        contentMimeType = DEFAULT_MIMETYPE;
    }

    ObjectMetadata objMetadata = new ObjectMetadata();
    objMetadata.setContentType(contentMimeType);

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, contentId, content, objMetadata);
    putRequest.setStorageClass(DEFAULT_STORAGE_CLASS);
    putRequest.setCannedAcl(CannedAccessControlList.Private);

    try {
        PutObjectResult putResult = s3Client.putObject(putRequest);
        return putResult.getETag();
    } catch (AmazonClientException e) {
        String err = "Could not add content " + contentId + " with type " + contentMimeType + " to S3 bucket "
                + bucketName + " due to error: " + e.getMessage();
        throw new StorageException(err, e, NO_RETRY);
    }

}

From source file: org.duracloud.s3storage.S3StorageProvider.java

License: Apache License

/**
 * {@inheritDoc}
 */
public String addContent(String spaceId, String contentId, String contentMimeType,
        Map<String, String> userProperties, long contentSize, String contentChecksum, InputStream content) {
    log.debug("addContent(" + spaceId + ", " + contentId + ", " + contentMimeType + ", " + contentSize + ", "
            + contentChecksum + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    // Wrap the content in order to be able to retrieve a checksum
    ChecksumInputStream wrappedContent = new ChecksumInputStream(content, contentChecksum);

    String contentEncoding = removeContentEncoding(userProperties);

    userProperties = removeCalculatedProperties(userProperties);

    if (contentMimeType == null || contentMimeType.equals("")) {
        contentMimeType = DEFAULT_MIMETYPE;
    }

    ObjectMetadata objMetadata = new ObjectMetadata();
    objMetadata.setContentType(contentMimeType);
    if (contentSize > 0) {
        objMetadata.setContentLength(contentSize);
    }
    if (null != contentChecksum && !contentChecksum.isEmpty()) {
        String encodedChecksum = ChecksumUtil.convertToBase64Encoding(contentChecksum);
        objMetadata.setContentMD5(encodedChecksum);
    }

    if (contentEncoding != null) {
        objMetadata.setContentEncoding(contentEncoding);
    }

    if (userProperties != null) {
        for (String key : userProperties.keySet()) {
            String value = userProperties.get(key);

            if (log.isDebugEnabled()) {
                log.debug("[" + key + "|" + value + "]");
            }

            objMetadata.addUserMetadata(getSpaceFree(encodeHeaderKey(key)), encodeHeaderValue(value));
        }
    }

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, contentId, wrappedContent, objMetadata);
    putRequest.setStorageClass(DEFAULT_STORAGE_CLASS);
    putRequest.setCannedAcl(CannedAccessControlList.Private);

    // Add the object
    String etag;
    try {
        PutObjectResult putResult = s3Client.putObject(putRequest);
        etag = putResult.getETag();
    } catch (AmazonClientException e) {
        if (e instanceof AmazonS3Exception) {
            AmazonS3Exception s3Ex = (AmazonS3Exception) e;
            String errorCode = s3Ex.getErrorCode();
            Integer statusCode = s3Ex.getStatusCode();
            String message = MessageFormat.format(
                    "exception putting object {0} into {1}: errorCode={2},"
                            + "  statusCode={3}, errorMessage={4}",
                    contentId, bucketName, errorCode, statusCode, e.getMessage());

            if (errorCode.equals("InvalidDigest") || errorCode.equals("BadDigest")) {
                log.error(message, e);

                String err = "Checksum mismatch detected attempting to add " + "content " + contentId
                        + " to S3 bucket " + bucketName + ". Content was not added.";
                throw new ChecksumMismatchException(err, e, NO_RETRY);
            } else if (errorCode.equals("IncompleteBody")) {
                log.error(message, e);
                throw new StorageException("The content body was incomplete for " + contentId + " to S3 bucket "
                        + bucketName + ". Content was not added.", e, NO_RETRY);
            } else if (!statusCode.equals(HttpStatus.SC_SERVICE_UNAVAILABLE)
                    && !statusCode.equals(HttpStatus.SC_NOT_FOUND)) {
                log.error(message, e);
            } else {
                log.warn(message, e);
            }
        } else {
            String err = MessageFormat.format("exception putting object {0} into {1}: {2}", contentId,
                    bucketName, e.getMessage());
            log.error(err, e);
        }

        // Check to see if file landed successfully in S3, despite the exception
        etag = doesContentExistWithExpectedChecksum(bucketName, contentId, contentChecksum);
        if (null == etag) {
            String err = "Could not add content " + contentId + " with type " + contentMimeType + " and size "
                    + contentSize + " to S3 bucket " + bucketName + " due to error: " + e.getMessage();
            throw new StorageException(err, e, NO_RETRY);
        }
    }

    // Compare checksum
    String providerChecksum = getETagValue(etag);
    String checksum = wrappedContent.getMD5();
    StorageProviderUtil.compareChecksum(providerChecksum, spaceId, contentId, checksum);
    return providerChecksum;
}