Example usage for com.amazonaws.services.s3.model ObjectMetadata setSSEAlgorithm

Introduction

This page shows example usages of the com.amazonaws.services.s3.model.ObjectMetadata method setSSEAlgorithm.

Prototype

@Override
public void setSSEAlgorithm(String algorithm) 

Document

Sets the server-side encryption algorithm when encrypting the object using AWS-managed keys.
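
As a quick orientation before the real-world usages below, here is a minimal, self-contained sketch of the call. The bucket name, key, and file are placeholders; the request asks S3 to encrypt the uploaded object with SSE-S3 (AES-256) by setting the algorithm on the object metadata.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class SseS3UploadExample {
    public static void main(String[] args) {
        final AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Request SSE-S3 (AES-256) server-side encryption for the uploaded object.
        final ObjectMetadata metadata = new ObjectMetadata();
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        // "my-bucket", "my-key", and "data.bin" are placeholders.
        final PutObjectRequest request = new PutObjectRequest("my-bucket", "my-key", new File("data.bin"))
                .withMetadata(metadata);
        s3.putObject(request);
    }
}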

Usage

From source file: io.druid.storage.s3.S3ServerSideEncryption.java

License: Apache License

@Override
public PutObjectRequest decorate(PutObjectRequest request) {
    final ObjectMetadata objectMetadata = request.getMetadata() == null ? new ObjectMetadata()
            : request.getMetadata().clone();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    return request.withMetadata(objectMetadata);
}

From source file: io.druid.storage.s3.S3ServerSideEncryption.java

License: Apache License

@Override
public CopyObjectRequest decorate(CopyObjectRequest request) {
    final ObjectMetadata objectMetadata = request.getNewObjectMetadata() == null ? new ObjectMetadata()
            : request.getNewObjectMetadata().clone();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    return request.withNewObjectMetadata(objectMetadata);
}
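
A hypothetical caller would apply such a decorator just before issuing the request. In this sketch, encryption stands for a configured S3ServerSideEncryption instance and s3 for an AmazonS3 client; those names and the bucket, key, and file are assumptions for illustration.

// Decorate the request so the uploaded object is stored with SSE-S3 encryption.
PutObjectRequest request = new PutObjectRequest("my-bucket", "my-key", new File("segment.zip"));
s3.putObject(encryption.decorate(request));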

From source file: jenkins.plugins.itemstorage.s3.S3BaseUploadCallable.java

License: Open Source License

protected ObjectMetadata buildMetadata(File file) throws IOException {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(Mimetypes.getInstance().getMimetype(file.getName()));
    metadata.setContentLength(file.length());
    metadata.setLastModified(new Date(file.lastModified()));

    if (storageClass != null && !storageClass.isEmpty()) {
        metadata.setHeader("x-amz-storage-class", storageClass);
    }
    if (useServerSideEncryption) {
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    for (Map.Entry<String, String> entry : userMetadata.entrySet()) {
        final String key = entry.getKey().toLowerCase();
        switch (key) {
        case "cache-control":
            metadata.setCacheControl(entry.getValue());
            break;
        case "expires":
            try {
                final Date expires = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss z")
                        .parse(entry.getValue());
                metadata.setHttpExpiresDate(expires);
            } catch (ParseException e) {
                metadata.addUserMetadata(entry.getKey(), entry.getValue());
            }
            break;
        case "content-encoding":
            metadata.setContentEncoding(entry.getValue());
            break;
        case "content-type":
            metadata.setContentType(entry.getValue());
        default:
            metadata.addUserMetadata(entry.getKey(), entry.getValue());
            break;
        }
    }
    return metadata;
}

From source file: ohnosequences.ivy.S3Repository.java

License: Apache License

@Override
protected void put(File source, String destination, boolean overwrite) {
    String bucket = S3Utils.getBucket(destination);
    String key = S3Utils.getKey(destination);
    PutObjectRequest request = new PutObjectRequest(bucket, key, source);
    request = request.withCannedAcl(acl);

    if (serverSideEncryption) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setMetadata(objectMetadata);
    }

    if (!getS3Client().doesBucketExist(bucket)) {
        if (!createBucket(bucket, region)) {
            throw new Error("couldn't create bucket");
        }
    }

    if (!this.overwrite && !getS3Client().listObjects(bucket, key).getObjectSummaries().isEmpty()) {
        throw new Error(destination + " exists but overwriting is disabled");
    }
    getS3Client().putObject(request);

}

From source file: org.apache.beam.sdk.io.aws.s3.S3WritableByteChannel.java

License: Apache License

S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
        throws IOException {
    this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
    this.options = checkNotNull(options);
    this.path = checkNotNull(path, "path");
    checkArgument(
            atMostOne(options.getSSECustomerKey() != null, options.getSSEAlgorithm() != null,
                    options.getSSEAwsKeyManagementParams() != null),
            "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
                    + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
    // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
    checkArgument(options
            .getS3UploadBufferSizeBytes() >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
            "S3UploadBufferSizeBytes must be at least %s bytes",
            S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
    this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
    eTags = new ArrayList<>();

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    if (options.getSSEAlgorithm() != null) {
        objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
    }
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
            .withStorageClass(options.getS3StorageClass()).withObjectMetadata(objectMetadata);
    request.setSSECustomerKey(options.getSSECustomerKey());
    request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());
    InitiateMultipartUploadResult result;
    try {
        result = amazonS3.initiateMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadId = result.getUploadId();
}
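
The checkArgument above encodes a point worth noting: SSE-C, SSE-S3, and SSE-KMS are mutually exclusive, and they are configured in different places. setSSEAlgorithm on the object metadata selects SSE-S3, while SSE-C and SSE-KMS parameters attach to the request itself. A minimal sketch of the two placements, with a placeholder bucket, key, and KMS key id:

// SSE-S3: request AES-256 encryption via the object metadata.
ObjectMetadata sseS3Metadata = new ObjectMetadata();
sseS3Metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
InitiateMultipartUploadRequest sseS3Request =
        new InitiateMultipartUploadRequest("my-bucket", "my-key").withObjectMetadata(sseS3Metadata);

// SSE-KMS: leave the metadata algorithm unset and attach key parameters to the request instead.
InitiateMultipartUploadRequest sseKmsRequest = new InitiateMultipartUploadRequest("my-bucket", "my-key");
sseKmsRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams("alias/my-key"));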

From source file: org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3RequestDecorator.java

License: Apache License

/**
 * Set encryption in {@link PutObjectRequest}
 */
public PutObjectRequest decorate(PutObjectRequest request) {
    switch (getDataEncryption()) {
    case SSE_S3:
        ObjectMetadata metadata = request.getMetadata() == null ? new ObjectMetadata() : request.getMetadata();
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setMetadata(metadata);
        break;
    case NONE:
        break;
    }
    return request;
}

From source file: org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3RequestDecorator.java

License: Apache License

/**
 * Set encryption in {@link CopyObjectRequest}
 */
public CopyObjectRequest decorate(CopyObjectRequest request) {
    switch (getDataEncryption()) {
    case SSE_S3:
        ObjectMetadata metadata = request.getNewObjectMetadata() == null ? new ObjectMetadata()
                : request.getNewObjectMetadata();
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setNewObjectMetadata(metadata);
        break;
    case NONE:
        break;
    }
    return request;
}

From source file: org.apache.nifi.processors.aws.s3.AbstractS3IT.java

License: Apache License

protected void putTestFileEncrypted(String key, File file) throws AmazonS3Exception, FileNotFoundException {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, key, new FileInputStream(file),
            objectMetadata);

    client.putObject(putRequest);
}

From source file: org.apache.nifi.processors.aws.s3.encryption.ServerSideS3EncryptionStrategy.java

License: Apache License

@Override
public void configurePutObjectRequest(PutObjectRequest request, ObjectMetadata objectMetadata,
        String keyValue) {
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then upload the flow file content to S3
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}