Example usage for com.amazonaws.services.s3.model ObjectMetadata AES_256_SERVER_SIDE_ENCRYPTION

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata AES_256_SERVER_SIDE_ENCRYPTION

Introduction

On this page you can find example usage of com.amazonaws.services.s3.model ObjectMetadata AES_256_SERVER_SIDE_ENCRYPTION, collected from open-source projects.

Prototype

String AES_256_SERVER_SIDE_ENCRYPTION
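
AES_256_SERVER_SIDE_ENCRYPTION is a public static final String constant on ObjectMetadata whose value is the SSE algorithm name "AES256". The following minimal sketch shows the pattern used throughout the examples below: set the algorithm on an ObjectMetadata instance and attach it to the upload request. The client, bucket, key, and file are placeholders, not taken from any project quoted on this page.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class Sse3PutExample {
    /**
     * Uploads a file and asks S3 to encrypt it at rest with AES-256 (SSE-S3).
     * The client, bucket, key, and file are placeholder arguments for this sketch.
     */
    public static void putWithSse(AmazonS3 s3, String bucket, String key, File file) {
        ObjectMetadata metadata = new ObjectMetadata();
        // Request server-side encryption with S3-managed keys; the constant resolves to "AES256".
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        PutObjectRequest request = new PutObjectRequest(bucket, key, file);
        request.setMetadata(metadata);

        s3.putObject(request);
    }
}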

Usage

From source file: org.apache.zeppelin.notebook.repo.S3NotebookRepo.java

License: Apache License

@Override
public void save(Note note, AuthenticationInfo subject) throws IOException {
    String json = note.toJson();
    String key = rootFolder + "/" + buildNoteFileName(note);
    File file = File.createTempFile("note", "zpln");
    try {
        Writer writer = new OutputStreamWriter(new FileOutputStream(file));
        writer.write(json);
        writer.close();
        PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file);
        if (useServerSideEncryption) {
            // Request server-side encryption.
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putRequest.setMetadata(objectMetadata);
        }
        s3client.putObject(putRequest);
    } catch (AmazonClientException ace) {
        throw new IOException("Unable to store note in S3: " + ace, ace);
    } finally {
        FileUtils.deleteQuietly(file);
    }
}

From source file: org.elasticsearch.cloud.aws.blobstore.DefaultS3OutputStream.java

License: Apache License

protected void doUpload(S3BlobStore blobStore, String bucketName, String blobName, InputStream is, int length,
        boolean serverSideEncryption) throws AmazonS3Exception {
    ObjectMetadata md = new ObjectMetadata();
    if (serverSideEncryption) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    md.setContentLength(length);
    blobStore.client().putObject(bucketName, blobName, is, md);
}

From source file: org.elasticsearch.cloud.aws.blobstore.DefaultS3OutputStream.java

License: Apache License

protected String doInitialize(S3BlobStore blobStore, String bucketName, String blobName,
        boolean serverSideEncryption) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, blobName);
    if (serverSideEncryption) {
        ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(md);
    }
    return blobStore.client().initiateMultipartUpload(request).getUploadId();
}

From source file: org.elasticsearch.cloud.aws.blobstore.S3BlobContainer.java

License: Apache License

@Override
public void move(String sourceBlobName, String targetBlobName) throws IOException {
    try {
        CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName),
                blobStore.bucket(), buildKey(targetBlobName));

        if (blobStore.serverSideEncryption()) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setNewObjectMetadata(objectMetadata);
        }
        blobStore.client().copyObject(request);
        blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName));
    } catch (AmazonS3Exception e) {
        throw new IOException(e);
    }
}

From source file: org.elasticsearch.cloud.aws.blobstore.S3ImmutableBlobContainer.java

License: Apache License

@Override
public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes,
        final WriterListener listener) {
    blobStore.executor().execute(new Runnable() {
        @Override
        public void run() {
            try {
                ObjectMetadata md = new ObjectMetadata();
                if (blobStore.serverSideEncryption()) {
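                    // Equivalent older setter; the other examples on this page use setSSEAlgorithm(...).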
                    md.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                }
                md.setContentLength(sizeInBytes);
                PutObjectResult objectResult = blobStore.client().putObject(blobStore.bucket(),
                        buildKey(blobName), is, md);
                listener.onCompleted();
            } catch (Exception e) {
                listener.onFailure(e);
            }
        }
    });
}

From source file: org.elasticsearch.repositories.s3.DefaultS3OutputStream.java

License: Apache License

protected void doUpload(S3BlobStore blobStore, String bucketName, String blobName, InputStream is, int length,
        boolean serverSideEncryption) throws AmazonS3Exception {
    ObjectMetadata md = new ObjectMetadata();
    if (serverSideEncryption) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    md.setContentLength(length);

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, blobName, is, md)
            .withStorageClass(blobStore.getStorageClass()).withCannedAcl(blobStore.getCannedACL());
    blobStore.client().putObject(putRequest);

}

From source file: org.elasticsearch.repositories.s3.DefaultS3OutputStream.java

License: Apache License

protected String doInitialize(S3BlobStore blobStore, String bucketName, String blobName,
        boolean serverSideEncryption) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, blobName)
            .withCannedACL(blobStore.getCannedACL()).withStorageClass(blobStore.getStorageClass());

    if (serverSideEncryption) {
        ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(md);
    }

    return blobStore.client().initiateMultipartUpload(request).getUploadId();
}

From source file: org.elasticsearch.repositories.s3.S3BlobContainer.java

License: Apache License

@Override
public void move(String sourceBlobName, String targetBlobName) throws IOException {
    try {
        CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName),
                blobStore.bucket(), buildKey(targetBlobName));

        if (blobStore.serverSideEncryption()) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setNewObjectMetadata(objectMetadata);
        }

        SocketAccess.doPrivilegedVoid(() -> {
            blobStore.client().copyObject(request);
            blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName));
        });

    } catch (AmazonS3Exception e) {
        throw new IOException(e);
    }
}

From source file: org.finra.dm.dao.impl.S3DaoImpl.java

License: Apache License

/**
 * Prepares the object metadata for server side encryption and reduced redundancy storage.
 *
 * @param params the parameters.
 * @param metadata the metadata to prepare.
 */
private void prepareMetadata(final S3FileTransferRequestParamsDto params, ObjectMetadata metadata) {
    // Set the server side encryption
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

    // If specified, set the metadata to use RRS.
    if (Boolean.TRUE.equals(params.getUseRrs())) {
        // TODO: For upload File, we can set RRS on the putObjectRequest.  For uploadDirectory, this is the only
        // way to do it.  However, setHeader() is flagged as For Internal Use Only
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString());
    }
}

From source file: org.finra.herd.dao.impl.S3DaoImpl.java

License: Apache License

@Override
public S3FileTransferResultsDto copyFile(final S3FileCopyRequestParamsDto params) throws InterruptedException {
    LOGGER.info(
            "Copying S3 object... sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\"",
            params.getSourceObjectKey(), params.getSourceBucketName(), params.getTargetObjectKey(),
            params.getTargetBucketName());

    // Perform the copy.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Create a copy request.
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(params.getSourceBucketName(),
                    params.getSourceObjectKey(), params.getTargetBucketName(), params.getTargetObjectKey());

            // If KMS Key ID is specified, set the AWS Key Management System parameters to be used to encrypt the object.
            if (StringUtils.isNotBlank(params.getKmsKeyId())) {
                copyObjectRequest
                        .withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(params.getKmsKeyId()));
            }
            // Otherwise, specify the server-side encryption algorithm for encrypting the object using AWS-managed keys.
            else {
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                copyObjectRequest.setNewObjectMetadata(metadata);
            }

            return s3Operations.copyFile(copyObjectRequest, transferManager);
        }
    });

    LOGGER.info(
            "Copied S3 object. sourceS3Key=\"{}\" sourceS3BucketName=\"{}\" targetS3Key=\"{}\" targetS3BucketName=\"{}\" "
                    + "totalBytesTransferred={} transferDuration=\"{}\"",
            params.getSourceObjectKey(), params.getSourceBucketName(), params.getTargetObjectKey(),
            params.getTargetBucketName(), results.getTotalBytesTransferred(),
            HerdDateUtils.formatDuration(results.getDurationMillis()));

    logOverallTransferRate(results);

    return results;
}
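
All of the examples above set the algorithm before the object is written or copied. As a quick check after an upload, the same constant can be compared against the algorithm that S3 reports for the stored object. A minimal sketch, again assuming an already-configured AmazonS3 client and placeholder bucket and key names:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class SseCheckExample {
    /** Returns true if the stored object reports AES-256 (SSE-S3) server-side encryption. */
    public static boolean isAes256Encrypted(AmazonS3 s3, String bucket, String key) {
        ObjectMetadata metadata = s3.getObjectMetadata(bucket, key);
        // getSSEAlgorithm() returns null when the object was stored without server-side encryption.
        return ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION.equals(metadata.getSSEAlgorithm());
    }
}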