List of usage examples for com.amazonaws.services.s3.AmazonS3Client.shutdown()
public void shutdown()
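All of the examples that follow share one discipline: the client is created, used, and then shut down in a finally block so that its held resources (such as open HTTP connections) are released even when the S3 call throws. A minimal standalone sketch of that pattern, independent of the source files below (the deprecated no-arg constructor is used only to keep the sketch short; credentials and region are resolved from the SDK's default provider chains):

import com.amazonaws.services.s3.AmazonS3Client;

public class ShutdownExample {
    public static void main(String[] args) {
        // Deprecated constructor, used here only for brevity.
        AmazonS3Client s3Client = new AmazonS3Client();
        try {
            // Any S3 call could go here; this one just verifies connectivity.
            System.out.println("Account owner: " + s3Client.getS3AccountOwner().getDisplayName());
        } finally {
            // Release the client's underlying resources once it is no longer needed.
            s3Client.shutdown();
        }
    }
}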
From source file: org.finra.dm.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public void createDirectory(final S3FileTransferRequestParamsDto params) {
    // Create metadata for the directory marker and set content-length to 0 bytes.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    prepareMetadata(params, metadata);

    // Create empty content.
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // Create a PutObjectRequest passing the folder name suffixed by '/'.
    String directoryName = params.getS3KeyPrefix() + (params.getS3KeyPrefix().endsWith("/") ? "" : "/");
    PutObjectRequest putObjectRequest =
        new PutObjectRequest(params.getS3BucketName(), directoryName, emptyContent, metadata);

    AmazonS3Client s3Client = null;

    try {
        s3Client = getAmazonS3(params);
        s3Operations.putObject(putObjectRequest, s3Client);
    } catch (AmazonServiceException e) {
        throw new IllegalStateException(
            String.format("Failed to create 0 byte S3 object with \"%s\" key in bucket \"%s\". Reason: %s",
                directoryName, params.getS3BucketName(), e.getMessage()), e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }
}
From source file: org.finra.dm.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public void deleteFileList(final S3FileTransferRequestParamsDto params) {
    AmazonS3Client s3Client = null;

    LOGGER.info(String.format("Deleting %d keys/objects from s3://%s ...", params.getFiles().size(),
        params.getS3BucketName()));

    try {
        // In order to avoid a MalformedXML AWS exception, we send delete request only when we have any keys to delete.
        if (!params.getFiles().isEmpty()) {
            // Build a list of keys to be deleted.
            List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
            for (File file : params.getFiles()) {
                keys.add(new DeleteObjectsRequest.KeyVersion(file.getPath().replaceAll("\\\\", "/")));
            }

            DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(params.getS3BucketName());
            s3Client = getAmazonS3(params);

            // The Multi-Object Delete request can contain a list of up to 1000 keys. The loop condition also
            // guards against sending an empty (and thus malformed) chunk when the key count is an exact
            // multiple of MAX_KEYS_PER_DELETE_REQUEST.
            for (int i = 0; i * MAX_KEYS_PER_DELETE_REQUEST < keys.size(); i++) {
                List<DeleteObjectsRequest.KeyVersion> keysSubList = keys.subList(i * MAX_KEYS_PER_DELETE_REQUEST,
                    Math.min(keys.size(), (i + 1) * MAX_KEYS_PER_DELETE_REQUEST));
                multiObjectDeleteRequest.setKeys(keysSubList);
                s3Operations.deleteObjects(multiObjectDeleteRequest, s3Client);

                LOGGER.info(String.format(
                    "Successfully requested the deletion of the following %d keys/objects from bucket \"%s\":",
                    keysSubList.size(), params.getS3BucketName()));

                for (DeleteObjectsRequest.KeyVersion keyVersion : keysSubList) {
                    LOGGER.info(String.format("    s3://%s/%s", params.getS3BucketName(), keyVersion.getKey()));
                }
            }
        }
    } catch (Exception e) {
        throw new IllegalStateException(
            String.format("Failed to delete a list of keys/objects from bucket \"%s\". Reason: %s",
                params.getS3BucketName(), e.getMessage()), e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }
}
From source file: org.finra.dm.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public void deleteDirectory(final S3FileTransferRequestParamsDto params) {
    AmazonS3Client s3Client = null;

    LOGGER.info(String.format("Deleting keys/objects from s3://%s/%s ...", params.getS3BucketName(),
        params.getS3KeyPrefix()));

    Assert.hasText(params.getS3KeyPrefix(), "Deleting from root directory is not allowed.");

    try {
        // List S3 objects including any 0 byte objects that represent S3 directories.
        List<StorageFile> storageFiles = listObjectsMatchingKeyPrefix(params, false);

        LOGGER.info(String.format("Found %d keys/objects in s3://%s/%s ...", storageFiles.size(),
            params.getS3BucketName(), params.getS3KeyPrefix()));

        // In order to avoid a MalformedXML AWS exception, we send delete request only when we have any keys to delete.
        if (!storageFiles.isEmpty()) {
            DeleteObjectsRequest multiObjectDeleteRequest = new DeleteObjectsRequest(params.getS3BucketName());
            s3Client = getAmazonS3(params);

            // The Multi-Object Delete request can contain a list of up to 1000 keys. As in deleteFileList(),
            // the loop condition guards against sending an empty chunk when the object count is an exact
            // multiple of MAX_KEYS_PER_DELETE_REQUEST.
            for (int i = 0; i * MAX_KEYS_PER_DELETE_REQUEST < storageFiles.size(); i++) {
                // Prepare a list of S3 object keys to be deleted.
                List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
                for (StorageFile storageFile : storageFiles.subList(i * MAX_KEYS_PER_DELETE_REQUEST,
                    Math.min(storageFiles.size(), (i + 1) * MAX_KEYS_PER_DELETE_REQUEST))) {
                    keys.add(new DeleteObjectsRequest.KeyVersion(storageFile.getFilePath()));
                }

                // Delete the S3 objects.
                multiObjectDeleteRequest.setKeys(keys);
                s3Operations.deleteObjects(multiObjectDeleteRequest, s3Client);

                LOGGER.info(String.format(
                    "Successfully deleted the following %d keys/objects with prefix \"%s\" from bucket \"%s\":",
                    keys.size(), params.getS3KeyPrefix(), params.getS3BucketName()));

                for (DeleteObjectsRequest.KeyVersion keyVersion : keys) {
                    LOGGER.info(String.format("    s3://%s/%s", params.getS3BucketName(), keyVersion.getKey()));
                }
            }
        }
    } catch (AmazonClientException e) {
        throw new IllegalStateException(
            String.format("Failed to delete keys/objects with prefix \"%s\" from bucket \"%s\". Reason: %s",
                params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()), e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }
}
From source file: org.finra.dm.dao.impl.S3DaoImpl.java
License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    AmazonS3Client s3Client = null;
    int abortedMultipartUploadsCount = 0;

    try {
        // Create an Amazon S3 client.
        s3Client = getAmazonS3(params);

        // List upload markers. Null implies initial list request.
        String uploadIdMarker = null;
        String keyMarker = null;

        boolean truncated;

        do {
            // Create the list multipart request, optionally using the last markers.
            ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
            request.setUploadIdMarker(uploadIdMarker);
            request.setKeyMarker(keyMarker);

            // Request the multipart upload listing.
            MultipartUploadListing uploadListing =
                s3Operations.listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);

            for (MultipartUpload upload : uploadListing.getMultipartUploads()) {
                if (upload.getInitiated().compareTo(thresholdDate) < 0) {
                    // Abort the upload.
                    s3Operations.abortMultipartUpload(TransferManager.appendSingleObjectUserAgent(
                        new AbortMultipartUploadRequest(params.getS3BucketName(), upload.getKey(), upload.getUploadId())),
                        s3Client);

                    // Log the information about the aborted multipart upload.
                    LOGGER.info(String.format(
                        "Aborted S3 multipart upload for \"%s\" object key initiated at [%s] in \"%s\" S3 bucket.",
                        upload.getKey(), upload.getInitiated(), params.getS3BucketName()));

                    // Increment the counter.
                    abortedMultipartUploadsCount++;
                }
            }

            // Determine whether there are more uploads to list.
            truncated = uploadListing.isTruncated();

            if (truncated) {
                // Record the "next" list markers. Note: the getNext*() accessors must be used here; the plain
                // getUploadIdMarker()/getKeyMarker() accessors echo the markers sent with the request, which
                // would re-list the same page forever.
                uploadIdMarker = uploadListing.getNextUploadIdMarker();
                keyMarker = uploadListing.getNextKeyMarker();
            }
        }
        while (truncated);
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }

    return abortedMultipartUploadsCount;
}
From source file: org.finra.dm.dao.impl.S3DaoImpl.java
License: Apache License

/**
 * Lists all S3 objects matching the S3 key prefix in the given bucket (S3 bucket name). The S3 bucket name and
 * S3 key prefix that identify the S3 objects to get listed are taken from the S3 file transfer request parameters DTO.
 *
 * @param params the S3 file transfer request parameters
 * @param ignoreZeroByteDirectoryMarkers specifies whether to ignore 0 byte objects that represent S3 directories
 *
 * @return the list of all S3 objects represented as storage files that match the prefix in the given bucket
 */
private List<StorageFile> listObjectsMatchingKeyPrefix(final S3FileTransferRequestParamsDto params,
    boolean ignoreZeroByteDirectoryMarkers) {
    AmazonS3Client s3Client = null;
    List<StorageFile> storageFiles = new ArrayList<>();

    try {
        s3Client = getAmazonS3(params);
        ListObjectsRequest listObjectsRequest =
            new ListObjectsRequest().withBucketName(params.getS3BucketName()).withPrefix(params.getS3KeyPrefix());
        ObjectListing objectListing;

        do {
            objectListing = s3Operations.listObjects(listObjectsRequest, s3Client);

            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                // Ignore 0 byte objects that represent S3 directories.
                if (!(ignoreZeroByteDirectoryMarkers && objectSummary.getKey().endsWith("/")
                    && objectSummary.getSize() == 0L)) {
                    storageFiles.add(new StorageFile(objectSummary.getKey(), objectSummary.getSize(), null));
                }
            }

            listObjectsRequest.setMarker(objectListing.getNextMarker());
        }
        while (objectListing.isTruncated());
    } catch (AmazonS3Exception amazonS3Exception) {
        if (S3Operations.ERROR_CODE_NO_SUCH_BUCKET.equals(amazonS3Exception.getErrorCode())) {
            throw new IllegalArgumentException(
                "The specified bucket '" + params.getS3BucketName() + "' does not exist.", amazonS3Exception);
        }
        throw new IllegalStateException("Error accessing S3", amazonS3Exception);
    } catch (AmazonClientException e) {
        throw new IllegalStateException(
            String.format("Failed to list keys/objects with prefix \"%s\" from bucket \"%s\". Reason: %s",
                params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()), e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }

    return storageFiles;
}
From source file: org.finra.herd.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    // Create an Amazon S3 client.
    AmazonS3Client s3Client = getAmazonS3(params);
    int abortedMultipartUploadsCount = 0;

    try {
        // List upload markers. Null implies initial list request.
        String uploadIdMarker = null;
        String keyMarker = null;

        boolean truncated;

        do {
            // Create the list multipart request, optionally using the last markers.
            ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
            request.setUploadIdMarker(uploadIdMarker);
            request.setKeyMarker(keyMarker);

            // Request the multipart upload listing.
            MultipartUploadListing uploadListing =
                s3Operations.listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);

            for (MultipartUpload upload : uploadListing.getMultipartUploads()) {
                if (upload.getInitiated().compareTo(thresholdDate) < 0) {
                    // Abort the upload.
                    s3Operations.abortMultipartUpload(TransferManager.appendSingleObjectUserAgent(
                        new AbortMultipartUploadRequest(params.getS3BucketName(), upload.getKey(), upload.getUploadId())),
                        s3Client);

                    // Log the information about the aborted multipart upload.
                    LOGGER.info(
                        "Aborted S3 multipart upload. s3Key=\"{}\" s3BucketName=\"{}\" s3MultipartUploadInitiatedDate=\"{}\"",
                        upload.getKey(), params.getS3BucketName(), upload.getInitiated());

                    // Increment the counter.
                    abortedMultipartUploadsCount++;
                }
            }

            // Determine whether there are more uploads to list.
            truncated = uploadListing.isTruncated();

            if (truncated) {
                // Record the list markers.
                uploadIdMarker = uploadListing.getNextUploadIdMarker();
                keyMarker = uploadListing.getNextKeyMarker();
            }
        }
        while (truncated);
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        s3Client.shutdown();
    }

    return abortedMultipartUploadsCount;
}
From source file: org.finra.herd.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public void createDirectory(final S3FileTransferRequestParamsDto params) {
    // Create metadata for the directory marker and set content-length to 0 bytes.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    prepareMetadata(params, metadata);

    // Create empty content.
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // Create a PutObjectRequest passing the folder name suffixed by '/'.
    String directoryName = StringUtils.appendIfMissing(params.getS3KeyPrefix(), "/");
    PutObjectRequest putObjectRequest =
        new PutObjectRequest(params.getS3BucketName(), directoryName, emptyContent, metadata);
    // The KMS key ID is set through prepareMetadata().

    AmazonS3Client s3Client = getAmazonS3(params);

    try {
        s3Operations.putObject(putObjectRequest, s3Client);
    } catch (AmazonServiceException e) {
        throw new IllegalStateException(
            String.format("Failed to create 0 byte S3 object with \"%s\" key in bucket \"%s\". Reason: %s",
                directoryName, params.getS3BucketName(), e.getMessage()), e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        s3Client.shutdown();
    }
}
From source file: org.finra.herd.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public void deleteDirectory(final S3FileTransferRequestParamsDto params) {
    LOGGER.info("Deleting keys/key versions from S3... s3KeyPrefix=\"{}\" s3BucketName=\"{}\"",
        params.getS3KeyPrefix(), params.getS3BucketName());

    Assert.isTrue(!isRootKeyPrefix(params.getS3KeyPrefix()), "Deleting from root directory is not allowed.");

    try {
        // List S3 versions.
        List<S3VersionSummary> s3VersionSummaries = listVersions(params);

        LOGGER.info(
            "Found keys/key versions in S3 for deletion. s3KeyCount={} s3KeyPrefix=\"{}\" s3BucketName=\"{}\"",
            s3VersionSummaries.size(), params.getS3KeyPrefix(), params.getS3BucketName());

        // In order to avoid a MalformedXML AWS exception, we send delete request only when we have any key versions to delete.
        if (CollectionUtils.isNotEmpty(s3VersionSummaries)) {
            // Create an S3 client.
            AmazonS3Client s3Client = getAmazonS3(params);

            // Build a list of objects to be deleted.
            List<DeleteObjectsRequest.KeyVersion> keyVersions = new ArrayList<>();
            for (S3VersionSummary s3VersionSummary : s3VersionSummaries) {
                keyVersions.add(
                    new DeleteObjectsRequest.KeyVersion(s3VersionSummary.getKey(), s3VersionSummary.getVersionId()));
            }

            try {
                // Delete the key versions.
                deleteKeyVersions(s3Client, params.getS3BucketName(), keyVersions);
            } finally {
                s3Client.shutdown();
            }
        }
    } catch (AmazonClientException e) {
        throw new IllegalStateException(
            String.format("Failed to delete keys/key versions with prefix \"%s\" from bucket \"%s\". Reason: %s",
                params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()), e);
    }
}
From source file: org.finra.herd.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public void deleteFileList(final S3FileTransferRequestParamsDto params) {
    LOGGER.info("Deleting a list of objects from S3... s3BucketName=\"{}\" s3KeyCount={}",
        params.getS3BucketName(), params.getFiles().size());

    try {
        // In order to avoid a MalformedXML AWS exception, we send delete request only when we have any keys to delete.
        if (!params.getFiles().isEmpty()) {
            // Create an S3 client.
            AmazonS3Client s3Client = getAmazonS3(params);

            try {
                // Build a list of keys to be deleted.
                List<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
                for (File file : params.getFiles()) {
                    keys.add(new DeleteObjectsRequest.KeyVersion(file.getPath().replaceAll("\\\\", "/")));
                }

                // Delete the keys.
                deleteKeyVersions(s3Client, params.getS3BucketName(), keys);
            } finally {
                s3Client.shutdown();
            }
        }
    } catch (Exception e) {
        throw new IllegalStateException(
            String.format("Failed to delete a list of keys from bucket \"%s\". Reason: %s",
                params.getS3BucketName(), e.getMessage()), e);
    }
}
From source file: org.finra.herd.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public String generateGetObjectPresignedUrl(String bucketName, String key, Date expiration,
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto) {
    GeneratePresignedUrlRequest generatePresignedUrlRequest =
        new GeneratePresignedUrlRequest(bucketName, key, HttpMethod.GET);
    generatePresignedUrlRequest.setExpiration(expiration);

    AmazonS3Client s3 = getAmazonS3(s3FileTransferRequestParamsDto);

    try {
        return s3Operations.generatePresignedUrl(generatePresignedUrlRequest, s3).toString();
    } finally {
        s3.shutdown();
    }
}
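Taken together, the examples repeat the same acquire/use/shutdown boilerplate around every operation. A minimal sketch of how that pattern could be factored into a shared helper, assuming the same S3DaoImpl context as above (getAmazonS3(params) is the class's own client factory; executeWithS3Client is a hypothetical name, not part of the herd codebase; requires java.util.function.Function):

private <T> T executeWithS3Client(S3FileTransferRequestParamsDto params, Function<AmazonS3Client, T> action) {
    // Create a client for this one operation, exactly as the methods above do.
    AmazonS3Client s3Client = getAmazonS3(params);
    try {
        return action.apply(s3Client);
    } finally {
        // Shutdown always runs, whether the action returns or throws.
        s3Client.shutdown();
    }
}

For example, the body of generateGetObjectPresignedUrl() above could then shrink to a single call:

    return executeWithS3Client(s3FileTransferRequestParamsDto,
        s3 -> s3Operations.generatePresignedUrl(generatePresignedUrlRequest, s3).toString());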