List of usage examples for com.amazonaws.services.s3.AmazonS3#listObjects
public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) throws SdkClientException, AmazonServiceException;
Returns a list of summary information about the objects in the specified bucket.
From source file:org.rdswitchboard.importers.browser.s3.App.java
License:Open Source License
public static void main(String[] args) { try {/*ww w .j av a2s .com*/ if (args.length == 0 || StringUtils.isNullOrEmpty(args[0])) throw new Exception("Please provide properties file"); String propertiesFile = args[0]; Properties properties = new Properties(); try (InputStream in = new FileInputStream(propertiesFile)) { properties.load(in); } String source = properties.getProperty("data.source.id"); if (StringUtils.isNullOrEmpty(source)) throw new IllegalArgumentException("Source can not be empty"); System.out.println("Source: " + source); String baseUrl = properties.getProperty("base.url"); if (StringUtils.isNullOrEmpty(baseUrl)) throw new IllegalArgumentException("Base URL can not be empty"); System.out.println("Base URL: " + baseUrl); String sessionId = properties.getProperty("session.id"); if (StringUtils.isNullOrEmpty(sessionId)) throw new IllegalArgumentException("Session Id can not be empty"); System.out.println("Session Id: " + sessionId); String accessKey = properties.getProperty("aws.access.key"); String secretKey = properties.getProperty("aws.secret.key"); String bucket = properties.getProperty("s3.bucket"); if (StringUtils.isNullOrEmpty(bucket)) throw new IllegalArgumentException("AWS S3 Bucket can not be empty"); System.out.println("S3 Bucket: " + bucket); String prefix = properties.getProperty("s3.prefix"); if (StringUtils.isNullOrEmpty(prefix)) throw new IllegalArgumentException("AWS S3 Prefix can not be empty"); System.out.println("S3 Prefix: " + prefix); String crosswalk = properties.getProperty("crosswalk"); Templates template = null; if (!StringUtils.isNullOrEmpty(crosswalk)) { System.out.println("Crosswalk: " + crosswalk); template = TransformerFactory.newInstance() .newTemplates(new StreamSource(new FileInputStream(crosswalk))); } ObjectMapper mapper = new ObjectMapper(); Client client = Client.create(); Cookie cookie = new Cookie("PHPSESSID", properties.getProperty("session")); AmazonS3 s3client; if (!StringUtils.isNullOrEmpty(accessKey) && 
!StringUtils.isNullOrEmpty(secretKey)) { System.out.println( "Connecting to AWS via Access and Secret Keys. This is not safe practice, consider to use IAM Role instead."); AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey); s3client = new AmazonS3Client(awsCredentials); } else { System.out.println("Connecting to AWS via Instance Profile Credentials"); s3client = new AmazonS3Client(new InstanceProfileCredentialsProvider()); } //String file = "rda/rif/class:collection/54800.xml"; ListObjectsRequest listObjectsRequest; ObjectListing objectListing; String file = prefix + "/latest.txt"; S3Object object = s3client.getObject(new GetObjectRequest(bucket, file)); String latest; try (InputStream txt = object.getObjectContent()) { latest = prefix + "/" + IOUtils.toString(txt, StandardCharsets.UTF_8).trim() + "/"; } System.out.println("S3 Repository: " + latest); listObjectsRequest = new ListObjectsRequest().withBucketName(bucket).withPrefix(latest); do { objectListing = s3client.listObjects(listObjectsRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { file = objectSummary.getKey(); System.out.println("Processing file: " + file); object = s3client.getObject(new GetObjectRequest(bucket, file)); String xml = null; if (null != template) { Source reader = new StreamSource(object.getObjectContent()); StringWriter writer = new StringWriter(); Transformer transformer = template.newTransformer(); transformer.transform(reader, new StreamResult(writer)); xml = writer.toString(); } else { InputStream is = object.getObjectContent(); xml = IOUtils.toString(is, ENCODING); } URL url = new URL(baseUrl + "/registry/import/import_s3/"); StringBuilder sb = new StringBuilder(); addParam(sb, "id", source); addParam(sb, "xml", xml); //System.out.println(sb.toString()); WebResource webResource = client.resource(url.toString()); ClientResponse response = webResource .header("User-Agent", "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) 
Gecko/20100101 Firefox/38.0") .accept(MediaType.APPLICATION_JSON, "*/*").acceptLanguage("en-US", "en") .type(MediaType.APPLICATION_FORM_URLENCODED).cookie(cookie) .post(ClientResponse.class, sb.toString()); if (response.getStatus() != 200) { throw new RuntimeException("Failed : HTTP error code : " + response.getStatus()); } String output = response.getEntity(String.class); Result result = mapper.readValue(output, Result.class); if (!result.getStatus().equals("OK")) { System.err.println(result.getMessage()); break; } else System.out.println(result.getMessage()); } listObjectsRequest.setMarker(objectListing.getNextMarker()); } while (objectListing.isTruncated()); } catch (Exception e) { e.printStackTrace(); } }
From source file:org.rdswitchboard.utils.s3.find.App.java
License:Open Source License
public static void main(String[] args) { try {//from w w w .j a va2 s.c o m if (args.length != 2) throw new IllegalArgumentException("Bucket name and search string can not be empty"); String buckey = args[0]; String search = args[1]; String prefix = null; int pos = buckey.indexOf('/'); if (pos > 0) { prefix = buckey.substring(pos + 1); buckey = buckey.substring(0, pos); } AmazonS3 s3client = new AmazonS3Client(new InstanceProfileCredentialsProvider()); // AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider()); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(buckey); if (!StringUtils.isNullOrEmpty(prefix)) listObjectsRequest.setPrefix(prefix); ObjectListing objectListing; do { objectListing = s3client.listObjects(listObjectsRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { String key = objectSummary.getKey(); System.out.println(" - " + key); S3Object object = s3client.getObject(new GetObjectRequest(buckey, key)); String str = IOUtils.toString(object.getObjectContent()); if (str.contains(search)) { System.out.println("Found!"); FileUtils.writeStringToFile(new File("s3/" + key), str); } } listObjectsRequest.setMarker(objectListing.getNextMarker()); } while (objectListing.isTruncated()); } catch (Exception e) { e.printStackTrace(); } }
From source file:org.reswitchboard.utils.s3.access.App.java
License:Open Source License
public static void main(String[] args) { try {//from ww w . j a v a 2s . c o m if (args.length == 0 || StringUtils.isNullOrEmpty(args[0])) throw new IllegalArgumentException("Bucket name can not be empty"); String bucketName = args[0]; String prefix = null; if (args.length > 1) prefix = args[1]; AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider()); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName); if (!StringUtils.isNullOrEmpty(prefix)) listObjectsRequest.setPrefix(prefix); ObjectListing objectListing; do { objectListing = s3client.listObjects(listObjectsRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { String key = objectSummary.getKey(); System.out.println(" - " + key); for (int nAttempt = 1;; ++nAttempt) { try { AccessControlList acl = s3client.getObjectAcl(bucketName, key); List<Grant> grants = acl.getGrantsAsList(); for (Grant grant : grants) { // System.out.println( " Grant: " + grant.toString()); if (grant.getGrantee().equals(GroupGrantee.AllUsers)) { System.out.println(" Revoking public access"); acl.revokeAllPermissions(GroupGrantee.AllUsers); s3client.setObjectAcl(bucketName, key, acl); break; } } break; } catch (Exception e) { System.out.println("Error: " + e.toString()); if (nAttempt >= 10) { throw new Exception("Maximum number of invalid attempts has been reeched"); } // double back-off delay Thread.sleep((long) (Math.pow(2, nAttempt) * 50)); } } } listObjectsRequest.setMarker(objectListing.getNextMarker()); } while (objectListing.isTruncated()); } catch (Exception e) { e.printStackTrace(); } }
From source file:org.xmlsh.aws.gradle.s3.DeleteBucketTask.java
License:BSD License
@TaskAction public void deleteBucket() { // to enable conventionMappings feature String bucketName = getBucketName(); boolean ifExists = isIfExists(); if (bucketName == null) throw new GradleException("bucketName is not specified"); AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class); AmazonS3 s3 = ext.getClient(); if (ifExists == false || exists(s3)) { if (deleteObjects) { getLogger().info("Delete all S3 objects in bucket [{}]", bucketName); ObjectListing objectListing = s3.listObjects(bucketName); while (objectListing.getObjectSummaries().isEmpty() == false) { objectListing.getObjectSummaries().forEach(summary -> { getLogger().info(" => delete s3://{}/{}", bucketName, summary.getKey()); s3.deleteObject(bucketName, summary.getKey()); });//from w w w . java2s . c om objectListing = s3.listNextBatchOfObjects(objectListing); } } s3.deleteBucket(bucketName); getLogger().info("S3 bucket {} is deleted", bucketName); } else { getLogger().debug("S3 bucket {} does not exist", bucketName); } }
From source file:pagerank.S3Wrapper.java
License:Open Source License
public static void main(String[] args) throws IOException { /*//w ww .j av a2 s . c o m * The ProfileCredentialsProvider will return your [default] * credential profile by reading from the credentials file located at * (/home/yupenglu/.aws/credentials). */ AWSCredentials credentials = null; try { credentials = new ProfileCredentialsProvider("default").getCredentials(); } catch (Exception e) { throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (/home/yupenglu/.aws/credentials), and is in valid format.", e); } AmazonS3 s3 = new AmazonS3Client(credentials); // Region usWest2 = Region.getRegion(Regions.US_WEST_2); // s3.setRegion(usWest2); // String bucketName = "my-first-s3-bucket-" + UUID.randomUUID(); String bucketName = "pages4.27"; String key = "NewKey"; System.out.println("==========================================="); System.out.println("Getting Started with Amazon S3"); System.out.println("===========================================\n"); try { /* * Create a new S3 bucket - Amazon S3 bucket names are globally unique, * so once a bucket name has been taken by any user, you can't create * another bucket with that same name. * * You can optionally specify a location for your bucket if you want to * keep your data closer to your applications or users. */ // System.out.println("Creating bucket " + bucketName + "\n"); // s3.createBucket(bucketName); /* * List the buckets in your account */ System.out.println("Listing buckets"); for (Bucket bucket : s3.listBuckets()) { System.out.println(" - " + bucket.getName()); } System.out.println(); /* * Upload an object to your bucket - You can easily upload a file to * S3, or upload directly an InputStream if you know the length of * the data in the stream. 
You can also specify your own metadata * when uploading to S3, which allows you set a variety of options * like content-type and content-encoding, plus additional metadata * specific to your applications. */ // System.out.println("Uploading a new object to S3 from a file\n"); // s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); /* * Download an object - When you download an object, you get all of * the object's metadata and a stream from which to read the contents. * It's important to read the contents of the stream as quickly as * possibly since the data is streamed directly from Amazon S3 and your * network connection will remain open until you read all the data or * close the input stream. * * GetObjectRequest also supports several other options, including * conditional downloading of objects based on modification times, * ETags, and selectively downloading a range of an object. */ // System.out.println("Downloading an object"); // S3Object object = s3.getObject(new GetObjectRequest(bucketName, key)); // System.out.println("Content-Type: " + object.getObjectMetadata().getContentType()); // displayTextInputStream(object.getObjectContent()); /* * List objects in your bucket by prefix - There are many options for * listing the objects in your bucket. Keep in mind that buckets with * many objects might truncate their results when listing their objects, * so be sure to check if the returned object listing is truncated, and * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve * additional results. 
*/ System.out.println("Listing objects"); // ObjectListing objectListing = s3.listObjects(new ListObjectsRequest() // .withBucketName(bucketName) // .withPrefix("My")); ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName)); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { System.out.println(" - " + URLDecoder.decode(objectSummary.getKey(), "UTF-8") + " " + "(size = " + objectSummary.getSize() + ")"); } S3Object testObj = s3.getObject(bucketName, URLEncoder.encode("http://finance.yahoo.com/investing-news/", "UTF-8")); S3ObjectInputStream inputStream = testObj.getObjectContent(); // System.out.println(streamToString(inputStream)); System.out.println(); /* * Delete an object - Unless versioning has been turned on for your bucket, * there is no way to undelete an object, so use caution when deleting objects. */ // System.out.println("Deleting an object\n"); // s3.deleteObject(bucketName, key); /* * Delete a bucket - A bucket must be completely empty before it can be * deleted, so remember to delete any objects from your buckets before * you try to delete them. 
*/ // System.out.println("Deleting bucket " + bucketName + "\n"); // s3.deleteBucket(bucketName); } catch (AmazonServiceException ase) { System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.out.println("Error Message: " + ase.getMessage()); System.out.println("HTTP Status Code: " + ase.getStatusCode()); System.out.println("AWS Error Code: " + ase.getErrorCode()); System.out.println("Error Type: " + ase.getErrorType()); System.out.println("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.out.println("Error Message: " + ace.getMessage()); } }
From source file:S3Controller.DownloadingImages.java
public static void main(String[] args) throws IOException { AWSCredentials credentials = null;/*from w w w .jav a2s.c om*/ String aws_access_key_id = "PUT_YOUR_aws_access_key_id_HERE"; String aws_secret_access_key = "PUT_YOUR_aws_secret_access_key_HERE"; try { credentials = new BasicAWSCredentials(aws_access_key_id, aws_secret_access_key);//.getCredentials(); } catch (Exception e) { throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (~/.aws/credentials), and is in valid format.", e); } AmazonS3 s3 = new AmazonS3Client(credentials); Region AP_SOUTHEAST_1 = Region.getRegion(Regions.AP_SOUTHEAST_1); s3.setRegion(AP_SOUTHEAST_1); String bucketName = "PUT_YOUR_S3-BUCKET-NAME_HERE"; String key = "PUT_YOUR_S3-BUCKET-KEY_HERE"; try { ArrayList arr = new ArrayList(); ArrayList EmailArray = new ArrayList(); Bucket bucket = new Bucket(bucketName); ObjectListing objects = s3.listObjects(bucket.getName()); do { for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) { // System.out.println(objectSummary.getKey() + "\t" + // objectSummary.getSize() + "\t" + // StringUtils.fromDate(objectSummary.getLastModified())); arr.add(objectSummary.getKey()); } objects = s3.listNextBatchOfObjects(objects); } while (objects.isTruncated()); KrakenIOExampleMain kraken = new KrakenIOExampleMain(); for (int i = 0; i < arr.size(); i++) { System.out.println("Compressing: " + arr.get(i)); String s = (String) arr.get(i); GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucket.getName(), s); System.out.println(s3.generatePresignedUrl(request)); URL Glink = s3.generatePresignedUrl(request); String Dlink = Glink.toString(); System.out.println("Download Link:" + Dlink); kraken.Compression(Dlink, bucketName); System.out.println("Compression completed: " + arr.get(i)); EmailArray.add("Processed Image:" + arr.get(i)); } System.out.println("Start 
Emailing list"); EmailSender esender = new EmailSender(); esender.EmailVerification(GetNotificationEmail, EmailArray); System.out.println("Kraken compression completed"); } catch (AmazonServiceException ase) { System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.out.println("Error Message: " + ase.getMessage()); System.out.println("HTTP Status Code: " + ase.getStatusCode()); System.out.println("AWS Error Code: " + ase.getErrorCode()); System.out.println("Error Type: " + ase.getErrorType()); System.out.println("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.out.println("Error Message: " + ace.getMessage()); } catch (ExecutionException ex) { Logger.getLogger(DownloadingImages.class.getName()).log(Level.SEVERE, null, ex); } catch (InterruptedException ex) { Logger.getLogger(DownloadingImages.class.getName()).log(Level.SEVERE, null, ex); } }
From source file:squash.deployment.lambdas.utils.TransferUtils.java
License:Apache License
/** * Sets public read permissions on content within an S3 bucket. * /* w w w . j a v a 2s .c o m*/ * <p>Web content served from an S3 bucket must have public read permissions. * * @param bucketName the bucket to apply the permissions to. * @param prefix prefix within the bucket, beneath which to apply the permissions. * @param logger a CloudwatchLogs logger. */ public static void setPublicReadPermissionsOnBucket(String bucketName, Optional<String> prefix, LambdaLogger logger) { // Ensure newly uploaded content has public read permission ListObjectsRequest listObjectsRequest; if (prefix.isPresent()) { logger.log("Setting public read permission on bucket: " + bucketName + " and prefix: " + prefix.get()); listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get()); } else { logger.log("Setting public read permission on bucket: " + bucketName); listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName); } ObjectListing objectListing; AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client(); do { objectListing = client.listObjects(listObjectsRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { logger.log("Setting permissions for S3 object: " + objectSummary.getKey()); client.setObjectAcl(bucketName, objectSummary.getKey(), CannedAccessControlList.PublicRead); } listObjectsRequest.setMarker(objectListing.getNextMarker()); } while (objectListing.isTruncated()); logger.log("Finished setting public read permissions"); }
From source file:squash.deployment.lambdas.utils.TransferUtils.java
License:Apache License
/** * Adds gzip content-encoding metadata to S3 objects. * /*from w ww .j a va 2s. c om*/ * <p>Adds gzip content-encoding metadata to S3 objects. All objects * beneath the specified prefix (i.e. folder) will have the * metadata added. When the bucket serves objects it will then * add a suitable Content-Encoding header. * * @param bucketName the bucket to apply the metadata to. * @param prefix prefix within the bucket, beneath which to apply the metadata. * @param logger a CloudwatchLogs logger. */ public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix, LambdaLogger logger) { // To add new metadata, we must copy each object to itself. ListObjectsRequest listObjectsRequest; if (prefix.isPresent()) { logger.log("Setting gzip content encoding metadata on bucket: " + bucketName + " and prefix: " + prefix.get()); listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get()); } else { logger.log("Setting gzip content encoding metadata on bucket: " + bucketName); listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName); } ObjectListing objectListing; AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client(); do { objectListing = client.listObjects(listObjectsRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { String key = objectSummary.getKey(); logger.log("Setting metadata for S3 object: " + key); // We must specify ALL metadata - not just the one we're adding. 
ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key); objectMetadata.setContentEncoding("gzip"); CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key) .withNewObjectMetadata(objectMetadata) .withCannedAccessControlList(CannedAccessControlList.PublicRead); client.copyObject(copyObjectRequest); logger.log("Set metadata for S3 object: " + key); } listObjectsRequest.setMarker(objectListing.getNextMarker()); } while (objectListing.isTruncated()); logger.log("Set gzip content encoding metadata on bucket"); }
From source file:squash.deployment.lambdas.utils.TransferUtils.java
License:Apache License
/** * Adds cache-control header to S3 objects. * //from w ww. ja va 2s.c om * <p>Adds cache-control header to S3 objects. All objects * beneath the specified prefix (i.e. folder), and with the * specified extension will have the header added. When the * bucket serves objects it will then add a suitable * Cache-Control header. * * @param headerValue value of the cache-control header * @param bucketName the bucket to apply the header to. * @param prefix prefix within the bucket, beneath which to apply the header. * @param extension file extension to apply header to * @param logger a CloudwatchLogs logger. */ public static void addCacheControlHeader(String headerValue, String bucketName, Optional<String> prefix, String extension, LambdaLogger logger) { // To add new metadata, we must copy each object to itself. ListObjectsRequest listObjectsRequest; if (prefix.isPresent()) { logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName + " and prefix: " + prefix.get() + " and extension: " + extension); listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get()); } else { logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName + " and extension: " + extension); listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName); } ObjectListing objectListing; AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client(); do { objectListing = client.listObjects(listObjectsRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { String key = objectSummary.getKey(); if (!key.endsWith(extension)) { continue; } logger.log("Setting metadata for S3 object: " + key); // We must specify ALL metadata - not just the one we're adding. 
ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key); objectMetadata.setCacheControl(headerValue); CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key) .withNewObjectMetadata(objectMetadata) .withCannedAccessControlList(CannedAccessControlList.PublicRead); client.copyObject(copyObjectRequest); logger.log("Set metadata for S3 object: " + key); } listObjectsRequest.setMarker(objectListing.getNextMarker()); } while (objectListing.isTruncated()); logger.log("Set cache-control metadata on bucket"); }
From source file:sys2202.aws.s3.Sample.java
License:Open Source License
public static void main(String[] args) throws Exception { // create the client we'll use to connect to S3 AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(Regions.US_EAST_1).build(); // list buckets in our S3 account System.out.println("Listing buckets in our S3 account...\n"); for (Bucket bucket : s3.listBuckets()) { System.out.println("\t" + bucket.getName()); }/*from w ww . j a v a 2 s . c om*/ System.out.println(); // create a new bucket to experiment with String bucketName = "msg8u-sys2202-bucket"; // set the bucket name -- this must be unique, so you'll want to use your ID instead of msg8u System.out.println("Creating bucket " + bucketName + "...\n"); s3.createBucket(bucketName); // list buckets in our S3 account System.out.println("Listing buckets in our S3 account...\n"); for (Bucket bucket : s3.listBuckets()) { System.out.println("\t" + bucket.getName()); } System.out.println(); // create and upload a sample file System.out.println("Uploading a new object to S3 from a local file...\n"); File sampleFile = createSampleFile(); String objectKey = "my-test-file"; PutObjectRequest putRequest = new PutObjectRequest(bucketName, objectKey, sampleFile); s3.putObject(putRequest); // list objects in our new bucket -- notice the new object is now present System.out.println("Listing objects in our new bucket...\n"); ListObjectsRequest listRequest = new ListObjectsRequest().withBucketName(bucketName); ObjectListing objectListing = s3.listObjects(listRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { System.out.println("\t" + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")"); } System.out.println(); // download and display the sample file that we just uploaded System.out.println("Downloading the sample file...\n"); GetObjectRequest getRequest = new GetObjectRequest(bucketName, objectKey); S3Object object = s3.getObject(getRequest); displayTextInputStream(object.getObjectContent()); // delete the sample 
file from S3 System.out.println("Deleting the sample file...\n"); s3.deleteObject(bucketName, objectKey); // delete the bucket System.out.println("Deleting the bucket...\n"); s3.deleteBucket(bucketName); System.out.println("All done!"); }