Example usage for com.amazonaws.services.s3.model S3Object getObjectContent

Introduction

This page lists example usages of com.amazonaws.services.s3.model.S3Object.getObjectContent().

Prototype

public S3ObjectInputStream getObjectContent() 

Document

Gets the input stream containing the contents of this object.
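
A minimal usage sketch (not taken from the examples below): it assumes an already-configured AmazonS3 client from the 1.x SDK and placeholder bucket and key names. The returned S3ObjectInputStream should be read fully or closed so the underlying HTTP connection is released.

import java.io.IOException;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;

// Minimal sketch; "s3" is an already-configured AmazonS3 client and the
// bucket/key names are placeholders, not values from the examples below.
static void readObject(AmazonS3 s3) throws IOException {
    S3Object object = s3.getObject("example-bucket", "example-key");
    try (S3ObjectInputStream content = object.getObjectContent()) {
        byte[] buffer = new byte[8192];
        int bytesRead;
        while ((bytesRead = content.read(buffer)) != -1) {
            // consume the bytes; a real caller would parse or copy them
        }
    }
}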

Usage

From source file: com.smoketurner.pipeline.application.core.MessageProcessor.java

License: Apache License

/**
 * Stream an {@link S3Object} object and process each line with the
 * processor.
 * 
 * @param object
 *            S3Object to download and process
 * @return number of events processed
 * @throws IOException
 *             if unable to stream the object
 */
private int streamObject(@Nonnull final S3Object object) throws IOException {

    final AtomicInteger eventCount = new AtomicInteger(0);
    try (S3ObjectInputStream input = object.getObjectContent()) {

        final BufferedReader reader;
        if (AmazonS3Downloader.isGZipped(object)) {
            reader = new BufferedReader(
                    new InputStreamReader(new StreamingGZIPInputStream(input), StandardCharsets.UTF_8));
        } else {
            reader = new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8));
        }

        // failed will be true if we did not successfully broadcast all
        // of the events because of no consumers
        final boolean failed = reader.lines().peek(event -> eventCount.incrementAndGet())
                .anyMatch(broadcaster::test);

        if (failed) {
            // abort the current S3 download
            input.abort();
            LOGGER.error("Partial events broadcast ({} sent) from key: {}/{}", eventCount.get(),
                    object.getBucketName(), object.getKey());
            throw new IOException("aborting download");
        }
    }
    return eventCount.get();
}
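
Note that AmazonS3Downloader.isGZipped and StreamingGZIPInputStream above are helpers from the surrounding project, not AWS SDK classes. A rough equivalent using only the JDK and the object's Content-Encoding metadata might look like the following sketch (an approximation, not the project's actual helpers):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;

import com.amazonaws.services.s3.model.S3Object;

// Sketch: pick a plain or gzip-decoding reader based on the object's
// Content-Encoding metadata.
static BufferedReader openReader(S3Object object) throws IOException {
    InputStream raw = object.getObjectContent();
    String encoding = object.getObjectMetadata().getContentEncoding();
    InputStream decoded = "gzip".equalsIgnoreCase(encoding)
            ? new GZIPInputStream(raw)
            : raw;
    return new BufferedReader(new InputStreamReader(decoded, StandardCharsets.UTF_8));
}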

From source file: com.springboot.demo.framework.aws.s3.S3Sample.java

License: Open Source License

public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials basicCredentials = new BasicAWSCredentials(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY);

    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    /*
     * Create S3 Client
     */
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Returns a URL for the object stored in the specified bucket and key
         */
        URL url = s3.getUrl(bucketName, key);
        System.out.println("upload file url : " + url.toString());

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.supprema.utils.S3Sample.java

License: Open Source License

public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [fabiano-user-s3]
     * credential profile by reading from the credentials file located at
     * (/Users/fabianorodriguesmatias/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("fabiano-user-s3").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. "
                        + "Please make sure that your credentials file is at the correct "
                        + "location (/Users/fabianorodriguesmatias/.aws/credentials), and is in valid format.",
                e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.tango.BucketSyncer.KeyJobs.S32GCSKeyCopyJob.java

License: Apache License

boolean keyCopied(ObjectMetadata sourceMetadata) {
    boolean copied = false;
    String key = summary.getKey();
    MirrorOptions options = context.getOptions();
    boolean verbose = options.isVerbose();
    int maxRetries = options.getMaxRetries();
    MirrorStats stats = context.getStats();

    InputStreamContent mediaContent = null;

    for (int tries = 0; tries < maxRetries; tries++) {

        if (verbose) {
            log.info("copying (try # {} ): {} to: {}", new Object[] { tries, key, keydest });
        }

        //get object from S3
        //deal with exception that the object has been deleted when trying to fetch it from S3
        S3Object s3object = null;

        try {
            s3object = s3Client.getObject(new GetObjectRequest(options.getSourceBucket(), key));
        } catch (AmazonServiceException e) {
            log.error("Failed to fetch object from S3. Object {} may have been deleted: {}", key, e);
        } catch (Exception e) {
            log.error("Failed to fetch object from S3. Object {} may have been deleted: {}", key, e);
        }

        if (s3object != null) {

            InputStream inputStream = s3object.getObjectContent();

            String type = s3object.getObjectMetadata().getContentType();
            mediaContent = new InputStreamContent(type, inputStream);

            String etag = s3object.getObjectMetadata().getETag();
            StorageObject objectMetadata = new StorageObject().setMetadata(ImmutableMap.of("Etag", etag));

            Storage.Objects.Insert insertObject = null;
            try {
                insertObject = gcsClient.objects().insert(options.getDestinationBucket(), objectMetadata,
                        mediaContent);
            } catch (IOException e) {
                log.error("Failed to create insertObject of GCS ", e);
            }

            if (insertObject == null) {
                // the GCS insert request could not be built; retry on the next attempt
                // instead of dereferencing a null insertObject below
                continue;
            }

            insertObject.setName(key);

            insertObject.getMediaHttpUploader().setProgressListener(new CustomUploadProgressListener())
                    .setDisableGZipContent(true);

            // For small files, you may wish to call setDirectUploadEnabled(true), to
            // reduce the number of HTTP requests made to the server.

            if (mediaContent.getLength() > 0 && mediaContent.getLength() <= 2 * 1000 * 1000 /* 2MB */) {
                insertObject.getMediaHttpUploader().setDirectUploadEnabled(true);
            }

            try {
                stats.copyCount.incrementAndGet();
                insertObject.execute();
                stats.bytesCopied.addAndGet(sourceMetadata.getContentLength());
                if (verbose)
                    log.info("Successfully copied (on try # {} ): {} to: {} in GCS",
                            new Object[] { tries, key, keydest });
                copied = true;
                break;
            } catch (GoogleJsonResponseException e) {
                if (e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
                    log.error("Failed to access GCS bucket. Check bucket name: ", e);
                    System.exit(1);
                }
            } catch (IOException e) {
                log.error("GCS exception copying (try # {} ) {} to: {} : {}",
                        new Object[] { tries, key, keydest, e });
            }
        }

        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            log.error("interrupted while waiting to retry key: {}, {}", key, e);
            return copied;
        }
    }
    return copied;
}
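
One caveat with the loop above: when the GCS insert fails, the S3Object content stream is never closed, which can hold the underlying HTTP connection open. A hedged sketch of the fetch-and-upload step with automatic cleanup (the method and parameter names are placeholders standing in for the fields used above):

import java.io.IOException;
import java.io.InputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.google.api.client.http.InputStreamContent;
import com.google.api.services.storage.Storage;
import com.google.api.services.storage.model.StorageObject;

// Sketch only: S3Object is Closeable in recent 1.x SDK versions, so
// try-with-resources releases the connection whether or not the insert succeeds.
static void copyOne(AmazonS3 s3Client, Storage gcsClient,
        String sourceBucket, String destinationBucket, String key) throws IOException {
    try (S3Object s3object = s3Client.getObject(new GetObjectRequest(sourceBucket, key));
            InputStream inputStream = s3object.getObjectContent()) {
        InputStreamContent mediaContent = new InputStreamContent(
                s3object.getObjectMetadata().getContentType(), inputStream);
        gcsClient.objects()
                .insert(destinationBucket, new StorageObject().setName(key), mediaContent)
                .execute();
    }
}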

From source file: com.tfnsnproject.util.S3StorageManager.java

License: Open Source License

public InputStream loadInputStream(S3StorageObject s3Store) throws IOException {
    S3Object s3 = getS3Object(s3Store);
    this.lastUpdate = s3.getObjectMetadata().getLastModified();
    return s3.getObjectContent();
}

From source file: com.tfnsnproject.util.S3StorageManager.java

License: Open Source License

/**
 * Loads the raw object data from S3 storage
 *
 * @param s3Store the s3 object to be loaded from the store
 * @return input stream for reading in the raw object
 * @throws java.io.IOException
 */
public InputStream loadStream(S3StorageObject s3Store) throws IOException {
    S3Object obj = getS3Object(s3Store);
    InputStream is = obj.getObjectContent();
    return is;
}
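
Both S3StorageManager helpers hand the raw content stream back to the caller, so the caller owns closing it. A hypothetical call site (the names manager and storageObject are placeholders, not part of the original class):

// Hypothetical caller of the helper above.
try (InputStream in = manager.loadStream(storageObject)) {
    byte[] data = in.readAllBytes(); // Java 9+; older code would use a manual read loop
    // ... use the data ...
}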

From source file: com.topera.epoch.service.S3Util.java

License: Open Source License

public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users) 
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */
    AWSCredentials creds = new AWSCredentials() {

        public String getAWSSecretKey() {
            // TODO Auto-generated method stub
            return "5VVtmI7vcecuVbw8JsG4uo2O1/9RwwLHrTT01Itz";
        }

        public String getAWSAccessKeyId() {
            // TODO Auto-generated method stub
            return "AKIAJCMYALI46A2DIPRQ";
        }
    };

    AmazonS3 s3 = new AmazonS3Client(creds);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.treasure_data.td_import.source.S3Source.java

License: Apache License

@Override
public InputStream getInputStream() throws IOException {
    LOG.info(String.format("get s3 file by client %s: bucket=%s, key=%s", client, bucket, key));
    GetObjectRequest req = new GetObjectRequest(bucket, key);
    req.setRange(0, size);
    S3Object object = client.getObject(req);

    if (object != null) {
        return object.getObjectContent();
    } else {
        throw new IOException("s3 file is null.");
    }
}
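
GetObjectRequest.setRange takes an inclusive byte range, so the call above asks for bytes 0 through size, i.e. one byte past the end of the object (S3 simply returns the full object in that case). A minimal sketch of reading exactly the first n bytes, where client, bucket, key, and n are placeholders:

// Sketch: ranged GET of the first n bytes; the range end is inclusive.
GetObjectRequest request = new GetObjectRequest(bucket, key);
request.setRange(0, n - 1);
try (S3ObjectInputStream in = client.getObject(request).getObjectContent()) {
    // consume the partial content
}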

From source file: com.tremolosecurity.unison.proxy.filters.s3.AwsS3Proxy.java

License: Apache License

@Override
public void doFilter(HttpFilterRequest req, HttpFilterResponse resp, HttpFilterChain chain) throws Exception {
    chain.setNoProxy(true);

    String uri = req.getRequestURI();

    int start = uri.indexOf(this.topBucket);
    if (start == -1) {
        resp.sendError(404);

    } else {
        String bucket = uri.substring(start + 1, uri.lastIndexOf('/'));
        String key = uri.substring(uri.lastIndexOf('/') + 1);

        if (logger.isDebugEnabled()) {
            logger.debug("Bucket - '" + bucket + "', Key - '" + key + "'");
        }

        S3Object object = s3Client.getObject(new GetObjectRequest(bucket, key));
        chain.setIns(object.getObjectContent());
        resp.setContentType(object.getObjectMetadata().getContentType());
    }

}

From source file: com.ub.ml.S3Sample.java

License: Open Source License

public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users) 
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */

    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_EAST_1);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}