Example usage for com.amazonaws AmazonServiceException getStatusCode

List of usage examples for com.amazonaws AmazonServiceException getStatusCode

Introduction

On this page you can find example usage of com.amazonaws AmazonServiceException getStatusCode.

Prototype

public int getStatusCode() 

Source Link

Document

Returns the HTTP status code that was returned with this service exception.

Usage

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

/**
 * Deletes the object backing {@code path} from the bucket.
 *
 * @param hostName host/scheme prefix to strip from the path to obtain the object key
 * @param path path of the object to delete
 * @param recursive unused by this implementation
 * @return true if the object was deleted, false if it did not exist (404)
 * @throws IOException wrapping any service error other than 404
 */
@Override
public boolean delete(String hostName, Path path, boolean recursive) throws IOException {
    String obj = path.toString();
    // Strip the host prefix so only the object key remains.
    if (path.toString().startsWith(hostName)) {
        obj = path.toString().substring(hostName.length());
    }
    LOG.debug("Object name to delete {}. Path {}", obj, path.toString());
    try {
        mClient.deleteObject(new DeleteObjectRequest(mBucket, obj));
        memoryCache.removeFileStatus(path.toString());
        return true;
    } catch (AmazonServiceException e) {
        // 404 means the object is already gone; anything else is a real failure.
        if (e.getStatusCode() != 404) {
            throw new IOException(e);
        }
    }
    // Bug fix: the message template had a {} placeholder but no argument was supplied.
    LOG.warn("Delete on {} not found. Nothing to delete", path.toString());
    return false;
}

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

/**
 * Optionally purges stale multipart uploads from the bucket at startup.
 * Controlled by the PURGE_EXISTING_MULTIPART flag; uploads older than
 * PURGE_EXISTING_MULTIPART_AGE (seconds) are aborted.
 *
 * @param conf configuration to read the purge flag and age from
 * @throws IOException if the abort fails for a reason other than 403
 */
private void initMultipartUploads(Configuration conf) throws IOException {
    boolean purgeExistingMultipart = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, PURGE_EXISTING_MULTIPART,
            DEFAULT_PURGE_EXISTING_MULTIPART);
    long purgeExistingMultipartAge = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, PURGE_EXISTING_MULTIPART_AGE,
            DEFAULT_PURGE_EXISTING_MULTIPART_AGE);

    if (purgeExistingMultipart) {
        // Age is configured in seconds; Date works in milliseconds.
        Date purgeBefore = new Date(new Date().getTime() - purgeExistingMultipartAge * 1000);

        try {
            transfers.abortMultipartUploads(mBucket, purgeBefore);
        } catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403) {
                // 403: not permitted to abort uploads; treat as benign (FS may be read-only).
                // Message fix: was the ungrammatical "Failed to purging multipart uploads".
                LOG.debug("Failed to purge multipart uploads against {}," + " FS may be read only", mBucket,
                        e);
            } else {
                throw translateException("purging multipart uploads", mBucket, e);
            }
        }
    }
}

From source file:com.ibm.stocator.fs.cos.COSUtils.java

License:Apache License

/**
 * Translate an exception raised in an operation into an IOException. The
 * specific type of IOException depends on the class of
 * {@link AmazonClientException} passed in, and any status codes included in
 * the operation. That is: HTTP error codes are examined and can be used to
 * build a more specific response./*from   w ww  .  ja  va 2  s  . c  om*/
 *
 * @param operation operation
 * @param path path operated on (may be null)
 * @param exception amazon exception raised
 * @return an IOE which wraps the caught exception
 */
@SuppressWarnings("ThrowableInstanceNeverThrown")
public static IOException translateException(String operation, String path, AmazonClientException exception) {
    String message = String.format("%s%s: %s", operation, path != null ? (" on " + path) : "", exception);
    if (!(exception instanceof AmazonServiceException)) {
        if (containsInterruptedException(exception)) {
            return (IOException) new InterruptedIOException(message).initCause(exception);
        }
        return new COSClientIOException(message, exception);
    } else {

        IOException ioe;
        AmazonServiceException ase = (AmazonServiceException) exception;
        // this exception is non-null if the service exception is an COS one
        AmazonS3Exception s3Exception = ase instanceof AmazonS3Exception ? (AmazonS3Exception) ase : null;
        int status = ase.getStatusCode();
        switch (status) {

        case 301:
            if (s3Exception != null) {
                if (s3Exception.getAdditionalDetails() != null
                        && s3Exception.getAdditionalDetails().containsKey(ENDPOINT_KEY)) {
                    message = String.format(
                            "Received permanent redirect response to "
                                    + "endpoint %s.  This likely indicates that the COS endpoint "
                                    + "configured in %s does not match the region containing " + "the bucket.",
                            s3Exception.getAdditionalDetails().get(ENDPOINT_KEY), ENDPOINT_URL);
                }
                ioe = new COSIOException(message, s3Exception);
            } else {
                ioe = new COSServiceIOException(message, ase);
            }
            break;
        // permissions
        case 401:
        case 403:
            ioe = new AccessDeniedException(path, null, message);
            ioe.initCause(ase);
            break;

        // the object isn't there
        case 404:
        case 410:
            ioe = new FileNotFoundException(message);
            ioe.initCause(ase);
            break;

        // out of range. This may happen if an object is overwritten with
        // a shorter one while it is being read.
        case 416:
            ioe = new EOFException(message);
            break;

        default:
            // no specific exit code. Choose an IOE subclass based on the class
            // of the caught exception
            ioe = s3Exception != null ? new COSIOException(message, s3Exception)
                    : new COSServiceIOException(message, ase);
            break;
        }
        return ioe;
    }
}

From source file:com.imos.sample.S3SampleCheck.java

License:Open Source License

/**
 * Demonstrates basic Amazon S3 usage: uploads a sample object, downloads it
 * back, and lists the bucket contents. Uses the [default] credential profile.
 *
 * @param args unused
 * @throws IOException if displaying the downloaded object content fails
 */
public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (/home/alok/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/alok/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_1);
    s3.setRegion(usWest2);

    // Fix: define the bucket name once; previously "alok-test" was also
    // hard-coded at the getObject and listObjects call sites, inviting drift.
    String bucketName = "alok-test";
    String key = "sample.json";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        System.out.println();

        /*
         * Upload an object to your bucket - you can upload a file or an
         * InputStream, and attach your own metadata (content-type,
         * content-encoding, application-specific entries).
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - read the returned stream promptly and close it;
         * the data is streamed directly from Amazon S3 and the network
         * connection stays open until the content is fully consumed or closed.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket - listings over buckets with many
         * objects may be truncated; check for truncation and use
         * AmazonS3.listNextBatchOfObjects(...) to page through the rest.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
                .withBucketName(bucketName));
        objectListing.getObjectSummaries().forEach((objectSummary) -> {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        });
        System.out.println();
    } catch (AmazonServiceException ase) {
        // The request reached S3 but was rejected with an error response.
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // The client failed before reaching S3 (e.g. no network access).
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.intuit.s3encrypt.S3Encrypt.java

License:Open Source License

/**
 * Command-line entry point for S3Encrypt. Parses the options, loads the
 * encryption key file, builds an encrypting S3 client, and dispatches to
 * the requested bucket/object operation.
 *
 * @param args command-line arguments
 * @throws IOException if key file I/O fails
 * @throws NoSuchAlgorithmException if the key algorithm is unavailable
 * @throws InvalidKeySpecException if the key material is invalid
 */
public static void main(String[] args) throws IOException, NoSuchAlgorithmException, InvalidKeySpecException {

    // create Options object
    Options options = new Options();
    options.addOption(create_bucket);
    options.addOption(create_key);
    options.addOption(delete_bucket);
    options.addOption(get);
    options.addOption(help);
    options.addOption(inspect);
    options.addOption(keyfile);
    options.addOption(list_buckets);
    options.addOption(list_objects);
    options.addOption(put);
    options.addOption(remove);
    options.addOption(rotate);
    options.addOption(rotateall);
    options.addOption(rotateKey);

    // PosixParser (rather than GnuParser) is used because it allows
    // multiple arguments per option.
    PosixParser parser = new PosixParser();
    CommandLine cmd;
    try {
        cmd = parser.parse(options, args);
        Logger.getRootLogger().setLevel(Level.OFF);

        if (cmd.hasOption("help")) {
            HelpFormatter help = new HelpFormatter();
            System.out.println();
            help.printHelp("S3Encrypt", options);
            System.out.println();
            System.exit(1);
        } else if (cmd.hasOption("create_key")) {
            keyname = cmd.getOptionValue("keyfile");
            createKeyFile(keyname);
            key = new File(keyname);
        } else {
            if (cmd.hasOption("keyfile")) {
                keyname = cmd.getOptionValue("keyfile");
            }
            key = new File(keyname);
        }

        if (!(key.exists())) {
            System.out.println("Key does not exist or not provided");
            System.exit(1);
        }

        // Credentials come from the .s3encrypt properties file on the classpath.
        ClasspathPropertiesFileCredentialsProvider credentials = new ClasspathPropertiesFileCredentialsProvider(
                ".s3encrypt");
        EncryptionMaterials encryptionMaterials = new EncryptionMaterials(getKeyFile(keyname));
        AmazonS3EncryptionClient s3 = new AmazonS3EncryptionClient(credentials.getCredentials(),
                encryptionMaterials);

        if (cmd.hasOption("create_bucket")) {
            String bucket = cmd.getOptionValue("create_bucket");
            System.out.println("Creating bucket " + bucket + "\n");
            s3.createBucket(bucket);
        } else if (cmd.hasOption("delete_bucket")) {
            String bucket = cmd.getOptionValue("delete_bucket");
            System.out.println("Deleting bucket " + bucket + "\n");
            s3.deleteBucket(bucket);
        } else if (cmd.hasOption("get")) {
            String[] searchArgs = cmd.getOptionValues("get");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            getS3Object(cmd, s3, bucket, filename);
        } else if (cmd.hasOption("inspect")) {
            String[] searchArgs = cmd.getOptionValues("inspect");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            // Renamed from "keyname" to stop shadowing the static keyname field.
            String metadataKeyname = "encryption_key";
            String metadata = inspectS3Object(cmd, s3, bucket, filename, metadataKeyname);
            System.out.println(metadata);
        } else if (cmd.hasOption("list_buckets")) {
            System.out.println("Listing buckets");
            for (Bucket bucket : s3.listBuckets()) {
                System.out.println(bucket.getName());
            }
            System.out.println();
        } else if (cmd.hasOption("list_objects")) {
            String bucket = cmd.getOptionValue("list_objects");
            System.out.println("Listing objects");
            ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucket));
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                System.out.println(objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
            }
            System.out.println();
        } else if (cmd.hasOption("put")) {
            String[] searchArgs = cmd.getOptionValues("put");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            String metadataKeyname = "encryption_key";
            // Renamed from "key" to stop shadowing the static File key field.
            String encryptionKey = keyname;
            putS3Object(cmd, s3, bucket, filename, metadataKeyname, encryptionKey);
        } else if (cmd.hasOption("remove")) {
            String[] searchArgs = cmd.getOptionValues("remove");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            System.out.println("Removing object in S3 from BUCKET = " + bucket + " FILENAME = " + filename);
            s3.deleteObject(new DeleteObjectRequest(bucket, filename));
            System.out.println();
        } else if (cmd.hasOption("rotate")) {
            String[] searchArgs = cmd.getOptionValues("rotate");
            String bucket = searchArgs[0];
            String filename = searchArgs[1];
            String key1 = cmd.getOptionValue("keyfile");
            String key2 = cmd.getOptionValue("rotateKey");
            String metadataKeyname = "encryption_key";
            System.out.println("Supposed to get object from here OPTION VALUE = " + bucket + " FILENAME = "
                    + filename + " KEY1 = " + key1 + " KEY2 = " + key2);

            // Rotation = download with the old key, re-upload with the new key.
            EncryptionMaterials rotateEncryptionMaterials = new EncryptionMaterials(getKeyFile(key2));
            AmazonS3EncryptionClient rotateS3 = new AmazonS3EncryptionClient(credentials.getCredentials(),
                    rotateEncryptionMaterials);

            getS3Object(cmd, s3, bucket, filename);
            putS3Object(cmd, rotateS3, bucket, filename, metadataKeyname, key2);
        } else if (cmd.hasOption("rotateall")) {
            String[] searchArgs = cmd.getOptionValues("rotateall");
            String bucket = searchArgs[0];
            String key1 = searchArgs[1];
            String key2 = searchArgs[2];
            System.out.println("Supposed to rotateall here for BUCKET NAME = " + bucket + " KEY1 = " + key1
                    + " KEY2 = " + key2);
        } else {
            System.out.println("Something went wrong... ");
            System.exit(1);
        }

    } catch (ParseException e) {
        e.printStackTrace();
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

}

From source file:com.ipcglobal.awscdh.config.ManageEc2.java

License:Apache License

/**
 * Start EC2 Instances./*from   w w  w  .j  a  va2s .c  om*/
 *
 * @throws Exception the exception
 */
public void start() throws Exception {
    try {
        List<Instance> instances = findInstances();
        startAllInstances(instances);

    } catch (AmazonServiceException ase) {
        log.error("Caught Exception: " + ase.getMessage());
        log.error("Reponse Status Code: " + ase.getStatusCode());
        log.error("Error Code: " + ase.getErrorCode());
        log.error("Request ID: " + ase.getRequestId());
        throw ase;
    } catch (AmazonClientException ace) {
        log.error("Error Message: " + ace.getMessage());
        throw ace;
    } catch (Exception e) {
        log.error(e);
        throw e;
    }
}

From source file:com.ipcglobal.awscdh.config.ManageEc2.java

License:Apache License

/**
 * Stop EC2 Instances./*from  w ww .j a va2 s . c  o  m*/
 *
 * @throws Exception the exception
 */
public void stop() throws Exception {
    try {
        List<Instance> instances = findInstances();
        stopAllInstances(instances);

    } catch (AmazonServiceException ase) {
        log.error("Caught Exception: " + ase.getMessage());
        log.error("Reponse Status Code: " + ase.getStatusCode());
        log.error("Error Code: " + ase.getErrorCode());
        log.error("Request ID: " + ase.getRequestId());
        throw ase;
    } catch (AmazonClientException ace) {
        log.error("Error Message: " + ace.getMessage());
        throw ace;
    } catch (Exception e) {
        log.error(e);
        throw e;
    }
}

From source file:com.ipcglobal.fredimportaws.TsvsToRedshift.java

License:Apache License

/**
 * Process./*from   w  ww  .  j  a v a2s  .  com*/
 *
 * @throws Exception the exception
 */
public void process() throws Exception {
    try {
        s3Client.createBucket(new CreateBucketRequest(awsBucketName));
        log.info("Start: emptyBucketContents");
        long before = System.currentTimeMillis();
        emptyBucketContents();
        log.info("Complete: emptyBucketContents, elapsed=" + (System.currentTimeMillis() - before));

        log.info("Start: transferToBucket");
        before = System.currentTimeMillis();
        transferToBucket();
        log.info("Complete: transferToBucket, elapsed=" + (System.currentTimeMillis() - before));

        log.info("Start: copyS3FilesToRedshiftTable");
        before = System.currentTimeMillis();
        copyS3FilesToRedshiftTable();
        log.info("Complete: copyS3FilesToRedshiftTable, elapsed=" + (System.currentTimeMillis() - before));

    } catch (AmazonServiceException ase) {
        log.error("Caught Exception: " + ase.getMessage());
        log.error("Reponse Status Code: " + ase.getStatusCode());
        log.error("Error Code: " + ase.getErrorCode());
        log.error("Request ID: " + ase.getRequestId());
        throw ase;
    } catch (AmazonClientException ace) {
        log.error("Error Message: " + ace.getMessage());
        throw ace;
    } catch (Exception e) {
        log.error(e);
        throw e;
    }
}

From source file:com.jaspersoft.jasperserver.war.amazon.client.AwsDataSourceServiceImpl.java

License:Open Source License

/**
 * Looks up the database instances available for the requested Amazon service
 * (RDS or Redshift) and converts them into DTOs. Service errors with a 403
 * status are mapped to specific status DTOs or user-facing messages; other
 * failures produce a generic status DTO carrying the exception message.
 */
public List<AwsDBInstanceDTO> getAwsDBInstances(AWSCredentials awsCredentials, String amazonDBService,
        String endpoint) {
    try {
        // Normalize once; the service name is matched case-insensitively.
        String service = amazonDBService.toLowerCase();
        if (service.equals(RDS)) {
            // Query the RDS API for all available RDS instances.
            AmazonRDSClient rdsClient = new AmazonRDSClient(awsCredentials);
            if (endpoint != null) {
                rdsClient.setEndpoint(RDS + "." + endpoint);
            }
            return toRDSInstancesDTOs(getRdsInstances(rdsClient), amazonDBService);
        }
        if (service.equals(Redshift)) {
            // Query the Redshift API for all available Redshift instances.
            AmazonRedshiftClient redshiftClient = new AmazonRedshiftClient(awsCredentials);
            if (endpoint != null) {
                redshiftClient.setEndpoint(Redshift + "." + endpoint);
            }
            return toRedshiftInstancesDTOs(getRedshiftInstances(redshiftClient), amazonDBService);
        }
        // Unknown service name: nothing to list.
        return new ArrayList<AwsDBInstanceDTO>();
    } catch (AmazonServiceException ex) {
        log.warn("Loading AWS data source metadata for " + amazonDBService + ": " + ex.getMessage());

        String errorCode = ex.getErrorCode();
        if (ex.getStatusCode() == 403 && errorCode != null) {
            String code = errorCode.toLowerCase();
            if (code.equals(ACCESS_DENIED)) {
                return generateDBServiceInfoStatus(amazonDBService, "resource.dataSource.aws.access.denied");
            }
            if (code.equals(INVALID_CLIENT_TOKEN_ID)) {
                throw new JSShowOnlyErrorMessage(
                        messageSource.getMessage("" + "aws.exception.datasource.accessKey.invalid", null,
                                LocaleContextHolder.getLocale()));
            }
            if (code.equals(SIGNATURE_DOES_NOT_MATCH)) {
                throw new JSShowOnlyErrorMessage(
                        messageSource.getMessage("" + "aws.exception.datasource.secretKey.invalid", null,
                                LocaleContextHolder.getLocale()));
            }
        }

        return generateDBServiceInfoStatus(amazonDBService, "[" + ex.getMessage() + "]");
    } catch (AmazonClientException ex) {
        // An UnknownHostException cause means the endpoint itself is bad.
        if (ex.getCause() instanceof UnknownHostException) {
            return generateDBServiceInfoStatus(endpoint, "resource.dataSource.aws.unknown.host");
        }

        return generateDBServiceInfoStatus(amazonDBService, "[" + ex.getMessage() + "]");
    }
}

From source file:com.jfixby.scarabei.red.aws.test.S3Sample.java

License:Open Source License

/**
 * End-to-end walk-through of basic Amazon S3 operations: create a bucket,
 * list buckets, upload an object, download it, list by prefix, then delete
 * the object and the bucket. Credentials come from the [default] profile.
 *
 * @param args unused
 * @throws IOException if printing the downloaded object content fails
 */
public static void main(final String[] args) throws IOException {

    /*
     * Load the [default] credential profile from the credentials file at
     * C:\Users\%USERNAME%\.aws\credentials.
     */
    AWSCredentials creds = null;
    try {
        creds = new ProfileCredentialsProvider("default").getCredentials();
    } catch (final Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\%USERNAME%\\.aws\\credentials), and is in valid format.", e);
    }

    final AmazonS3 client = new AmazonS3Client(creds);
    final Region region = Region.getRegion(Regions.US_WEST_2);
    client.setRegion(region);

    // Bucket names are globally unique, so suffix with a random UUID.
    final String bucket = "my-first-s3-bucket-" + UUID.randomUUID();
    final String objectKey = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        // Create the demo bucket (optionally a location could be specified
        // to keep the data closer to applications or users).
        System.out.println("Creating bucket " + bucket + "\n");
        client.createBucket(bucket);

        // Show every bucket owned by this account.
        System.out.println("Listing buckets");
        for (final Bucket b : client.listBuckets()) {
            System.out.println(" - " + b.getName());
        }
        System.out.println();

        // Upload a generated sample file. An InputStream of known length, or
        // custom metadata (content-type, content-encoding, ...), works too.
        System.out.println("Uploading a new object to S3 from a file\n");
        client.putObject(new PutObjectRequest(bucket, objectKey, createSampleFile()));

        // Download it back. Consume the stream promptly: the content is
        // streamed straight from S3 and the connection stays open until it is
        // fully read or closed. GetObjectRequest also supports conditional
        // and ranged downloads.
        System.out.println("Downloading an object");
        final S3Object downloaded = client.getObject(new GetObjectRequest(bucket, objectKey));
        System.out.println("Content-Type: " + downloaded.getObjectMetadata().getContentType());
        displayTextInputStream(downloaded.getObjectContent());

        // List objects with a prefix. Large listings may be truncated; use
        // AmazonS3.listNextBatchOfObjects(...) to page through the rest.
        System.out.println("Listing objects");
        final ObjectListing listing = client
                .listObjects(new ListObjectsRequest().withBucketName(bucket).withPrefix("My"));
        for (final S3ObjectSummary summary : listing.getObjectSummaries()) {
            System.out.println(
                    " - " + summary.getKey() + "  " + "(size = " + summary.getSize() + ")");
        }
        System.out.println();

        // Clean up: delete the object (no undelete without versioning) ...
        System.out.println("Deleting an object\n");
        client.deleteObject(bucket, objectKey);

        // ... then the bucket, which must be empty before deletion.
        System.out.println("Deleting bucket " + bucket + "\n");
        client.deleteBucket(bucket);
    } catch (final AmazonServiceException serviceException) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + serviceException.getMessage());
        System.out.println("HTTP Status Code: " + serviceException.getStatusCode());
        System.out.println("AWS Error Code:   " + serviceException.getErrorCode());
        System.out.println("Error Type:       " + serviceException.getErrorType());
        System.out.println("Request ID:       " + serviceException.getRequestId());
    } catch (final AmazonClientException clientException) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + clientException.getMessage());
    }
}