Example usage for com.amazonaws.services.s3 AmazonS3 setRegion

List of usage examples for com.amazonaws.services.s3 AmazonS3 setRegion

Introduction

On this page you can find example usage of com.amazonaws.services.s3 AmazonS3 setRegion.

Prototype

public void setRegion(com.amazonaws.regions.Region region) throws IllegalArgumentException;

Document

An alternative to AmazonS3#setEndpoint(String): sets the regional endpoint for this client's service calls.
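
For orientation, here is a minimal, self-contained sketch of the call before the full examples below. The credentials provider and the choice of US_WEST_2 are illustrative assumptions, not requirements.

import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class SetRegionSketch {
    public static void main(String[] args) {
        // Build a client from the [default] credential profile.
        AmazonS3 s3 = new AmazonS3Client(new ProfileCredentialsProvider().getCredentials());
        // Pin the client to a region; this replaces an explicit setEndpoint("s3.us-west-2.amazonaws.com") call.
        s3.setRegion(Region.getRegion(Regions.US_WEST_2));
    }
}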

Usage

From source file: S3TransferProgressSample.java

License: Open Source License

public static void main(String[] args) throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (/Users/zunzunwang/.aws/credentials).
     *
     * TransferManager manages a pool of threads, so we create a
     * single instance and share it throughout our application.
     */
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/Users/zunzunwang/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    tx = new TransferManager(s3);

    bucketName = "s3-upload-sdk-sample-" + credentials.getAWSAccessKeyId().toLowerCase();

    new S3TransferProgressSample();
}

From source file: aot.storage.s3.CustomStorage.java

License: Open Source License

protected static AmazonS3 createS3(String[] ids) {
    AmazonS3 s3;
    // Each length check must cover the index that is actually read.
    if ((ids.length >= 2) && !ids[1].trim().isEmpty()) {
        String[] creds = ids[1].split(":");
        s3 = new AmazonS3Client(new BasicAWSCredentials(creds[0], creds[1]));
    } else {
        s3 = new AmazonS3Client();
    }
    if ((ids.length >= 3) && !ids[2].trim().isEmpty()) {
        s3.setRegion(Region.getRegion(Regions.fromName(ids[2])));
    }
    if ((ids.length >= 4) && !ids[3].trim().isEmpty()) {
        s3.setEndpoint(ids[3]);
    }
    return s3;
}
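
A hypothetical invocation of the method above, from a subclass or the same package (the method is protected). The array layout is an assumption inferred from the indices used:

    // Assumed layout: ids[1] = "ACCESS_KEY:SECRET_KEY", ids[2] = region name, ids[3] = optional endpoint override.
    // An empty string skips that setting (the default credential chain or default endpoint is used instead).
    String[] ids = { "storage-id", "ACCESS_KEY:SECRET_KEY", "us-west-2", "" };
    AmazonS3 s3 = createS3(ids);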

From source file: apphub.storage.s3.CustomStorage.java

License: Open Source License

protected static AmazonS3 createS3(URL url) {
    AmazonS3 s3;
    String userInfo = url.getUserInfo();
    if (userInfo != null) {
        String[] creds = userInfo.split(":");
        if (creds.length == 2) {
            s3 = new AmazonS3Client(new BasicAWSCredentials(creds[0], creds[1]));
        } else {
            throw new CreateStorageException(url.toString(),
                    "Credentials for S3 storage must be in form of KEY:SECRET");
        }
    } else {
        s3 = new AmazonS3Client();
    }
    Map<String, String> queryParameters = UrlUtil.getQueryParameters(url);
    String region = queryParameters.get("region");
    if (region != null) {
        s3.setRegion(Region.getRegion(Regions.fromName(region)));
    }
    String endpoint = queryParameters.get("endpoint");
    if (endpoint != null) {
        s3.setEndpoint(endpoint);
    }
    return s3;
}
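
A hypothetical URL accepted by the parser above; only the userinfo part and the region and endpoint query parameters are read here, so the host name is a placeholder:

    // Credentials go in the userinfo section, region and endpoint in the query string.
    URL url = new URL("https://ACCESS_KEY:SECRET_KEY@placeholder-host?region=us-west-2&endpoint=http://localhost:9000");
    AmazonS3 s3 = createS3(url);   // protected: call from a subclass or the same package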

From source file: br.com.unb.aws.client.S3ClientV1.java

License: Open Source License

public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users) 
     * and save the following lines after replacing the placeholder values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */

    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.amazon.util.ImageUploader.java

public static void uploadImage(String imageURL, String imageName, String folderName, String bucketName)
        throws MalformedURLException, IOException {
    // credentials object identifying user for authentication

    AWSCredentials credentials = new BasicAWSCredentials(System.getenv("AWS_S3_ACCESS_KEY"),
            System.getenv("AWS_S3_SECRET_ACCESS_KEY"));

    // create a client connection based on credentials
    AmazonS3 s3client = new AmazonS3Client(credentials);

    try {
        if (!(s3client.doesBucketExist(bucketName))) {
            s3client.setRegion(Region.getRegion(Regions.US_EAST_1));
            // Note that CreateBucketRequest does not specify a region, so the bucket
            // is created in the region configured on the client.
            s3client.createBucket(new CreateBucketRequest(bucketName));
        }

        //Enable CORS:
        //     <?xml version="1.0" encoding="UTF-8"?>
        //<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        //    <CORSRule>
        //        <AllowedOrigin>http://ask-ifr-download.s3.amazonaws.com</AllowedOrigin>
        //        <AllowedMethod>GET</AllowedMethod>
        //    </CORSRule>
        //</CORSConfiguration>
        BucketCrossOriginConfiguration configuration = new BucketCrossOriginConfiguration();

        CORSRule corsRule = new CORSRule()
                .withAllowedMethods(
                        Arrays.asList(new CORSRule.AllowedMethods[] { CORSRule.AllowedMethods.GET }))
                .withAllowedOrigins(Arrays.asList(new String[] { "http://ask-ifr-download.s3.amazonaws.com" }));
        configuration.setRules(Arrays.asList(new CORSRule[] { corsRule }));
        s3client.setBucketCrossOriginConfiguration(bucketName, configuration);

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    String fileName = folderName + SUFFIX + imageName + ".png";
    URL url = new URL(imageURL);

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType("image/png");
    omd.setContentLength(url.openConnection().getContentLength());
    // upload file to folder and set it to public
    s3client.putObject(new PutObjectRequest(bucketName, fileName, url.openStream(), omd)
            .withCannedAcl(CannedAccessControlList.PublicRead));
}
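
A possible call site for the method above; every argument is made up for illustration, and the AWS_S3_ACCESS_KEY / AWS_S3_SECRET_ACCESS_KEY environment variables must already be set:

    // Fetches the PNG at the given URL and stores it publicly under
    // folderName + SUFFIX + imageName + ".png" (SUFFIX is defined elsewhere in the class).
    ImageUploader.uploadImage("https://example.com/chart.png", "chart", "reports", "my-upload-bucket");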

From source file: com.arc.cloud.aws.s3.S3Sample.java

License: Open Source License

public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.atlassian.localstack.sample.S3Sample.java

License: Open Source License

public static void runTest(AWSCredentials credentials) throws IOException {

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    s3.setEndpoint(LocalstackTestRunner.getEndpointS3());

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    /*
     * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
     * so once a bucket name has been taken by any user, you can't create
     * another bucket with that same name.
     *
     * You can optionally specify a location for your bucket if you want to
     * keep your data closer to your applications or users.
     */
    System.out.println("Creating bucket " + bucketName + "\n");
    s3.createBucket(bucketName);

    /*
     * List the buckets in your account
     */
    System.out.println("Listing buckets");
    for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
    }
    System.out.println();

    /*
     * Upload an object to your bucket - You can easily upload a file to
     * S3, or upload directly an InputStream if you know the length of
     * the data in the stream. You can also specify your own metadata
     * when uploading to S3, which allows you to set a variety of options
     * like content-type and content-encoding, plus additional metadata
     * specific to your applications.
     */
    System.out.println("Uploading a new object to S3 from a file\n");
    s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

    /*
     * Download an object - When you download an object, you get all of
     * the object's metadata and a stream from which to read the contents.
     * It's important to read the contents of the stream as quickly as
     * possible since the data is streamed directly from Amazon S3 and your
     * network connection will remain open until you read all the data or
     * close the input stream.
     *
     * GetObjectRequest also supports several other options, including
     * conditional downloading of objects based on modification times,
     * ETags, and selectively downloading a range of an object.
     */
    System.out.println("Downloading an object");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
    System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
    displayTextInputStream(object.getObjectContent());

    /*
     * List objects in your bucket by prefix - There are many options for
     * listing the objects in your bucket.  Keep in mind that buckets with
     * many objects might truncate their results when listing their objects,
     * so be sure to check if the returned object listing is truncated, and
     * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
     * additional results.
     */
    System.out.println("Listing objects");
    ObjectListing objectListing = s3
            .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println(" - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
    }
    System.out.println();

    /*
     * Delete an object - Unless versioning has been turned on for your bucket,
     * there is no way to undelete an object, so use caution when deleting objects.
     */
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);

    /*
     * Delete a bucket - A bucket must be completely empty before it can be
     * deleted, so remember to delete any objects from your buckets before
     * you try to delete them.
     */
    System.out.println("Deleting bucket " + bucketName + "\n");
    s3.deleteBucket(bucketName);
}

From source file: com.bye.project.S3TransferProgressSample.java

License: Open Source License

public static void main(String[] args) throws Exception {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * TransferManager manages a pool of threads, so we create a
     * single instance and share it throughout our application.
     */
    AmazonS3 s3 = new AmazonS3Client(credentials = new PropertiesCredentials(
            S3TransferProgressSample.class.getResourceAsStream("AwsCredentials.properties")));
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    tx = new TransferManager(s3);

    bucketName = "s3-upload-sdk-sample-" + credentials.getAWSAccessKeyId().toLowerCase();

    new S3TransferProgressSample();
}

From source file: com.crickdata.upload.s3.UploadLiveData.java

License: Open Source License

public Map<String, Date> uploadToS3(String fileName, boolean type) throws IOException {

    Statistics statistics = new Statistics();
    Map<String, Date> perfMap = new HashMap<String, Date>();
    AWSCredentials credentials = null;
    try {
        // Access keys redacted: avoid hard-coding real credentials in source; load them
        // from a credentials file, environment variables, or an instance profile instead.
        credentials = new BasicAWSCredentials("YOUR_ACCESS_KEY_ID",
                "YOUR_SECRET_ACCESS_KEY");
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\bssan_000\\.aws\\credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    String bucketName;
    if (!type)
        bucketName = "cricmatchinfo";
    else
        bucketName = "cricmatchinfoseries";
    String key = fileName.replace(".json", "").trim();
    try {
        perfMap.put("S3INSERTREQ", new Date());
        statistics.setS3Req(new Date());
        File f = readMatchFile(fileName);

        double bytes = f.length();
        double kilobytes = (bytes / 1024);
        System.out.println("Details :" + kilobytes);
        s3.putObject(new PutObjectRequest(bucketName, key, f));
        statistics.setSize(String.valueOf(kilobytes));

        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        perfMap.put("S3SAVERES", object.getObjectMetadata().getLastModified());
        statistics.setKey(key);
        statistics.setS3Res(object.getObjectMetadata().getLastModified());
        MyUI.stats.add(statistics);

        displayTextInputStream(object.getObjectContent());

        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
    return perfMap;
}
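
A hypothetical call of the method above (the file name is an assumption); the boolean selects between the two buckets used in the code:

    // false -> "cricmatchinfo", true -> "cricmatchinfoseries"
    Map<String, Date> timings = new UploadLiveData().uploadToS3("match-12345.json", false);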

From source file: com.dustindoloff.s3websitedeploy.Main.java

License: Apache License

public static void main(final String[] args) {
    final Options options = buildOptions();
    final CommandLineParser parser = new DefaultParser();
    final CommandLine commandLine;
    try {
        commandLine = parser.parse(options, args);
    } catch (final ParseException e) {
        System.out.println(e.getMessage());
        new HelpFormatter().printHelp("s3WebsiteDeploy", options);
        System.exit(1);
        return;
    }

    final File websiteZip = new File(commandLine.getOptionValue(ARG_WEBSITE_ZIP));
    final String s3Bucket = commandLine.getOptionValue(ARG_BUCKET);
    final String awsAccessKey = commandLine.getOptionValue(ARG_AWS_ACCESS_KEY);
    final String awsSecretKey = commandLine.getOptionValue(ARG_AWS_SECRET_KEY);

    final ZipFile zipFile = getAsValidZip(websiteZip);
    if (zipFile == null) {
        System.out.println("Invalid zip file passed in");
        System.exit(2);
        return;
    }

    System.out.println("Running S3 Website Deploy");

    final AmazonS3 s3Client = new AmazonS3Client(new BasicAWSCredentials(awsAccessKey, awsSecretKey));

    final Region bucketRegion = getBucketRegion(s3Client, s3Bucket);

    if (bucketRegion == null) {
        System.out.println("Unable to get the region for the bucket.");
        System.exit(3);
        return;
    }

    s3Client.setRegion(bucketRegion);

    if (!emptyBucket(s3Client, s3Bucket)) {
        System.out.println("Unable to upload to empty bucket.");
        System.exit(4);
        return;
    }

    if (!upload(s3Client, s3Bucket, zipFile)) {
        System.out.println("Unable to upload to S3.");
        System.exit(5);
        return;
    }

    System.out.println("Deployment Complete");
}
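
The getBucketRegion, emptyBucket, and upload helpers live elsewhere in that class and are not shown here. As a rough sketch only, and assuming a plain location-constraint lookup is acceptable, a bucket's region could be resolved along these lines in the SDK for Java v1 (legacy constraints such as "EU" would need extra mapping):

    // Assumed helper, not the project's actual code.
    private static Region getBucketRegion(final AmazonS3 s3Client, final String bucket) {
        try {
            final String location = s3Client.getBucketLocation(bucket);
            if ("US".equals(location)) {
                return Region.getRegion(Regions.US_EAST_1);   // classic location constraint for us-east-1
            }
            return Region.getRegion(Regions.fromName(location));
        } catch (final AmazonClientException e) {
            return null;   // main() above treats null as a failed lookup
        }
    }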