Example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

List of usage examples for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries.

Prototype

public List<S3ObjectSummary> getObjectSummaries() 

Source Link

Document

Gets the list of object summaries describing the objects stored in the S3 bucket.

Usage

From source file:com.rathravane.clerk.impl.s3.S3IamDb.java

License:Apache License

@Override
public void sweepExpiredTags() throws IamSvcException {
    // Collect every tag id stored under the by-tag prefix, then reload each
    // tag object. NOTE(review): loadTagObject(key, false) presumably drops
    // expired entries as a side effect of the load -- confirm its contract.
    final TreeSet<String> keys = new TreeSet<String>();
    try {
        final String prefix = makeByTagId("");
        final ListObjectsRequest listObjectRequest = new ListObjectsRequest().withBucketName(fBucketId)
                .withPrefix(prefix);
        ObjectListing objects = fDb.listObjects(listObjectRequest);
        while (true) {
            for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                // Strip the listing prefix to recover the bare tag id.
                final String tagId = objectSummary.getKey().substring(prefix.length());
                keys.add(tagId);
            }
            // BUG FIX: the original fetched the next batch *before* testing
            // isTruncated() on it, so the final page of every listing was
            // never processed (and a non-truncated listing still triggered a
            // needless extra request). Test the current page first, then fetch.
            if (!objects.isTruncated()) {
                break;
            }
            objects = fDb.listNextBatchOfObjects(objects);
        }
    } catch (AmazonS3Exception x) {
        throw new IamSvcException(x);
    }

    for (String key : keys) {
        loadTagObject(key, false);
    }
}

From source file:com.rathravane.clerk.impl.s3.S3IamDb.java

License:Apache License

/**
 * Returns every S3 key in the bucket that begins with the given key prefix,
 * paging through the listing until it is exhausted.
 *
 * @param key the key prefix to list under
 * @return all matching keys, in listing order
 * @throws IamSvcException wrapping any AWS client failure
 */
List<String> loadKeysBelow(String key) throws IamSvcException {
    final LinkedList<String> result = new LinkedList<String>();
    try {
        final ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(fBucketId)
                .withPrefix(key);
        ObjectListing objectListing;
        do {
            objectListing = fDb.listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                result.add(objectSummary.getKey());
            }
            // BUG FIX: S3 only returns NextMarker when a delimiter is set on
            // the request. Without one, getNextMarker() can be null even on a
            // truncated listing, and setting a null marker restarts the
            // listing from the beginning -- an infinite loop. Fall back to
            // the last key of the current page, which is the marker S3
            // expects for the next request.
            String nextMarker = objectListing.getNextMarker();
            final List<S3ObjectSummary> page = objectListing.getObjectSummaries();
            if (nextMarker == null && !page.isEmpty()) {
                nextMarker = page.get(page.size() - 1).getKey();
            }
            listObjectsRequest.setMarker(nextMarker);
        } while (objectListing.isTruncated());
    } catch (AmazonClientException e) {
        throw new IamSvcException(e);
    }
    return result;
}

From source file:com.scoyo.tools.s3cacheenhancer.S3HeaderEnhancer.java

License:Apache License

private void setHeaders(ObjectListing listing, final String maxAgeHeader, ExecutorService executorService) {
    // Schedule one task per listed object; each task rewrites the object's
    // Cache-Control header via an in-place copy when it differs from the
    // desired value.
    for (final S3ObjectSummary summary : listing.getObjectSummaries()) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                final String bucketName = summary.getBucketName();
                final String objectKey = summary.getKey();

                final ObjectMetadata metadata;
                try {
                    metadata = s3.getObjectMetadata(bucketName, objectKey);
                } catch (AmazonS3Exception exception) {
                    System.out.println("Could not update " + objectKey + " [" + exception.getMessage() + "]");
                    return;
                }

                // Folder placeholder objects carry this synthetic content
                // type; skip them.
                if ("application/x-directory".equals(metadata.getContentType())) {
                    System.out.println("Skipping because content-type " + objectKey);
                    return;
                }

                if (maxAgeHeader.equals(metadata.getCacheControl())) {
                    System.out.println("Skipping because header is already correct " + objectKey);
                    return;
                }
                metadata.setCacheControl(maxAgeHeader);

                // Carry the existing ACL over; a copy-in-place would
                // otherwise reset it to the default.
                final AccessControlList acl = s3.getObjectAcl(bucketName, objectKey);

                final CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, objectKey, bucketName,
                        objectKey).withAccessControlList(acl).withNewObjectMetadata(metadata);

                final CopyObjectResult copyResult = s3.copyObject(copyRequest);

                if (copyResult != null) {
                    System.out.println("Updated " + objectKey);
                } else {
                    System.out.println("Could not update " + objectKey);
                }
            }
        });
    }
}

From source file:com.shareplaylearn.models.UserItemManager.java

License:Open Source License

private HashSet<String> getExternalItemListing(ObjectListing objectListing) {
    // Map each internal S3 key in the listing to its external location,
    // dropping (and logging) any key with no external mapping.
    final HashSet<String> itemLocations = new HashSet<>();
    for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
        final String externalPath = makeExternalLocation(summary.getKey());
        if (externalPath == null) {
            log.info("External path for object list was null?");
        } else {
            itemLocations.add(externalPath);
            log.debug("External path was " + externalPath);
        }
    }
    return itemLocations;
}

From source file:com.shareplaylearn.models.UserItemManager.java

License:Open Source License

/**
 * Rejects an upload when the user's object listing is already at capacity.
 * <p>
 * NOTE(review): this consults S3 on every check, which is slow and costs
 * money. A better design would keep an async-updated local cache of used
 * storage (think ATM balances): while the cache says usage is well under the
 * limit, accept immediately; only ping Amazon once usage gets close.
 *
 * @param objectListing the user's current S3 object listing
 * @param maxSize the maximum number of stored objects allowed
 * @return 200 OK when under the limit, otherwise a 418 error response
 */
private Response checkObjectListingSize(ObjectListing objectListing, int maxSize) {
    final boolean truncatedAtLimit = objectListing.isTruncated() && objectListing.getMaxKeys() >= maxSize;
    if (truncatedAtLimit) {
        log.error("Error, too many uploads");
        return Response.status(418).entity("I'm a teapot! j/k - not enough space " + maxSize).build();
    }
    final int itemCount = objectListing.getObjectSummaries().size();
    if (itemCount >= maxSize) {
        log.error("Error, too many uploads");
        return Response.status(418)
                .entity("I'm a teapot! Er, well, at least I can't hold " + maxSize + " stuff.").build();
    }
    return Response.status(Response.Status.OK).entity("OK").build();
}

From source file:com.sjsu.backitup.AwsConsoleApp.java

License:Open Source License

public static void main(String[] args) throws Exception {

    System.out.println("===========================================");
    System.out.println("Welcome to the AWS Java SDK!");
    System.out.println("===========================================");

    init();

    // NOTE(review): the original carried an empty try {} catch
    // (AmazonServiceException) block left over from EC2/SimpleDB sample
    // scaffolding -- unreachable dead code (and it contained a "Reponse"
    // typo). Removed.

    /*
     * Amazon S3
     *
     * Iterate over all buckets owned by the current user and over the
     * object metadata in each bucket, accumulating a total object count
     * and space usage. No object content is ever downloaded -- the
     * listing requests work with metadata only.
     */
    try {
        List<Bucket> buckets = s3.listBuckets();

        long totalSize = 0;
        int totalItems = 0;
        for (Bucket bucket : buckets) {
            /*
             * An S3 object listing is paged: after a certain point the
             * listing is truncated and further pages must be obtained with
             * AmazonS3Client.listNextBatchOfObjects().
             */
            ObjectListing objects = s3.listObjects(bucket.getName());
            while (true) {
                for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                    totalSize += objectSummary.getSize();
                    totalItems++;
                }
                // BUG FIX: the original fetched the next batch *before*
                // testing isTruncated() on it, so the final page of every
                // bucket was silently skipped and its objects never counted.
                if (!objects.isTruncated()) {
                    break;
                }
                objects = s3.listNextBatchOfObjects(objects);
            }
        }

        System.out.println("You have " + buckets.size() + " Amazon S3 bucket(s), " + "containing " + totalItems
                + " objects with a total size of " + totalSize + " bytes.");
    } catch (AmazonServiceException ase) {
        /*
         * AmazonServiceException: the request reached AWS but the service
         * rejected it or failed while executing it.
         */
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        /*
         * AmazonClientException: the failure happened client-side -- e.g.
         * no network connection was available to reach AWS at all.
         */
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.sjsu.faceit.example.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*/*  w  w w  . jav a  2s  .  c om*/
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    System.out.println(new File(".").getAbsolutePath());
    AmazonS3 s3 = new AmazonS3Client(
            new PropertiesCredentials(S3Sample.class.getResourceAsStream("AwsCredentials.properties")));

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, "abc/" + key, new File("/Users/prayag/Desktop/2.jpg")));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, "abc/" + key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //            System.out.println("Deleting bucket " + bucketName + "\n");
        //            s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.sludev.commons.vfs2.provider.s3.SS3FileObject.java

License:Apache License

/**
 * Determines the type of the current FileObject:
 * FILE for an object that exists remotely,
 * FOLDER for a prefix under which objects exist,
 * IMAGINARY for a path with no remote presence at all.
 *
 * @return the resolved {@code FileType}
 * @throws Exception if the remote check fails
 */
@Override
protected FileType doGetType() throws Exception {
    final Pair<String, String> path = getContainerAndPath();

    // A real remote object is a regular file.
    if (objectExists(path.getLeft(), path.getRight())) {
        return FileType.FILE;
    }

    // The blob service has no true folders -- just keys containing path
    // separators. The trick: list the prefix; if anything comes back for a
    // path that doesn't itself exist, it's a folder.
    String prefix = path.getRight();
    if (!prefix.endsWith("/")) {
        // Folder prefixes must end with a slash.
        prefix += "/";
    }

    final ObjectListing blobs;
    if (prefix.equals("/")) {
        // Special root-path case: list the container with no prefix.
        blobs = fileSystem.getClient().listObjects(path.getLeft());
    } else {
        blobs = fileSystem.getClient().listObjects(path.getLeft(), prefix);
    }

    return blobs.getObjectSummaries().isEmpty() ? FileType.IMAGINARY : FileType.FOLDER;
}

From source file:com.sludev.commons.vfs2.provider.s3.SS3FileObject.java

License:Apache License

/**
 * Lists the children of this file. Only called when {@link #doGetType}
 * returns {@link FileType#FOLDER}. The result is cached by the framework,
 * so this implementation may be expensive.
 *
 * @return the child paths (possibly empty) when this file is a directory
 * @throws Exception if the listing fails
 */
@Override
protected String[] doListChildren() throws Exception {
    final Pair<String, String> path = getContainerAndPath();
    final String container = path.getLeft();

    String prefix = path.getRight();
    if (!prefix.endsWith("/")) {
        // Folder prefixes must end with a slash.
        prefix += "/";
    }

    // Delimited listing: object summaries are the direct child files,
    // common prefixes are the direct child folders.
    final ListObjectsRequest request = new ListObjectsRequest();
    request.withBucketName(container);
    request.withPrefix(prefix);
    request.withDelimiter("/");

    final ObjectListing listing = fileSystem.getClient().listObjects(request);

    final List<String> children = new ArrayList<>();
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        children.add(String.format("/%s/%s", container, summary.getKey()));
    }

    final List<String> commonPrefixes = listing.getCommonPrefixes();
    if (commonPrefixes != null) {
        for (String folder : commonPrefixes) {
            children.add(String.format("/%s/%s", container, folder));
        }
    }

    return children.toArray(new String[children.size()]);
}

From source file:com.springboot.demo.framework.aws.s3.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {

    // BUG FIX: the original also built an unused BasicAWSCredentials from
    // hardcoded AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY constants -- dead
    // code, and embedding long-lived keys in source is a security smell.
    // Removed; the client below resolves credentials via the default chain.

    /*
     * Fail fast if no [default] credential profile can be read from
     * (~/.aws/credentials). The loaded value itself is not used -- the
     * default client builder performs its own resolution -- this is purely
     * an up-front sanity check with a clearer error message.
     */
    try {
        new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    /*
     * Create the S3 client and pin it to us-west-2.
     * NOTE(review): setRegion on a builder-constructed client is legacy
     * style; prefer AmazonS3ClientBuilder.standard().withRegion(...).
     */
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a bucket. Bucket names are globally unique across S3; a
         * location can optionally be specified to keep data close to its
         * applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in this account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object. An InputStream of known length works as well,
         * and custom metadata (content-type, content-encoding, etc.) may
         * be attached to the request.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Report the URL for the object just stored under bucket + key.
         */
        URL url = s3.getUrl(bucketName, key);
        System.out.println("upload file url : " + url.toString());

        /*
         * Download the object. The content stream comes straight from
         * Amazon S3 and holds the network connection open until fully
         * read or closed, so consume it promptly. GetObjectRequest also
         * supports conditional and ranged downloads.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects by prefix. Listings over buckets with many objects
         * are paged: check isTruncated() and use
         * AmazonS3.listNextBatchOfObjects(...) for additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete the object. Unless versioning is enabled on the bucket
         * there is no undelete, so use caution.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete the bucket. A bucket must be completely empty before it
         * can be deleted, so objects must be removed first.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}