Example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

List of usage examples for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries.

Prototype

public List<S3ObjectSummary> getObjectSummaries() 

Source Link

Document

Gets the list of object summaries describing the objects stored in the S3 bucket.

Usage

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

/**
 * Walks the S3 bucket under {@code rootPath} and builds a {@link Snapshot} of every
 * directory and file that survives the exclusion rules and the caller's filters.
 *
 * @param filters patterns used by {@code isFiltered} to skip unwanted keys
 * @return the snapshot of files and directories, or {@code null} when listing fails
 */
@Override
protected Snapshot scan(List<Pattern> filters) {
    try {
        Map<String, FileSnapshot> files = new LinkedHashMap<>();
        Set<String> dirs = new HashSet<>();

        // Restrict the listing to keys under the configured root ("" lists the whole bucket).
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
                .withPrefix(rootPath.isEmpty() ? "" : rootPath + "/");

        ObjectListing objectListing;

        do {
            objectListing = listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {

                // Skip keys rejected by the exclusion rules or by the caller's filters.
                if (isExcluded(objectSummary.getKey()) || isFiltered(objectSummary.getKey(), filters))
                    continue;

                if (objectSummary.getKey().endsWith("/")) {
                    // Keys ending in "/" are directory placeholders; store them
                    // relative to rootPath ("" for the root itself).
                    String filePath = trimPath(objectSummary.getKey());
                    filePath = filePath.equals(rootPath) ? ""
                            : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    dirs.add(filePath);
                    System.out
                            .println(String.format("Scanning s3://%s/%s", bucketName, objectSummary.getKey()));
                } else {
                    String fileName = objectSummary.getKey();
                    String filePath = "";

                    // Split the key into parent path and file name, then make the
                    // parent path relative to rootPath (empty when directly under it).
                    if (fileName.contains("/")) {
                        int fileNameSplitIndex = fileName.lastIndexOf("/");
                        filePath = fileName.substring(0, fileNameSplitIndex);
                        fileName = fileName.substring(fileNameSplitIndex + 1);

                        filePath = filePath.equals(rootPath) ? ""
                                : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    }

                    // Re-join into the root-relative path that keys the snapshot map.
                    if (filePath.equals("")) {
                        filePath = fileName;
                    } else {
                        filePath = filePath + "/" + fileName;
                    }

                    // Prefer the uploader-recorded "lmd" user metadata (epoch millis,
                    // presumably written by the sync side — see getObjectInfo) over
                    // S3's own last-modified timestamp when present.
                    ObjectMetadata meta = getObjectInfo(objectSummary);
                    String lmd = meta.getUserMetaDataOf("lmd");

                    Date lastModified = (lmd == null) ? objectSummary.getLastModified()
                            : new Date(Long.parseLong(lmd));

                    FileSnapshot file = new FileSnapshot(fileName, objectSummary.getSize(),
                            new DateTime(lastModified), filePath);
                    files.put(filePath, file);
                }
            }
            // Advance the marker to fetch the next page until the listing is complete.
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());

        Snapshot snapshot = new Snapshot(files, dirs);
        return snapshot;
    } catch (AmazonClientException ex) {
        // Best-effort: report the failure and signal it to the caller with null.
        System.out.println("Failed to scan file space");
        System.out.println(ex.getMessage());
    }

    return null;
}

From source file:com.amazon.aws.samplecode.travellog.aws.S3StorageManager.java

License:Open Source License

/**
 * Deletes the specified S3 object from the S3 storage service.  If a
 * storage path is passed in that has child S3 objects, it will recursively
 * delete the underlying objects.//from   w w w  . j  av a  2 s. c o  m
 * @param s3Store the s3 object to be deleted
 */
/**
 * Deletes the given S3 object from storage. When the storage path has child
 * objects beneath it, every child sharing that prefix is deleted as well,
 * walking all listing pages until the listing is no longer truncated.
 *
 * @param s3Store descriptor holding the bucket name and storage path to delete
 */
public void delete(TravelLogStorageObject s3Store) {

    String storagePath = s3Store.getStoragePath();
    if (storagePath == null || storagePath.equals("")) {
        // Refuse blank paths: deleting with an empty prefix would wipe the bucket.
        logger.log(Level.WARNING, "Empty storage path passed to delete method");
        return;
    }

    // Delete every object whose key starts with the storage path, page by page.
    ObjectListing page = s3client.listObjects(s3Store.getBucketName(), storagePath);
    boolean morePages = true;
    while (morePages) {
        for (S3ObjectSummary child : page.getObjectSummaries()) {
            s3client.deleteObject(s3Store.getBucketName(), child.getKey());
        }
        morePages = page.isTruncated();
        if (morePages) {
            page = s3client.listNextBatchOfObjects(page);
        }
    }

}

From source file:com.appdynamics.monitors.s3.AWSS3Monitor.java

License:Apache License

/**
 * This method calls Amazon WS to get required S3 statistics, set values
 * based on configured unit, and returns the result back
 * /*from  w  w  w .  j av a 2  s .  c  o  m*/
 * @param buckets
 * @param amazonS3Client
 * @return Map<String, String>
 * @throws TaskExecutionException
 */
/**
 * Calls Amazon S3 to aggregate statistics over the given buckets (or all
 * buckets when {@code buckets} is null): total object size, object count, and
 * the most recent last-modified timestamp. Paginates through each bucket's
 * listing until it is no longer truncated.
 *
 * @param buckets        buckets to inspect, or null to inspect every bucket
 * @param amazonS3Client client used for the S3 web service calls
 * @return the aggregated metrics converted via {@code getResultWithRequiredUnit}
 * @throws TaskExecutionException when an {@link AmazonS3Exception} occurs
 */
private Map<String, String> getS3Result(List<Bucket> buckets, AmazonS3Client amazonS3Client)
        throws TaskExecutionException {
    // Aggregates, starting from neutral values.
    long totalSize = 0;
    long objectCount = 0;
    Date newestModification = new Date(0);

    try {
        if (buckets == null) {
            logger.debug("Calling Webservice to list all buckets");
            buckets = amazonS3Client.listBuckets();
        }

        for (Bucket bucket : buckets) {
            logger.debug("Getting data for bucket: " + bucket.getName());

            ObjectListing listing = null;
            do {
                // First iteration starts the listing; later ones continue it.
                if (listing == null) {
                    logger.debug("Calling Webservice to get objectlisting for first time");
                    listing = amazonS3Client.listObjects(bucket.getName());
                } else {
                    logger.debug("Calling Webservice to get objectlisting subsequent time");
                    listing = amazonS3Client.listNextBatchOfObjects(listing);
                }

                List<S3ObjectSummary> summaries = listing.getObjectSummaries();
                objectCount += summaries.size();

                for (S3ObjectSummary summary : summaries) {
                    totalSize += summary.getSize();
                    // Track the latest modification timestamp seen so far.
                    if (newestModification.before(summary.getLastModified())) {
                        newestModification = summary.getLastModified();
                    }
                }
            } while (listing.isTruncated());
        }

    } catch (AmazonS3Exception exception) {
        logger.error("AmazonS3Exception occurred", exception);
        throw new TaskExecutionException("Sending S3 metric failed due to AmazonS3Exception");
    }

    return getResultWithRequiredUnit(totalSize, objectCount, newestModification);
}

From source file:com.arc.cloud.aws.s3.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {

    /*/*  w  w  w .  j  av  a 2s.  c o m*/
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.athena.dolly.web.aws.s3.S3Service.java

License:Open Source License

/**
 * Retrieve object summaries in specific bucket
 * @param bucketName/*from   w w  w  .j  a  va  2 s.co  m*/
 * @return
 */
/**
 * Retrieve object summaries in specific bucket.
 *
 * Fix over the original: a single listObjects call returns at most one page
 * (up to 1000 keys); this version follows truncated listings with
 * listNextBatchOfObjects so every object in the bucket is returned.
 *
 * @param bucketName name of the bucket to list
 * @return summaries of all objects in the bucket
 */
public List<S3ObjectSummary> listBucket(String bucketName) {
    List<S3ObjectSummary> summaries = new ArrayList<S3ObjectSummary>();
    ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
    System.out.println(objectListing.getCommonPrefixes());
    while (true) {
        summaries.addAll(objectListing.getObjectSummaries());
        if (!objectListing.isTruncated()) {
            break;
        }
        objectListing = s3.listNextBatchOfObjects(objectListing);
    }
    return summaries;
}

From source file:com.athena.dolly.web.aws.s3.S3Service.java

License:Open Source License

/**
 * Retrieve object summaries in specific bucket
 * @param bucketName/* w  w  w .  j ava  2 s .  c o  m*/
 * @return
 */
/**
 * Retrieve object summaries in specific bucket, restricted to a key prefix,
 * converted to {@link S3Dto} instances.
 *
 * Fix over the original: a single listObjects call returns at most one page
 * (up to 1000 keys); this version follows truncated listings with
 * listNextBatchOfObjects so every matching object is returned.
 *
 * @param bucketName name of the bucket to list
 * @param prefix     key prefix to filter on
 * @return DTOs for all matching objects
 */
public List<S3Dto> listBucket(String bucketName, String prefix) {
    ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
            .withPrefix(prefix).withDelimiter(null);

    List<S3Dto> list = new ArrayList<S3Dto>();
    ObjectListing objectListing = s3.listObjects(listObjectsRequest);
    while (true) {
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            logger.info(" - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
            list.add(makeDto(bucketName, objectSummary));
        }
        if (!objectListing.isTruncated()) {
            break;
        }
        objectListing = s3.listNextBatchOfObjects(objectListing);
    }

    return list;
}

From source file:com.atlassian.localstack.sample.S3Sample.java

License:Open Source License

/**
 * Exercises the basic Amazon S3 operations against a LocalStack endpoint:
 * create a bucket, list buckets, upload an object, download it, list objects
 * by prefix, then delete the object and the bucket.
 *
 * Fix over the original: the downloaded {@link S3Object} is now closed via
 * try-with-resources — its content stream keeps the underlying HTTP connection
 * open until it is fully read or closed.
 *
 * @param credentials AWS credentials used to build the client
 * @throws IOException if the sample file cannot be created, or the downloaded
 *         object's stream cannot be read or closed
 */
public static void runTest(AWSCredentials credentials) throws IOException {

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    // Point the client at the LocalStack S3 endpoint instead of real AWS.
    s3.setEndpoint(LocalstackTestRunner.getEndpointS3());

    // Bucket names are globally unique, so append a random UUID.
    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    /*
     * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
     * so once a bucket name has been taken by any user, you can't create
     * another bucket with that same name.
     *
     * You can optionally specify a location for your bucket if you want to
     * keep your data closer to your applications or users.
     */
    System.out.println("Creating bucket " + bucketName + "\n");
    s3.createBucket(bucketName);

    /*
     * List the buckets in your account
     */
    System.out.println("Listing buckets");
    for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
    }
    System.out.println();

    /*
     * Upload an object to your bucket - You can easily upload a file to
     * S3, or upload directly an InputStream if you know the length of
     * the data in the stream. You can also specify your own metadata
     * when uploading to S3, which allows you set a variety of options
     * like content-type and content-encoding, plus additional metadata
     * specific to your applications.
     */
    System.out.println("Uploading a new object to S3 from a file\n");
    s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

    /*
     * Download an object - When you download an object, you get all of
     * the object's metadata and a stream from which to read the contents.
     * It's important to read the contents of the stream as quickly as
     * possibly since the data is streamed directly from Amazon S3 and your
     * network connection will remain open until you read all the data or
     * close the input stream.
     *
     * GetObjectRequest also supports several other options, including
     * conditional downloading of objects based on modification times,
     * ETags, and selectively downloading a range of an object.
     */
    System.out.println("Downloading an object");
    // Close the S3Object when done: it is Closeable, and leaving its
    // content stream open leaks the HTTP connection.
    try (S3Object object = s3.getObject(new GetObjectRequest(bucketName, key))) {
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());
    }

    /*
     * List objects in your bucket by prefix - There are many options for
     * listing the objects in your bucket.  Keep in mind that buckets with
     * many objects might truncate their results when listing their objects,
     * so be sure to check if the returned object listing is truncated, and
     * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
     * additional results.
     */
    System.out.println("Listing objects");
    ObjectListing objectListing = s3
            .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println(" - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
    }
    System.out.println();

    /*
     * Delete an object - Unless versioning has been turned on for your bucket,
     * there is no way to undelete an object, so use caution when deleting objects.
     */
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);

    /*
     * Delete a bucket - A bucket must be completely empty before it can be
     * deleted, so remember to delete any objects from your buckets before
     * you try to delete them.
     */
    System.out.println("Deleting bucket " + bucketName + "\n");
    s3.deleteBucket(bucketName);
}

From source file:com.att.aro.core.cloud.aws.AwsRepository.java

License:Apache License

/**
 * Lists all object summaries in the configured bucket.
 *
 * Fixes over the original: truncated listings are now followed with
 * listNextBatchOfObjects (a single call returns at most 1000 keys), and the
 * caught exception is logged with its stack trace rather than message only.
 *
 * @return summaries of all objects, or {@code null} when the client is not
 *         initialized or the initial listing call fails (preserved behavior)
 */
public List<S3ObjectSummary> getlist() {
    List<S3ObjectSummary> objects = null;
    if (s3Client != null) {
        try {
            ObjectListing objectListing = s3Client.listObjects(bucketName);
            objects = objectListing.getObjectSummaries();
            // Follow continuation pages until the listing is complete.
            while (objectListing.isTruncated()) {
                objectListing = s3Client.listNextBatchOfObjects(objectListing);
                objects.addAll(objectListing.getObjectSummaries());
            }
            for (S3ObjectSummary objectSummary : objects) {
                LOGGER.debug(
                        " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
            }
        } catch (Exception exc) {
            // Keep the original message but preserve the cause for diagnosis.
            LOGGER.error("Error Message: " + exc.getMessage(), exc);
        }
    }
    return objects;
}

From source file:com.awscrud.aws.S3StorageManager.java

License:Open Source License

/**
 * Deletes the specified S3 object from the S3 storage service.  If a
 * storage path is passed in that has child S3 objects, it will recursively
 * delete the underlying objects./*from   w w  w .j  av  a2  s .  c  o  m*/
 * @param s3Store the s3 object to be deleted
 */
/**
 * Deletes the given S3 object from storage. When the storage path has child
 * objects beneath it, each child sharing that prefix is deleted as well; the
 * listing is paged through until it is no longer truncated.
 *
 * @param s3Store descriptor holding the bucket name and storage path to delete
 */
public void delete(AwscrudStorageObject s3Store) {

    String storagePath = s3Store.getStoragePath();
    if (storagePath == null || storagePath.equals("")) {
        // Refuse blank paths: deleting with an empty prefix would wipe the bucket.
        logger.log(Level.WARNING, "Empty storage path passed to delete method");
        return;
    }

    // Remove every object whose key begins with the storage path, one page at a time.
    ObjectListing page = s3client.listObjects(s3Store.getBucketName(), storagePath);
    for (;;) {
        for (S3ObjectSummary child : page.getObjectSummaries()) {
            s3client.deleteObject(s3Store.getBucketName(), child.getKey());
        }
        if (!page.isTruncated()) {
            break;
        }
        page = s3client.listNextBatchOfObjects(page);
    }

}

From source file:com.clicktravel.infrastructure.persistence.aws.s3.S3FileStore.java

License:Apache License

/**
 * Lists the file paths stored under the given directory whose keys start with
 * the given prefix.
 *
 * Fix over the original: truncated listings are now followed with
 * listNextBatchOfObjects (a single call returns at most 1000 keys), so the
 * full set of matching files is returned rather than only the first page.
 *
 * @param directory logical directory mapped to an S3 bucket
 * @param prefix    key prefix to filter on
 * @return file paths for every matching object
 * @throws PersistenceResourceFailureException when the S3 listing fails
 */
@Override
public List<FilePath> list(final String directory, final String prefix) {

    try {

        final List<FilePath> filePathList = new ArrayList<FilePath>();
        ObjectListing objectListing = amazonS3Client.listObjects(bucketNameForDirectory(directory), prefix);

        while (true) {
            for (final S3ObjectSummary s3ObjectSummary : objectListing.getObjectSummaries()) {
                final FilePath filePath = new FilePath(directory, s3ObjectSummary.getKey());
                filePathList.add(filePath);
            }
            if (!objectListing.isTruncated()) {
                break;
            }
            objectListing = amazonS3Client.listNextBatchOfObjects(objectListing);
        }

        return filePathList;

    } catch (final AmazonS3Exception e) {
        throw new PersistenceResourceFailureException("An error occurred obtaining a listing of directory -> "
                + directory + " with prefix -> " + prefix, e);
    }
}