Example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

List of usage examples for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries.

Prototype

public List<S3ObjectSummary> getObjectSummaries() 

Source Link

Document

Gets the list of object summaries describing the objects stored in the S3 bucket.

Usage

From source file:org.springframework.integration.aws.s3.core.DefaultAmazonS3Operations.java

License:Apache License

/**
 * Lists one page of objects from the given S3 bucket via the AWS SDK and adapts
 * the SDK summaries to the framework's own {@code S3ObjectSummary} contract.
 *
 * @param bucketName the bucket whose objects are to be listed
 * @param nextMarker pagination marker remembering where the previous listing
 *                   stopped; may be {@code null} for the first page
 * @param pageSize   maximum number of records to fetch in one listing call;
 *                   values &lt;= 0 leave the SDK's default page size in place
 * @param prefix     key prefix for the listing; this can act as the "folder"
 *                   whose contents are listed
 */
@Override
protected PaginatedObjectsView doListObjects(String bucketName, String nextMarker, int pageSize,
        String prefix) {

    ListObjectsRequest request = new ListObjectsRequest().withBucketName(bucketName)
            .withPrefix(prefix).withMarker(nextMarker);
    if (pageSize > 0) {
        request.withMaxKeys(pageSize);
    }

    ObjectListing listing = client.listObjects(request);
    List<com.amazonaws.services.s3.model.S3ObjectSummary> sdkSummaries = listing.getObjectSummaries();
    if (sdkSummaries == null || sdkSummaries.isEmpty()) {
        // No content on this page; callers treat a null view as "nothing to show".
        return null;
    }

    List<S3ObjectSummary> adapted = new ArrayList<S3ObjectSummary>();
    for (final com.amazonaws.services.s3.model.S3ObjectSummary sdkSummary : sdkSummaries) {
        // Wrap each SDK summary in an anonymous adapter delegating every accessor.
        adapted.add(new S3ObjectSummary() {

            public long getSize() {
                return sdkSummary.getSize();
            }

            public Date getLastModified() {
                return sdkSummary.getLastModified();
            }

            public String getKey() {
                return sdkSummary.getKey();
            }

            public String getETag() {
                return sdkSummary.getETag();
            }

            public String getBucketName() {
                return sdkSummary.getBucketName();
            }
        });
    }
    return new PagninatedObjectsViewImpl(adapted, listing.getNextMarker());
}

From source file:org.springframework.integration.aws.support.S3Session.java

License:Apache License

/**
 * Lists all object summaries under the given path ({@code "bucket[/prefix]"}),
 * paging through truncated listings until every key has been retrieved.
 * S3 returns at most 1,000 keys per response, so the truncation flag must
 * always be checked.
 */
@Override
public S3ObjectSummary[] list(String path) throws IOException {
    Assert.hasText(path, "'path' must not be empty String.");
    String[] bucketPrefix = path.split("/");
    Assert.state(bucketPrefix.length > 0 && bucketPrefix[0].length() >= 3,
            "S3 bucket name must be at least 3 characters long.");

    String bucket = resolveBucket(bucketPrefix[0]);

    ListObjectsRequest request = new ListObjectsRequest().withBucketName(bucket);
    if (bucketPrefix.length > 1) {
        request.setPrefix(bucketPrefix[1]);
    }

    List<S3ObjectSummary> summaries = new ArrayList<>();
    ObjectListing listing = this.amazonS3.listObjects(request);
    while (true) {
        summaries.addAll(listing.getObjectSummaries());
        if (!listing.isTruncated()) {
            break;
        }
        // Continue from where the previous (truncated) response stopped.
        request.setMarker(listing.getNextMarker());
        listing = this.amazonS3.listObjects(request);
    }

    return summaries.toArray(new S3ObjectSummary[0]);
}

From source file:org.springframework.integration.aws.support.S3Session.java

License:Apache License

/**
 * Lists the keys of all objects under the given path ({@code "bucket[/prefix]"}),
 * paging through truncated listings until every key has been retrieved.
 * S3 returns at most 1,000 keys per response, so the truncation flag must
 * always be checked.
 */
@Override
public String[] listNames(String path) throws IOException {
    String[] bucketPrefix = path.split("/");
    Assert.state(bucketPrefix.length > 0 && bucketPrefix[0].length() >= 3,
            "S3 bucket name must be at least 3 characters long.");

    ListObjectsRequest request = new ListObjectsRequest()
            .withBucketName(resolveBucket(bucketPrefix[0]));
    if (bucketPrefix.length > 1) {
        request.setPrefix(bucketPrefix[1]);
    }

    List<String> keys = new ArrayList<>();
    ObjectListing listing;
    do {
        listing = this.amazonS3.listObjects(request);
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            keys.add(summary.getKey());
        }
        // Continue from where the previous (possibly truncated) response stopped.
        request.setMarker(listing.getNextMarker());
    } while (listing.isTruncated());

    return keys.toArray(new String[0]);
}

From source file:org.weakref.s3fs.util.AmazonS3ClientMock.java

License:Apache License

/**
 * Mock implementation of {@code AmazonS3#listObjects(ListObjectsRequest)}:
 * builds an {@link ObjectListing} from the in-memory {@code objects} map,
 * honoring the request's prefix and max-keys settings.
 *
 * <p>Fixes over the previous version: a {@code null} prefix now matches every
 * key (as real S3 does for requests without a prefix), and the scan stops as
 * soon as the page is full instead of iterating the remaining elements.
 *
 * @param listObjectsRequest the listing request (bucket, optional prefix, optional max keys)
 * @return a listing containing up to max-keys matching object summaries
 */
@Override
public ObjectListing listObjects(ListObjectsRequest listObjectsRequest)
        throws AmazonClientException, AmazonServiceException {
    ObjectListing objectListing = new ObjectListing();
    Integer capacity = listObjectsRequest.getMaxKeys();
    if (capacity == null) {
        // No explicit page size requested: return everything that matches.
        capacity = Integer.MAX_VALUE;
    }

    Bucket bucket = find(listObjectsRequest.getBucketName());
    String prefix = listObjectsRequest.getPrefix();
    for (S3Element elem : objects.get(bucket)) {
        if (capacity <= 0) {
            break; // page is full; no need to scan the remaining elements
        }
        // TODO: add delimiter and marker support
        String key = elem.getS3Object().getKey();
        // Real S3 treats an absent prefix as "match every key".
        if (prefix == null || key.startsWith(prefix)) {
            S3ObjectSummary s3ObjectSummary = new S3ObjectSummary();
            s3ObjectSummary.setBucketName(elem.getS3Object().getBucketName());
            s3ObjectSummary.setKey(key);
            s3ObjectSummary.setLastModified(elem.getS3Object().getObjectMetadata().getLastModified());
            s3ObjectSummary.setOwner(owner);
            s3ObjectSummary.setETag(elem.getS3Object().getObjectMetadata().getETag());
            s3ObjectSummary.setSize(elem.getS3Object().getObjectMetadata().getContentLength());
            objectListing.getObjectSummaries().add(s3ObjectSummary);
            capacity--;
        }
    }

    return objectListing;
}

From source file:org.xmlsh.aws.gradle.s3.DeleteBucketTask.java

License:BSD License

/**
 * Deletes the configured S3 bucket, optionally draining all of its objects
 * first when {@code deleteObjects} is set. When {@code ifExists} is set and
 * the bucket is absent, the task is a no-op.
 */
@TaskAction
public void deleteBucket() {
    // Read via getters so Gradle's conventionMappings feature is honored.
    String bucketName = getBucketName();
    boolean ifExists = isIfExists();

    if (bucketName == null) {
        throw new GradleException("bucketName is not specified");
    }

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    if (ifExists && !exists(s3)) {
        getLogger().debug("S3 bucket {} does not exist", bucketName);
        return;
    }

    if (deleteObjects) {
        getLogger().info("Delete all S3 objects in bucket [{}]", bucketName);
        ObjectListing listing = s3.listObjects(bucketName);
        // Keep draining batches until a listing comes back empty.
        while (!listing.getObjectSummaries().isEmpty()) {
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                getLogger().info(" => delete s3://{}/{}", bucketName, summary.getKey());
                s3.deleteObject(bucketName, summary.getKey());
            }
            listing = s3.listNextBatchOfObjects(listing);
        }
    }
    s3.deleteBucket(bucketName);
    getLogger().info("S3 bucket {} is deleted", bucketName);
}

From source file:org.yardstickframework.spark.S3MasterUrlProvider.java

License:Apache License

/**
 * Resolves the URL to the Spark master node by scanning the S3 bucket for the
 * first key carrying {@code SparkMaster.SPARK_URL_PREFIX}.
 *
 * @return URL to the master node, or {@code null} if no matching key was found.
 * @throws RuntimeException if the bucket listing fails.
 */
public String getMasterUrl() {
    initAwsClient();

    String masterUrl = null;

    try {
        ObjectListing list = s3.listObjects(bucketName, SparkMaster.SPARK_URL_PREFIX);

        while (masterUrl == null) {
            for (S3ObjectSummary s3Entry : list.getObjectSummaries()) {
                if (s3Entry.getKey() != null && s3Entry.getKey().contains(SparkMaster.SPARK_URL_PREFIX)) {
                    masterUrl = s3KeyToUrl(s3Entry.getKey());

                    break;
                }
            }

            // BUGFIX: stop paging once a URL has been found. The previous
            // version kept fetching truncated batches after a match and could
            // overwrite the first hit with a key from a later batch.
            if (masterUrl == null && list.isTruncated()) {
                list = s3.listNextBatchOfObjects(list);
            }
            else {
                break;
            }
        }
    } catch (AmazonClientException e) {
        throw new RuntimeException("Failed to list objects in the bucket: " + bucketName, e);
    }

    return masterUrl;
}

From source file:org.zalando.stups.fullstop.controller.S3Controller.java

License:Apache License

/**
 * Downloads every object under the given bucket/prefix into the local
 * fullstop logging directory.
 *
 * @param bucket   the S3 bucket to read from
 * @param location the key prefix to list under
 * @param page     the maximum number of keys to fetch per listing call
 */
@RequestMapping(method = RequestMethod.GET, value = "/download")
public void downloadFiles(@RequestParam(value = "bucket") final String bucket,
        @RequestParam(value = "location") final String location, @RequestParam(value = "page") final int page) {

    try {
        log.info("Creating fullstop directory here: {}", fullstopLoggingDir);
        // Best effort: mkdirs() returning false usually means the directory
        // already exists; downloads below will fail loudly if it is unusable.
        new File(fullstopLoggingDir).mkdirs();
    } catch (SecurityException e) {
        // Deliberately non-fatal, but no longer silent.
        log.warn("Could not create directory {}", fullstopLoggingDir, e);
    }

    AmazonS3Client amazonS3Client = new AmazonS3Client();
    amazonS3Client.setRegion(Region.getRegion(Regions
            .fromName((String) cloudTrailProcessingLibraryProperties.getAsProperties().get(S3_REGION_KEY))));

    ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket) //
            .withPrefix(location) //
            .withMaxKeys(page);

    ObjectListing objectListing = amazonS3Client.listObjects(listObjectsRequest);

    // Accumulate into our own list instead of mutating the SDK listing's
    // internal collection, and keep paging while the listing is truncated.
    final List<S3ObjectSummary> s3ObjectSummaries = new ArrayList<>(objectListing.getObjectSummaries());

    while (objectListing.isTruncated()) {

        objectListing = amazonS3Client.listNextBatchOfObjects(objectListing);
        s3ObjectSummaries.addAll(objectListing.getObjectSummaries());

    }

    for (S3ObjectSummary s3ObjectSummary : s3ObjectSummaries) {
        String bucketName = s3ObjectSummary.getBucketName();
        String key = s3ObjectSummary.getKey();

        // BUGFIX: close the S3Object (and with it the underlying HTTP
        // connection / content stream) — the previous version leaked one
        // connection per downloaded object.
        try (S3Object object = amazonS3Client.getObject(new GetObjectRequest(bucketName, key))) {
            File file = new File(fullstopLoggingDir,
                    object.getBucketName() + object.getObjectMetadata().getETag() + JSON_GZ);

            copyInputStreamToFile(object.getObjectContent(), file);
            log.info("File saved here: {}", file.getAbsolutePath());
        } catch (IOException e) {
            log.warn("Could not download s3://{}/{}", bucketName, key, e);
        }

    }
}

From source file:org.zalando.stups.fullstop.plugin.SaveSecurityGroupsPlugin.java

License:Apache License

/**
 * Collects the common prefixes ("sub-folders") directly under the given
 * prefix, printing every object encountered along the way. Pages through
 * truncated listings so buckets with more than 1,000 keys are fully covered.
 *
 * @param bucketName the bucket to list
 * @param prefix     the key prefix to list under
 * @return the common prefixes found (empty on AWS errors, which are logged to stdout)
 */
private List<String> listS3Objects(String bucketName, String prefix) {
    final List<String> commonPrefixes = Lists.newArrayList();

    AmazonS3Client s3client = new AmazonS3Client();

    try {
        System.out.println("Listing objects");

        ListObjectsRequest request = new ListObjectsRequest()
                .withDelimiter("/")
                .withBucketName(bucketName)
                .withPrefix(prefix);

        ObjectListing listing;
        do {
            listing = s3client.listObjects(request);
            commonPrefixes.addAll(listing.getCommonPrefixes());
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                System.out.println(
                        " - " + summary.getKey() + "  " + "(size = " + summary.getSize() + ")");
            }
            // Resume from where this (possibly truncated) response stopped.
            request.setMarker(listing.getNextMarker());
        } while (listing.isTruncated());

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, " + "which means your request made it "
                + "to Amazon S3, but was rejected with an error response " + "for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, " + "which means the client encountered "
                + "an internal error while trying to communicate" + " with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    return commonPrefixes;
}

From source file:pagerank.S3Wrapper.java

License:Open Source License

/**
 * Returns the object keys on the {@code num}-th listing page of the bucket
 * (page 0 is the first batch returned by S3).
 *
 * @param num zero-based page index
 * @return the keys on that page, or {@code null} when {@code num} is negative
 *         (preserved for backward compatibility with existing callers)
 */
public Set<String> keySet(int num) {
    if (num < 0)
        return null;
    ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
    for (int i = 0; i < num; i++) {
        // BUGFIX: the previous version advanced past the final page, which makes
        // the SDK restart from the first page (null marker) and return the wrong
        // keys. Stop on the last available page instead.
        if (!objectListing.isTruncated()) {
            break;
        }
        objectListing = s3.listNextBatchOfObjects(objectListing);
    }
    Set<String> keySet = new HashSet<String>();
    for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
        keySet.add(summary.getKey());
    }
    return keySet;
}

From source file:pagerank.S3Wrapper.java

License:Open Source License

public static void main(String[] args) throws IOException {

    /*/*from  w  w  w  .j  a v  a 2  s.co  m*/
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (/home/yupenglu/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/yupenglu/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    //        Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    //        s3.setRegion(usWest2);

    //        String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String bucketName = "pages4.27";
    String key = "NewKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        //            System.out.println("Creating bucket " + bucketName + "\n");
        //            s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        //            System.out.println("Uploading a new object to S3 from a file\n");
        //            s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        //            System.out.println("Downloading an object");
        //            S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        //            System.out.println("Content-Type: "  + object.getObjectMetadata().getContentType());
        //            displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        //            ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
        //                    .withBucketName(bucketName)
        //                    .withPrefix("My"));
        ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + URLDecoder.decode(objectSummary.getKey(), "UTF-8") + "  " + "(size = "
                    + objectSummary.getSize() + ")");
        }
        S3Object testObj = s3.getObject(bucketName,
                URLEncoder.encode("http://finance.yahoo.com/investing-news/", "UTF-8"));
        S3ObjectInputStream inputStream = testObj.getObjectContent();

        //            System.out.println(streamToString(inputStream));
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //            System.out.println("Deleting bucket " + bucketName + "\n");
        //            s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}