Example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

List of usage examples for com.amazonaws.services.s3.model ObjectListing getObjectSummaries

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model ObjectListing getObjectSummaries.

Prototype

public List<S3ObjectSummary> getObjectSummaries() 

Source Link

Document

Gets the list of object summaries describing the objects stored in the S3 bucket.

Usage

From source file:raymond.mockftpserver.S3BucketFileSystem.java

License:Apache License

/**
 * Lists all objects in the S3 bucket as file-system entries.
 *
 * <p>Keys recognised as directories (per {@code isDirectory}) are mapped to
 * {@link DirectoryEntry} with the trailing separator stripped; all other keys
 * become {@link FileEntry}.
 *
 * @return entries for every object in the bucket (never {@code null})
 */
public List<FileSystemEntry> listFilesRoot() {
    List<FileSystemEntry> entries = new ArrayList<FileSystemEntry>();
    ObjectListing listing = s3.listObjects(bucket);
    // BUG FIX: the original read only the first page of results, silently
    // dropping entries when the bucket holds more keys than one listing
    // returns. Page through the complete listing instead.
    while (true) {
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            String summaryPath = summary.getKey();
            FileSystemEntry entry;
            if (isDirectory(summaryPath)) {
                // Directory keys end with a separator; strip it for the entry name.
                entry = new DirectoryEntry(summaryPath.substring(0, summaryPath.length() - 1));
            } else {
                entry = new FileEntry(summaryPath);
            }
            entries.add(entry);
        }
        if (!listing.isTruncated()) {
            break;
        }
        listing = s3.listNextBatchOfObjects(listing);
    }
    return entries;
}

From source file:S3Controller.DownloadingImages.java

/**
 * Lists every image in an S3 bucket, generates a pre-signed download URL for
 * each, submits each URL to the Kraken compression service, and finally emails
 * a summary of the processed images.
 *
 * @param args unused
 * @throws IOException if the Kraken compression call fails with an I/O error
 */
public static void main(String[] args) throws IOException {
    AWSCredentials credentials = null;
    String aws_access_key_id = "PUT_YOUR_aws_access_key_id_HERE";
    String aws_secret_access_key = "PUT_YOUR_aws_secret_access_key_HERE";
    try {
        credentials = new BasicAWSCredentials(aws_access_key_id, aws_secret_access_key);
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region AP_SOUTHEAST_1 = Region.getRegion(Regions.AP_SOUTHEAST_1);
    s3.setRegion(AP_SOUTHEAST_1);

    String bucketName = "PUT_YOUR_S3-BUCKET-NAME_HERE";
    String key = "PUT_YOUR_S3-BUCKET-KEY_HERE";

    try {
        // Typed lists instead of raw ArrayList (removes unchecked casts below).
        List<String> objectKeys = new ArrayList<String>();
        List<String> emailReport = new ArrayList<String>();
        Bucket bucket = new Bucket(bucketName);

        // Collect every key in the bucket.
        // BUG FIX: the original fetched the next batch and then tested
        // isTruncated() on the NEW listing before processing it, so the final
        // (non-truncated) page of keys was never processed. Process each page
        // first, then decide whether another page exists.
        ObjectListing objects = s3.listObjects(bucket.getName());
        while (true) {
            for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                objectKeys.add(objectSummary.getKey());
            }
            if (!objects.isTruncated()) {
                break;
            }
            objects = s3.listNextBatchOfObjects(objects);
        }

        KrakenIOExampleMain kraken = new KrakenIOExampleMain();
        for (String s : objectKeys) {
            System.out.println("Compressing: " + s);
            GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucket.getName(), s);
            // Generate the pre-signed URL once and reuse it (the original
            // generated two separate URLs: one printed, one used).
            URL presignedUrl = s3.generatePresignedUrl(request);
            System.out.println(presignedUrl);
            String downloadLink = presignedUrl.toString();
            System.out.println("Download Link:" + downloadLink);
            kraken.Compression(downloadLink, bucketName);
            System.out.println("Compression completed: " + s);
            emailReport.add("Processed Image:" + s);
        }
        System.out.println("Start Emailing list");
        EmailSender esender = new EmailSender();
        esender.EmailVerification(GetNotificationEmail, emailReport);
        System.out.println("Kraken compression completed");
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    } catch (ExecutionException ex) {
        Logger.getLogger(DownloadingImages.class.getName()).log(Level.SEVERE, null, ex);
    } catch (InterruptedException ex) {
        // NOTE(review): re-interrupting the thread here would be more correct,
        // but is kept as logging-only to preserve existing behavior.
        Logger.getLogger(DownloadingImages.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Sets public read permissions on content within an S3 bucket.
 *
 * <p>Web content served from an S3 bucket must have public read permissions.
 *
 *    @param bucketName the bucket to apply the permissions to.
 *    @param prefix prefix within the bucket, beneath which to apply the permissions.
 *    @param logger a CloudwatchLogs logger.
 */
public static void setPublicReadPermissionsOnBucket(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {
    // Build the listing request, scoped to the prefix when one was supplied.
    ListObjectsRequest listRequest;
    if (prefix.isPresent()) {
        logger.log("Setting public read permission on bucket: " + bucketName + " and prefix: " + prefix.get());
        listRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting public read permission on bucket: " + bucketName);
        listRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    AmazonS3 s3 = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    // Page through the bucket via the listing marker, granting PublicRead on
    // every object encountered.
    ObjectListing page = s3.listObjects(listRequest);
    while (true) {
        for (S3ObjectSummary summary : page.getObjectSummaries()) {
            logger.log("Setting permissions for S3 object: " + summary.getKey());
            s3.setObjectAcl(bucketName, summary.getKey(), CannedAccessControlList.PublicRead);
        }
        if (!page.isTruncated()) {
            break;
        }
        listRequest.setMarker(page.getNextMarker());
        page = s3.listObjects(listRequest);
    }
    logger.log("Finished setting public read permissions");
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>Adds gzip content-encoding metadata to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder) will have the
 *    metadata added. When the bucket serves objects it will then
 *    add a suitable Content-Encoding header.
 *
 *    @param bucketName the bucket to apply the metadata to.
 *    @param prefix prefix within the bucket, beneath which to apply the metadata.
 *    @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {

    // S3 object metadata cannot be edited in place: each object is copied
    // onto itself with the extra header attached.
    ListObjectsRequest listRequest;
    if (prefix.isPresent()) {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName + " and prefix: "
                + prefix.get());
        listRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
        listRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    AmazonS3 s3 = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    ObjectListing page = s3.listObjects(listRequest);
    while (true) {
        for (S3ObjectSummary summary : page.getObjectSummaries()) {
            String key = summary.getKey();
            logger.log("Setting metadata for S3 object: " + key);
            // The self-copy replaces ALL metadata, so fetch the existing set
            // first and add the new header to it.
            ObjectMetadata metadata = s3.getObjectMetadata(bucketName, key);
            metadata.setContentEncoding("gzip");
            s3.copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(metadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead));
            logger.log("Set metadata for S3 object: " + key);
        }
        if (!page.isTruncated()) {
            break;
        }
        listRequest.setMarker(page.getNextMarker());
        page = s3.listObjects(listRequest);
    }
    logger.log("Set gzip content encoding metadata on bucket");
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Adds cache-control header to S3 objects.
 *
 * <p>Adds cache-control header to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder), and with the
 *    specified extension will have the header added. When the
 *    bucket serves objects it will then add a suitable
 *    Cache-Control header.
 *
 *    @param headerValue value of the cache-control header
 *    @param bucketName the bucket to apply the header to.
 *    @param prefix prefix within the bucket, beneath which to apply the header.
 *    @param extension file extension to apply header to
 *    @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName, Optional<String> prefix,
        String extension, LambdaLogger logger) {

    // S3 object metadata cannot be edited in place: each matching object is
    // copied onto itself with the Cache-Control value attached.
    ListObjectsRequest listRequest;
    if (prefix.isPresent()) {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and prefix: " + prefix.get() + " and extension: " + extension);
        listRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and extension: " + extension);
        listRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    AmazonS3 s3 = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    ObjectListing page = s3.listObjects(listRequest);
    while (true) {
        for (S3ObjectSummary summary : page.getObjectSummaries()) {
            String key = summary.getKey();
            // Only objects with the requested extension get the header.
            if (key.endsWith(extension)) {
                logger.log("Setting metadata for S3 object: " + key);
                // The self-copy replaces ALL metadata, so fetch the existing
                // set first and add the new value to it.
                ObjectMetadata metadata = s3.getObjectMetadata(bucketName, key);
                metadata.setCacheControl(headerValue);
                s3.copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
                        .withNewObjectMetadata(metadata)
                        .withCannedAccessControlList(CannedAccessControlList.PublicRead));
                logger.log("Set metadata for S3 object: " + key);
            }
        }
        if (!page.isTruncated()) {
            break;
        }
        listRequest.setMarker(page.getNextMarker());
        page = s3.listObjects(listRequest);
    }
    logger.log("Set cache-control metadata on bucket");
}

From source file:surrey.repository.impl.S3Repository.java

License:Open Source License

/**
 * Creates a repository file under {@code prefix} whose key is unique within
 * the S3 bucket, then immediately writes a single zero byte to it so the name
 * is claimed.
 *
 * <p>Uniqueness is established by listing existing keys that share the
 * candidate key as a prefix and inserting an incrementing counter before the
 * name until no exact-key collision remains. NOTE(review): there is still a
 * race window between the final listing and the placeholder write — two
 * concurrent callers could claim the same key; confirm whether that matters
 * for this repository's usage.
 *
 * @param prefix key prefix (folder) for the new file; backslashes are
 *               normalized to forward slashes and {@code null} maps to "empty"
 * @param name   base file name to make unique
 * @return a handle to the newly created zero-byte file
 * @throws IOException if writing the placeholder byte fails
 * @see surrey.repository.Repository#createUniqueFile(java.lang.String,
 *      java.lang.String)
 */
@Override
public RepositoryFile createUniqueFile(String prefix, String name) throws IOException {
    initialise();
    // Normalize Windows-style separators; fall back to "empty" for no prefix.
    String cleanPrefix = prefix != null ? prefix.replaceAll("\\\\", "/") : "empty";
    if (cleanPrefix.startsWith("/")) {
        cleanPrefix = cleanPrefix.substring(1);
    }
    // create name, get list of files with that name
    String finalUri = cleanPrefix + getEnd(cleanPrefix);

    ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
    String result = finalUri + name;
    listObjectsRequest.withBucketName(baseURL).withPrefix(result);
    listObjectsRequest.setMaxKeys(20);
    ObjectListing listing = s3.listObjects(listObjectsRequest);
    int counter = 0;
    // Outer loop: runs while some existing key shares the candidate as prefix;
    // each iteration checks the current candidate for an exact-key collision.
    while (listing.getObjectSummaries() != null && listing.getObjectSummaries().size() > 0) {
        boolean found = false;
        boolean firstCall = true;
        // Inner loop: page through all listing results for the current prefix.
        do {
            if (!firstCall) {
                // Fetch the next page only after the first page was examined.
                listing = s3.listNextBatchOfObjects(listing);
            }
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                if (summary.getKey().equals(result)) {
                    // Exact collision: build the next candidate by inserting
                    // the counter between the prefix and the name.
                    result = finalUri + counter++ + name;
                    found = true;
                    break;
                }
            }
            if (found) {
                break;
            }
            firstCall = false;
        } while (listing.isTruncated());
        if (!found) {
            // Keys share the prefix but none equals the candidate — it is free.
            break;
        }
        // Re-list with the new candidate so it is collision-checked as well.
        listObjectsRequest.setPrefix(result);
        listing = s3.listObjects(listObjectsRequest);
    }
    // result is now what should be used so create a zero byte file to lock
    // that name to us

    S3RepositoryFile repoFile = new S3RepositoryFile(baseURL, result, transferManager);
    ByteArrayInputStream source = new ByteArrayInputStream(new byte[] { (byte) 0 });
    repoFile.write(source, 1);
    return repoFile;
}

From source file:surrey.repository.impl.S3RepositoryFile.java

License:Open Source License

@Override
public boolean exists() {

    // Probe the bucket for at most one object whose key starts with our key.
    // NOTE(review): this is a prefix match, not an exact-key check — a sibling
    // key like "key.bak" would also make this return true; confirm intended.
    ListObjectsRequest probe = new ListObjectsRequest();
    probe.withBucketName(bucketName).withPrefix(key);
    probe.setMaxKeys(1);
    ObjectListing listing = transferManager.getAmazonS3Client().listObjects(probe);

    List<S3ObjectSummary> summaries = listing.getObjectSummaries();
    return summaries != null && !summaries.isEmpty();
}

From source file:sys2202.aws.s3.Sample.java

License:Open Source License

/**
 * End-to-end S3 walkthrough: lists buckets, creates a scratch bucket, uploads
 * a generated file, lists and downloads it, then deletes the object and bucket.
 */
public static void main(String[] args) throws Exception {

    // S3 client bound to the us-east-1 region.
    AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(Regions.US_EAST_1).build();

    // Show the buckets that already exist in the account.
    System.out.println("Listing buckets in our S3 account...\n");
    for (Bucket existing : s3.listBuckets()) {
        System.out.println("\t" + existing.getName());
    }

    System.out.println();

    // Bucket names are globally unique — substitute your own ID for msg8u.
    String bucketName = "msg8u-sys2202-bucket";
    System.out.println("Creating bucket " + bucketName + "...\n");
    s3.createBucket(bucketName);

    // The new bucket should now appear in the account listing.
    System.out.println("Listing buckets in our S3 account...\n");
    for (Bucket existing : s3.listBuckets()) {
        System.out.println("\t" + existing.getName());
    }

    System.out.println();

    // Generate a sample file locally and upload it.
    System.out.println("Uploading a new object to S3 from a local file...\n");
    File sampleFile = createSampleFile();
    String objectKey = "my-test-file";
    s3.putObject(new PutObjectRequest(bucketName, objectKey, sampleFile));

    // The uploaded object should show up when listing the bucket's contents.
    System.out.println("Listing objects in our new bucket...\n");
    ObjectListing contents = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
    for (S3ObjectSummary summary : contents.getObjectSummaries()) {
        System.out.println("\t" + summary.getKey() + "  " + "(size = " + summary.getSize() + ")");
    }

    System.out.println();

    // Fetch the object back and print its contents.
    System.out.println("Downloading the sample file...\n");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, objectKey));
    displayTextInputStream(object.getObjectContent());

    // Clean up: remove the object first, then the (now empty) bucket.
    System.out.println("Deleting the sample file...\n");
    s3.deleteObject(bucketName, objectKey);

    System.out.println("Deleting the bucket...\n");
    s3.deleteBucket(bucketName);

    System.out.println("All done!");
}

From source file:thinkbig.util.Util.java

License:Open Source License

/**
 * Returns (and prints) the keys of all objects in an S3 bucket as a list of
 * Strings. When {@code excludeBlanks} is true, zero-byte objects are skipped.
 *
 * @param s3            the S3 client; {@code null} yields a {@code null} result
 * @param bucketName    the bucket to list; a non-existent bucket yields {@code null}
 * @param excludeBlanks if true, omit objects whose size is 0
 * @return the object keys, or {@code null} if the client or bucket is invalid
 */
public static List<String> getAllObjectKeysInBucket(AmazonS3 s3, String bucketName, boolean excludeBlanks) {

    // check if the client is valid
    if (s3 == null) {
        System.out.println("Not a valid S3 Client");
        return null;
    }
    // check if the bucket exists
    if (!s3.doesBucketExist(bucketName)) {
        System.out.println("The bucket '" + bucketName + "' does not exist!");
        return null;
    }
    System.out.println("Listing objects in bucket '" + bucketName + "' ");
    ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
    if (objectListing == null) {
        return null;
    }
    List<String> objectKeys = new ArrayList<String>();
    // BUG FIX: the original read only the first listing page, so "all objects"
    // was silently capped at one batch. Page through the complete listing.
    while (true) {
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            if (!excludeBlanks || objectSummary.getSize() > 0) {
                objectKeys.add(key);
                System.out.println(" - " + key + "  " + "(size = " + objectSummary.getSize() + ")");
            }
        }
        if (!objectListing.isTruncated()) {
            break;
        }
        objectListing = s3.listNextBatchOfObjects(objectListing);
    }
    return objectKeys;
}

From source file:updaters.S3FeedStorer.java

License:Open Source License

/**
 * Get all the feeds in S3.
 *
 * <p>Pages through the complete bucket listing and collects every object key.
 *
 * @return the keys of all objects in the bucket
 */
public List<String> getFeedIds() {
    List<String> feedIds = new ArrayList<String>();

    // Single loop handles both the first page and any continuation pages
    // (the original duplicated the summary-collection code for each case).
    ObjectListing listing = this.s3Client.listObjects(this.bucket);
    while (true) {
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            feedIds.add(summary.getKey());
        }
        if (!listing.isTruncated()) {
            break;
        }
        listing = this.s3Client.listNextBatchOfObjects(listing);
    }

    return feedIds;
}