Example usage for com.amazonaws.services.s3.model S3ObjectSummary getKey


Introduction

On this page you can find example usage for com.amazonaws.services.s3.model.S3ObjectSummary.getKey().

Prototype

public String getKey() 

Document

Gets the key under which this object is stored in Amazon S3.
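
A minimal sketch of the method in use (assuming the AWS SDK for Java 1.x, a client configured from the environment, and a placeholder bucket name):

AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient(); // credentials and region come from the environment
ObjectListing listing = s3.listObjects("example-bucket"); // hypothetical bucket name
for (S3ObjectSummary summary : listing.getObjectSummaries()) {
    System.out.println(summary.getKey()); // the key under which the object is stored
}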

Usage

From source file:pagerank.S3Wrapper.java

License:Open Source License

public Set<String> keySet() {
    Set<String> keySet = new HashSet<String>();
    for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
        keySet.add(summary.getKey());
    }
    // Record whether more pages remain, then advance the stored listing,
    // so repeated calls walk the bucket one page at a time.
    isTruncated = objectListing.isTruncated();
    objectListing = s3.listNextBatchOfObjects(objectListing);
    return keySet;
}

From source file:pagerank.S3Wrapper.java

License:Open Source License

public Set<String> keySet(int num) {
    if (num < 0)
        return null;
    ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
    int count = num;
    // Advance the listing num batches before collecting keys from that page.
    while (count != 0) {
        objectListing = s3.listNextBatchOfObjects(objectListing);
        count--;
    }
    Set<String> keySet = new HashSet<String>();
    for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
        keySet.add(summary.getKey());
    }
    return keySet;
}
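
Both methods above advance the listing one batch at a time across calls. For comparison, a truncation-safe loop that collects every key in a single pass might look like this (a sketch, assuming the same s3 client and bucketName fields as the examples above):

Set<String> allKeys = new HashSet<String>();
ObjectListing listing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
while (true) {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        allKeys.add(summary.getKey());
    }
    if (!listing.isTruncated()) {
        break; // no further batches
    }
    listing = s3.listNextBatchOfObjects(listing);
}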

From source file:pagerank.S3Wrapper.java

License:Open Source License

public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (/home/yupenglu/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("default").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/home/yupenglu/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    //        Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    //        s3.setRegion(usWest2);

    //        String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String bucketName = "pages4.27";
    String key = "NewKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        //            System.out.println("Creating bucket " + bucketName + "\n");
        //            s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        //            System.out.println("Uploading a new object to S3 from a file\n");
        //            s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        //            System.out.println("Downloading an object");
        //            S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        //            System.out.println("Content-Type: "  + object.getObjectMetadata().getContentType());
        //            displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        //            ObjectListing objectListing = s3.listObjects(new ListObjectsRequest()
        //                    .withBucketName(bucketName)
        //                    .withPrefix("My"));
        ObjectListing objectListing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + URLDecoder.decode(objectSummary.getKey(), "UTF-8") + "  " + "(size = "
                    + objectSummary.getSize() + ")");
        }
        S3Object testObj = s3.getObject(bucketName,
                URLEncoder.encode("http://finance.yahoo.com/investing-news/", "UTF-8"));
        S3ObjectInputStream inputStream = testObj.getObjectContent();

        //            System.out.println(streamToString(inputStream));
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //            System.out.println("Deleting bucket " + bucketName + "\n");
        //            s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:raymond.mockftpserver.S3BucketFileSystem.java

License:Apache License

public List<FileSystemEntry> listFilesRoot() {
    List<FileSystemEntry> retorno = new ArrayList<FileSystemEntry>();
    // if (isDirectory(path)) {
    // ObjectListing listing = isRoot(path) ? s3.listObjects(bucket)
    // : s3.listObjects(bucket, path);
    ObjectListing listing = s3.listObjects(bucket);
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        String summaryPath = summary.getKey();
        FileSystemEntry entry;
        if (isDirectory(summaryPath)) {
            // Directory keys end with a delimiter; strip it for the entry name.
            entry = new DirectoryEntry(summaryPath.substring(0, summaryPath.length() - 1));
        } else {
            entry = new FileEntry(summaryPath);
        }
        retorno.add(entry);
    }
    return retorno;
}

From source file:S3Controller.DownloadingImages.java

public static void main(String[] args) throws IOException {
    AWSCredentials credentials = null;
    String aws_access_key_id = "PUT_YOUR_aws_access_key_id_HERE";
    String aws_secret_access_key = "PUT_YOUR_aws_secret_access_key_HERE";
    try {
        credentials = new BasicAWSCredentials(aws_access_key_id, aws_secret_access_key);//.getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region AP_SOUTHEAST_1 = Region.getRegion(Regions.AP_SOUTHEAST_1);
    s3.setRegion(AP_SOUTHEAST_1);

    String bucketName = "PUT_YOUR_S3-BUCKET-NAME_HERE";
    String key = "PUT_YOUR_S3-BUCKET-KEY_HERE";

    try {

        ArrayList<String> arr = new ArrayList<>();
        ArrayList<String> EmailArray = new ArrayList<>();
        Bucket bucket = new Bucket(bucketName);
        ObjectListing objects = s3.listObjects(bucket.getName());
        do {
            for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                //                System.out.println(objectSummary.getKey() + "\t" +
                //                        objectSummary.getSize() + "\t" +
                //                        StringUtils.fromDate(objectSummary.getLastModified()));
                arr.add(objectSummary.getKey());
            }
            objects = s3.listNextBatchOfObjects(objects);
        } while (objects.isTruncated());

        KrakenIOExampleMain kraken = new KrakenIOExampleMain();
        for (int i = 0; i < arr.size(); i++) {
            System.out.println("Compressing: " + arr.get(i));
            String s = (String) arr.get(i);
            GeneratePresignedUrlRequest request = new GeneratePresignedUrlRequest(bucket.getName(), s);
            System.out.println(s3.generatePresignedUrl(request));
            URL Glink = s3.generatePresignedUrl(request);
            String Dlink = Glink.toString();
            System.out.println("Download Link:" + Dlink);
            kraken.Compression(Dlink, bucketName);
            System.out.println("Compression completed: " + arr.get(i));
            EmailArray.add("Processed Image:" + arr.get(i));
        }
        System.out.println("Start Emailing list");
        EmailSender esender = new EmailSender();
        esender.EmailVerification(GetNotificationEmail, EmailArray);
        System.out.println("Kraken compression completed");
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    } catch (ExecutionException ex) {
        Logger.getLogger(DownloadingImages.class.getName()).log(Level.SEVERE, null, ex);
    } catch (InterruptedException ex) {
        Logger.getLogger(DownloadingImages.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Sets public read permissions on content within an S3 bucket.
 *
 * <p>Web content served from an S3 bucket must have public read permissions.
 * 
 *    @param bucketName the bucket to apply the permissions to.
 *    @param prefix prefix within the bucket, beneath which to apply the permissions.
 *    @param logger a CloudwatchLogs logger.
 */
public static void setPublicReadPermissionsOnBucket(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {
    // Ensure newly uploaded content has public read permission
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting public read permission on bucket: " + bucketName + " and prefix: " + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting public read permission on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            logger.log("Setting permissions for S3 object: " + objectSummary.getKey());
            client.setObjectAcl(bucketName, objectSummary.getKey(), CannedAccessControlList.PublicRead);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Finished setting public read permissions");
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>Adds gzip content-encoding metadata to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder) will have the
 *    metadata added. When the bucket serves objects it will then
 *    add a suitable Content-Encoding header.
 *
 *    @param bucketName the bucket to apply the metadata to.
 *    @param prefix prefix within the bucket, beneath which to apply the metadata.
 *    @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
        LambdaLogger logger) {

    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName + " and prefix: "
                + prefix.get());
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setContentEncoding("gzip");
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(objectMetadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set gzip content encoding metadata on bucket");
}

From source file:squash.deployment.lambdas.utils.TransferUtils.java

License:Apache License

/**
 * Adds cache-control header to S3 objects.
 *
 * <p>Adds cache-control header to S3 objects. All objects
 *    beneath the specified prefix (i.e. folder), and with the
 *    specified extension will have the header added. When the
 *    bucket serves objects it will then add a suitable
 *    Cache-Control header.
 *
 *    @param headerValue value of the cache-control header
 *    @param bucketName the bucket to apply the header to.
 *    @param prefix prefix within the bucket, beneath which to apply the header.
 *    @param extension file extension to apply header to
 *    @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName, Optional<String> prefix,
        String extension, LambdaLogger logger) {

    // To add new metadata, we must copy each object to itself.
    ListObjectsRequest listObjectsRequest;
    if (prefix.isPresent()) {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and prefix: " + prefix.get() + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix.get());
    } else {
        logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
                + " and extension: " + extension);
        listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
    }

    ObjectListing objectListing;
    AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
    do {
        objectListing = client.listObjects(listObjectsRequest);
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            String key = objectSummary.getKey();
            if (!key.endsWith(extension)) {
                continue;
            }
            logger.log("Setting metadata for S3 object: " + key);
            // We must specify ALL metadata - not just the one we're adding.
            ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
            objectMetadata.setCacheControl(headerValue);
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName, key)
                    .withNewObjectMetadata(objectMetadata)
                    .withCannedAccessControlList(CannedAccessControlList.PublicRead);
            client.copyObject(copyObjectRequest);
            logger.log("Set metadata for S3 object: " + key);
        }
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());
    logger.log("Set cache-control metadata on bucket");
}
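
All three utilities above share the same marker-based listing loop around getKey(). A hypothetical call site (the bucket name, prefix, header value, and extension are placeholders; logger is the LambdaLogger supplied by the Lambda runtime context):

// Hypothetical invocation; the names are placeholders, not values from the source.
TransferUtils.setPublicReadPermissionsOnBucket("example-site-bucket", Optional.of("app/"), logger);
TransferUtils.addGzipContentEncodingMetadata("example-site-bucket", Optional.of("app/"), logger);
TransferUtils.addCacheControlHeader("max-age=86400", "example-site-bucket", Optional.of("app/"), ".js", logger);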

From source file:surrey.repository.impl.S3Repository.java

License:Open Source License

/**
 * @throws IOException
 * @see surrey.repository.Repository#createUniqueFile(java.lang.String,
 *      java.lang.String)
 */
@Override
public RepositoryFile createUniqueFile(String prefix, String name) throws IOException {
    initialise();
    String cleanPrefix = prefix != null ? prefix.replaceAll("\\\\", "/") : "empty";
    if (cleanPrefix.startsWith("/")) {
        cleanPrefix = cleanPrefix.substring(1);
    }
    // create name, get list of files with that name
    String finalUri = cleanPrefix + getEnd(cleanPrefix);

    ListObjectsRequest listObjectsRequest = new ListObjectsRequest();
    String result = finalUri + name;
    listObjectsRequest.withBucketName(baseURL).withPrefix(result);
    listObjectsRequest.setMaxKeys(20);
    ObjectListing listing = s3.listObjects(listObjectsRequest);
    int counter = 0;
    // Probe for an unused key: while objects exist under the candidate name,
    // scan each batch for an exact match and, on a hit, retry with a counter
    // inserted into the name.
    while (listing.getObjectSummaries() != null && listing.getObjectSummaries().size() > 0) {
        boolean found = false;
        boolean firstCall = true;
        do {
            if (!firstCall) {
                listing = s3.listNextBatchOfObjects(listing);
            }
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                if (summary.getKey().equals(result)) {
                    result = finalUri + counter++ + name;
                    found = true;
                    break;
                }
            }
            if (found) {
                break;
            }
            firstCall = false;
        } while (listing.isTruncated());
        if (!found) {
            break;
        }
        listObjectsRequest.setPrefix(result);
        listing = s3.listObjects(listObjectsRequest);
    }
    // result is now what should be used so create a zero byte file to lock
    // that name to us

    S3RepositoryFile repoFile = new S3RepositoryFile(baseURL, result, transferManager);
    ByteArrayInputStream source = new ByteArrayInputStream(new byte[] { (byte) 0 });
    repoFile.write(source, 1);
    return repoFile;
}

From source file:sys2202.aws.s3.Sample.java

License:Open Source License

public static void main(String[] args) throws Exception {

    // create the client we'll use to connect to S3
    AmazonS3 s3 = AmazonS3ClientBuilder.standard().withRegion(Regions.US_EAST_1).build();

    // list buckets in our S3 account
    System.out.println("Listing buckets in our S3 account...\n");
    for (Bucket bucket : s3.listBuckets()) {
        System.out.println("\t" + bucket.getName());
    }

    System.out.println();

    // create a new bucket to experiment with
    String bucketName = "msg8u-sys2202-bucket"; // set the bucket name -- this must be unique, so you'll want to use your ID instead of msg8u
    System.out.println("Creating bucket " + bucketName + "...\n");
    s3.createBucket(bucketName);

    // list buckets in our S3 account
    System.out.println("Listing buckets in our S3 account...\n");
    for (Bucket bucket : s3.listBuckets()) {
        System.out.println("\t" + bucket.getName());
    }

    System.out.println();

    // create and upload a sample file
    System.out.println("Uploading a new object to S3 from a local file...\n");
    File sampleFile = createSampleFile();
    String objectKey = "my-test-file";
    PutObjectRequest putRequest = new PutObjectRequest(bucketName, objectKey, sampleFile);
    s3.putObject(putRequest);

    // list objects in our new bucket -- notice the new object is now present
    System.out.println("Listing objects in our new bucket...\n");
    ListObjectsRequest listRequest = new ListObjectsRequest().withBucketName(bucketName);
    ObjectListing objectListing = s3.listObjects(listRequest);
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println("\t" + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
    }

    System.out.println();

    // download and display the sample file that we just uploaded
    System.out.println("Downloading the sample file...\n");
    GetObjectRequest getRequest = new GetObjectRequest(bucketName, objectKey);
    S3Object object = s3.getObject(getRequest);
    displayTextInputStream(object.getObjectContent());

    // delete the sample file from S3
    System.out.println("Deleting the sample file...\n");
    s3.deleteObject(bucketName, objectKey);

    // delete the bucket
    System.out.println("Deleting the bucket...\n");
    s3.deleteBucket(bucketName);

    System.out.println("All done!");
}