List of usage examples for the com.amazonaws.services.s3.model.PutObjectRequest constructor
public PutObjectRequest(String bucketName, String key, String redirectLocation)
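None of the examples below call this overload directly; most pass a File, or an InputStream plus ObjectMetadata, instead. A minimal sketch of the redirect-location overload itself, assuming a bucket configured for static website hosting; the client setup, bucket and key names are placeholders:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class RedirectLocationSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Creates a zero-byte object whose x-amz-website-redirect-location
        // points requests for "old-page.html" at "/new-page.html".
        // Bucket and key names are hypothetical.
        s3.putObject(new PutObjectRequest("example-bucket", "old-page.html", "/new-page.html"));
    }
}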
From source file:com.neu.cloud.Controller.ThirdUseCaseController.java
private void uploadInS3(String uploadFilePath, String uploadFileName, String dateForFolder) {
    String bucketName = "reports-sppard";
    String keyName = "UseCase3-" + dateForFolder + "/" + uploadFileName;
    AmazonS3 s3client = new AmazonS3Client(
            new BasicAWSCredentials("AKIAJ2E67YVFQ5PZSWQA", "xiVuejpUofGonrsiy2owvu/wgeNKq5nYjxYVC0ma"));

    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        File file = new File(uploadFilePath + uploadFileName);
        s3client.putObject(new PutObjectRequest(bucketName, keyName, file));
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.openkm.util.backup.RepositoryS3Backup.java
License:Open Source License
/**
 * Performs a recursive repository content export with metadata
 */
private static ImpExpStats backupHelper(String token, String fldPath, AmazonS3 s3, String bucket,
        boolean metadata, Writer out, InfoDecorator deco)
        throws FileNotFoundException, PathNotFoundException, AccessDeniedException, ParseException,
        NoSuchGroupException, RepositoryException, IOException, DatabaseException {
    log.info("backup({}, {}, {}, {}, {}, {})", new Object[] { token, fldPath, bucket, metadata, out, deco });
    ImpExpStats stats = new ImpExpStats();
    DocumentModule dm = ModuleManager.getDocumentModule();
    FolderModule fm = ModuleManager.getFolderModule();
    MetadataAdapter ma = MetadataAdapter.getInstance(token);
    Gson gson = new Gson();

    for (Iterator<Document> it = dm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        File tmpDoc = null;
        InputStream is = null;
        FileOutputStream fos = null;
        boolean upload = true;

        try {
            Document docChild = it.next();
            String path = docChild.getPath().substring(1);
            ObjectMetadata objMeta = new ObjectMetadata();

            if (Config.REPOSITORY_CONTENT_CHECKSUM) {
                if (exists(s3, bucket, path)) {
                    objMeta = s3.getObjectMetadata(bucket, path);

                    if (docChild.getActualVersion().getChecksum().equals(objMeta.getETag())) {
                        upload = false;
                    }
                }
            }

            if (upload) {
                tmpDoc = FileUtils.createTempFileFromMime(docChild.getMimeType());
                fos = new FileOutputStream(tmpDoc);
                is = dm.getContent(token, docChild.getPath(), false);
                IOUtils.copy(is, fos);
                PutObjectRequest request = new PutObjectRequest(bucket, path, tmpDoc);

                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);
                }

                request.setMetadata(objMeta);
                s3.putObject(request);
                out.write(deco.print(docChild.getPath(), docChild.getActualVersion().getSize(), null));
                out.flush();
            } else {
                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);

                    // Update object metadata
                    CopyObjectRequest copyObjReq = new CopyObjectRequest(bucket, path, bucket, path);
                    copyObjReq.setNewObjectMetadata(objMeta);
                    s3.copyObject(copyObjReq);
                }

                log.info("Don't need to upload document {}", docChild.getPath());
            }

            // Stats
            stats.setSize(stats.getSize() + docChild.getActualVersion().getSize());
            stats.setDocuments(stats.getDocuments() + 1);
        } finally {
            IOUtils.closeQuietly(is);
            IOUtils.closeQuietly(fos);
            FileUtils.deleteQuietly(tmpDoc);
        }
    }

    for (Iterator<Folder> it = fm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        InputStream is = null;

        try {
            Folder fldChild = it.next();
            String path = fldChild.getPath().substring(1) + "/";
            is = new ByteArrayInputStream(new byte[0]);
            ObjectMetadata objMeta = new ObjectMetadata();
            objMeta.setContentLength(0);
            PutObjectRequest request = new PutObjectRequest(bucket, path, is, objMeta);

            // Metadata
            if (metadata) {
                FolderMetadata fmd = ma.getMetadata(fldChild);
                String json = gson.toJson(fmd);
                objMeta.addUserMetadata("okm", json);
            }

            request.setMetadata(objMeta);
            s3.putObject(request);
            ImpExpStats tmp = backupHelper(token, fldChild.getPath(), s3, bucket, metadata, out, deco);

            // Stats
            stats.setSize(stats.getSize() + tmp.getSize());
            stats.setDocuments(stats.getDocuments() + tmp.getDocuments());
            stats.setFolders(stats.getFolders() + tmp.getFolders() + 1);
            stats.setOk(stats.isOk() && tmp.isOk());
        } finally {
            IOUtils.closeQuietly(is);
        }
    }

    log.debug("backupHelper: {}", stats);
    return stats;
}
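This backup helper uses both the (bucket, key, File) overload and the (bucket, key, InputStream, ObjectMetadata) overload that creates the zero-byte folder markers. A minimal standalone sketch of the stream-based overload, assuming a default-configured client; bucket and key names are placeholders:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class StreamUploadSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata meta = new ObjectMetadata();
        // When uploading from a stream, set the content length up front so the
        // SDK does not have to buffer the whole stream to compute it.
        meta.setContentLength(payload.length);

        s3.putObject(new PutObjectRequest("example-bucket", "notes/hello.txt",
                new ByteArrayInputStream(payload), meta));
    }
}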
From source file:com.pinterest.secor.uploader.S3UploadManager.java
License:Apache License
public Handle<?> upload(LogFilePath localPath) throws Exception {
    String s3Bucket = mConfig.getS3Bucket();
    String curS3Path = s3Path;
    String s3Key;

    File localFile = new File(localPath.getLogFilePath());

    if (FileUtil.s3PathPrefixIsAltered(localPath.withPrefix(curS3Path).getLogFilePath(), mConfig)) {
        curS3Path = FileUtil.getS3AlternativePathPrefix(mConfig);
        LOG.info("Will upload file {} to alternative s3 path s3://{}/{}", localFile, s3Bucket, curS3Path);
    }

    if (mConfig.getS3MD5HashPrefix()) {
        // add MD5 hash to the prefix to have proper partitioning of the secor logs on s3
        String md5Hash = FileUtil.getMd5Hash(localPath.getTopic(), localPath.getPartitions());
        s3Key = localPath.withPrefix(md5Hash + "/" + curS3Path).getLogFilePath();
    } else {
        s3Key = localPath.withPrefix(curS3Path).getLogFilePath();
    }

    // make upload request, taking into account configured options for encryption
    PutObjectRequest uploadRequest = new PutObjectRequest(s3Bucket, s3Key, localFile);

    if (!mConfig.getAwsSseType().isEmpty()) {
        if (S3.equals(mConfig.getAwsSseType())) {
            LOG.info("uploading file {} to s3://{}/{} with S3-managed encryption", localFile, s3Bucket, s3Key);
            enableS3Encryption(uploadRequest);
        } else if (KMS.equals(mConfig.getAwsSseType())) {
            LOG.info("uploading file {} to s3://{}/{} using KMS based encryption", localFile, s3Bucket, s3Key);
            enableKmsEncryption(uploadRequest);
        } else if (CUSTOMER.equals(mConfig.getAwsSseType())) {
            LOG.info("uploading file {} to s3://{}/{} using customer key encryption", localFile, s3Bucket, s3Key);
            enableCustomerEncryption(uploadRequest);
        } else {
            // bad option
            throw new IllegalArgumentException(
                    mConfig.getAwsSseType() + " is not a suitable type for AWS SSE encryption");
        }
    } else {
        LOG.info("uploading file {} to s3://{}/{} with no encryption", localFile, s3Bucket, s3Key);
    }

    Upload upload = mManager.upload(uploadRequest);
    return new S3UploadHandle(upload);
}
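The enableS3Encryption, enableKmsEncryption and enableCustomerEncryption helpers are not shown above. A minimal sketch of what SSE-S3 and SSE-KMS could look like on a PutObjectRequest with the v1 SDK; the bucket, key and file path are placeholders:

import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams;

import java.io.File;

public class SseSketch {
    public static void main(String[] args) {
        File localFile = new File("/tmp/example.log");   // placeholder path

        // SSE-S3: ask S3 to encrypt the object with S3-managed keys (AES-256)
        PutObjectRequest sseS3Request = new PutObjectRequest("example-bucket", "logs/example.log", localFile);
        ObjectMetadata meta = new ObjectMetadata();
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        sseS3Request.setMetadata(meta);

        // SSE-KMS: encrypt with the account's default KMS key
        // (a specific key id could be passed to SSEAwsKeyManagementParams instead)
        PutObjectRequest kmsRequest = new PutObjectRequest("example-bucket", "logs/example.log", localFile)
                .withSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams());
    }
}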
From source file:com.shazam.dataengineering.pipelinebuilder.AWSProxy.java
License:Apache License
public static boolean uploadFileToS3Url(AmazonS3 client, String url, File file) throws DeploymentException {
    try {
        Pattern pattern = Pattern.compile("://([^/]+)/(.*)");
        Matcher matcher = pattern.matcher(url);

        if (matcher.find()) {
            String bucketName = matcher.group(1);
            String key = matcher.group(2);

            PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file);
            PutObjectResult result = client.putObject(putRequest);
            return true;
        } else {
            return false;
        }
    } catch (RuntimeException e) {
        throw new DeploymentException(e);
    }
}
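A hypothetical call site, assuming a default-configured client; the s3:// URL and file path are placeholders:

AmazonS3 client = AmazonS3ClientBuilder.defaultClient();
File artifact = new File("/tmp/pipeline-definition.json");   // placeholder file
boolean uploaded = AWSProxy.uploadFileToS3Url(client,
        "s3://example-bucket/deployments/pipeline-definition.json", artifact);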
From source file:com.sjsu.faceit.example.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    System.out.println(new File(".").getAbsolutePath());

    AmazonS3 s3 = new AmazonS3Client(
            new PropertiesCredentials(S3Sample.class.getResourceAsStream("AwsCredentials.properties")));

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, "abc/" + key, new File("/Users/prayag/Desktop/2.jpg")));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, "abc/" + key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        // System.out.println("Deleting an object\n");
        // s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        // System.out.println("Deleting bucket " + bucketName + "\n");
        // s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.sludev.commons.vfs2.provider.s3.SS3FileObject.java
License:Apache License
/**
 * Upload a local file to Amazon S3.
 *
 * @param f File object from the local file-system to be uploaded to Amazon S3
 */
public void putObject(File f) {
    Pair<String, String> path = getContainerAndPath();

    fileSystem.getClient().putObject(new PutObjectRequest(path.getLeft(), path.getRight(), f));
}
From source file:com.springboot.demo.framework.aws.s3.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials basicCredentials = new BasicAWSCredentials(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY);
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    /*
     * Create S3 Client
     */
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Returns an URL for the object stored in the specified bucket and key
         */
        URL url = s3.getUrl(bucketName, key);
        System.out.println("upload file url : " + url.toString());

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.streamsets.datacollector.lib.emr.S3Manager.java
License:Apache License
String uploadToS3(String name, File file) throws IOException {
    long start = System.currentTimeMillis();
    long fileLength = file.length() / (1000 * 1000);

    String bucket = getBucket(pipelineEmrConfigs.getS3StagingUri());
    String path = getPath(pipelineEmrConfigs.getS3StagingUri()) + "/" + pipelineId + "/" + uniquePrefix;
    String s3Uri = "s3://" + bucket + "/" + path + "/" + name;

    try {
        // Upload
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, path + "/" + name, file);
        putObjectRequest.setGeneralProgressListener(new ProgressListener() {
            long counter;
            long tick = -1;

            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                counter += progressEvent.getBytesTransferred();
                if (counter / (100 * 1000000) > tick) {
                    tick++;
                    LOG.debug("Uploading '{}' {}/{} MB, {} secs", s3Uri, counter / (1000 * 1000), fileLength,
                            (System.currentTimeMillis() - start) / 1000);
                }
            }
        });

        getS3TransferManager().upload(putObjectRequest).waitForCompletion();

        LOG.info("Uploaded file at: {}", s3Uri);
        return s3Uri;
    } catch (SdkBaseException | InterruptedException ex) {
        throw new IOException(ex);
    }
}
From source file:com.supprema.utils.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [fabiano-user-s3]
     * credential profile by reading from the credentials file located at
     * (/Users/fabianorodriguesmatias/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("fabiano-user-s3").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. "
                        + "Please make sure that your credentials file is at the correct "
                        + "location (/Users/fabianorodriguesmatias/.aws/credentials), and is in valid format.",
                e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.topera.epoch.service.S3Util.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials
     * (C:\Users\USER_NAME\.aws\credentials for Windows users)
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */
    AWSCredentials creds = new AWSCredentials() {
        public String getAWSSecretKey() {
            // TODO Auto-generated method stub
            return "5VVtmI7vcecuVbw8JsG4uo2O1/9RwwLHrTT01Itz";
        }

        public String getAWSAccessKeyId() {
            // TODO Auto-generated method stub
            return "AKIAJCMYALI46A2DIPRQ";
        }
    };
    AmazonS3 s3 = new AmazonS3Client(creds);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}