List of usage examples for the com.amazonaws.services.s3.model.PutObjectRequest constructor
public PutObjectRequest(String bucketName, String key, String redirectLocation)
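Note that none of the examples below actually pass a redirect location; they all use the overload that takes a java.io.File. As a minimal sketch of the signature shown above (the bucket name, key, and target path are hypothetical), an object can be created whose only purpose is to redirect website requests:

    public static void putWebsiteRedirect(AmazonS3 s3) {
        // The third constructor argument becomes the x-amz-website-redirect-location
        // value; the created object carries no content of its own.
        PutObjectRequest request = new PutObjectRequest("my-website-bucket", "old-page.html", "/new-page.html");
        s3.putObject(request);
    }

The redirect takes effect when the object is requested through the bucket's static website endpoint.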
From source file: com.arc.cloud.aws.s3.S3Sample.java
License: Open Source License
public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
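The upload comment in the example above also mentions sending an InputStream directly when the content length is known, together with caller-supplied metadata. A minimal sketch of that variant (the stream contents, bucket, and key here are hypothetical; it assumes the same com.amazonaws.services.s3 model classes as the example above):

    public static void putFromStream(AmazonS3 s3, String bucketName, String key) {
        byte[] data = "sample stream content".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(data.length); // required when uploading from a stream
        metadata.setContentType("text/plain");
        s3.putObject(new PutObjectRequest(bucketName, key,
                new java.io.ByteArrayInputStream(data), metadata));
    }

Setting the content length up front avoids the SDK buffering the whole stream in memory to compute it.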
From source file: com.atlassian.localstack.sample.S3Sample.java
License: Open Source License
public static void runTest(AWSCredentials credentials) throws IOException {
    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    s3.setEndpoint(LocalstackTestRunner.getEndpointS3());

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    /*
     * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
     * so once a bucket name has been taken by any user, you can't create
     * another bucket with that same name.
     *
     * You can optionally specify a location for your bucket if you want to
     * keep your data closer to your applications or users.
     */
    System.out.println("Creating bucket " + bucketName + "\n");
    s3.createBucket(bucketName);

    /*
     * List the buckets in your account
     */
    System.out.println("Listing buckets");
    for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
    }
    System.out.println();

    /*
     * Upload an object to your bucket - You can easily upload a file to
     * S3, or upload an InputStream directly if you know the length of
     * the data in the stream. You can also specify your own metadata
     * when uploading to S3, which allows you to set a variety of options
     * like content-type and content-encoding, plus additional metadata
     * specific to your applications.
     */
    System.out.println("Uploading a new object to S3 from a file\n");
    s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

    /*
     * Download an object - When you download an object, you get all of
     * the object's metadata and a stream from which to read the contents.
     * It's important to read the contents of the stream as quickly as
     * possible since the data is streamed directly from Amazon S3 and your
     * network connection will remain open until you read all the data or
     * close the input stream.
     *
     * GetObjectRequest also supports several other options, including
     * conditional downloading of objects based on modification times,
     * ETags, and selectively downloading a range of an object.
     */
    System.out.println("Downloading an object");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
    System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
    displayTextInputStream(object.getObjectContent());

    /*
     * List objects in your bucket by prefix - There are many options for
     * listing the objects in your bucket. Keep in mind that buckets with
     * many objects might truncate their results when listing their objects,
     * so be sure to check if the returned object listing is truncated, and
     * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
     * additional results.
     */
    System.out.println("Listing objects");
    ObjectListing objectListing = s3
            .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println(" - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
    }
    System.out.println();

    /*
     * Delete an object - Unless versioning has been turned on for your bucket,
     * there is no way to undelete an object, so use caution when deleting objects.
     */
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);

    /*
     * Delete a bucket - A bucket must be completely empty before it can be
     * deleted, so remember to delete any objects from your buckets before
     * you try to delete them.
     */
    System.out.println("Deleting bucket " + bucketName + "\n");
    s3.deleteBucket(bucketName);
}
From source file: com.att.aro.core.cloud.aws.AwsRepository.java
License: Apache License
@Override
public TransferState put(File file) {
    try {
        PutObjectRequest req = new PutObjectRequest(bucketName, file.getName(), file);
        Upload myUpload = transferMgr.upload(req);
        myUpload.waitForCompletion();
        transferMgr.shutdownNow();
        return myUpload.getState();
    } catch (AmazonServiceException ase) {
        LOGGER.error("Error Message: " + ase.getMessage());
    } catch (Exception exception) {
        LOGGER.error(exception.getMessage(), exception);
    }
    return null;
}
From source file: com.carrotgarden.nexus.aws.s3.publish.amazon.AmazonServiceProvider.java
License: BSD License
@Override
public boolean save(final String path, final File file) {
    reporter.requestSaveCount.inc();
    reporter.requestTotalCount.inc();
    try {
        final PutObjectRequest request =
                new PutObjectRequest(mavenBucket(), mavenRepoKey(path), file);
        final PutObjectResult result = client.putObject(request);
        reporter.fileSaveCount.inc();
        reporter.fileSaveSize.inc(file.length());
        reporter.fileSaveWatch.add(file);
        setAvailable(true, null);
        return true;
    } catch (final Exception e) {
        setAvailable(false, e);
        return false;
    }
}
From source file: com.climate.oada.dao.impl.S3ResourceDAO.java
License: Open Source License
/**
 * Upload file to S3.
 *
 * @param local
 *            - local file to upload.
 * @return boolean
 */
boolean uploadS3(File local) {
    boolean retval = false;
    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
    try {
        LOG.debug("Uploading a new object to S3 from local, file name " + local.getName());
        s3client.putObject(new PutObjectRequest(bucketName, keyName, local));
        retval = true;
    } catch (AmazonServiceException ase) {
        logAWSServiceException(ase);
    } catch (AmazonClientException ace) {
        logAWSClientException(ace);
    }
    return retval;
}
From source file: com.crickdata.upload.s3.UploadLiveData.java
License: Open Source License
public Map<String, Date> uploadToS3(String fileName, boolean type) throws IOException {
    Statistics statistics = new Statistics();
    Map<String, Date> perfMap = new HashMap<String, Date>();
    AWSCredentials credentials = null;
    try {
        credentials = new BasicAWSCredentials("AKIAI6QKTRAQE7MXQOIQ",
                "wIG6u1yI5ZaseeJbvYSUmD98qelIJNSCVBzt5k2q");
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\bssan_000\\.aws\\credentials), and is in valid format.", e);
    }
    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName;
    if (!type)
        bucketName = "cricmatchinfo";
    else
        bucketName = "cricmatchinfoseries";
    String key = fileName.replace(".json", "").trim();

    try {
        perfMap.put("S3INSERTREQ", new Date());
        statistics.setS3Req(new Date());
        File f = readMatchFile(fileName);
        double bytes = f.length();
        double kilobytes = (bytes / 1024);
        System.out.println("Details :" + kilobytes);
        s3.putObject(new PutObjectRequest(bucketName, key, f));
        statistics.setSize(String.valueOf(kilobytes));
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        perfMap.put("S3SAVERES", object.getObjectMetadata().getLastModified());
        statistics.setKey(key);
        statistics.setS3Res(object.getObjectMetadata().getLastModified());
        MyUI.stats.add(statistics);
        displayTextInputStream(object.getObjectContent());
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
    return perfMap;
}
From source file: com.digitaslbi.helios.mock.utils.ConnectionHelper.java
public static void uploadFile(String bucketName, String folderName, String filePath) {
    try {
        log.debug("Uploading a new object to S3 from a file\n");
        java.io.File file = new java.io.File(filePath);
        s3Client.putObject(new PutObjectRequest(bucketName, folderName, file)
                .withCannedAcl(CannedAccessControlList.PublicRead));
    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        log.error("Error Message: " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code: " + ase.getErrorCode());
        log.error("Error Type: " + ase.getErrorType());
        log.error("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}
From source file: com.dongli.model.MyJSONData.java
License: Open Source License
public static String createObject(MyJSONObject mjo) throws MyRESTException {
    // generate uid for this object
    String uid = UIDGenerator.getUID();
    mjo.setUID(uid);

    // tmp path of the data file for this object; the file will be sent to S3 later
    String path = "/tmp/" + uid;
    try {
        FileWriter fw = new FileWriter(path, false);
        PrintWriter pw = new PrintWriter(fw);
        pw.println(mjo.toString());
        pw.close();
        fw.close();
    } catch (IOException e) {
        // failed to create the new object
        File nf = new File(path);
        nf.delete();
        throw new MyRESTException("Failed to create the object " + uid + ".");
    }

    // create the new object on AWS S3
    try {
        File uploadFile = new File(path);
        MyAWSStorage.getInstance().s3client
                .putObject(new PutObjectRequest(MyConfiguration.getInstance().bucket, uid, uploadFile));
    } catch (AmazonServiceException ase) {
        throw new MyRESTException("Failed to create the object " + uid + ".");
    } catch (AmazonClientException ace) {
        throw new MyRESTException("Failed to create the object " + uid + ".");
    }
    return uid;
}
From source file: com.dongli.model.MyJSONData.java
License: Open Source License
public static void updateObject(MyJSONObject mjo) throws MyRESTException {
    String uid = mjo.getUID();
    MyJSONObject myJSONObject = queryObject(uid);
    String path = "/tmp/" + uid;

    // write the JSON object to a tmp file
    try {
        FileWriter fw = new FileWriter(path, false);
        PrintWriter pw = new PrintWriter(fw);
        pw.println(mjo.toString());
        pw.close();
        fw.close();
    } catch (IOException e) {
        // failed to create the new object
        File nf = new File(path);
        nf.delete();
        throw new MyRESTException("Failed to update the object.");
    }

    // update the object on Amazon AWS S3
    try {
        File uploadFile = new File(path);
        MyAWSStorage.getInstance().s3client
                .putObject(new PutObjectRequest(MyConfiguration.getInstance().bucket, uid, uploadFile));
    } catch (AmazonServiceException ase) {
        throw new MyRESTException("Failed to update the object " + uid + ".");
    } catch (AmazonClientException ace) {
        throw new MyRESTException("Failed to update the object " + uid + ".");
    }
}
From source file: com.dss.sframework.tools.amazon.models.UploadModel.java
License: Open Source License
public void upload() {
    if (mFile != null) {
        try {
            ObjectMetadata metaData = new ObjectMetadata();
            // Parentheses ensure the extension is appended to both "video/" and "image/".
            metaData.setContentType((mMediaType == Enums.MEDIA_TYPE.VIDEO ? "video/" : "image/") + mExtension);

            if (mMediaType == Enums.MEDIA_TYPE.VIDEO) {
                mediaURL = AmazonUtil.getPrefix(getContext()) + "videos/" + super.getFileName() + "." + mExtension;
            } else {
                mediaURL = AmazonUtil.getPrefix(getContext()) + "imagens/" + super.getFileName() + "." + mExtension;
            }

            mUpload = getTransferManager().upload(
                    new PutObjectRequest(ConstantAmazon.BUCKET_NAME.toLowerCase(Locale.US), mediaURL, mFile)
                            .withCannedAcl(CannedAccessControlList.PublicRead).withMetadata(metaData));

            Log.i("TAG", "Upload: " + ConstantAmazon.BUCKET_NAME + mediaURL);

            mUpload.addProgressListener(mListener);
        } catch (Exception e) {
            Log.e(TAG, "", e);
            mCallbacks.onErrorListenerUploadFile("Erro no upload");
        }
    }
}