List of usage examples for the com.amazonaws.services.s3.model PutObjectRequest constructors, e.g.:
public PutObjectRequest(String bucketName, String key, String redirectLocation)
From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java
License:Open Source License
/**
 * Uploads the given stream to S3 (via a temp file) and blocks until the transfer
 * completes. Logs transfer state before and after the upload.
 *
 * @param bucketName         target bucket
 * @param fileName           S3 object key
 * @param inputStream        content to upload; spooled to a temp file first
 * @param isPublicAccessible when true, the object is uploaded with a PublicRead canned ACL
 * @return true if the transfer reports itself done
 * @throws IOException if the temp file cannot be created from the stream
 */
@Override
public boolean uploadObjectAndListenProgress(final String bucketName, final String fileName,
        final InputStream inputStream, final boolean isPublicAccessible)
        throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info(
            "uploadObjectAndListenProgress invoked, bucketName: {} , fileName: {} and isPublicAccessible: {}",
            bucketName, fileName, isPublicAccessible);
    File tempFile = null;
    Upload upload = null;
    try {
        // Spool the stream to a temporary file to avoid an 'out of memory' error
        // on large payloads.
        tempFile = AWSUtil.createTempFileFromStream(inputStream);
        final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, fileName, tempFile);
        if (isPublicAccessible) {
            putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
        }
        // NOTE(review): the TransferManager is not shut down here; shutdownNow()
        // would also close the shared s3client -- confirm ownership before changing.
        final TransferManager transferMgr = new TransferManager(s3client);
        upload = transferMgr.upload(putObjectRequest);
        // You can poll your transfer's status to check its progress
        if (upload.isDone()) {
            LOGGER.info("Start: {} , State: {} and Progress (%): {}", upload.getDescription(),
                    upload.getState(), upload.getProgress().getPercentTransferred());
        }
        // Add progressListener to listen asynchronous notifications about your transfer's progress
        // Uncomment below code snippet during development
        /*upload.addProgressListener(new ProgressListener() {
            public void progressChanged(ProgressEvent event) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("Transferred bytes: " + (long) event.getBytesTransferred());
                }
            }
        });*/
        try {
            // Block the current thread and wait for completion.
            // If the transfer fails AmazonClientException will be thrown.
            upload.waitForCompletion();
        } catch (AmazonClientException excp) {
            LOGGER.error("Exception occured while waiting for transfer: ", excp);
        } catch (InterruptedException excp) {
            // Fix: restore the interrupt status instead of swallowing it, so callers
            // can still observe that this thread was interrupted.
            Thread.currentThread().interrupt();
            LOGGER.error("Exception occured while waiting for transfer: ", excp);
        }
    } finally {
        AWSUtil.deleteTempFile(tempFile); // Delete the temporary file once uploaded
    }
    LOGGER.info("End: {} , State: {} , Progress (%): {}", upload.getDescription(), upload.getState(),
            upload.getProgress().getPercentTransferred());
    return upload.isDone();
}
From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java
License:Open Source License
@Override public Upload uploadFileAsync(final String bucketName, final String fileName, final File fileObj, final CannedAccessControlList cannedAcl) throws AmazonClientException, AmazonServiceException, IOException { LOGGER.info("uploadObjectAsync invoked, bucketName: {} , fileName: {} and cannedAccessControlList: {}", bucketName, fileName, cannedAcl); final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, fileName, fileObj) .withCannedAcl(cannedAcl);// w ww. j a v a 2s . c o m final TransferManager transferMgr = new TransferManager(s3client); return transferMgr.upload(putObjectRequest); }
From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java
License:Open Source License
/**
 * Starts an asynchronous upload of the given file, optionally marking the
 * object publicly readable, and returns the in-flight transfer handle.
 *
 * @param bucketName         target bucket
 * @param fileName           S3 object key
 * @param fileObj            file to upload
 * @param isPublicAccessible when true, applies the PublicRead canned ACL
 * @return the Upload handle for tracking/awaiting the transfer
 */
@Override
public Upload uploadFileAsync(final String bucketName, final String fileName, final File fileObj,
        final boolean isPublicAccessible)
        throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info("uploadObjectAsync invoked, bucketName: {} , fileName: {} and isPublicAccessible: {}",
            bucketName, fileName, isPublicAccessible);
    final PutObjectRequest request = new PutObjectRequest(bucketName, fileName, fileObj);
    if (isPublicAccessible) {
        request.setCannedAcl(CannedAccessControlList.PublicRead);
    }
    final TransferManager manager = new TransferManager(s3client);
    return manager.upload(request);
}
From source file:com.github.rholder.esthree.command.Put.java
License:Apache License
@Override public Integer call() throws Exception { TransferManager t = new TransferManager(amazonS3Client); ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setUserMetadata(metadata); Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata)); // TODO this listener spews out garbage >100% on a retry, add a test to verify if (progressListener != null) { progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress())); u.addProgressListener(progressListener); }/*from w w w. ja v a 2 s. co m*/ try { u.waitForCompletion(); } finally { t.shutdownNow(); } return 0; }
From source file:com.groupproject.data.FileManager.java
License:Open Source License
/**
 * Writes the given bytes to a temporary file and uploads it to S3 under the
 * given key. S3 errors are reported to stdout rather than propagated.
 *
 * @param bFile payload to upload
 * @param key   S3 object key
 * @throws IOException if the temporary file cannot be created or written
 */
public static void uploadFile(byte[] bFile, String key) throws IOException {
    s3.setRegion(usWest2);
    File file = File.createTempFile("temp-file", ".txt");
    file.deleteOnExit();
    // Fix: the original opened the same file through BOTH an unused Writer and a
    // FileOutputStream (double-open; the Writer was never written to). A single
    // try-with-resources stream writes the payload and guarantees the handle closes.
    try (FileOutputStream fileOutputStream = new FileOutputStream(file)) {
        fileOutputStream.write(bFile);
    }
    System.out.println("Uploading a new object to S3 from a file\n");
    try {
        s3.putObject(new PutObjectRequest(bucketName, key, file));
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.ibm.stocator.fs.cos.COSAPIClient.java
License:Apache License
/**
 * Builds a putObject request for the configured bucket, attaching the
 * supplied metadata header.
 *
 * @param key      key of object
 * @param metadata metadata header
 * @param srcfile  source file
 * @return the request
 */
public PutObjectRequest newPutObjectRequest(String key, ObjectMetadata metadata, File srcfile) {
    final PutObjectRequest request = new PutObjectRequest(mBucket, key, srcfile);
    request.setMetadata(metadata);
    return request;
}
From source file:com.ibm.stocator.fs.cos.COSOutputStream.java
License:Apache License
/**
 * Closes the backing stream and uploads the buffered file to COS, blocking
 * until the upload finishes. Idempotent: only the first close performs the
 * upload. The temporary backup file is deleted in all cases.
 *
 * @throws IOException if the upload fails or is interrupted
 */
@Override
public void close() throws IOException {
    // Idempotent close: only the first caller proceeds past this point.
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);
        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        // Fix: restore the interrupt status before translating to InterruptedIOException.
        Thread.currentThread().interrupt();
        throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
    } catch (AmazonClientException e) {
        // Fix: keep the original exception as the cause instead of only flattening
        // it into the message text.
        throw new IOException(String.format("saving output %s %s", mKey, e), e);
    } finally {
        if (!mBackupFile.delete()) {
            // Fix: log the file that failed to delete (original logged the stream object).
            LOG.warn("Could not delete temporary cos file: {}", mBackupFile);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}
From source file:com.images3.data.impl.ImageContentAccessImplS3.java
License:Apache License
@Override public File insertImageContent(ImageIdentity id, AmazonS3Bucket bucket, File content) { File imageFile = new File(generateFilePath(id)); content.renameTo(imageFile);//from ww w .j ava 2s . c o m AmazonS3 client = clients.getClient(bucket); client.putObject(new PutObjectRequest(bucket.getName(), generateS3ObjectKey(id), imageFile)); return imageFile; }
From source file:com.imos.sample.S3SampleCheck.java
License:Open Source License
public static void main(String[] args) throws IOException { /*/*w ww . j a v a 2s. c om*/ * The ProfileCredentialsProvider will return your [default] * credential profile by reading from the credentials file located at * (/home/alok/.aws/credentials). */ AWSCredentials credentials = null; try { credentials = new ProfileCredentialsProvider("default").getCredentials(); } catch (Exception e) { throw new AmazonClientException("Cannot load the credentials from the credential profiles file. " + "Please make sure that your credentials file is at the correct " + "location (/home/alok/.aws/credentials), and is in valid format.", e); } AmazonS3 s3 = new AmazonS3Client(credentials); // Region usWest2 = Region.getRegion(Regions.US_WEST_2); Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_1); s3.setRegion(usWest2); String bucketName = "alok-test"; String key = "sample.json"; System.out.println("==========================================="); System.out.println("Getting Started with Amazon S3"); System.out.println("===========================================\n"); try { /* * Create a new S3 bucket - Amazon S3 bucket names are globally unique, * so once a bucket name has been taken by any user, you can't create * another bucket with that same name. * * You can optionally specify a location for your bucket if you want to * keep your data closer to your applications or users. */ // System.out.println("Creating bucket " + bucketName + "\n"); // s3.createBucket(bucketName); /* * List the buckets in your account */ // System.out.println("Listing buckets"); // for (Bucket bucket : s3.listBuckets()) { // System.out.println(" - " + bucket.getName()); // } System.out.println(); /* * Upload an object to your bucket - You can easily upload a file to * S3, or upload directly an InputStream if you know the length of * the data in the stream. 
You can also specify your own metadata * when uploading to S3, which allows you set a variety of options * like content-type and content-encoding, plus additional metadata * specific to your applications. */ System.out.println("Uploading a new object to S3 from a file\n"); //s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile())); /* * Download an object - When you download an object, you get all of * the object's metadata and a stream from which to read the contents. * It's important to read the contents of the stream as quickly as * possibly since the data is streamed directly from Amazon S3 and your * network connection will remain open until you read all the data or * close the input stream. * * GetObjectRequest also supports several other options, including * conditional downloading of objects based on modification times, * ETags, and selectively downloading a range of an object. */ System.out.println("Downloading an object"); // S3Object object = s3.getObject(new GetObjectRequest(bucketName, key)); S3Object object = s3.getObject(new GetObjectRequest("alok-test", key)); System.out.println("Content-Type: " + object.getObjectMetadata().getContentType()); displayTextInputStream(object.getObjectContent()); /* * List objects in your bucket by prefix - There are many options for * listing the objects in your bucket. Keep in mind that buckets with * many objects might truncate their results when listing their objects, * so be sure to check if the returned object listing is truncated, and * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve * additional results. 
*/ System.out.println("Listing objects"); ObjectListing objectListing = s3.listObjects(new ListObjectsRequest() // .withBucketName(bucketName) .withBucketName("alok-test")); // .withPrefix("My")); objectListing.getObjectSummaries().forEach((objectSummary) -> { System.out.println( " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")"); }); System.out.println(); /* * Delete an object - Unless versioning has been turned on for your bucket, * there is no way to undelete an object, so use caution when deleting objects. */ // System.out.println("Deleting an object\n"); // s3.deleteObject(bucketName, key); /* * Delete a bucket - A bucket must be completely empty before it can be * deleted, so remember to delete any objects from your buckets before * you try to delete them. */ // System.out.println("Deleting bucket " + bucketName + "\n"); // s3.deleteBucket(bucketName); } catch (AmazonServiceException ase) { System.out.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.out.println("Error Message: " + ase.getMessage()); System.out.println("HTTP Status Code: " + ase.getStatusCode()); System.out.println("AWS Error Code: " + ase.getErrorCode()); System.out.println("Error Type: " + ase.getErrorType()); System.out.println("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { System.out.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.out.println("Error Message: " + ace.getMessage()); } }
From source file:com.intuit.s3encrypt.S3Encrypt.java
License:Open Source License
/**
 * Uploads the named file to S3 with an encryption client, attaching one
 * user-metadata entry (keyname -> key).
 *
 * @param cmd      parsed command line (currently unused; kept for signature compatibility)
 * @param s3       encryption-enabled S3 client
 * @param bucket   target bucket
 * @param filename local file path, also used as the S3 object key
 * @param keyname  user-metadata key
 * @param key      user-metadata value
 */
private static void putS3Object(CommandLine cmd, AmazonS3EncryptionClient s3, String bucket, String filename,
        String keyname, String key) {
    // Fix: removed the unused local 'searchArgs' (cmd.getOptionValues("put") was never read).
    System.out.println("Uploading a new object to S3 BUCKET = " + bucket + " FILENAME = " + filename);
    final File file = new File(filename);
    final PutObjectRequest request = new PutObjectRequest(bucket, filename, file);
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata(keyname, key);
    request.setMetadata(metadata);
    s3.putObject(request);
    System.out.println();
}