Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest.

Prototype

public PutObjectRequest(String bucketName, String key, String redirectLocation) 

Source Link

Document

Constructs a new PutObjectRequest object to perform a redirect for the specified bucket and key. (Note: the examples below mostly use a different overload, PutObjectRequest(String bucketName, String key, File file), which uploads a local file.)

Usage

From source file:com.easarrive.aws.plugins.common.service.impl.S3Service.java

License:Open Source License

/**
 * Uploads a file to S3 under the given bucket/key, attaching an ACL built from
 * a single grantee/permission pair, explicit grants, or both.
 *
 * @param client        S3 client to use for the upload
 * @param bucketName    target bucket
 * @param key           target object key
 * @param file          local file to upload
 * @param grantee       optional grantee for a single permission grant
 * @param permission    permission paired with {@code grantee}
 * @param grantsVarArg  optional additional grants to apply
 * @return the put result, or {@code null} when any required argument is missing
 */
private PutObjectResult putObject(AmazonS3 client, String bucketName, String key, File file, Grantee grantee,
        Permission permission, Grant... grantsVarArg) {
    // Derived flags: is there a usable single grant / usable var-arg grants?
    boolean hasSingleGrant = (grantee != null && permission != null);
    boolean hasVarArgGrants = (grantsVarArg != null && grantsVarArg.length > 0);

    // Single guard clause: bail out when we lack a client, a target location,
    // a payload, or any ACL information at all.
    if (client == null || StringUtil.isEmpty(bucketName) || StringUtil.isEmpty(key) || file == null
            || (!hasSingleGrant && !hasVarArgGrants)) {
        return null;
    }

    AccessControlList acl = new AccessControlList();
    if (hasSingleGrant) {
        acl.grantPermission(grantee, permission);
    }
    if (hasVarArgGrants) {
        acl.grantAllPermissions(grantsVarArg);
    }

    PutObjectRequest request = new PutObjectRequest(bucketName, key, file).withAccessControlList(acl);
    return client.putObject(request);
}

From source file:com.eBilling.util.S3Example.java

/**
 * Uploads {@code uploadFileName} to the configured bucket as a publicly
 * readable object and prints the resulting public URL. AWS errors are
 * reported to stdout rather than rethrown.
 */
void uploadfile(AWSCredentials credentials) {
    AmazonS3 s3client = new AmazonS3Client(credentials);

    try {
        PutObjectRequest request = new PutObjectRequest(bucketName, keyName, new File(uploadFileName));
        // Anyone with the URL may read the object.
        request.setCannedAcl(CannedAccessControlList.PublicRead);
        s3client.putObject(request);

        String _finalUrl = "https://" + bucketName + ".s3.amazonaws.com/" + keyName;
        System.out.println(_finalUrl);
    } catch (AmazonServiceException ase) {
        // Request reached S3 but was rejected.
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // Client-side failure before the request reached S3.
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.example.S3Sample02.java

License:Open Source License

/**
 * Walks through basic Amazon S3 operations against the fixed test bucket
 * "imos-test-data-1": uploads a sample file under a randomized key, downloads
 * it back, and lists objects with the "My" prefix. Bucket creation/deletion
 * and object deletion are present but commented out.
 *
 * @throws IOException if the sample file cannot be created or read
 */
public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);

    //        AP_SOUTHEAST_2

    //       Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_2 );
    //       s3.setRegion(usWest2);

    //        String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();

    String bucketName = "imos-test-data-1";

    // Random suffix keeps repeated runs from overwriting each other's objects.
    String key = "MyObjectKey" + UUID.randomUUID();

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        //            System.out.println("Creating bucket " + bucketName + "\n");
        //            s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        /*            System.out.println("Listing buckets");
                    for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
                    }
                    System.out.println();
        */

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        System.out.println("done\n");

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        System.out.println("done\n");

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();
        System.out.println("done\n");

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */

        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        /*
                    System.out.println("Deleting bucket " + bucketName + "\n");
                    s3.deleteBucket(bucketName);
        */

    } catch (AmazonServiceException ase) {
        // Request reached S3 but was rejected with an error response.
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // Client-side failure before the request reached S3 (e.g. no network).
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.exedosoft.plat.storage.s3.S3Sample.java

License:Open Source License

/**
 * End-to-end S3 walkthrough: creates a uniquely named bucket in us-west-2,
 * lists buckets, uploads a sample file, downloads it back, and lists objects
 * by prefix. Object and bucket deletion are present but commented out.
 *
 * @throws IOException if the sample file cannot be created or read
 */
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    // Random suffix keeps the bucket name globally unique across runs/users.
    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //            System.out.println("Deleting bucket " + bucketName + "\n");
        //            s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        // Request reached S3 but was rejected with an error response.
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // Client-side failure before the request reached S3 (e.g. no network).
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.fivepebbles.ProcessAWS.java

License:MIT License

/**
 * Uploads the given file to S3 under the supplied key.
 *
 * @param myKey      object key to store the file under
 * @param myFile     local file to upload
 * @param bucketName target bucket
 * @return {@code true} on success, {@code false} when the request fails
 */
public boolean putAWSObject(String myKey, File myFile, String bucketName) {
    boolean b2 = true;

    try {
        // Result intentionally discarded: success is signalled by the absence
        // of an exception. (Previously the result was bound to an unused local.)
        s3client.putObject(new PutObjectRequest(bucketName, myKey, myFile));
    }

    catch (AmazonServiceException ase) {
        //***TODO*** Log message (request reached S3 but was rejected)
        b2 = false;
    }

    catch (AmazonClientException ace) {
        //***TODO*** Log message (client-side failure before reaching S3)
        b2 = false;
    }
    return b2;
}

From source file:com.flipzu.PostProcThread.java

License:Apache License

/**
 * Uploads a finished broadcast's audio file to the configured S3 bucket as a
 * publicly readable "audio/mpeg" object, optionally deleting the local file
 * afterwards.
 *
 * @param bcast  broadcast whose file should be uploaded
 * @param delete when {@code true}, the local file is removed after a successful upload
 * @return {@code true} when the upload succeeded, {@code false} otherwise
 */
private boolean uploadToS3(Broadcast bcast, boolean delete) {
    debug.logPostProc("PostProcThread, S3 upload for " + bcast.getFilename());

    if (bcast.getFilename() == null) {
        debug.logPostProc("PostProcThread, uploadToS3, filename is null");
        return false;
    }

    File file = new File(bcast.getFilename());
    if (!file.exists()) {
        debug.logPostProc("PostProcThread, uploadToS3, " + bcast.getFilename() + " does not exist");
        return false;
    }

    AmazonS3 s3 = null;

    // try-with-resources guarantees the properties stream is closed;
    // the previous version opened it and never closed it (resource leak).
    try (InputStream is = new FileInputStream("aws.properties")) {
        s3 = new AmazonS3Client(new PropertiesCredentials(is));
    } catch (Exception e) {
        Debug.getInstance().logError("uploadToS3 Error ", e);
        return false;
    }

    String bucketName = Config.getInstance().getS3Bucket();
    String dirName = Config.getInstance().getS3dir();
    String objName = dirName + "/" + bcast.getId() + Config.getInstance().getFileWriterExtension();

    PutObjectRequest po = new PutObjectRequest(bucketName, objName, file);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType("audio/mpeg");
    po.setMetadata(metadata);
    po.setCannedAcl(CannedAccessControlList.PublicRead);

    try {
        s3.putObject(po);
    } catch (AmazonServiceException ase) {
        // Request reached S3 but was rejected with an error response.
        debug.logPostProc("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        debug.logPostProc("Error Message:    " + ase.getMessage());
        debug.logPostProc("HTTP Status Code: " + ase.getStatusCode());
        debug.logPostProc("AWS Error Code:   " + ase.getErrorCode());
        debug.logPostProc("Error Type:       " + ase.getErrorType());
        debug.logPostProc("Request ID:       " + ase.getRequestId());
        return false;

    } catch (AmazonClientException ace) {
        // Client-side failure before the request reached S3 (e.g. no network).
        debug.logPostProc("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        debug.logPostProc("Error Message: " + ace.getMessage());
        return false;
    }

    if (delete) {
        if (Config.getInstance().deleteSmallBcasts())
            /* check and remove empty/short broadcasts */
            cleanCrappyBroadcasts(bcast.getKey(), file);

        debug.logPostProc("uploadToS3, deleting file " + bcast.getFilename());
        file.delete();
    }

    return true;

}

From source file:com.gemmystar.api.contents.S3UploadScheduledTask.java

License:Open Source License

/**
 * Pushes the uploaded file to S3 under the video key prefix, records the
 * resulting S3 key against the contents id, and deletes the local file on
 * success. Any failure is logged and swallowed.
 */
public void uploadToS3(File uploadedFile) {

    try {
        final AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
        LOGGER.debug("Uploading a new object to S3 from a {}", uploadedFile.getName());

        final String keyName = GemmyConstant.S3_KEY_PREFIX_VIDEO + uploadedFile.getName();
        s3client.putObject(new PutObjectRequest(s3BucketName, keyName, uploadedFile));

        // Persist the mapping from contents id to the stored S3 key.
        contentsService.saveS3Key(getContentsId(uploadedFile.getName()), keyName);

        LOGGER.debug("upload success.");

        // Local copy is no longer needed once it lives in S3.
        uploadedFile.delete();

    } catch (Exception e) {
        LOGGER.error(e.toString(), e);
    }
}

From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java

License:Open Source License

@Override
public PutObjectResult uploadObject(final String bucketName, final String fileName,
        final InputStream inputStream, final CannedAccessControlList cannedAcl)
        throws AmazonClientException, AmazonServiceException, IOException {
    // Uploads the stream content as an S3 object with the given canned ACL.
    // The stream is buffered to a temporary file first so that large uploads
    // do not exhaust memory; the file is always removed afterwards.
    LOGGER.info("uploadObject invoked, bucketName: {} , fileName: {}, cannedAccessControlList: {}", bucketName,
            fileName, cannedAcl);
    File tempFile = null;
    try {
        tempFile = AWSUtil.createTempFileFromStream(inputStream);
        return uploadObject(new PutObjectRequest(bucketName, fileName, tempFile).withCannedAcl(cannedAcl));
    } finally {
        // Clean up the temporary file whether or not the upload succeeded.
        AWSUtil.deleteTempFile(tempFile);
    }
}

From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java

License:Open Source License

@Override
public PutObjectResult uploadObject(final String bucketName, final String fileName,
        final InputStream inputStream, final boolean isPublicAccessible)
        throws AmazonClientException, AmazonServiceException, IOException {
    // Uploads the stream content as an S3 object, optionally making it
    // world-readable. The stream is buffered to a temporary file first so
    // large uploads do not exhaust memory; the file is always removed.
    LOGGER.info("uploadObject invoked, bucketName: {} , fileName: {} and isPublicAccessible: {}", bucketName,
            fileName, isPublicAccessible);
    File tempFile = null;
    try {
        tempFile = AWSUtil.createTempFileFromStream(inputStream);
        final PutObjectRequest request = new PutObjectRequest(bucketName, fileName, tempFile);
        if (isPublicAccessible) {
            request.setCannedAcl(CannedAccessControlList.PublicRead);
        }
        return uploadObject(request);
    } finally {
        // Clean up the temporary file whether or not the upload succeeded.
        AWSUtil.deleteTempFile(tempFile);
    }
}

From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java

License:Open Source License

/**
 * Uploads the stream content to S3 via {@link TransferManager}, logging start
 * and end progress, and blocks until the transfer finishes.
 *
 * Fixes over the previous version: the per-call TransferManager is now shut
 * down (its worker threads leaked before), and an {@code InterruptedException}
 * re-asserts the thread's interrupt status instead of being swallowed.
 *
 * @return {@code true} when the transfer completed, {@code false} otherwise
 */
@Override
public boolean uploadObjectAndListenProgress(final String bucketName, final String fileName,
        final InputStream inputStream, final CannedAccessControlList cannedAcl)
        throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info(
            "uploadObjectAndListenProgress invoked, bucketName: {} , fileName: {} and cannedAccessControlList: {}",
            bucketName, fileName, cannedAcl);
    File tempFile = null;
    PutObjectRequest putObjectRequest = null;
    Upload upload = null;
    try {
        // Create temporary file from stream to avoid 'out of memory' exception
        tempFile = AWSUtil.createTempFileFromStream(inputStream);
        putObjectRequest = new PutObjectRequest(bucketName, fileName, tempFile).withCannedAcl(cannedAcl);
        final TransferManager transferMgr = new TransferManager(s3client);
        try {
            upload = transferMgr.upload(putObjectRequest);
            // You can poll your transfer's status to check its progress
            if (upload.isDone()) {
                LOGGER.info("Start: {}  , State: {} and Progress (%): {}", upload.getDescription(),
                        upload.getState(), upload.getProgress().getPercentTransferred());
            }

            // Add progressListener to listen asynchronous notifications about your transfer's progress
            // Uncomment below code snippet during development
            /*upload.addProgressListener(new ProgressListener() {
               public void progressChanged(ProgressEvent event) {
                  if (LOGGER.isDebugEnabled()) {
              LOGGER.debug("Transferred bytes: " + (long) event.getBytesTransferred());
                  }
            }
            });*/

            try {
                //Block the current thread and wait for completion
                //If the transfer fails AmazonClientException will be thrown
                upload.waitForCompletion();
            } catch (InterruptedException excp) {
                // Restore the interrupt status so callers can observe the interruption.
                Thread.currentThread().interrupt();
                LOGGER.error("Exception occured while waiting for transfer: ", excp);
            } catch (AmazonClientException excp) {
                LOGGER.error("Exception occured while waiting for transfer: ", excp);
            }
        } finally {
            // Release the TransferManager's worker threads; 'false' keeps the
            // shared s3client open for other operations on this service.
            transferMgr.shutdownNow(false);
        }
    } finally {
        AWSUtil.deleteTempFile(tempFile); // Delete the temporary file once uploaded
    }
    LOGGER.info("End: {} , State: {} , Progress (%): {}", upload.getDescription(), upload.getState(),
            upload.getProgress().getPercentTransferred());
    return upload.isDone();
}