Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest.

Prototype

public PutObjectRequest(String bucketName, String key, File file)

Document

Constructs a new PutObjectRequest object to upload a file to the specified bucket and key.
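
The examples below all follow the same pattern: build the request from a bucket name, a key, and a local File, optionally attach metadata, a canned ACL, or a storage class, then hand the request to an AmazonS3 client or a TransferManager. The following minimal sketch illustrates that pattern; the bucket name, key, and file path are placeholder values, and it assumes the AWS SDK for Java v1 with AmazonS3ClientBuilder on the classpath and credentials available via the default provider chain.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

public class PutObjectSketch {
    public static void main(String[] args) {
        // Placeholder values; replace with your own bucket, key, and local file.
        String bucketName = "example-bucket";
        String key = "example/report.txt";
        File file = new File("/tmp/report.txt");

        // Uses the default credential provider chain and region configuration.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Optional metadata (content type, cache headers, ...) attached to the request.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("text/plain");

        PutObjectRequest request = new PutObjectRequest(bucketName, key, file);
        request.setMetadata(metadata);

        PutObjectResult result = s3.putObject(request);
        System.out.println("Uploaded " + key + ", ETag: " + result.getETag());
    }
}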

Usage

From source file:br.com.ingenieux.mojo.beanstalk.bundle.UploadSourceBundleMojo.java

License:Apache License

protected Object executeInternal() throws Exception {
    String path = artifactFile.getPath();

    if (!(path.endsWith(".war") || path.endsWith(".jar") || path.endsWith(".zip"))) {
        getLog().warn("Not a war/jar/zip file. Skipping");

        return null;
    }

    if (!artifactFile.exists()) {
        throw new MojoFailureException("Artifact File does not exist! (file=" + path + ")");
    }

    BeanstalkerS3Client client = new BeanstalkerS3Client(getAWSCredentials(), getClientConfiguration(),
            getRegion());

    client.setMultipartUpload(multipartUpload);
    client.setSilentUpload(silentUpload);

    if (StringUtils.isBlank(s3Bucket)) {
        getLog().info("S3 Bucket not defined.");
        s3Bucket = getService().createStorageLocation().getS3Bucket();

        getLog().info("Using defaults, like: " + s3Bucket);

        project.getProperties().put("beanstalk.s3Bucket", s3Bucket);
    }

    getLog().info("Target Path: s3://" + s3Bucket + "/" + s3Key);
    getLog().info("Uploading artifact file: " + path);

    PutObjectResult result = client.putObject(new PutObjectRequest(s3Bucket, s3Key, artifactFile));

    getLog().info("Artifact Uploaded");

    project.getProperties().put("beanstalk.s3Key", s3Key);

    return result;
}

From source file:br.com.tamandua.aws.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(
            new PropertiesCredentials(S3Sample.class.getResourceAsStream("AwsCredentials.properties")));

    String bucketName = "test-bucket-everton-" + UUID.randomUUID();
    String key = "somekey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("some"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:br.com.unb.aws.client.S3ClientV1.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users) 
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */

    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:br.puc_rio.ele.lvc.interimage.core.datamanager.AWSSource.java

License:Apache License

public void put(String from, String to, Resource resource) {

        try {

            File file = new File(from);

            PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, to, file);

            if (resource instanceof SplittableResource) {
                SplittableResource rsrc = (SplittableResource) resource;
                if (rsrc.getType() == SplittableResource.IMAGE) {
                    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead); // public for all
                }
            } else if (resource instanceof DefaultResource) {
                DefaultResource rsrc = (DefaultResource) resource;
                if (rsrc.getType() == DefaultResource.TILE) {
                    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead); // public for all
                } else if (rsrc.getType() == DefaultResource.FUZZY_SET) {
                    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead); // public for all
                } else if (rsrc.getType() == DefaultResource.SHAPE) {
                    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead); // public for all
                }
            }

            Upload upload = _manager.upload(putObjectRequest);

            upload.waitForCompletion();

            System.out.println("AWSSource: Uploaded file - " + to);

        } catch (Exception e) {
            System.err.println("Source put failed: " + e.getMessage());
        }

    }

From source file:br.unb.bionimbuz.storage.bucket.methods.CloudMethodsAmazonGoogle.java

@Override
public void StorageUploadFile(BioBucket bucket, String bucketPath, String localPath, String fileName)
        throws Exception {

    switch (bucket.getProvider()) {

    case AMAZON: {

        File file = new File(localPath + fileName);
        s3client.setEndpoint(bucket.getEndPoint());
        s3client.putObject(new PutObjectRequest(bucket.getName(), bucketPath.substring(1) + fileName, file));

        break;
    }
    case GOOGLE: {

        String command = gcloudFolder + "gsutil cp " + localPath + fileName + " gs://" + bucket.getName()
                + bucketPath + fileName;
        ExecCommand(command);

        break;
    }
    default: {
        throw new Exception("Provedor incorreto!");
    }
    }
}

From source file:br.unb.cic.bionimbuz.services.storage.bucket.methods.CloudMethodsAmazonGoogle.java

@Override
public void StorageUploadFile(BioBucket bucket, String bucketPath, String localPath, String fileName)
        throws Exception {

    switch (bucket.getProvider()) {

    case AMAZON: {

        final File file = new File(localPath + fileName);
        s3client.setEndpoint(bucket.getEndPoint());
        s3client.putObject(new PutObjectRequest(bucket.getName(), bucketPath.substring(1) + fileName, file));

        break;
    }
    case GOOGLE: {

        final String command = gcloudFolder + "gsutil cp " + localPath + fileName + " gs://" + bucket.getName()
                + bucketPath + fileName;
        ExecCommand(command);

        break;
    }
    default: {
        throw new Exception("Provedor incorreto!");
    }
    }
}

From source file:ch.entwine.weblounge.maven.S3DeployMojo.java

License:Open Source License

/**
 * {@inheritDoc}
 * 
 * @see org.apache.maven.plugin.Mojo#execute()
 */
public void execute() throws MojoExecutionException, MojoFailureException {

    // Setup AWS S3 client
    AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
    AmazonS3Client uploadClient = new AmazonS3Client(credentials);
    TransferManager transfers = new TransferManager(credentials);

    // Make sure key prefix does not start with a slash but has one at the
    // end
    if (keyPrefix.startsWith("/"))
        keyPrefix = keyPrefix.substring(1);
    if (!keyPrefix.endsWith("/"))
        keyPrefix = keyPrefix + "/";

    // Keep track of how much data has been transferred
    long totalBytesTransferred = 0L;
    int items = 0;
    Queue<Upload> uploads = new LinkedBlockingQueue<Upload>();

    try {
        // Check if S3 bucket exists
        getLog().debug("Checking whether bucket " + bucket + " exists");
        if (!uploadClient.doesBucketExist(bucket)) {
            getLog().error("Desired bucket '" + bucket + "' does not exist!");
            return;
        }

        getLog().debug("Collecting files to transfer from " + resources.getDirectory());
        List<File> res = getResources();
        for (File file : res) {
            // Make path of resource relative to resources directory
            String filename = file.getName();
            String extension = FilenameUtils.getExtension(filename);
            String path = file.getPath().substring(resources.getDirectory().length());
            String key = concat("/", keyPrefix, path).substring(1);

            // Delete old file version in bucket
            getLog().debug("Removing existing object at " + key);
            uploadClient.deleteObject(bucket, key);

            // Setup meta data
            ObjectMetadata meta = new ObjectMetadata();
            meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600));

            FileInputStream fis = null;
            GZIPOutputStream gzipos = null;
            final File fileToUpload;

            if (gzip && ("js".equals(extension) || "css".equals(extension))) {
                try {
                    fis = new FileInputStream(file);
                    File gzFile = File.createTempFile(file.getName(), null);
                    gzipos = new GZIPOutputStream(new FileOutputStream(gzFile));
                    IOUtils.copy(fis, gzipos);
                    fileToUpload = gzFile;
                    meta.setContentEncoding("gzip");
                    if ("js".equals(extension))
                        meta.setContentType("text/javascript");
                    if ("css".equals(extension))
                        meta.setContentType("text/css");
                } catch (FileNotFoundException e) {
                    getLog().error(e);
                    continue;
                } catch (IOException e) {
                    getLog().error(e);
                    continue;
                } finally {
                    IOUtils.closeQuietly(fis);
                    IOUtils.closeQuietly(gzipos);
                }
            } else {
                fileToUpload = file;
            }

            // Do a random check for existing errors before starting the next upload
            if (erroneousUpload != null)
                break;

            // Create put object request
            long bytesToTransfer = fileToUpload.length();
            totalBytesTransferred += bytesToTransfer;
            PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload);
            request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer));
            request.setMetadata(meta);

            // Schedule put object request
            getLog().info(
                    "Uploading " + key + " (" + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")");
            Upload upload = transfers.upload(request);
            uploads.add(upload);
            items++;
        }
    } catch (AmazonServiceException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    } catch (AmazonClientException e) {
        getLog().error("Uploading resources failed: " + e.getMessage());
    }

    // Wait for uploads to be finished
    String currentUpload = null;
    try {
        Thread.sleep(1000);
        getLog().info("Waiting for " + uploads.size() + " uploads to finish...");
        while (!uploads.isEmpty()) {
            Upload upload = uploads.poll();
            currentUpload = upload.getDescription().substring("Uploading to ".length());
            if (TransferState.InProgress.equals(upload.getState()))
                getLog().debug("Waiting for upload " + currentUpload + " to finish");
            upload.waitForUploadResult();
        }
    } catch (AmazonServiceException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (AmazonClientException e) {
        throw new MojoExecutionException("Error while uploading " + currentUpload);
    } catch (InterruptedException e) {
        getLog().debug("Interrupted while waiting for upload to finish");
    }

    // Check for errors that happened outside of the actual uploading
    if (erroneousUpload != null) {
        throw new MojoExecutionException("Error while uploading " + erroneousUpload);
    }

    getLog().info("Deployed " + items + " files ("
            + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket);
}

From source file:cloudExplorer.Put.java

License:Open Source License

public void run() {
    try {
        AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
        AmazonS3 s3Client = new AmazonS3Client(credentials,
                new ClientConfiguration().withSignerOverride("S3SignerType"));
        s3Client.setEndpoint(endpoint);
        TransferManager tx = new TransferManager(s3Client);
        File file = new File(what);
        PutObjectRequest putRequest;
        if (!rrs) {
            putRequest = new PutObjectRequest(bucket, ObjectKey, file);
        } else {
            putRequest = new PutObjectRequest(bucket, ObjectKey, file)
                    .withStorageClass(StorageClass.ReducedRedundancy);
        }
        MimetypesFileTypeMap mimeTypesMap = new MimetypesFileTypeMap();
        String mimeType = mimeTypesMap.getContentType(file);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        if (encrypt) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if ((ObjectKey.contains(".html")) || ObjectKey.contains(".txt")) {
            objectMetadata.setContentType("text/html");
        } else {
            objectMetadata.setContentType(mimeType);
        }
        long t1 = System.currentTimeMillis();
        putRequest.setMetadata(objectMetadata);
        Upload myUpload = tx.upload(putRequest);
        myUpload.waitForCompletion();
        tx.shutdownNow();
        long t2 = System.currentTimeMillis();
        long diff = t2 - t1;

        if (!mainFrame.perf) {
            if (terminal) {
                System.out.print("\nUploaded object: " + ObjectKey + " in " + diff / 1000 + " second(s).\n");
            } else {
                mainFrame.jTextArea1
                        .append("\nUploaded object: " + ObjectKey + " in " + diff / 1000 + " second(s).");
            }
        }
    } catch (AmazonServiceException ase) {
        if (NewJFrame.gui) {
            mainFrame.jTextArea1.append("\n\nError Message:    " + ase.getMessage());
            mainFrame.jTextArea1.append("\nHTTP Status Code: " + ase.getStatusCode());
            mainFrame.jTextArea1.append("\nAWS Error Code:   " + ase.getErrorCode());
            mainFrame.jTextArea1.append("\nError Type:       " + ase.getErrorType());
            mainFrame.jTextArea1.append("\nRequest ID:       " + ase.getRequestId());
            calibrate();
        } else {
            System.out.print("\n\nError Message:    " + ase.getMessage());
            System.out.print("\nHTTP Status Code: " + ase.getStatusCode());
            System.out.print("\nAWS Error Code:   " + ase.getErrorCode());
            System.out.print("\nError Type:       " + ase.getErrorType());
            System.out.print("\nRequest ID:       " + ase.getRequestId());
        }
    } catch (Exception put) {
    }

    calibrate();
}

From source file:com.aegeus.aws.SimpleStorageService.java

License:Apache License

public void upload(String bucket, String key, File file) {
    PutObjectRequest request = new PutObjectRequest(bucket, key, file);
    s3.putObject(request);
}