Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

This page collects example usages of the com.amazonaws.services.s3.model.PutObjectRequest constructor from open-source projects.

Prototype

public PutObjectRequest(String bucketName, String key, File file) 

Document

Constructs a new PutObjectRequest object to upload a file to the specified bucket and key.
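
The constructor has several commonly used overloads. The sketch below shows three of them side by side; the bucket name, keys, file path, and content are placeholder assumptions, not values taken from the examples on this page:

import java.io.ByteArrayInputStream;
import java.io.File;

import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestOverloads {
    public static void main(String[] args) {
        // Upload a local file to a bucket/key.
        PutObjectRequest fromFile =
                new PutObjectRequest("my-bucket", "docs/report.pdf", new File("/tmp/report.pdf"));

        // Upload from a stream; set the length so the SDK need not buffer the stream.
        byte[] body = "hello".getBytes();
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(body.length);
        PutObjectRequest fromStream = new PutObjectRequest(
                "my-bucket", "docs/hello.txt", new ByteArrayInputStream(body), meta);

        // Register a website redirect location for a key instead of uploading content.
        PutObjectRequest redirect =
                new PutObjectRequest("my-bucket", "old-page.html", "/new-page.html");
    }
}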

Usage

From source file:org.applicationMigrator.serverAgent.ServerAgentFileTransferClient.java

License:Apache License

private void uploadFile(AWSCredentials awsCredentials, String sourcePathString, String destinationPathString,
        boolean forceUpload) throws FileNotFoundException {
    // TODO Think about one file being used by many apps (e.g HP1.pdf read
    // through Adobe reader and OpenOffice)
    AmazonS3 s3client = new AmazonS3Client(awsCredentials);
    boolean fileIsPresentOnServer = checkIfFileIsPresentOnServer(s3client, BUCKET_NAME, destinationPathString);
    if (fileIsPresentOnServer && !forceUpload)
        return;
    try {
        File file = new File(sourcePathString);
        if (!file.exists())
            throw new FileNotFoundException();
        s3client.putObject(new PutObjectRequest(BUCKET_NAME, destinationPathString, file));
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
        throw ase;
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
        throw ace;
    }
    // TODO: verify completion of upload operation

}
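
One way to resolve the TODO above is to read back the uploaded object's metadata and compare sizes. A minimal sketch, assuming the same s3client and BUCKET_NAME; the helper name is hypothetical:

private static boolean verifyUpload(AmazonS3 s3client, String key, File file) {
    // Compare the stored content length with the local file size.
    ObjectMetadata meta = s3client.getObjectMetadata(BUCKET_NAME, key);
    return meta.getContentLength() == file.length();
}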

From source file:org.benetech.secureapp.generator.AmazonS3Utils.java

License:Open Source License

public static void uploadToAmazonS3(HttpSession session, File fileToUpload) throws S3Exception {
    try {
        AmazonS3 s3client = getS3();
        String bucketName = getDownloadS3Bucket();
        if (!s3client.doesBucketExist(bucketName))
            SagLogger.logError(session, "S3 bucket does not exist: " + bucketName);

        AccessControlList acl = new AccessControlList();
        acl.grantPermission(GroupGrantee.AllUsers, Permission.Read);
        s3client.putObject(
                new PutObjectRequest(bucketName, getAPKDownloadFilePathWithFile(fileToUpload.getName()),
                        fileToUpload).withAccessControlList(acl));

        SagLogger.logInfo(session, "Finished uploading to S3");
    } catch (Exception e) {
        SagLogger.logException(session, e);
        throw new S3Exception(e);
    }
}

From source file:org.boriken.s3fileuploader.S3SampleRefactored.java

License:Open Source License

public static void createFile(AmazonS3 s3, String bucketName, String key) throws IOException {
    /*
      * Upload an object to your bucket - You can easily upload a file to
      * S3, or upload an InputStream directly if you know the length of
      * the data in the stream. You can also specify your own metadata
      * when uploading to S3, which allows you to set a variety of options
      * like content-type and content-encoding, plus additional metadata
      * specific to your applications.
      */
    System.out.println("Uploading a new object to S3 from a file\n");
    s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));
}
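
The comment above also mentions stream uploads with caller-supplied metadata, which this method does not exercise. A sketch of that variant with placeholder content; the method name is hypothetical:

public static void createFromStream(AmazonS3 s3, String bucketName, String key) {
    byte[] body = "sample content".getBytes();
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(body.length); // set the length so the SDK need not buffer the stream
    metadata.setContentType("text/plain");
    s3.putObject(new PutObjectRequest(bucketName, key, new ByteArrayInputStream(body), metadata));
}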

From source file:org.broadleafcommerce.vendor.amazon.s3.S3FileServiceProvider.java

License:Apache License

protected List<String> addOrUpdateResourcesInternal(S3Configuration s3config, AmazonS3Client s3,
        FileWorkArea workArea, List<File> files, boolean removeFilesFromWorkArea) {
    final List<String> resourcePaths = new ArrayList<String>();
    for (final File srcFile : files) {
        if (!srcFile.getAbsolutePath().startsWith(workArea.getFilePathLocation())) {
            throw new FileServiceException("Attempt to update file " + srcFile.getAbsolutePath()
                    + " that is not in the passed in WorkArea " + workArea.getFilePathLocation());
        }
        final long ts1 = System.currentTimeMillis();
        final String fileName = srcFile.getAbsolutePath().substring(workArea.getFilePathLocation().length());
        final String resourceName = buildResourceName(s3config, fileName);

        ObjectMetadata meta = null;
        try {
            final GetObjectMetadataRequest get = new GetObjectMetadataRequest(s3config.getDefaultBucketName(),
                    resourceName);
            meta = s3.getObjectMetadata(get);
        } catch (AmazonS3Exception ex) {
            // The object does not exist yet (or its metadata could not be read),
            // so treat it as absent and upload it below.
            meta = null;
        }
        final long ts2 = System.currentTimeMillis();

        if (meta == null || meta.getContentLength() != srcFile.length()) {
            final PutObjectRequest put = new PutObjectRequest(s3config.getDefaultBucketName(), resourceName,
                    srcFile);

            if ((s3config.getStaticAssetFileExtensionPattern() != null) && s3config
                    .getStaticAssetFileExtensionPattern().matcher(getExtension(fileName)).matches()) {
                put.setCannedAcl(CannedAccessControlList.PublicRead);
            }

            s3.putObject(put);
            final long ts3 = System.currentTimeMillis();

            if (LOG.isTraceEnabled()) {
                final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
                final String msg = String.format(
                        "%s copied/updated to %s; queryTime = %dms; uploadTime = %dms; totalTime = %dms",
                        srcFile.getAbsolutePath(), s3Uri, ts2 - ts1, ts3 - ts2, ts3 - ts1);

                LOG.trace(msg);
            }
        } else {
            if (LOG.isTraceEnabled()) {
                final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
                final String msg = String.format(
                        "%s already at %s with same filesize = %dbytes; queryTime = %dms",
                        srcFile.getAbsolutePath(), s3Uri, srcFile.length(), ts2 - ts1);

                LOG.trace(msg);
            }
        }

        resourcePaths.add(fileName);
    }
    return resourcePaths;
}

From source file:org.clothocad.phagebook.adaptors.S3Adapter.java

public static void uploadProfilePicture(String clothoId, String filePath) {
    //String clothoId = pers.getId();
    AWSCredentials credentials = new BasicAWSCredentials(S3Credentials.getUsername(),
            S3Credentials.getPassword());
    System.out.println("Login Complete");

    AmazonS3 s3client = new AmazonS3Client(credentials);
    String fileName = clothoId + "/" + "profilePicture.jpg";
    s3client.putObject(new PutObjectRequest("phagebookaws", fileName, new File(filePath))
            .withCannedAcl(CannedAccessControlList.PublicRead));

}

From source file:org.clothocad.phagebook.adaptors.S3Adapter.java

public static void uploadProfilePicture(String clothoId, File file) {
    AWSCredentials credentials = new BasicAWSCredentials(S3Credentials.getUsername(),
            S3Credentials.getPassword());
    System.out.println("Login Complete");

    AmazonS3 s3client = new AmazonS3Client(credentials);
    String fileName = clothoId + "/" + "profilePicture.jpg";
    s3client.putObject(new PutObjectRequest("phagebookaws", fileName, file)
            .withCannedAcl(CannedAccessControlList.PublicRead));
}

From source file:org.cloudifysource.esc.driver.provisioning.privateEc2.AmazonS3Uploader.java

License:Open Source License

/**
 * Uploads a file.
 * 
 * @param bucketFullPath
 *            The bucket into which to upload the file.
 * @param file
 *            The file to upload.
 * @return The uploaded object, read back from S3.
 */
public S3Object uploadFile(final String bucketFullPath, final File file) {
    final BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
            .withId("Delete cloudFolder archives").withPrefix(this.extractPrefix(bucketFullPath) + ZIP_PREFIX)
            .withExpirationInDays(1).withStatus(BucketLifecycleConfiguration.ENABLED.toString());
    final List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
    rules.add(ruleArchiveAndExpire);
    final BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);
    this.s3client.setBucketLifecycleConfiguration(bucketFullPath, configuration);

    // Note: this.accessKey is passed as the object key here, but it is
    // immediately overwritten with the file name on the next line.
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketFullPath, this.accessKey, file);
    putObjectRequest.setKey(file.getName());
    final ObjectMetadata metadata = new ObjectMetadata();
    putObjectRequest.setMetadata(metadata);
    this.s3client.putObject(putObjectRequest);

    final S3Object object = this.s3client.getObject(bucketFullPath, file.getName());
    return object;
}
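
If callers actually need a URL, as the original Javadoc suggested, a pre-signed URL can be generated instead of re-reading the object. A sketch, assuming the same s3client field; the helper name and the one-hour expiry are illustrative:

public URL getDownloadUrl(final String bucketFullPath, final String key) {
    // Time-limited download URL; needs java.net.URL and java.util.Date imports.
    final Date expiration = new Date(System.currentTimeMillis() + 3600L * 1000L);
    return this.s3client.generatePresignedUrl(bucketFullPath, key, expiration);
}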

From source file:org.cloudifysource.s3client.S3AWSAPIDeployMojo.java

License:Open Source License

private void uploadFile(AmazonS3 s3, String container, String target, File source) throws MojoFailureException {
    if (source.isDirectory()) {
        for (File f : source.listFiles()) {
            uploadFile(s3, container, target + "/" + f.getName(),
                    new File(source.getPath() + "/" + f.getName()));
        }
    } else {
        getLog().info(
                "Processing " + source + ", upload size is: " + (source).length() + ". Target: " + target);
        s3.putObject(new PutObjectRequest(container, target, source)
                .withCannedAcl(CannedAccessControlList.PublicRead));
        getLog().info("Upload of " + source + " was ended successfully");

    }
}

From source file:org.cto.VVS3Box.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "lior.test-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
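
Two features the comments above mention but the sample never exercises are ranged downloads and paginated listings. A sketch of both, assuming the same s3 client, bucketName, and key as in main:

// Download only the first kilobyte of the object (bytes 0-1023).
S3Object firstKb = s3.getObject(new GetObjectRequest(bucketName, key).withRange(0, 1023));

// Page through a listing that may be truncated.
ObjectListing listing = s3.listObjects(bucketName);
while (true) {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        System.out.println(" - " + summary.getKey());
    }
    if (!listing.isTruncated()) {
        break;
    }
    listing = s3.listNextBatchOfObjects(listing);
}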

From source file:org.deeplearning4j.legacyExamples.EmrSparkExample.java

License:Apache License

public void entryPoint(String[] args) {
    JCommander jcmdr = new JCommander(this);
    try {
        jcmdr.parse(args);
    } catch (ParameterException e) {
        jcmdr.usage();
        try {
            Thread.sleep(500);
        } catch (Exception e2) {
            // Ignore; the brief pause only lets the usage message finish printing.
        }
        throw e;
    }

    AmazonElasticMapReduceClientBuilder builder = AmazonElasticMapReduceClientBuilder.standard();
    builder.withRegion(region);
    builder.withCredentials(getCredentialsProvider());

    AmazonElasticMapReduce emr = builder.build();

    List<StepConfig> steps = new ArrayList<>();

    if (upload) {
        log.info("uploading uber jar");

        AmazonS3ClientBuilder s3builder = AmazonS3ClientBuilder.standard();
        s3builder.withRegion(region);
        s3builder.withCredentials(getCredentialsProvider());
        AmazonS3 s3Client = s3builder.build();

        if (!s3Client.doesBucketExist(bucketName)) {
            s3Client.createBucket(bucketName);
        }

        File uberJarFile = new File(uberJar);

        s3Client.putObject(new PutObjectRequest(bucketName, uberJarFile.getName(), uberJarFile));
    }

    if (debug) {
        log.info("enable debug");

        StepFactory stepFactory = new StepFactory(builder.getRegion() + ".elasticmapreduce");
        StepConfig enableDebugging = new StepConfig().withName("Enable Debugging")
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newEnableDebuggingStep());
        steps.add(enableDebugging);
    }

    if (execute) {
        log.info("execute spark step");

        HadoopJarStepConfig sparkStepConf = new HadoopJarStepConfig();
        sparkStepConf.withJar("command-runner.jar");
        sparkStepConf.withArgs("spark-submit", "--deploy-mode", "cluster", "--class", className,
                getS3UberJarUrl(), "-useSparkLocal", "false");

        ActionOnFailure action = ActionOnFailure.TERMINATE_JOB_FLOW;

        if (keepAlive) {
            action = ActionOnFailure.CONTINUE;
        }

        StepConfig sparkStep = new StepConfig().withName("Spark Step").withActionOnFailure(action)
                .withHadoopJarStep(sparkStepConf);
        steps.add(sparkStep);
    }

    log.info("create spark cluster");

    Application sparkApp = new Application().withName("Spark");

    // The EMR service role and EC2 job flow role are created automatically when
    // a cluster is first launched from the AWS console; launch one there first,
    // or create the roles manually before running this.

    RunJobFlowRequest request = new RunJobFlowRequest().withName("Spark Cluster").withSteps(steps)
            .withServiceRole("EMR_DefaultRole").withJobFlowRole("EMR_EC2_DefaultRole")
            .withApplications(sparkApp).withReleaseLabel(emrVersion).withLogUri(getS3BucketLogsUrl())
            .withInstances(new JobFlowInstancesConfig().withEc2KeyName("spark").withInstanceCount(instanceCount)
                    .withKeepJobFlowAliveWhenNoSteps(keepAlive).withMasterInstanceType(instanceType)
                    .withSlaveInstanceType(instanceType));

    RunJobFlowResult result = emr.runJobFlow(request);

    log.info(result.toString());

    log.info("done");
}