Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

List of usage examples for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

This page shows example usages of the com.amazonaws.services.s3.model PutObjectRequest constructor PutObjectRequest(String, String, InputStream, ObjectMetadata).

Prototype

public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata) 

Source Link

Document

Constructs a new PutObjectRequest object to upload a stream of data to the specified bucket and key.

Usage

From source file:com.cloudbees.plugins.binarydeployer.s3.S3Repository.java

License:Open Source License

/**
 * Builds a streaming upload request for {@code file} under the configured bucket.
 *
 * @param file artifact to upload; its stream is opened here and consumed by the SDK
 * @param name object key to store the file under
 * @return a ready-to-send {@link PutObjectRequest} with the content length set
 * @throws IOException if the file cannot be opened or its length read
 */
private PutObjectRequest prepareUpload(VirtualFile file, String name) throws IOException {
    log.debug("Preparing upload for " + name + " to S3::" + bucketName);
    // Content length must be known up front so the SDK can stream without buffering.
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(file.length());
    return new PutObjectRequest(bucketName, name, file.open(), meta);
}

From source file:com.digitaslbi.helios.mock.utils.ConnectionHelper.java

/**
 * Creates a "folder" in the given bucket. S3 has no real directories; the
 * convention is a zero-byte object whose key ends with the path delimiter.
 *
 * @param bucketName target bucket
 * @param folderName folder key (the delimiter suffix is appended here)
 */
public static void createFolder(String bucketName, String folderName) {
    // Zero-length metadata is required so the SDK does not try to buffer/measure the stream.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    // Empty content: the object exists purely as a key marker.
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // Key is the folder name suffixed by the delimiter ("/") so consoles render it as a folder.
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
            folderName + MocksConstants.AWS_PARENT_DELIMITER.getValue(), emptyContent, metadata);

    // Send request to S3 to create the folder marker.
    try {
        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        // Pass the exception to the logger so the stack trace is preserved.
        log.error("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.", ase);
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.", ace);
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file:com.digitaslbi.helios.utils.S3Helper.java

/**
 * Creates a "folder" in the configured bucket. S3 has no real directories;
 * the convention is a zero-byte object whose key ends with the path delimiter.
 *
 * @param folderName folder key (the delimiter suffix is appended here)
 */
public static void createFolder(String folderName) {
    connect();

    // Zero-length metadata is required so the SDK does not try to buffer/measure the stream.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    // Empty content: the object exists purely as a key marker.
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // Key is the folder name suffixed by the delimiter ("/") so consoles render it as a folder.
    PutObjectRequest putObjectRequest = new PutObjectRequest(S3Properties.getInstance().getBucketName(),
            folderName + Constants.AWS_PARENT_DELIMITER.getValue(), emptyContent, metadata);

    // Send request to S3 to create the folder marker.
    try {
        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        // Pass the exception to the logger so the stack trace is preserved.
        log.error("[S3Helper][createFolder] Caught an AmazonServiceException, which "
                + "means your request made it " + "to Amazon S3, but was rejected with an error response"
                + " for some reason.", ase);
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][createFolder] Caught an AmazonClientException, which "
                + "means the client encountered " + "an internal error while trying to "
                + "communicate with S3, " + "such as not being able to access the network.", ace);
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file:com.digitaslbi.helios.utils.S3Helper.java

/**
 * Uploads a byte array to the configured bucket under {@code fileName},
 * publicly readable via a canned ACL.
 *
 * @param fileName object key
 * @param content  raw bytes to store (content length is set from this array)
 */
public static void uploadFile(String fileName, byte[] content) {
    connect();

    // Explicit content length avoids the SDK buffering the stream to compute it.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(content.length);

    try {
        log.info("[S3Helper][uploadFile] Uploading a new object to S3: " + fileName);

        PutObjectRequest putObjectRequest = new PutObjectRequest(S3Properties.getInstance().getBucketName(),
                fileName, new ByteArrayInputStream(content), metadata);
        // Make the object world-readable (e.g. for direct browser access).
        putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);

        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        // Pass the exception to the logger so the stack trace is preserved.
        log.error("[S3Helper][uploadFile] Caught an AmazonServiceException, which "
                + "means your request made it " + "to Amazon S3, but was rejected with an error response"
                + " for some reason.", ase);
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][uploadFile] Caught an AmazonClientException, which "
                + "means the client encountered " + "an internal error while trying to "
                + "communicate with S3, " + "such as not being able to access the network.", ace);
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file:com.easarrive.aws.plugins.common.service.impl.S3Service.java

License:Open Source License

/**
 * Puts an object with an explicit ACL built from either a single
 * grantee/permission pair, varargs grants, or both.
 *
 * @return the put result, or {@code null} when any required argument is
 *         missing or no ACL information was supplied at all
 */
private PutObjectResult putObject(AmazonS3 client, String bucketName, String key, InputStream input,
        ObjectMetadata metadata, Grantee grantee, Permission permission, Grant... grantsVarArg) {
    // At least one form of ACL input must be present.
    boolean hasPair = grantee != null && permission != null;
    boolean hasGrants = grantsVarArg != null && grantsVarArg.length > 0;
    if (client == null || StringUtil.isEmpty(bucketName) || StringUtil.isEmpty(key) || input == null
            || metadata == null || (!hasPair && !hasGrants)) {
        return null;
    }
    AccessControlList accessControlList = new AccessControlList();
    if (hasPair) {
        accessControlList.grantPermission(grantee, permission);
    }
    if (hasGrants) {
        accessControlList.grantAllPermissions(grantsVarArg);
    }
    return client.putObject(
            new PutObjectRequest(bucketName, key, input, metadata).withAccessControlList(accessControlList));
}

From source file:com.emc.ecs.sync.target.S3Target.java

License:Open Source License

/**
 * Writes a sync object to the target bucket via the AWS TransferManager,
 * which handles multipart uploads above the configured threshold.
 *
 * @param obj       object to write (directory markers become empty objects)
 * @param targetKey destination key in {@code bucketName}
 */
protected void putObject(SyncObject obj, String targetKey) {
    ObjectMetadata om = AwsS3Util.s3MetaFromSyncMeta(obj.getMetadata());
    if (obj.isDirectory())
        om.setContentType(AwsS3Util.TYPE_DIRECTORY);

    PutObjectRequest req;
    if (obj.isDirectory()) {
        // Directory marker: zero-byte body with the directory content type.
        req = new PutObjectRequest(bucketName, targetKey, new ByteArrayInputStream(new byte[0]), om);
    } else if (obj instanceof FileSyncObject) {
        // File source: hand the SDK the file directly so it can seek/retry.
        req = new PutObjectRequest(bucketName, targetKey, ((FileSyncObject) obj).getRawSourceIdentifier());
    } else {
        req = new PutObjectRequest(bucketName, targetKey, obj.getInputStream(), om);
    }

    if (includeAcl)
        req.setAccessControlList(AwsS3Util.s3AclFromSyncAcl(obj.getMetadata().getAcl(), ignoreInvalidAcls));

    // xfer manager will figure out if MPU is needed (based on threshold), do the MPU if necessary,
    // and abort if it fails
    TransferManagerConfiguration xferConfig = new TransferManagerConfiguration();
    xferConfig.setMultipartUploadThreshold((long) mpuThresholdMB * 1024 * 1024);
    xferConfig.setMinimumUploadPartSize((long) mpuPartSizeMB * 1024 * 1024);
    TransferManager xferManager = new TransferManager(s3, Executors.newFixedThreadPool(mpuThreadCount));
    xferManager.setConfiguration(xferConfig);

    Upload upload = xferManager.upload(req);
    try {
        log.debug("Wrote {}, etag: {}", targetKey, upload.waitForUploadResult().getETag());
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers up the stack can observe it.
        Thread.currentThread().interrupt();
        throw new RuntimeException("upload thread was interrupted", e);
    } finally {
        // make sure bytes read is accurate if we bypassed the counting stream
        if (obj instanceof FileSyncObject) {
            try {
                ((FileSyncObject) obj).setOverrideBytesRead(upload.getProgress().getBytesTransferred());
            } catch (Throwable t) {
                log.warn("could not get bytes transferred from upload", t);
            }
        }
        // The manager owns a fixed thread pool created above; shut it down (but
        // keep the shared s3 client open) or its threads leak on every call.
        xferManager.shutdownNow(false);
    }
}

From source file:com.emc.vipr.s3.sample._03_UpdateObject.java

License:Open Source License

/**
 * Interactive demo: reads an object key and new content from stdin and
 * overwrites that object in the demo bucket.
 */
public static void main(String[] args) throws Exception {
    // create the ViPR S3 Client
    ViPRS3Client s3 = ViPRS3Factory.getS3Client();

    // retrieve the object key and new object value from user
    System.out.println("Enter the object key:");
    String key = new BufferedReader(new InputStreamReader(System.in)).readLine();
    System.out.println("Enter new object content:");
    String content = new BufferedReader(new InputStreamReader(System.in)).readLine();

    // update the object in the demo bucket
    // NOTE(review): metadata is null, so the SDK cannot know the content length
    // up front and will buffer the stream in memory (and typically log a
    // warning). Fine for a demo; set ObjectMetadata with a content length for
    // real uploads.
    PutObjectRequest updateRequest = new PutObjectRequest(ViPRS3Factory.S3_BUCKET, key,
            new StringInputStream(content), null);
    s3.putObject(updateRequest);

    // print out object key/value for validation
    System.out.println(String.format("update object [%s/%s] with new content: [%s]", ViPRS3Factory.S3_BUCKET,
            key, content));
}

From source file:com.erudika.para.storage.AWSFileStore.java

License:Apache License

@Override
public String store(String path, InputStream data) {
    if (StringUtils.startsWith(path, "/")) {
        path = path.substring(1);//from  www .  ja va  2 s . c om
    }
    if (StringUtils.isBlank(path) || data == null) {
        return null;
    }
    int maxFileSizeMBytes = Config.getConfigInt("para.s3.max_filesize_mb", 10);
    try {
        if (data.available() > 0 && data.available() <= (maxFileSizeMBytes * 1024 * 1024)) {
            ObjectMetadata om = new ObjectMetadata();
            om.setCacheControl("max-age=15552000, must-revalidate"); // 180 days
            if (path.endsWith(".gz")) {
                om.setContentEncoding("gzip");
                path = path.substring(0, path.length() - 3);
            }
            path = System.currentTimeMillis() + "." + path;
            PutObjectRequest por = new PutObjectRequest(bucket, path, data, om);
            por.setCannedAcl(CannedAccessControlList.PublicRead);
            por.setStorageClass(StorageClass.ReducedRedundancy);
            s3.putObject(por);
            return Utils.formatMessage(baseUrl, Config.AWS_REGION, bucket, path);
        }
    } catch (IOException e) {
        logger.error(null, e);
    } finally {
        try {
            data.close();
        } catch (IOException ex) {
            logger.error(null, ex);
        }
    }
    return null;
}

From source file:com.eucalyptus.blockstorage.S3SnapshotTransfer.java

License:Open Source License

/**
 * Uploads a compressed snapshot file to S3 as a single object (no multipart),
 * reporting progress through the callback wrapped around the file stream.
 *
 * @param compressedSnapFileName path of the compressed snapshot on disk
 * @param actualSize             compressed size in bytes (becomes the content length)
 * @param uncompressedSize       original size, stored as user metadata for restore
 * @param callback               progress sink driven by stream reads
 * @return the S3 put result
 * @throws Exception if the upload fails after exhausting token-refresh retries
 */
private PutObjectResult uploadSnapshotAsSingleObject(String compressedSnapFileName, Long actualSize,
        Long uncompressedSize, SnapshotProgressCallback callback) throws Exception {
    callback.setUploadSize(actualSize);
    // NOTE(review): this stream is not closed here — presumably retryAfterRefresh
    // or the S3 client consumes/closes it; confirm, since a failed upload could
    // otherwise leak the file handle.
    FileInputStreamWithCallback snapInputStream = new FileInputStreamWithCallback(
            new File(compressedSnapFileName), callback);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Map<String, String> userMetadataMap = new HashMap<String, String>();
    userMetadataMap.put(UNCOMPRESSED_SIZE_KEY, String.valueOf(uncompressedSize)); // Send the uncompressed length as the metadata
    objectMetadata.setUserMetadata(userMetadataMap);
    objectMetadata.setContentLength(actualSize);

    // Retry wrapper refreshes the S3 endpoint/credentials between attempts.
    return retryAfterRefresh(new Function<PutObjectRequest, PutObjectResult>() {

        @Override
        @Nullable
        public PutObjectResult apply(@Nullable PutObjectRequest arg0) {
            // Re-resolve the endpoint in case it changed since the last attempt.
            eucaS3Client.refreshEndpoint();
            return eucaS3Client.putObject(arg0);
        }

    }, new PutObjectRequest(bucketName, keyName, snapInputStream, objectMetadata), REFRESH_TOKEN_RETRIES);
}

From source file:com.eucalyptus.loadbalancing.workflow.LoadBalancingActivitiesImpl.java

License:Open Source License

/**
 * Creates (or recreates) the IAM role policy that lets the load balancer VM
 * write access logs to the given S3 bucket, then writes a test object to
 * verify access. No-op when access logging is disabled.
 *
 * @return result carrying the role/policy names and a rollback flag
 * @throws LoadBalancingActivityException if the role policy cannot be installed
 */
@Override
public AccessLogPolicyActivityResult modifyLoadBalancerAttributesCreateAccessLogPolicy(
        final String accountNumber, final String lbName, final Boolean accessLogEnabled,
        final String s3BucketName, final String s3BucketPrefix, final Integer emitInterval)
        throws LoadBalancingActivityException {
    // Policy template; bucket/prefix placeholders are substituted below.
    final String ACCESSLOG_ROLE_POLICY_DOCUMENT = "{\"Statement\":" + "[ {" + "\"Action\": [\"s3:PutObject\"],"
            + "\"Effect\": \"Allow\","
            + "\"Resource\": [\"arn:aws:s3:::BUCKETNAME_PLACEHOLDER/BUCKETPREFIX_PLACEHOLDER\"]" + "}]}";

    AccessLogPolicyActivityResult result = new AccessLogPolicyActivityResult();
    result.setShouldRollback(false);
    if (!accessLogEnabled)
        return result;

    final String bucketName = s3BucketName;
    final String bucketPrefix = com.google.common.base.Objects.firstNonNull(s3BucketPrefix, "");

    final String roleName = getRoleName(accountNumber, lbName);
    final String policyName = ACCESSLOG_ROLE_POLICY_NAME;
    // Best-effort: drop any stale policy of the same name before re-adding it.
    try {
        final List<String> policies = EucalyptusActivityTasks.getInstance().listRolePolicies(roleName);
        if (policies.contains(policyName)) {
            EucalyptusActivityTasks.getInstance().deleteRolePolicy(roleName, policyName);
        }
    } catch (final Exception ex) {
        // Deliberately non-fatal, but record why the cleanup failed.
        LOG.debug("Failed to delete existing access log role policy (continuing)", ex);
    }

    String policyDocument = ACCESSLOG_ROLE_POLICY_DOCUMENT.replace("BUCKETNAME_PLACEHOLDER", bucketName);
    if (bucketPrefix.length() > 0) {
        policyDocument = policyDocument.replace("BUCKETPREFIX_PLACEHOLDER", bucketPrefix + "/*");
    } else {
        policyDocument = policyDocument.replace("BUCKETPREFIX_PLACEHOLDER", "*");
    }

    try {
        EucalyptusActivityTasks.getInstance().putRolePolicy(roleName, policyName, policyDocument);
        result.setRoleName(roleName);
        result.setPolicyName(policyName);
        result.setShouldRollback(true);
    } catch (final Exception ex) {
        throw new LoadBalancingActivityException(
                "failed to put role policy for loadbalancer vm's access to S3 buckets");
    }

    // Smoke-test the grant by writing a small object into the access log path.
    try {
        final EucaS3Client s3c = getS3Client(roleName);
        final String key = s3BucketPrefix != null && !s3BucketPrefix.isEmpty()
                ? String.format("%s/AWSLogs/%s/ELBAccessLogTestFile", s3BucketPrefix, accountNumber)
                : String.format("AWSLogs/%s/ELBAccessLogTestFile", accountNumber);
        final DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");
        final String content = String.format("Enable AccessLog for ELB: %s at %s", lbName,
                df.format(new Date()));
        final PutObjectRequest req = new PutObjectRequest(bucketName, key,
                new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)), new ObjectMetadata())
                        .withCannedAcl(CannedAccessControlList.BucketOwnerFullControl);
        s3c.putObject(req);
    } catch (final Exception ex) {
        // Include the cause so the failure is diagnosable from the logs.
        LOG.warn("Failed to put test key to the access log bucket", ex);
    }
    return result;
}