Example usage for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

Introduction

On this page you can find example usage for the com.amazonaws.services.s3.model.ObjectMetadata constructor, ObjectMetadata().

Prototype

public ObjectMetadata() 
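
A minimal, self-contained sketch of the no-argument constructor in context: the empty metadata object is populated and handed to a PutObjectRequest. The bucket name, key, payload, and client setup are placeholders added for illustration and are not taken from the examples below.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ObjectMetadataExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

        // Create empty metadata, then set only the fields relevant to this upload.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("text/plain");
        metadata.setContentLength(payload.length);

        // "example-bucket" and "example/key.txt" are placeholder names.
        s3.putObject(new PutObjectRequest("example-bucket", "example/key.txt",
                new ByteArrayInputStream(payload), metadata));
    }
}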

Usage

From source file:com.ALC.SC2BOAserver.aws.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(SC2BOAStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3Client.putObject(request);

    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3Client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }

}
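
For reference, the storage class and canned ACL chosen in the store method above can also be applied directly on the request through its fluent setters, avoiding the separate setObjectAcl call. A minimal sketch, where data, s3Client, and the bucket/key names are placeholders assumed from the surrounding context:

    byte[] data = "example payload".getBytes(java.nio.charset.StandardCharsets.UTF_8);

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType("application/octet-stream");
    omd.setContentLength(data.length);

    // withStorageClass and withCannedAcl configure both options on the request itself.
    PutObjectRequest request = new PutObjectRequest("example-bucket", "example/key",
            new ByteArrayInputStream(data), omd)
                    .withStorageClass(StorageClass.ReducedRedundancy)
                    .withCannedAcl(CannedAccessControlList.PublicRead);

    s3Client.putObject(request);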

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

public boolean init() {

    if (!s3.doesBucketExist(bucketName)) {
        return false;
    }

    String rawRootPath = rootPath.equals(rootSuffix) ? ""
            : rootSuffix.isEmpty() ? rootPath : rootPath.substring(0, rootPath.lastIndexOf(rootSuffix) - 1);

    if (!rawRootPath.isEmpty()) {
        try {
            ObjectMetadata rawRootMeta = s3.getObjectMetadata(bucketName, rawRootPath + "/");
        } catch (AmazonClientException ex) {
            return false;
        }
    }

    if (!rawRootPath.equals(rootPath)) {

        try {
            ObjectMetadata rootMeta = s3.getObjectMetadata(bucketName, rootPath + "/");
        } catch (AmazonClientException ex) {
            ObjectMetadata meta = new ObjectMetadata();
            meta.setContentLength(0);
            s3.putObject(bucketName, rootPath + "/", new ByteArrayInputStream(new byte[0]), meta);
        }
    }

    return true;
}
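
As an aside, later 1.x releases of the SDK expose AmazonS3.doesObjectExist, which makes the folder-marker probe explicit instead of relying on a caught AmazonClientException as init() does above. A minimal sketch of the second check, assuming the same s3, bucketName, and rootPath fields (the SDK version used by this project may predate the method):

    // Probe for the zero-byte folder-marker object without catching AmazonClientException.
    if (!s3.doesObjectExist(bucketName, rootPath + "/")) {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        s3.putObject(bucketName, rootPath + "/", new ByteArrayInputStream(new byte[0]), meta);
    }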

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

@Override
public boolean createDirectory(String path) {
    path = trimPath(path);

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        s3.putObject(bucketName, toAbsoluteDirPath(path), new ByteArrayInputStream(new byte[0]), meta);
    } catch (AmazonClientException ex) {
        return false;
    }
    return true;
}

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

@Override
public boolean writeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    if (file.isLargeFile()) {
        return writeLargeFile(fileStream, file);
    }

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        s3.putObject(bucketName, toAbsoluteFilePath(file.getRelativePath()), fileStream, meta);
    } catch (AmazonClientException ex) {
        return false;
    }
    return true;
}
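
The "lmd" entry written by writeFile above is stored as S3 user metadata (served back as the x-amz-meta-lmd header) and can be retrieved without downloading the object body. A minimal read-back sketch under the same assumptions as the method above:

    // getObjectMetadata issues a HEAD request, so no object data is transferred.
    ObjectMetadata stored = s3.getObjectMetadata(bucketName, toAbsoluteFilePath(file.getRelativePath()));

    // "lmd" holds the epoch-millisecond timestamp written above.
    String lmd = stored.getUserMetadata().get("lmd");
    long lastModifiedMillis = (lmd != null) ? Long.parseLong(lmd) : 0L;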

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

public boolean writeLargeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        List<PartETag> partTags = new ArrayList<>();
        String fileKey = toAbsoluteFilePath(file.getRelativePath());

        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, fileKey, meta);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);

        long contentLength = file.getFileSize();
        long partSize = 256 * 1024 * 1024;

        try {
            // Uploading the file, part by part.
            long filePosition = 0;

            for (int i = 1; filePosition < contentLength; i++) {

                partSize = Math.min(partSize, (contentLength - filePosition));

                // Creating the request for a part upload
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                        .withKey(fileKey).withUploadId(result.getUploadId()).withPartNumber(i)
                        .withInputStream(fileStream).withPartSize(partSize);

                // Upload part and add response to the result list.
                partTags.add(s3.uploadPart(uploadRequest).getPartETag());
                filePosition += partSize;

                System.out.println("Uploaded " + Utils.readableFileSize(filePosition) + " out of "
                        + Utils.readableFileSize(contentLength));
            }
        } catch (Exception e) {
            System.out.println("UploadPartRequest failed: " + e.getMessage());
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, fileKey, result.getUploadId()));
            return false;
        }

        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucketName, fileKey, result.getUploadId(), partTags));
    } catch (AmazonClientException ex) {
        System.out.println("Upload failed: " + ex.getMessage());
        return false;

    }
    return true;
}
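
For comparison, the SDK's TransferManager (com.amazonaws.services.s3.transfer) performs the part splitting, part uploads, and completion automatically while accepting the same ObjectMetadata. A minimal sketch, assuming the same s3, bucketName, fileKey, fileStream, and file values as writeLargeFile above:

    TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();

    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(file.getFileSize());
    meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

    // TransferManager chooses part sizes, uploads the parts, and completes (or aborts) the multipart upload.
    Upload upload = tm.upload(bucketName, fileKey, fileStream, meta);
    upload.waitForCompletion(); // blocks; throws AmazonClientException or InterruptedException on failure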

From source file:com.amazon.aws.samplecode.travellog.aws.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(TravelLogStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    //Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    //Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3client.putObject(request);

    //If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }

}

From source file:com.amazon.photosharing.utils.content.UploadThread.java

License:Open Source License

@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);

    meta_data.setContentLength(p_size);

    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult res = s3Client.putObject(putObjectRequest);
}

From source file:com.amazon.sqs.javamessaging.AmazonSQSExtendedClient.java

License:Open Source License

private void storeTextInS3(String s3Key, String messageContentStr, Long messageContentSize) {
    InputStream messageContentStream = new ByteArrayInputStream(
            messageContentStr.getBytes(StandardCharsets.UTF_8));
    ObjectMetadata messageContentStreamMetadata = new ObjectMetadata();
    messageContentStreamMetadata.setContentLength(messageContentSize);
    PutObjectRequest putObjectRequest = new PutObjectRequest(clientConfiguration.getS3BucketName(), s3Key,
            messageContentStream, messageContentStreamMetadata);
    try {
        clientConfiguration.getAmazonS3Client().putObject(putObjectRequest);
    } catch (AmazonServiceException e) {
        String errorMessage = "Failed to store the message content in an S3 object. SQS message was not sent.";
        LOG.error(errorMessage, e);
        throw new AmazonServiceException(errorMessage, e);
    } catch (AmazonClientException e) {
        String errorMessage = "Failed to store the message content in an S3 object. SQS message was not sent.";
        LOG.error(errorMessage, e);
        throw new AmazonClientException(errorMessage, e);
    }
}
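
A hypothetical companion to storeTextInS3, fetching the offloaded payload back from S3 with the same clientConfiguration accessors; getObjectAsString returns the object body as a String. The helper name is illustrative and not part of AmazonSQSExtendedClient:

    // Hypothetical helper: read back a message body previously stored by storeTextInS3.
    private String readTextFromS3(String s3Key) {
        return clientConfiguration.getAmazonS3Client()
                .getObjectAsString(clientConfiguration.getS3BucketName(), s3Key);
    }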

From source file:com.amazon.util.ImageUploader.java

public static void uploadImage(String imageURL, String imageName, String folderName, String bucketName)
        throws MalformedURLException, IOException {
    // credentials object identifying user for authentication

    AWSCredentials credentials = new BasicAWSCredentials(System.getenv("AWS_S3_ACCESS_KEY"),
            System.getenv("AWS_S3_SECRET_ACCESS_KEY"));

    // create a client connection based on credentials
    AmazonS3 s3client = new AmazonS3Client(credentials);

    try {
        if (!(s3client.doesBucketExist(bucketName))) {
            s3client.setRegion(Region.getRegion(Regions.US_EAST_1));
            // Note that CreateBucketRequest does not specify region. So bucket is 
            // created in the region specified in the client.
            s3client.createBucket(new CreateBucketRequest(bucketName));
        }

        //Enable CORS:
        //     <?xml version="1.0" encoding="UTF-8"?>
        //<CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        //    <CORSRule>
        //        <AllowedOrigin>http://ask-ifr-download.s3.amazonaws.com</AllowedOrigin>
        //        <AllowedMethod>GET</AllowedMethod>
        //    </CORSRule>
        //</CORSConfiguration>
        BucketCrossOriginConfiguration configuration = new BucketCrossOriginConfiguration();

        CORSRule corsRule = new CORSRule()
                .withAllowedMethods(
                        Arrays.asList(new CORSRule.AllowedMethods[] { CORSRule.AllowedMethods.GET }))
                .withAllowedOrigins(Arrays.asList(new String[] { "http://ask-ifr-download.s3.amazonaws.com" }));
        configuration.setRules(Arrays.asList(new CORSRule[] { corsRule }));
        s3client.setBucketCrossOriginConfiguration(bucketName, configuration);

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    String fileName = folderName + SUFFIX + imageName + ".png";
    URL url = new URL(imageURL);

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType("image/png");
    omd.setContentLength(url.openConnection().getContentLength());
    // upload file to folder and set it to public
    s3client.putObject(new PutObjectRequest(bucketName, fileName, url.openStream(), omd)
            .withCannedAcl(CannedAccessControlList.PublicRead));
}

From source file:com.amediamanager.dao.DynamoDbUserDaoImpl.java

License:Apache License

/**
 * Upload the profile pic to S3 and return its URL
 * @param profilePic
 * @return The fully-qualified URL of the photo in S3
 * @throws IOException
 */
public String uploadFileToS3(CommonsMultipartFile profilePic) throws IOException {

    // Profile pic prefix
    String prefix = config.getProperty(ConfigProps.S3_PROFILE_PIC_PREFIX);

    // Date string
    String dateString = new SimpleDateFormat("ddMMyyyy").format(new java.util.Date());
    String s3Key = prefix + "/" + dateString + "/" + UUID.randomUUID().toString() + "_"
            + profilePic.getOriginalFilename();

    // Get bucket
    String s3bucket = config.getProperty(ConfigProps.S3_UPLOAD_BUCKET);

    // Create an ObjectMetadata instance to set the content type and length
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentType(profilePic.getContentType());
    metadata.setContentLength(profilePic.getSize());

    // Create a PutObjectRequest to upload the image
    PutObjectRequest putObject = new PutObjectRequest(s3bucket, s3Key, profilePic.getInputStream(), metadata);

    // Put the image into S3
    s3Client.putObject(putObject);
    s3Client.setObjectAcl(s3bucket, s3Key, CannedAccessControlList.PublicRead);

    return "http://" + s3bucket + ".s3.amazonaws.com/" + s3Key;
}
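
Where a public-read ACL is not wanted, a time-limited pre-signed URL is a common alternative to the permanent link returned above. A minimal sketch assuming the same s3Client, s3bucket, and s3Key, with an illustrative one-hour expiry:

    // Generate a GET URL that is valid for one hour instead of making the object public.
    java.util.Date expiration = new java.util.Date(System.currentTimeMillis() + 60L * 60L * 1000L);
    String presignedUrl = s3Client.generatePresignedUrl(s3bucket, s3Key, expiration).toString();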