Example usage for com.amazonaws.services.s3.model S3Object getObjectMetadata

List of usage examples for com.amazonaws.services.s3.model S3Object getObjectMetadata

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model S3Object getObjectMetadata.

Prototype

public ObjectMetadata getObjectMetadata() 

Source Link

Document

Gets the metadata stored by Amazon S3 for this object.

Usage

From source file:com.epam.dlab.module.aws.AdapterS3File.java

License:Apache License

/**
 * Opens the current file in the S3 bucket and returns a stream over its
 * content. As a side effect, records the object's last-modified timestamp
 * in {@code lastModificationDate}.
 *
 * @return input stream for the S3 object's content
 * @throws AdapterException if the object cannot be retrieved
 */
private InputStream getFileStream() throws AdapterException {
    try {
        S3Object s3Object = clientS3.getObject(new GetObjectRequest(bucket, getCurrentFileName()));
        lastModificationDate = s3Object.getObjectMetadata().getLastModified();
        return s3Object.getObjectContent();
    } catch (Exception e) {
        throw new AdapterException(
                "Cannot open file " + bucket + DELIMITER + getCurrentFileName() + ". " + e.getLocalizedMessage(),
                e);
    }
}

From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License:Open Source License

/**
 * Serves a GET object request by fetching the object from the backend S3
 * endpoint on behalf of the requesting user, then copying the backend
 * object's metadata and content stream onto the OSG reply.
 *
 * @param request the incoming GET object request
 * @return the populated reply with the backend content stream attached
 * @throws S3Exception if the backend rejects the request
 */
@Override
public GetObjectResponseType getObject(final GetObjectType request) throws S3Exception {
    User requestUser = getRequestUser(request);
    GetObjectRequest backendRequest = new GetObjectRequest(request.getBucket(), request.getKey());
    try {
        OsgInternalS3Client internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        GetObjectResponseType reply = request.getReply();
        S3Object backendObject = s3Client.getObject(backendRequest);
        populateResponseMetadata(reply, backendObject.getObjectMetadata());
        reply.setDataInputStream(backendObject.getObjectContent());
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License:Open Source License

/**
 * Serves an extended GET object request by translating the OSG request's
 * byte range and conditional headers onto an AWS SDK
 * {@code GetObjectRequest}, issuing it against the backend, and copying
 * the resulting metadata and content stream onto the reply.
 *
 * @param request the extended GET request (range, If-Match/If-None-Match,
 *        If-(Un)Modified-Since)
 * @return the populated reply with the backend content stream attached
 * @throws S3Exception if the backend rejects the request
 */
@Override
public GetObjectExtendedResponseType getObjectExtended(final GetObjectExtendedType request) throws S3Exception {
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;

    Boolean getMetaData = request.getGetMetaData();
    Long byteRangeStart = request.getByteRangeStart();
    Long byteRangeEnd = request.getByteRangeEnd();
    Date ifModifiedSince = request.getIfModifiedSince();
    Date ifUnmodifiedSince = request.getIfUnmodifiedSince();
    String ifMatch = request.getIfMatch();
    String ifNoneMatch = request.getIfNoneMatch();

    GetObjectRequest getRequest = new GetObjectRequest(request.getBucket(), request.getKey());
    // A range is only sent when an end is given; a missing start defaults to 0.
    if (byteRangeStart == null) {
        byteRangeStart = 0L;
    }
    if (byteRangeEnd != null) {
        getRequest.setRange(byteRangeStart, byteRangeEnd);
    }
    if (getMetaData != null) {
        // NOTE(review): the metadata flag is currently ignored here;
        // metadata is unconditionally populated on the reply below.
    }
    if (ifModifiedSince != null) {
        getRequest.setModifiedSinceConstraint(ifModifiedSince);
    }
    if (ifUnmodifiedSince != null) {
        getRequest.setUnmodifiedSinceConstraint(ifUnmodifiedSince);
    }
    if (ifMatch != null) {
        // SDK expects List<String>; the previous raw List compiled with an
        // unchecked warning.
        List<String> matchList = new ArrayList<String>();
        matchList.add(ifMatch);
        getRequest.setMatchingETagConstraints(matchList);
    }
    if (ifNoneMatch != null) {
        List<String> nonMatchList = new ArrayList<String>();
        nonMatchList.add(ifNoneMatch);
        getRequest.setNonmatchingETagConstraints(nonMatchList);
    }
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        S3Object response = s3Client.getObject(getRequest);

        GetObjectExtendedResponseType reply = request.getReply();
        populateResponseMetadata(reply, response.getObjectMetadata());
        reply.setDataInputStream(response.getObjectContent());
        reply.setByteRangeStart(request.getByteRangeStart());
        reply.setByteRangeEnd(request.getByteRangeEnd());
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file:com.example.S3Sample02.java

License:Open Source License

public static void main(String[] args) throws IOException {

    /*/*from  w w w  .jav a  2 s  . c  o m*/
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);

    //        AP_SOUTHEAST_2

    //       Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_2 );
    //       s3.setRegion(usWest2);

    //        String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();

    String bucketName = "imos-test-data-1";

    String key = "MyObjectKey" + UUID.randomUUID();

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        //            System.out.println("Creating bucket " + bucketName + "\n");
        //            s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        /*            System.out.println("Listing buckets");
                    for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
                    }
                    System.out.println();
        */

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        System.out.println("done\n");

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        System.out.println("done\n");

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();
        System.out.println("done\n");

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */

        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        /*
                    System.out.println("Deleting bucket " + bucketName + "\n");
                    s3.deleteBucket(bucketName);
        */

    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.exedosoft.plat.storage.s3.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {
    /*/*from w ww  . j a  v  a  2 s  .  com*/
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //            System.out.println("Deleting an object\n");
        //            s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //            System.out.println("Deleting bucket " + bucketName + "\n");
        //            s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.ge.predix.sample.blobstore.repository.BlobstoreService.java

License:Apache License

/**
 * Adds a new Blob to the binded bucket in the Object Store
 *
 * @param obj S3Object to be added/*from w ww .ja v a2 s  . c  o  m*/
 * @throws Exception
 */
public void put(S3Object obj) throws Exception {
    if (obj == null) {
        log.error("put(): Empty file provided");
        throw new Exception("File is null");
    }
    InputStream is = obj.getObjectContent();

    List<PartETag> partETags = new ArrayList<>();

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    try {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) //make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        log.info("currentPartSize: " + currentPartSize);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            s3Client.putObject(putObjectRequest);
            return;
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            log.info("currentPartSize: " + currentPartSize);
            log.info("byteArray: " + b);

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());
        }
    } catch (Exception e) {
        log.error("put(): Exception occurred in put(): " + e.getMessage());
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw e;
    }
    CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
            .withKey(obj.getKey());

    s3Client.completeMultipartUpload(completeMultipartUploadRequest);
}

From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java

License:Apache License

/**
 * Adds a new Blob to the bound bucket in the Object Store via a multipart
 * upload. The content stream is buffered into 50 MB parts which are
 * uploaded as they fill. If the whole payload is a single part below the
 * 5 MB S3 multipart minimum, the multipart upload is aborted and a plain
 * putObject is issued instead. Server-side encryption (AES-256) is
 * requested on the object metadata when {@code enableSSE} is set.
 *
 * @param obj S3Object to be added; its content stream is consumed and closed
 * @return the multipart upload id used for the transfer
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }
    // Full parts are exactly partSize bytes; minPartSize is the S3 minimum
    // size for a multipart part.
    final int partSize = 50 * 1024 * 1024;
    final int minPartSize = 5 * 1024 * 1024;

    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);
    try (InputStream is = obj.getObjectContent();) {

        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        // Read in chunks rather than one byte at a time, but never read past
        // the current part boundary so each full part is exactly partSize.
        byte[] chunk = new byte[8192];
        int read;
        while ((read = is.read(chunk, 0, Math.min(chunk.length, partSize - tempBuffer.size()))) != -1) {
            tempBuffer.write(chunk, 0, read);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == partSize) {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        // Recompute from the buffer: if the stream ended exactly on a part
        // boundary, currentPartSize would otherwise be stale (a full
        // partSize) while the buffer is empty, producing a bogus final part.
        currentPartSize = tempBuffer.size();
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        // Single undersized part: multipart is not allowed below the
        // minimum, so fall back to a simple upload.
        if (i == 1 && currentPartSize < minPartSize) {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            // Log the metadata S3 actually stored, for diagnostics.
            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }

            return initResponse.getUploadId();
        }

        // Upload the final (possibly partial) part, if any bytes remain.
        if (currentPartSize > 0 && currentPartSize <= partSize) {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());

        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}

From source file:com.github.rholder.esthree.command.Get.java

License:Apache License

/**
 * Downloads {@code bucket}/{@code key} into {@code outputFile}, retrying
 * via {@link RetryUtils#AWS_RETRYER}, and returns the digest computed over
 * the bytes copied by {@code copyAndHash}. Each attempt seeks the output
 * back to offset 0, so a retry overwrites any partial data left by a
 * previous failed attempt.
 *
 * @return digest of the downloaded content
 * @throws ExecutionException if the underlying call fails permanently
 * @throws RetryException if the retry policy gives up
 */
public MessageDigest retryingGet() throws ExecutionException, RetryException {

    return (MessageDigest) RetryUtils.AWS_RETRYER.call(new Callable<Object>() {
        public MessageDigest call() throws Exception {

            GetObjectRequest req = new GetObjectRequest(bucket, key);

            S3Object s3Object = amazonS3Client.getObject(req);
            // Record size and ETag fields before streaming; size drives the
            // progress total below.
            contentLength = s3Object.getObjectMetadata().getContentLength();
            fullETag = s3Object.getObjectMetadata().getETag();

            Progress progress = new TransferProgressWrapper(new TransferProgress());
            progress.setTotalBytesToTransfer(contentLength);
            if (progressListener != null) {
                progressListener.withTransferProgress(progress).withCompleted(0.0).withMultiplier(1.0);
            }

            InputStream input = null;
            try {
                // create the output file, now that we know it actually exists
                // (a lazily-opened file is reused across retry attempts)
                if (output == null) {
                    output = new RandomAccessFile(outputFile, "rw");
                }

                // seek to the start of the chunk in the file, just in case we're retrying
                output.seek(0);
                input = s3Object.getObjectContent();

                return copyAndHash(input, contentLength, progress);
            } finally {
                IOUtils.closeQuietly(input);
            }
        }
    });
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

/**
 * Builds a {@link BinaryInfo} describing the object {@code fileName} in
 * {@code bucketName}: content type, size, public URL, and a pre-signed URL.
 *
 * @param s3client client to query with
 * @param bucketName bucket holding the object
 * @param fileName key of the object
 * @return populated BinaryInfo for the object
 * @throws StoreException wrapping any AWS client/service failure
 */
private BinaryInfo getBinaryInfo(AmazonS3 s3client, String bucketName, String fileName) throws StoreException {
    logger.info("getBinaryInfo: start.");

    // Original placeholders here were mojibake ("???={}"); log the actual values.
    logger.debug("bucketName={}", bucketName);
    logger.debug("fileName={}", fileName);

    BinaryInfo binary = new BinaryInfo(bucketName);
    try {
        S3Object s3binary = s3client.getObject(new GetObjectRequest(bucketName, fileName));

        if (null != s3binary) {
            try {
                binary.setFileName(fileName);
                binary.setContentType(s3binary.getObjectMetadata().getContentType());
                binary.setSize(s3binary.getObjectMetadata().getContentLength());
                binary.setUrl(s3client.getUrl(binary.getBucketName(), binary.getFileName()).toString());

                logger.debug("Generating pre-signed URL.");
                URL PresignedUrl = getPresignedUrl(s3client, binary.getBucketName(), binary.getFileName());
                binary.setPresignedUrl(PresignedUrl.toString());
                logger.debug("Pre-Signed URL = " + PresignedUrl.toString());
            } finally {
                // Only metadata is read; abort the unread content stream so
                // the underlying HTTP connection is not leaked.
                s3binary.getObjectContent().abort();
            }
        }

    } catch (AmazonServiceException ase) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.GET_FAIL, ase,
                binary.getFileName());
    } catch (AmazonClientException ace) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.GET_FAIL, ace,
                binary.getFileName());
    }

    logger.info("getBinaryInfo: end.");
    return binary;
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

/**
 * Fetches metadata for the binary described by {@code binary}: content
 * type, size, public URL, and a pre-signed URL. Returns null if the object
 * does not exist (note: the existence check and the subsequent getObject
 * are not atomic — the object may disappear between them).
 *
 * @param storage storage descriptor (logged only)
 * @param binary descriptor naming the bucket and file; populated in place
 * @return the populated binary descriptor, or null if the object is absent
 * @throws StoreException wrapping any AWS client/service failure
 */
@Override
public BinaryInfo get(StorageInfo storage, BinaryInfo binary) throws StoreException {

    // Original placeholder labels here were mojibake ("={}", "?={}").
    logger.debug("storage={}", storage);
    logger.debug("binary={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    try {
        logger.debug("Get an binary");
        if (!s3client.doesObjectExist(binary.getBucketName(), binary.getFileName())) {
            logger.info("The Binary has not exsit.bucket={}, binary={}", binary.getBucketName(),
                    binary.getFileName());
            return null;
        }

        S3Object s3binary = s3client
                .getObject(new GetObjectRequest(binary.getBucketName(), binary.getFileName()));
        try {
            binary.setContentType(s3binary.getObjectMetadata().getContentType());
            binary.setSize(s3binary.getObjectMetadata().getContentLength());
            binary.setUrl(s3client.getUrl(binary.getBucketName(), binary.getFileName()).toString());
        } finally {
            // Only metadata is read; abort the unread content stream so the
            // underlying HTTP connection is not leaked.
            s3binary.getObjectContent().abort();
        }

        logger.debug("Generating pre-signed URL.");
        URL PresignedUrl = getPresignedUrl(s3client, binary.getBucketName(), binary.getFileName());
        binary.setPresignedUrl(PresignedUrl.toString());
        logger.debug("Pre-Signed URL = " + PresignedUrl.toString());

    } catch (AmazonServiceException ase) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.GET_FAIL, ase,
                binary.getFileName());
    } catch (AmazonClientException ace) {
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.GET_FAIL, ace,
                binary.getFileName());
    }

    return binary;
}