Example usage for com.amazonaws.services.s3.model S3ObjectSummary getStorageClass


Introduction

On this page you can find example usage for com.amazonaws.services.s3.model S3ObjectSummary getStorageClass.

Prototype

public String getStorageClass() 

Document

Gets the storage class used by Amazon S3 for this object.
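
Before the full project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) that lists a bucket with the v1 AWS SDK and prints each object's storage class; the bucket name is a placeholder:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class StorageClassExample {
    public static void main(String[] args) {
        // Uses the default credential/region provider chain; adjust for your environment.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // "my-example-bucket" is a placeholder bucket name.
        ObjectListing listing = s3.listObjects("my-example-bucket");
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            // getStorageClass() returns the storage class as a String,
            // e.g. "STANDARD" or "GLACIER"; it may be null in some listings,
            // which the Druid examples below guard against.
            System.out.printf("%s -> %s%n", summary.getKey(), summary.getStorageClass());
        }
    }
}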

Usage

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

@Override
public ListBucketResponseType listBucket(ListBucketType request) throws S3Exception {
    ListBucketResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        ListObjectsRequest listRequest = new ListObjectsRequest();
        listRequest.setBucketName(request.getBucket());
        listRequest.setDelimiter(Strings.isNullOrEmpty(request.getDelimiter()) ? null : request.getDelimiter());
        listRequest.setMarker(Strings.isNullOrEmpty(request.getMarker()) ? null : request.getMarker());
        listRequest.setMaxKeys((request.getMaxKeys() == null ? null : Integer.parseInt(request.getMaxKeys())));
        listRequest.setPrefix(Strings.isNullOrEmpty(request.getPrefix()) ? null : request.getPrefix());

        ObjectListing response = s3Client.listObjects(listRequest);

        /* Non-optional, must have non-null values */
        reply.setName(request.getBucket());
        reply.setMaxKeys(response.getMaxKeys());
        reply.setMarker(response.getMarker() == null ? "" : response.getMarker());
        reply.setPrefix(response.getPrefix() == null ? "" : response.getPrefix());
        reply.setIsTruncated(response.isTruncated());

        /* Optional */
        reply.setNextMarker(response.getNextMarker());
        reply.setDelimiter(response.getDelimiter());
        if (reply.getContents() == null) {
            reply.setContents(new ArrayList<ListEntry>());
        }
        if (reply.getCommonPrefixesList() == null) {
            reply.setCommonPrefixesList(new ArrayList<CommonPrefixesEntry>());
        }

        for (S3ObjectSummary obj : response.getObjectSummaries()) {
            // Add entry; note that the canonical user is set based on the requesting user, not the returned user
            reply.getContents()
                    .add(new ListEntry(obj.getKey(),
                            DateFormatter.dateToHeaderFormattedString(obj.getLastModified()), obj.getETag(),
                            obj.getSize(), getCanonicalUser(requestUser), obj.getStorageClass()));
        }

        if (response.getCommonPrefixes() != null && response.getCommonPrefixes().size() > 0) {
            reply.setCommonPrefixesList(new ArrayList<CommonPrefixesEntry>());

            for (String s : response.getCommonPrefixes()) {
                reply.getCommonPrefixesList().add(new CommonPrefixesEntry(s));
            }
        }

        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file: com.jktsoftware.amazondownloader.download.S3TypeBucket.java

License: Open Source License

public List<IObject> getObjectsInRepo() {
    String repoid = getRepoId();
    AWSCredentials awscredentials = new BasicAWSCredentials(this.credentials.getAccessKey(),
            this.credentials.getSecretAccessKey());

    AmazonS3 s3 = new AmazonS3Client(awscredentials);
    s3.setEndpoint(endpoint);

    System.out.println("Getting objects");
    ObjectListing objectListing = s3.listObjects(repoid);

    List<IObject> objects = new ArrayList<IObject>();

    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        S3TypeObject obj = new S3TypeObject(objectSummary.getKey(), objectSummary.getSize(),
                objectSummary.getBucketName(), objectSummary.getStorageClass(), s3);
        objects.add(obj);
    }
    return objects;
}

From source file: io.druid.storage.s3.S3DataSegmentMover.java

License: Apache License

/**
 * Copies an object and then checks, via a separate API call, that the object is present at the target location.
 * If it is not, an exception is thrown and the object is not deleted at the old location. This "paranoid" check
 * was added after it was observed that S3 may report a successful move even though the object is not found at
 * the target location.
 */
private void selfCheckingMove(String s3Bucket, String targetS3Bucket, String s3Path, String targetS3Path,
        String copyMsg) throws IOException, SegmentLoadingException {
    if (s3Bucket.equals(targetS3Bucket) && s3Path.equals(targetS3Path)) {
        log.info("No need to move file[s3://%s/%s] onto itself", s3Bucket, s3Path);
        return;
    }
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
        final ListObjectsV2Result listResult = s3Client.listObjectsV2(
                new ListObjectsV2Request().withBucketName(s3Bucket).withPrefix(s3Path).withMaxKeys(1));
        if (listResult.getKeyCount() == 0) {
            // should never happen
            throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path);
        }
        final S3ObjectSummary objectSummary = listResult.getObjectSummaries().get(0);
        if (objectSummary.getStorageClass() != null
                && StorageClass.fromValue(StringUtils.toUpperCase(objectSummary.getStorageClass()))
                        .equals(StorageClass.Glacier)) {
            throw new AmazonServiceException(StringUtils.format(
                    "Cannot move file[s3://%s/%s] of storage class glacier, skipping.", s3Bucket, s3Path));
        } else {
            log.info("Moving file %s", copyMsg);
            final CopyObjectRequest copyRequest = new CopyObjectRequest(s3Bucket, s3Path, targetS3Bucket,
                    targetS3Path);
            if (!config.getDisableAcl()) {
                copyRequest
                        .setAccessControlList(S3Utils.grantFullControlToBucketOwner(s3Client, targetS3Bucket));
            }
            s3Client.copyObject(copyRequest);
            if (!s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
                throw new IOE(
                        "After copy was reported as successful the file doesn't exist in the target location [%s]",
                        copyMsg);
            }
            deleteWithRetriesSilent(s3Bucket, s3Path);
            log.debug("Finished moving file %s", copyMsg);
        }
    } else {
        // ensure object exists in target location
        if (s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
            log.info("Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]", s3Bucket,
                    s3Path, targetS3Bucket, targetS3Path);
        } else {
            throw new SegmentLoadingException(
                    "Unable to move file %s, not present in either source or target location", copyMsg);
        }
    }
}

From source file: org.apache.druid.storage.s3.S3DataSegmentMover.java

License: Apache License

/**
 * Copies an object and then checks, via a separate API call, that the object is present at the target location.
 * If it is not, an exception is thrown and the object is not deleted at the old location. This "paranoid" check
 * was added after it was observed that S3 may report a successful move even though the object is not found at
 * the target location.
 */
private void selfCheckingMove(String s3Bucket, String targetS3Bucket, String s3Path, String targetS3Path,
        String copyMsg) throws IOException, SegmentLoadingException {
    if (s3Bucket.equals(targetS3Bucket) && s3Path.equals(targetS3Path)) {
        log.info("No need to move file[s3://%s/%s] onto itself", s3Bucket, s3Path);
        return;
    }
    if (s3Client.doesObjectExist(s3Bucket, s3Path)) {
        final ListObjectsV2Result listResult = s3Client.listObjectsV2(
                new ListObjectsV2Request().withBucketName(s3Bucket).withPrefix(s3Path).withMaxKeys(1));
        // Use getObjectSummaries().size() instead of getKeyCount() because, in some cases,
        // getObjectSummaries() has been observed to return data while the reported
        // key count is still zero.
        if (listResult.getObjectSummaries().size() == 0) {
            // should never happen
            throw new ISE("Unable to list object [s3://%s/%s]", s3Bucket, s3Path);
        }
        final S3ObjectSummary objectSummary = listResult.getObjectSummaries().get(0);
        if (objectSummary.getStorageClass() != null
                && StorageClass.fromValue(StringUtils.toUpperCase(objectSummary.getStorageClass()))
                        .equals(StorageClass.Glacier)) {
            throw new AmazonServiceException(StringUtils.format(
                    "Cannot move file[s3://%s/%s] of storage class glacier, skipping.", s3Bucket, s3Path));
        } else {
            log.info("Moving file %s", copyMsg);
            final CopyObjectRequest copyRequest = new CopyObjectRequest(s3Bucket, s3Path, targetS3Bucket,
                    targetS3Path);
            if (!config.getDisableAcl()) {
                copyRequest
                        .setAccessControlList(S3Utils.grantFullControlToBucketOwner(s3Client, targetS3Bucket));
            }
            s3Client.copyObject(copyRequest);
            if (!s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
                throw new IOE(
                        "After copy was reported as successful the file doesn't exist in the target location [%s]",
                        copyMsg);
            }
            deleteWithRetriesSilent(s3Bucket, s3Path);
            log.debug("Finished moving file %s", copyMsg);
        }
    } else {
        // ensure object exists in target location
        if (s3Client.doesObjectExist(targetS3Bucket, targetS3Path)) {
            log.info("Not moving file [s3://%s/%s], already present in target location [s3://%s/%s]", s3Bucket,
                    s3Path, targetS3Bucket, targetS3Path);
        } else {
            throw new SegmentLoadingException(
                    "Unable to move file %s, not present in either source or target location", copyMsg);
        }
    }
}

From source file: org.exem.flamingo.web.filesystem.s3.S3BrowserController.java

License: Apache License

@RequestMapping(value = "listObjects", method = RequestMethod.GET)
@ResponseStatus(HttpStatus.OK)
public Response listObjects(@RequestParam(required = false) String bucketName,
        @RequestParam(required = false) String prefix,
        @RequestParam(required = false) String continuationToken) {
    // Get bucket list
    if (StringUtils.isEmpty(bucketName)) {
        Response response = new Response();
        response.getList().addAll(getBucketList());
        response.setSuccess(true);
        return response;
    }

    // Get folder & bucket list
    ListObjectsV2Result result = s3BrowserService.listObjects(bucketName, prefix, continuationToken);

    List<S3ObjectInfo> list = new ArrayList<>();
    List<String> commonPrefixes = result.getCommonPrefixes();
    for (String key : commonPrefixes) {
        S3ObjectInfo object = new S3ObjectInfo();
        object.setBucketName(bucketName);
        object.setKey(key);
        object.setName(getName(key));
        object.setFolder(true);
        list.add(object);
    }

    List<S3ObjectSummary> objectSummaries = result.getObjectSummaries();

    if (!StringUtils.endsWith(prefix, S3Constansts.DELIMITER)) {
        prefix = prefix + S3Constansts.DELIMITER;
    }
    for (S3ObjectSummary s3Object : objectSummaries) {
        String key = s3Object.getKey();
        if (prefix.equals(key)) {
            continue;
        }
        S3ObjectInfo object = new S3ObjectInfo();
        object.setBucketName(bucketName);
        object.setPrefix(prefix);
        object.setKey(key);
        object.setName(getName(key));
        object.setObject(true);
        object.setSize(s3Object.getSize());
        object.setLastModified(s3Object.getLastModified());
        object.setStorageClass(s3Object.getStorageClass());
        list.add(object);
    }

    Map<String, String> map = new HashMap<>();
    map.put(S3Constansts.CONTINUATIONTOKEN, result.getNextContinuationToken());
    map.put(S3Constansts.ISTRUNCATED, BooleanUtils.toStringTrueFalse(result.isTruncated()));

    Response response = new Response();
    response.getList().addAll(list);
    response.getMap().putAll(map);
    response.setSuccess(true);
    return response;
}

From source file: org.finra.herd.service.impl.BusinessObjectDataFinalizeRestoreHelperServiceImpl.java

License: Apache License

/**
 * Executes S3 specific steps for the business object data finalize restore.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to perform a business object data restore
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto) {
    // Create an S3 file transfer parameters DTO to access the S3 bucket.
    // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper
            .getS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
    s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
    s3FileTransferRequestParamsDto
            .setS3KeyPrefix(StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

    // Get actual S3 files by selecting all S3 keys matching the S3 key prefix from the S3 bucket.
    // When listing S3 files, we ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

    // Validate existence and file size of the S3 files.
    storageFileHelper.validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(), actualS3Files,
            businessObjectDataRestoreDto.getStorageName(),
            businessObjectDataRestoreDto.getBusinessObjectDataKey());

    // Build a list of files to check for restore status by selecting only objects that are currently archived in Glacier (have the Glacier storage class).
    List<S3ObjectSummary> glacierS3Files = new ArrayList<>();
    for (S3ObjectSummary s3ObjectSummary : actualS3Files) {
        if (StorageClass.Glacier.toString().equals(s3ObjectSummary.getStorageClass())) {
            glacierS3Files.add(s3ObjectSummary);
        }
    }

    // Validate that all Glacier storage class S3 files are now restored.
    s3FileTransferRequestParamsDto.setFiles(storageFileHelper
            .getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(glacierS3Files)));
    s3Service.validateGlacierS3FilesRestored(s3FileTransferRequestParamsDto);
}

From source file: org.finra.herd.service.impl.BusinessObjectDataInitiateRestoreHelperServiceImpl.java

License: Apache License

/**
 * Executes S3 specific steps for the initiation of a business object data restore request. The method also updates the specified DTO.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to perform a business object data restore
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto) {
    try {
        // Create an S3 file transfer parameters DTO to access the S3 bucket.
        // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper
                .getS3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
        s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
        s3FileTransferRequestParamsDto.setS3KeyPrefix(
                StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

        // Get a list of S3 files matching the S3 key prefix. When listing S3 files, we ignore 0 byte objects that represent S3 directories.
        List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

        // Validate existence and file size of the S3 files.
        storageFileHelper.validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(),
                actualS3Files, businessObjectDataRestoreDto.getStorageName(),
                businessObjectDataRestoreDto.getBusinessObjectDataKey());

        // Validate that all files to be restored are currently archived in Glacier (have Glacier storage class).
        // Fail on any S3 file that does not have the Glacier storage class. This can happen when a request to restore business object
        // data is posted after the business object data archiving transition is executed (the relevant S3 objects get tagged),
        // but before AWS actually transitions the S3 files to Glacier (changes the S3 object storage class to Glacier).
        for (S3ObjectSummary s3ObjectSummary : actualS3Files) {
            if (!StringUtils.equals(s3ObjectSummary.getStorageClass(), StorageClass.Glacier.toString())) {
                throw new IllegalArgumentException(String.format(
                        "S3 file \"%s\" is not archived (found %s storage class when expecting %s). S3 Bucket Name: \"%s\"",
                        s3ObjectSummary.getKey(), s3ObjectSummary.getStorageClass(),
                        StorageClass.Glacier.toString(), s3FileTransferRequestParamsDto.getS3BucketName()));
            }
        }

        // Set a list of files to restore.
        s3FileTransferRequestParamsDto.setFiles(storageFileHelper
                .getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(actualS3Files)));

        // Initiate restore requests for the list of objects in the Glacier bucket.
        // TODO: Make "expirationInDays" value configurable with default value set to 99 years (36135 days).
        s3Service.restoreObjects(s3FileTransferRequestParamsDto, 36135,
                businessObjectDataRestoreDto.getArchiveRetrievalOption());
    } catch (RuntimeException e) {
        // Log the exception.
        LOGGER.error(
                "Failed to initiate a restore request for the business object data. businessObjectDataKey={}",
                jsonHelper.objectToJson(businessObjectDataRestoreDto.getBusinessObjectDataKey()), e);

        // Update the DTO with the caught exception.
        businessObjectDataRestoreDto.setException(e);
    }
}

From source file: org.finra.herd.service.impl.ExpireRestoredBusinessObjectDataHelperServiceImpl.java

License: Apache License

/**
 * Executes S3 specific steps required to expire business object data.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to expire business object data
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto) {
    // Create an S3 file transfer parameters DTO to access the S3 bucket.
    // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper
            .getS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
    s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
    s3FileTransferRequestParamsDto
            .setS3KeyPrefix(StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

    // Get a list of S3 files matching the S3 key prefix. When listing S3 files, we ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

    // Validate existence and file size of the S3 files.
    storageFileHelper.validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(), actualS3Files,
            businessObjectDataRestoreDto.getStorageName(),
            businessObjectDataRestoreDto.getBusinessObjectDataKey());

    // Build a list of files to expire by selecting only objects that have the Glacier storage class.
    List<S3ObjectSummary> glacierS3Files = new ArrayList<>();
    for (S3ObjectSummary s3ObjectSummary : actualS3Files) {
        if (StorageClass.Glacier.toString().equals(s3ObjectSummary.getStorageClass())) {
            glacierS3Files.add(s3ObjectSummary);
        }
    }

    // Set a list of files to expire.
    s3FileTransferRequestParamsDto.setFiles(storageFileHelper
            .getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(glacierS3Files)));

    // To expire the restored S3 objects, initiate restore requests with expiration set to 1 day.
    s3Service.restoreObjects(s3FileTransferRequestParamsDto, 1, null);
}