Example usage for com.amazonaws.services.s3.model S3ObjectSummary getKey

List of usage examples for com.amazonaws.services.s3.model S3ObjectSummary getKey

Introduction

In this page you can find the example usage for com.amazonaws.services.s3.model S3ObjectSummary getKey.

Prototype

public String getKey() 

Source Link

Document

Gets the key under which this object is stored in Amazon S3.

Usage

From source file:org.finra.herd.service.helper.StorageFileHelper.java

License:Apache License

/**
 * Builds a map of file paths to storage files built from the list of S3 object summaries, with
 * map iteration order matching the original list order.
 *
 * @param s3ObjectSummaries the list of S3 object summaries
 *
 * @return the map of file paths to storage files
 */
public Map<String, StorageFile> getStorageFilesMapFromS3ObjectSummaries(
        List<S3ObjectSummary> s3ObjectSummaries) {
    // LinkedHashMap preserves the order of the incoming list.
    Map<String, StorageFile> storageFilesByPath = new LinkedHashMap<>();

    for (S3ObjectSummary summary : s3ObjectSummaries) {
        StorageFile storageFile = new StorageFile(summary.getKey(), summary.getSize(), null);
        storageFilesByPath.put(summary.getKey(), storageFile);
    }

    return storageFilesByPath;
}

From source file:org.finra.herd.service.helper.StorageFileHelper.java

License:Apache License

/**
 * Validates registered S3 files per list of expected storage files. The validation ignores (does not fail) when detecting unregistered zero byte S3 files.
 *
 * @param expectedStorageFiles the list of expected S3 files represented by storage files
 * @param s3ObjectSummaries the list of actual S3 files represented by S3 object summaries
 * @param storageName the storage name
 * @param businessObjectDataKey the business object data key
 *
 * @throws ObjectNotFoundException if a registered (expected) file is not present in S3
 * @throws IllegalStateException if an unregistered non-empty S3 file is found under the prefix
 */
public void validateRegisteredS3Files(List<StorageFile> expectedStorageFiles,
        List<S3ObjectSummary> s3ObjectSummaries, String storageName,
        BusinessObjectDataKey businessObjectDataKey) {
    // Get a set of actual S3 file paths.
    Set<String> actualS3FilePaths = new HashSet<>(getFilePathsFromS3ObjectSummaries(s3ObjectSummaries));

    // Validate existence for all expected files: every registered file must exist in S3.
    for (StorageFile expectedStorageFile : expectedStorageFiles) {
        if (!actualS3FilePaths.contains(expectedStorageFile.getFilePath())) {
            throw new ObjectNotFoundException(
                    String.format("Registered file \"%s\" does not exist in \"%s\" storage.",
                            expectedStorageFile.getFilePath(), storageName));
        }
    }

    // Get a set of expected file paths.
    Set<String> expectedFilePaths = new HashSet<>(getFilePathsFromStorageFiles(expectedStorageFiles));

    // Create a JSON representation of the business object data key (used only for logging below).
    String businessObjectDataKeyAsJson = jsonHelper.objectToJson(businessObjectDataKey);

    // Validate that no other files in S3 bucket except for expected storage files have the same S3 key prefix.
    // Please note that this validation ignores (does not fail on) any unregistered zero byte S3 files.
    for (S3ObjectSummary s3ObjectSummary : s3ObjectSummaries) {
        if (!expectedFilePaths.contains(s3ObjectSummary.getKey())) {
            // Ignore unregistered zero byte S3 files (these typically represent S3 "directory" markers).
            if (s3ObjectSummary.getSize() == 0) {
                LOGGER.info(
                        "Ignoring unregistered zero byte S3 file. s3Key=\"{}\" storageName=\"{}\" businessObjectDataKey={}",
                        s3ObjectSummary.getKey(), storageName, businessObjectDataKeyAsJson);
            } else {
                // A non-empty unregistered file under the prefix indicates inconsistent state.
                throw new IllegalStateException(String.format(
                        "Found unregistered non-empty S3 file \"%s\" in \"%s\" storage. Business object data {%s}",
                        s3ObjectSummary.getKey(), storageName,
                        businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey)));
            }
        }
    }
}

From source file:org.finra.herd.service.impl.BusinessObjectDataInitiateRestoreHelperServiceImpl.java

License:Apache License

/**
 * Executes S3 specific steps for the initiation of a business object data restore request. The method also updates the specified DTO.
 *
 * <p>Any {@link RuntimeException} raised while performing the S3 steps is caught, logged, and
 * stored on the DTO via {@code setException} rather than propagated to the caller.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to perform a business object data restore
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto) {
    try {
        // Create an S3 file transfer parameters DTO to access the S3 bucket.
        // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
        S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper
                .getS3FileTransferRequestParamsDto();
        s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
        s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
        s3FileTransferRequestParamsDto.setS3KeyPrefix(
                StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

        // Get a list of S3 files matching the S3 key prefix. When listing S3 files, we ignore 0 byte objects that represent S3 directories.
        List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

        // Validate existence and file size of the S3 files.
        storageFileHelper.validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(),
                actualS3Files, businessObjectDataRestoreDto.getStorageName(),
                businessObjectDataRestoreDto.getBusinessObjectDataKey());

        // Validate that all files to be restored are currently archived in Glacier (have Glacier storage class).
        // Fail on any S3 file that does not have Glacier storage class. This can happen when request to restore business object
        // data is posted after business object data archiving transition is executed (relative S3 objects get tagged),
        // but before AWS actually transitions the S3 files to Glacier (changes S3 object storage class to Glacier).
        for (S3ObjectSummary s3ObjectSummary : actualS3Files) {
            if (!StringUtils.equals(s3ObjectSummary.getStorageClass(), StorageClass.Glacier.toString())) {
                throw new IllegalArgumentException(String.format(
                        "S3 file \"%s\" is not archived (found %s storage class when expecting %s). S3 Bucket Name: \"%s\"",
                        s3ObjectSummary.getKey(), s3ObjectSummary.getStorageClass(),
                        StorageClass.Glacier.toString(), s3FileTransferRequestParamsDto.getS3BucketName()));
            }
        }

        // Set a list of files to restore.
        s3FileTransferRequestParamsDto.setFiles(storageFileHelper
                .getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(actualS3Files)));

        // Initiate restore requests for the list of objects in the Glacier bucket.
        // TODO: Make "expirationInDays" value configurable with default value set to 99 years (36135 days).
        s3Service.restoreObjects(s3FileTransferRequestParamsDto, 36135,
                businessObjectDataRestoreDto.getArchiveRetrievalOption());
    } catch (RuntimeException e) {
        // Log the exception.
        LOGGER.error(
                "Failed to initiate a restore request for the business object data. businessObjectDataKey={}",
                jsonHelper.objectToJson(businessObjectDataRestoreDto.getBusinessObjectDataKey()), e);

        // Update the DTO with the caught exception instead of rethrowing, so the caller can
        // inspect the failure via the DTO.
        businessObjectDataRestoreDto.setException(e);
    }
}

From source file:org.finra.herd.tools.uploader.UploaderController.java

License:Apache License

/**
 * Logs all files found in the specified S3 location.
 *
 * @param params the S3 file transfer request parameters
 */
private void logS3KeyPrefixContents(S3FileTransferRequestParamsDto params) {
    List<S3ObjectSummary> s3ObjectSummaries = s3Service.listDirectory(params);

    // Use SLF4J parameterized logging (consistent with the rest of the codebase) instead of
    // eagerly formatting with String.format - the message is only built when INFO is enabled.
    LOGGER.info("Found {} keys with prefix \"{}\" in bucket \"{}\":", s3ObjectSummaries.size(),
            params.getS3KeyPrefix(), params.getS3BucketName());

    for (S3ObjectSummary s3ObjectSummary : s3ObjectSummaries) {
        LOGGER.info("    s3://{}/{}", params.getS3BucketName(), s3ObjectSummary.getKey());
    }
}

From source file:org.geoserver.taskmanager.external.impl.S3FileServiceImpl.java

License:Open Source License

@Override
public List<String> listSubfolders() {
    // Use a set so each folder path is reported only once.
    Set<String> paths = new HashSet<>();

    // Page through the listing: listObjects returns at most 1000 keys per call, so a single
    // call silently misses keys in larger buckets. Follow the truncation marker to fetch all pages.
    ObjectListing listing = getS3Client().listObjects(rootFolder);
    while (true) {
        for (S3ObjectSummary summary : listing.getObjectSummaries()) {
            String fullPath = summary.getKey();
            // Add every ancestor "folder" prefix of the key (one entry per '/' separator).
            for (int separatorIndex = fullPath.indexOf('/'); separatorIndex >= 0;
                    separatorIndex = fullPath.indexOf('/', separatorIndex + 1)) {
                paths.add(fullPath.substring(0, separatorIndex));
            }
        }
        if (!listing.isTruncated()) {
            break;
        }
        listing = getS3Client().listNextBatchOfObjects(listing);
    }

    return new ArrayList<>(paths);
}

From source file:org.geoserver.taskmanager.external.impl.S3FileServiceImpl.java

License:Open Source License

@Override
public FileReference getVersioned(String filePath) {
    // Paths without a version placeholder are returned as-is (latest == next == the path itself).
    int index = filePath.indexOf(PLACEHOLDER_VERSION);
    if (index < 0) {
        return new FileReferenceImpl(this, filePath, filePath);
    }

    // Collect all version numbers found in S3 for this path, sorted ascending.
    SortedSet<Integer> set = new TreeSet<Integer>();
    // Pattern.quote wraps the path in \Q...\E; replacing the placeholder with \E(.*)\Q splices
    // a capturing group into the middle of the otherwise-literal pattern.
    Pattern pattern = Pattern
            .compile(Pattern.quote(filePath).replace(FileService.PLACEHOLDER_VERSION, "\\E(.*)\\Q"));

    // List keys sharing the prefix up to the placeholder and extract their version numbers.
    ObjectListing listing = getS3Client().listObjects(rootFolder, filePath.substring(0, index));
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        Matcher matcher = pattern.matcher(summary.getKey());
        if (matcher.matches()) {
            try {
                set.add(Integer.parseInt(matcher.group(1)));
            } catch (NumberFormatException e) {
                // Non-numeric "version" segments are skipped with a warning rather than failing.
                LOGGER.log(Level.WARNING, "could not parse version in versioned file " + summary.getKey(), e);
            }
        }
    }
    // latest = highest existing version (0 if none); next = latest + 1.
    int last = set.isEmpty() ? 0 : set.last();
    return new FileReferenceImpl(this, filePath.replace(FileService.PLACEHOLDER_VERSION, last + ""),
            filePath.replace(FileService.PLACEHOLDER_VERSION, (last + 1) + ""));
}

From source file:org.geowebcache.s3.TemporaryS3Folder.java

License:Open Source License

public void delete() {
    checkState(isConfigured(), "client not configured.");

    // Nothing to clean up when no temporary prefix was ever created.
    if (temporaryPrefix == null) {
        return;
    }

    // Delete the objects under the temporary prefix in batches of at most 1000 keys,
    // the limit for a single S3 multi-object delete request.
    Iterable<S3ObjectSummary> objects = S3Objects.withPrefix(s3, bucket, temporaryPrefix);
    for (List<S3ObjectSummary> batch : Iterables.partition(objects, 1000)) {
        List<KeyVersion> keys = Lists.newArrayList();
        for (S3ObjectSummary summary : batch) {
            keys.add(new KeyVersion(summary.getKey()));
        }
        DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket);
        deleteRequest.setKeys(keys);
        s3.deleteObjects(deleteRequest);
    }
}

From source file:org.gradle.internal.resource.transport.aws.s3.S3Client.java

License:Apache License

/**
 * Maps each S3 key in the listing to a resource name, skipping keys that do not
 * resolve to one.
 */
private List<String> resolveResourceNames(ObjectListing objectListing) {
    List<String> results = new ArrayList<String>();
    List<S3ObjectSummary> objectSummaries = objectListing.getObjectSummaries();
    if (objectSummaries != null) {
        for (S3ObjectSummary summary : objectSummaries) {
            String resourceName = extractResourceName(summary.getKey());
            if (resourceName != null) {
                results.add(resourceName);
            }
        }
    }
    return results;
}

From source file:org.gridgain.grid.spi.checkpoint.s3.GridS3CheckpointSpi.java

License:Open Source License

/** {@inheritDoc} */
@SuppressWarnings({ "BusyWait" })
@Override
public void spiStart(String gridName) throws GridSpiException {
    // Start SPI start stopwatch.
    startStopwatch();

    // Credentials are mandatory; everything else has defaults.
    assertParameter(cred != null, "awsCredentials != null");

    if (log.isDebugEnabled()) {
        log.debug(configInfo("awsCredentials", cred));
        log.debug(configInfo("clientConfiguration", cfg));
        log.debug(configInfo("bucketNameSuffix", bucketNameSuffix));
    }

    if (cfg == null)
        U.warn(log, "Amazon client configuration is not set (will use default).");

    // Derive the bucket name from the configured suffix, falling back to the default suffix.
    if (F.isEmpty(bucketNameSuffix)) {
        U.warn(log, "Bucket name suffix is null or empty (will use default bucket name).");

        bucketName = BUCKET_NAME_PREFIX + DFLT_BUCKET_NAME_SUFFIX;
    } else
        bucketName = BUCKET_NAME_PREFIX + bucketNameSuffix;

    s3 = cfg != null ? new AmazonS3Client(cred, cfg) : new AmazonS3Client(cred);

    if (!s3.doesBucketExist(bucketName)) {
        try {
            s3.createBucket(bucketName);

            if (log.isDebugEnabled())
                log.debug("Created S3 bucket: " + bucketName);

            // Bucket creation is eventually consistent - busy-wait (see @SuppressWarnings above)
            // until the bucket is visible before proceeding.
            while (!s3.doesBucketExist(bucketName))
                try {
                    U.sleep(200);
                } catch (GridInterruptedException e) {
                    throw new GridSpiException("Thread has been interrupted.", e);
                }
        } catch (AmazonClientException e) {
            // The bucket may have been created concurrently by another node; only fail
            // if it still does not exist after the creation attempt.
            try {
                if (!s3.doesBucketExist(bucketName))
                    throw new GridSpiException("Failed to create bucket: " + bucketName, e);
            } catch (AmazonClientException ignored) {
                throw new GridSpiException("Failed to create bucket: " + bucketName, e);
            }
        }
    }

    // Collect expiration info for all checkpoints already present in the bucket.
    Collection<GridS3TimeData> s3TimeDataLst = new LinkedList<>();

    try {
        ObjectListing list = s3.listObjects(bucketName);

        // Page through the full listing (listObjects returns results in batches).
        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                GridS3CheckpointData data = read(sum.getKey());

                if (data != null) {
                    s3TimeDataLst.add(new GridS3TimeData(data.getExpireTime(), data.getKey()));

                    if (log.isDebugEnabled())
                        log.debug("Registered existing checkpoint from key: " + data.getKey());
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    } catch (AmazonClientException e) {
        throw new GridSpiException("Failed to read checkpoint bucket: " + bucketName, e);
    } catch (GridException e) {
        throw new GridSpiException("Failed to marshal/unmarshal objects in bucket: " + bucketName, e);
    }

    // Track expiration for only those data that are made by this node
    timeoutWrk = new GridS3TimeoutWorker();

    timeoutWrk.add(s3TimeDataLst);

    timeoutWrk.start();

    registerMBean(gridName, this, GridS3CheckpointSpiMBean.class);

    // Ack ok start.
    if (log.isDebugEnabled())
        log.debug(startInfo());
}

From source file:org.gridgain.grid.spi.discovery.tcp.ipfinder.s3.GridTcpDiscoveryS3IpFinder.java

License:Open Source License

/** {@inheritDoc} */
@Override
public Collection<InetSocketAddress> getRegisteredAddresses() throws GridSpiException {
    initClient();

    Collection<InetSocketAddress> addrs = new LinkedList<>();

    try {
        ObjectListing list = s3.listObjects(bucketName);

        // Page through the full listing (listObjects returns results in batches).
        while (true) {
            for (S3ObjectSummary sum : list.getObjectSummaries()) {
                String key = sum.getKey();

                // Each key is expected to encode exactly two DELIM-separated tokens:
                // an address string followed by a port number.
                StringTokenizer st = new StringTokenizer(key, DELIM);

                if (st.countTokens() != 2)
                    U.error(log, "Failed to parse S3 entry due to invalid format: " + key);
                else {
                    String addrStr = st.nextToken();
                    String portStr = st.nextToken();

                    int port = -1;

                    try {
                        port = Integer.parseInt(portStr);
                    } catch (NumberFormatException e) {
                        U.error(log, "Failed to parse port for S3 entry: " + key, e);
                    }

                    // Malformed entries are logged and skipped rather than failing the whole scan.
                    if (port != -1)
                        try {
                            addrs.add(new InetSocketAddress(addrStr, port));
                        } catch (IllegalArgumentException e) {
                            U.error(log, "Failed to parse port for S3 entry: " + key, e);
                        }
                }
            }

            if (list.isTruncated())
                list = s3.listNextBatchOfObjects(list);
            else
                break;
        }
    } catch (AmazonClientException e) {
        throw new GridSpiException("Failed to list objects in the bucket: " + bucketName, e);
    }

    return addrs;
}