Example usage for com.amazonaws.services.s3.model.S3ObjectSummary.getSize()

Introduction

On this page you can find example usage of com.amazonaws.services.s3.model.S3ObjectSummary.getSize().

Prototype

public long getSize() 

Document

Gets the size of this object in bytes.
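
A minimal sketch of the call (the bucket name and prefix below are placeholders, and credentials are assumed to come from the default provider chain of the AWS SDK for Java v1):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class GetSizeExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        long totalBytes = 0;
        ObjectListing listing = s3.listObjects("my-example-bucket", "logs/");
        while (true) {
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                // getSize() returns the object's size in bytes, taken from the listing itself.
                System.out.println(summary.getKey() + " (" + summary.getSize() + " bytes)");
                totalBytes += summary.getSize();
            }
            if (!listing.isTruncated()) {
                break;
            }
            // Each listing page holds at most 1,000 summaries; fetch the next page.
            listing = s3.listNextBatchOfObjects(listing);
        }
        System.out.println("Total size: " + totalBytes + " bytes");
    }
}

Because the size is carried in the listing response, no per-object metadata request is needed; the examples below all rely on this.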

Usage

From source file: org.commoncrawl.util.S3BulkTransferUtil.java

License: Open Source License

S3BulkTransferUtil(String bucketName, String s3AccessKeyId, String s3SecretKey, JsonArray pathList,
        final Path outputPath) throws IOException {
    _conf = new Configuration();
    _fs = FileSystem.get(_conf);
    LOG.info("Initializing Downloader");
    _downloader = new S3Downloader(bucketName, s3AccessKeyId, s3SecretKey, false);
    _downloader.setMaxParallelStreams(150);
    _downloader.initialize(this);

    LOG.info("Got JSON Array with:" + pathList.size() + " elements");
    for (int i = 0; i < pathList.size(); ++i) {
        LOG.info("Collecting files from path:" + pathList.get(i).toString());
        List<S3ObjectSummary> metadataFiles = getPaths(s3AccessKeyId, s3SecretKey, bucketName,
                pathList.get(i).getAsString());
        LOG.info("Got:" + metadataFiles.size() + " total files");
        for (S3ObjectSummary metadataFile : metadataFiles) {

            Path s3Path = new Path("/" + metadataFile.getKey());
            Path finalPath = new Path(outputPath, s3Path.getName());

            FileStatus fileStatus = null;
            try {
                fileStatus = _fs.getFileStatus(finalPath);
            } catch (Exception e) {
                // Destination file does not exist yet; leave fileStatus null so the item gets queued.
            }

            if (fileStatus != null && fileStatus.getLen() != metadataFile.getSize()) {
                LOG.error("SRC-DEST SIZE MISMATCH!! SRC:" + metadataFile + " SRC-SIZE:" + metadataFile.getSize()
                        + " DEST:" + finalPath + " DEST-SIZE:" + fileStatus.getLen());

                // ok delete the destination 
                _fs.delete(finalPath, false);
                // null file status so that the item gets requeued ... 
                fileStatus = null;
            }

            if (fileStatus == null) {
                LOG.info("Queueing Item:" + metadataFile);
                ++_totalQueuedItemsCount;
                _pathMapping.put(metadataFile.getKey(), finalPath);
                _downloader.fetchItem(metadataFile.getKey());
            } else {
                LOG.info("Skipping Already Download Item:" + metadataFile + " Found at:" + finalPath);
            }
        }
    }
    LOG.info("Waiting for shutdown event");
    _downloader.waitForCompletion();
}
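
This example uses getSize() as a lightweight integrity check: the length of an already-downloaded file (FileStatus.getLen()) is compared against the S3 summary's size, and on a mismatch the partial file is deleted and the item is requeued for download.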

From source file: org.cto.VVS3Box.S3Sample.java

License: Open Source License

public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "lior.test-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: org.elasticsearch.cloud.aws.blobstore.AbstractS3BlobContainer.java

License: Apache License

@Override
public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(@Nullable String blobNamePrefix)
        throws IOException {
    ImmutableMap.Builder<String, BlobMetaData> blobsBuilder = ImmutableMap.builder();
    ObjectListing prevListing = null;
    while (true) {
        ObjectListing list;
        if (prevListing != null) {
            list = blobStore.client().listNextBatchOfObjects(prevListing);
        } else {
            if (blobNamePrefix != null) {
                list = blobStore.client().listObjects(blobStore.bucket(), buildKey(blobNamePrefix));
            } else {
                list = blobStore.client().listObjects(blobStore.bucket(), keyPath);
            }
        }
        for (S3ObjectSummary summary : list.getObjectSummaries()) {
            String name = summary.getKey().substring(keyPath.length());
            blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize()));
        }
        if (list.isTruncated()) {
            prevListing = list;
        } else {
            break;
        }
    }
    return blobsBuilder.build();
}
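
Note the pagination idiom: while the listing isTruncated(), the previous ObjectListing is fed back into listNextBatchOfObjects(...), and each summary's getSize() populates the blob metadata without a separate metadata request per object.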

From source file: org.elasticsearch.repositories.s3.S3BlobContainer.java

License: Apache License

@Override
public Map<String, BlobMetaData> listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException {
    return AccessController.doPrivileged((PrivilegedAction<Map<String, BlobMetaData>>) () -> {
        MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
        AmazonS3 client = blobStore.client();
        SocketAccess.doPrivilegedVoid(() -> {
            ObjectListing prevListing = null;
            while (true) {
                ObjectListing list;
                if (prevListing != null) {
                    list = client.listNextBatchOfObjects(prevListing);
                } else {
                    if (blobNamePrefix != null) {
                        list = client.listObjects(blobStore.bucket(), buildKey(blobNamePrefix));
                    } else {
                        list = client.listObjects(blobStore.bucket(), keyPath);
                    }
                }
                for (S3ObjectSummary summary : list.getObjectSummaries()) {
                    String name = summary.getKey().substring(keyPath.length());
                    blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize()));
                }
                if (list.isTruncated()) {
                    prevListing = list;
                } else {
                    break;
                }
            }
        });
        return blobsBuilder.immutableMap();
    });
}
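
This later Elasticsearch version runs the same listing loop under AccessController.doPrivileged and SocketAccess.doPrivilegedVoid, since repository plugin code executes under the Java security manager and network calls require elevated privileges.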

From source file: org.entando.entando.plugins.jps3awsclient.aps.system.services.storage.AmazonS3StorageManager.java

License: Open Source License

private BasicFileAttributeView[] listAttributes(String subPath, boolean isProtectedResource, boolean addFolders,
        boolean addFiles) throws ApsSystemException {
    if (!this.isActive()) {
        return null;
    }
    String folder = this.getKey(subPath, isProtectedResource);
    ObjectListing objectListing = this.getS3Objects(folder);
    if (null == objectListing) {
        return null;
    }
    BasicFileAttributeView[] objects = new BasicFileAttributeView[] {};
    if (null != objectListing.getCommonPrefixes() && addFolders) {
        for (int i = 0; i < objectListing.getCommonPrefixes().size(); i++) {
            String object = objectListing.getCommonPrefixes().get(i);
            String name = object.substring(folder.length(), (object.length() - 1));
            BasicFileAttributeView bfav = new BasicFileAttributeView();
            bfav.setDirectory(true);
            bfav.setName(name);
            objects = this.addChildAttribute(bfav, objects);
        }
    }
    if (null != objectListing.getObjectSummaries() && addFiles) {
        for (int i = 0; i < objectListing.getObjectSummaries().size(); i++) {
            S3ObjectSummary s3os = objectListing.getObjectSummaries().get(i);
            String key = s3os.getKey();
            String name = key.substring(folder.length());
            BasicFileAttributeView bfav = new BasicFileAttributeView();
            bfav.setDirectory(false);
            bfav.setName(name);
            bfav.setLastModifiedTime(s3os.getLastModified());
            bfav.setSize(s3os.getSize());
            objects = this.addChildAttribute(bfav, objects);
        }
    }
    return objects;
}

From source file: org.exem.flamingo.web.filesystem.s3.S3BrowserController.java

License: Apache License

@RequestMapping(value = "listObjects", method = RequestMethod.GET)
@ResponseStatus(HttpStatus.OK)
public Response listObjects(@RequestParam(required = false) String bucketName,
        @RequestParam(required = false) String prefix,
        @RequestParam(required = false) String continuationToken) {
    // Get bucket list
    if (StringUtils.isEmpty(bucketName)) {
        Response response = new Response();
        response.getList().addAll(getBucketList());
        response.setSuccess(true);
        return response;
    }

    // Get folder & bucket list
    ListObjectsV2Result result = s3BrowserService.listObjects(bucketName, prefix, continuationToken);

    List<S3ObjectInfo> list = new ArrayList<>();
    List<String> commonPrefixes = result.getCommonPrefixes();
    for (String key : commonPrefixes) {
        S3ObjectInfo object = new S3ObjectInfo();
        object.setBucketName(bucketName);
        object.setKey(key);
        object.setName(getName(key));
        object.setFolder(true);
        list.add(object);
    }

    List<S3ObjectSummary> objectSummaries = result.getObjectSummaries();

    if (!StringUtils.endsWith(prefix, S3Constansts.DELIMITER)) {
        prefix = prefix + S3Constansts.DELIMITER;
    }
    for (S3ObjectSummary s3Object : objectSummaries) {
        String key = s3Object.getKey();
        if (prefix.equals(key)) {
            continue;
        }
        S3ObjectInfo object = new S3ObjectInfo();
        object.setBucketName(bucketName);
        object.setPrefix(prefix);
        object.setKey(key);
        object.setName(getName(key));
        object.setObject(true);
        object.setSize(s3Object.getSize());
        object.setLastModified(s3Object.getLastModified());
        object.setStorageClass(s3Object.getStorageClass());
        list.add(object);
    }

    Map<String, String> map = new HashMap<>();
    map.put(S3Constansts.CONTINUATIONTOKEN, result.getNextContinuationToken());
    map.put(S3Constansts.ISTRUNCATED, BooleanUtils.toStringTrueFalse(result.isTruncated()));

    Response response = new Response();
    response.getList().addAll(list);
    response.getMap().putAll(map);
    response.setSuccess(true);
    return response;
}
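
Unlike the ObjectListing-based examples, this controller uses the V2 listing API: ListObjectsV2Result pages with a continuation token, which is returned to the caller along with each object's key, size, last-modified time, and storage class.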

From source file: org.finra.dm.dao.impl.S3DaoImpl.java

License: Apache License

/**
 * Lists all S3 objects matching the S3 key prefix in the given bucket (S3 bucket name). The S3 bucket name and S3 key prefix that identify the S3 objects
 * to be listed are taken from the S3 file transfer request parameters DTO.
 *
 * @param params the S3 file transfer request parameters
 * @param ignoreZeroByteDirectoryMarkers specifies whether to ignore 0 byte objects that represent S3 directories
 *
 * @return the list of all S3 objects represented as storage files that match the prefix in the given bucket
 */
private List<StorageFile> listObjectsMatchingKeyPrefix(final S3FileTransferRequestParamsDto params,
        boolean ignoreZeroByteDirectoryMarkers) {
    AmazonS3Client s3Client = null;
    List<StorageFile> storageFiles = new ArrayList<>();

    try {
        s3Client = getAmazonS3(params);
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
                .withBucketName(params.getS3BucketName()).withPrefix(params.getS3KeyPrefix());
        ObjectListing objectListing;

        do {
            objectListing = s3Operations.listObjects(listObjectsRequest, s3Client);

            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                // Ignore 0 byte objects that represent S3 directories.
                if (!(ignoreZeroByteDirectoryMarkers && objectSummary.getKey().endsWith("/")
                        && objectSummary.getSize() == 0L)) {
                    storageFiles.add(new StorageFile(objectSummary.getKey(), objectSummary.getSize(), null));
                }
            }

            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } catch (AmazonS3Exception amazonS3Exception) {
        if (S3Operations.ERROR_CODE_NO_SUCH_BUCKET.equals(amazonS3Exception.getErrorCode())) {
            throw new IllegalArgumentException(
                    "The specified bucket '" + params.getS3BucketName() + "' does not exist.",
                    amazonS3Exception);
        }
        throw new IllegalStateException("Error accessing S3", amazonS3Exception);
    } catch (AmazonClientException e) {
        throw new IllegalStateException(
                String.format("Failed to list keys/objects with prefix \"%s\" from bucket \"%s\". Reason: %s",
                        params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()),
                e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }

    return storageFiles;
}
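
Here getSize() doubles as a directory-marker filter: a key ending in "/" with a size of 0 bytes is the zero-byte placeholder that many tools (including the S3 console) create to represent a folder, and such entries are skipped when ignoreZeroByteDirectoryMarkers is set.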

From source file: org.finra.herd.dao.impl.S3DaoImpl.java

License: Apache License

@Override
public List<S3ObjectSummary> listDirectory(final S3FileTransferRequestParamsDto params,
        boolean ignoreZeroByteDirectoryMarkers) {
    Assert.isTrue(!isRootKeyPrefix(params.getS3KeyPrefix()),
            "Listing of S3 objects from root directory is not allowed.");

    AmazonS3Client s3Client = getAmazonS3(params);
    List<S3ObjectSummary> s3ObjectSummaries = new ArrayList<>();

    try {
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest()
                .withBucketName(params.getS3BucketName()).withPrefix(params.getS3KeyPrefix());
        ObjectListing objectListing;

        do {
            objectListing = s3Operations.listObjects(listObjectsRequest, s3Client);

            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                // Ignore 0 byte objects that represent S3 directories.
                if (!(ignoreZeroByteDirectoryMarkers && objectSummary.getKey().endsWith("/")
                        && objectSummary.getSize() == 0L)) {
                    s3ObjectSummaries.add(objectSummary);
                }
            }

            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } catch (AmazonS3Exception amazonS3Exception) {
        if (S3Operations.ERROR_CODE_NO_SUCH_BUCKET.equals(amazonS3Exception.getErrorCode())) {
            throw new IllegalArgumentException(
                    "The specified bucket '" + params.getS3BucketName() + "' does not exist.",
                    amazonS3Exception);
        }
        throw new IllegalStateException("Error accessing S3", amazonS3Exception);
    } catch (AmazonClientException e) {
        throw new IllegalStateException(
                String.format("Failed to list keys with prefix \"%s\" from bucket \"%s\". Reason: %s",
                        params.getS3KeyPrefix(), params.getS3BucketName(), e.getMessage()),
                e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        s3Client.shutdown();
    }

    return s3ObjectSummaries;
}

From source file: org.finra.herd.service.helper.StorageFileHelper.java

License: Apache License

/**
 * Creates a list of storage files from the list of S3 object summaries.
 *
 * @param s3ObjectSummaries the list of S3 object summaries
 *
 * @return the list of storage files
 */
public List<StorageFile> createStorageFilesFromS3ObjectSummaries(List<S3ObjectSummary> s3ObjectSummaries) {
    List<StorageFile> storageFiles = new ArrayList<>();

    for (S3ObjectSummary s3ObjectSummary : s3ObjectSummaries) {
        storageFiles.add(new StorageFile(s3ObjectSummary.getKey(), s3ObjectSummary.getSize(), null));
    }

    return storageFiles;
}

From source file: org.finra.herd.service.helper.StorageFileHelper.java

License: Apache License

/**
 * Returns a map of file paths to the storage files built from the list of S3 object summaries, with map iteration order matching the original list order.
 *
 * @param s3ObjectSummaries the list of S3 object summaries
 *
 * @return the map of file paths to storage files
 */
public Map<String, StorageFile> getStorageFilesMapFromS3ObjectSummaries(
        List<S3ObjectSummary> s3ObjectSummaries) {
    Map<String, StorageFile> result = new LinkedHashMap<>();

    for (S3ObjectSummary s3ObjectSummary : s3ObjectSummaries) {
        result.put(s3ObjectSummary.getKey(),
                new StorageFile(s3ObjectSummary.getKey(), s3ObjectSummary.getSize(), null));
    }

    return result;
}