List of usage examples for com.amazonaws.services.s3.model S3ObjectSummary getSize
public long getSize()
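S3ObjectSummary.getSize() returns the size of the summarized object in bytes. Before the per-project examples below, here is a minimal, self-contained sketch (the bucket name "my-bucket" and the default credentials profile are placeholder assumptions, not part of any example on this page) that pages through a listing and totals object sizes with getSize():

import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class GetSizeExample {
    public static void main(String[] args) {
        // "my-bucket" is a placeholder; supply your own bucket name and credentials.
        AmazonS3 s3 = new AmazonS3Client(new ProfileCredentialsProvider());
        ListObjectsRequest request = new ListObjectsRequest().withBucketName("my-bucket");
        long totalBytes = 0;
        ObjectListing listing;
        do {
            listing = s3.listObjects(request);
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                // getSize() reports the object's size in bytes.
                totalBytes += summary.getSize();
            }
            // Continue from the last key if the listing was truncated.
            request.setMarker(listing.getNextMarker());
        } while (listing.isTruncated());
        System.out.println("Total size in bytes: " + totalBytes);
    }
}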
From source file:cloudExplorer.BucketClass.java
License:Open Source License
String getObjectInfo(String key, String access_key, String secret_key, String bucket, String endpoint,
        String process) {
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials,
            new ClientConfiguration().withSignerOverride("S3SignerType"));
    s3Client.setEndpoint(endpoint);
    objectlist = null;
    try {
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket);
        ObjectListing objectListing;
        do {
            objectListing = s3Client.listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                if (process.contains("objectsize")) {
                    if (objectSummary.getKey().contains(key)) {
                        objectlist = String.valueOf(objectSummary.getSize());
                        break;
                    }
                }
                if (process.contains("objectdate")) {
                    if (objectSummary.getKey().contains(key)) {
                        objectlist = String.valueOf(objectSummary.getLastModified());
                        break;
                    }
                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } catch (Exception listBucket) {
        mainFrame.jTextArea1.append("\n" + listBucket.getMessage());
    }
    return objectlist;
}
From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java
License:Apache License
@Override
protected Snapshot scan(List<Pattern> filters) {
    try {
        Map<String, FileSnapshot> files = new LinkedHashMap<>();
        Set<String> dirs = new HashSet<>();
        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
                .withPrefix(rootPath.isEmpty() ? "" : rootPath + "/");
        ObjectListing objectListing;
        do {
            objectListing = listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                if (isExcluded(objectSummary.getKey()) || isFiltered(objectSummary.getKey(), filters))
                    continue;

                if (objectSummary.getKey().endsWith("/")) {
                    String filePath = trimPath(objectSummary.getKey());
                    filePath = filePath.equals(rootPath) ? ""
                            : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    dirs.add(filePath);
                    System.out.println(String.format("Scanning s3://%s/%s", bucketName, objectSummary.getKey()));
                } else {
                    String fileName = objectSummary.getKey();
                    String filePath = "";

                    if (fileName.contains("/")) {
                        int fileNameSplitIndex = fileName.lastIndexOf("/");
                        filePath = fileName.substring(0, fileNameSplitIndex);
                        fileName = fileName.substring(fileNameSplitIndex + 1);
                        filePath = filePath.equals(rootPath) ? ""
                                : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    }

                    if (filePath.equals("")) {
                        filePath = fileName;
                    } else {
                        filePath = filePath + "/" + fileName;
                    }

                    ObjectMetadata meta = getObjectInfo(objectSummary);
                    String lmd = meta.getUserMetaDataOf("lmd");
                    Date lastModified = (lmd == null) ? objectSummary.getLastModified()
                            : new Date(Long.parseLong(lmd));
                    FileSnapshot file = new FileSnapshot(fileName, objectSummary.getSize(),
                            new DateTime(lastModified), filePath);
                    files.put(filePath, file);
                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());

        Snapshot snapshot = new Snapshot(files, dirs);
        return snapshot;
    } catch (AmazonClientException ex) {
        System.out.println("Failed to scan file space");
        System.out.println(ex.getMessage());
    }
    return null;
}
From source file:com.appdynamics.monitors.s3.AWSS3Monitor.java
License:Apache License
/**
 * Calls the Amazon S3 web service to get the required S3 statistics, sets the values
 * based on the configured unit, and returns the result.
 *
 * @param buckets
 * @param amazonS3Client
 * @return Map<String, String>
 * @throws TaskExecutionException
 */
private Map<String, String> getS3Result(List<Bucket> buckets, AmazonS3Client amazonS3Client)
        throws TaskExecutionException {

    // Declaring result variables with default values
    long size = 0;
    long count = 0;
    Date lastModified = new Date(0);

    try {
        // Fetching all bucket names if passed buckets is null
        if (buckets == null) {
            logger.debug("Calling Webservice to list all buckets");
            buckets = amazonS3Client.listBuckets();
        }

        // Looping over all buckets
        for (Bucket bucket : buckets) {
            logger.debug("Getting data for bucket: " + bucket.getName());

            ObjectListing objectListing = null;
            do {
                // Getting objectListing while calling it for the first time
                if (objectListing == null) {
                    logger.debug("Calling Webservice to get objectlisting for first time");
                    objectListing = amazonS3Client.listObjects(bucket.getName());
                } else {
                    // Calling listNextBatchOfObjects if previous response is truncated
                    logger.debug("Calling Webservice to get objectlisting subsequent time");
                    objectListing = amazonS3Client.listNextBatchOfObjects(objectListing);
                }

                // Incrementing the count
                count += objectListing.getObjectSummaries().size();

                // Looping over all objects
                for (S3ObjectSummary s3ObjectSummary : objectListing.getObjectSummaries()) {
                    // Incrementing size
                    size += s3ObjectSummary.getSize();

                    // Setting last modified if lastModifiedDate is latest
                    if (lastModified.before(s3ObjectSummary.getLastModified())) {
                        lastModified = s3ObjectSummary.getLastModified();
                    }
                }
            }
            // Continuing till objectListing is complete
            while (objectListing.isTruncated());
        }
    } catch (AmazonS3Exception exception) {
        logger.error("AmazonS3Exception occurred", exception);
        throw new TaskExecutionException("Sending S3 metric failed due to AmazonS3Exception");
    }

    return getResultWithRequiredUnit(size, count, lastModified);
}
From source file:com.arc.cloud.aws.s3.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.athena.dolly.web.aws.s3.S3Service.java
License:Open Source License
/**
 * Retrieve object summaries in a specific bucket.
 *
 * @param bucketName
 * @param prefix
 * @return
 */
public List<S3Dto> listBucket(String bucketName, String prefix) {
    ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
            .withPrefix(prefix).withDelimiter(null);

    ObjectListing objectListing = s3.listObjects(listObjectsRequest);

    List<S3Dto> list = new ArrayList<S3Dto>();
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        logger.info(" - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        list.add(makeDto(bucketName, objectSummary));
    }
    return list;
}
From source file:com.athena.dolly.web.aws.s3.S3Service.java
License:Open Source License
private S3Dto makeDto(String bucketName, S3ObjectSummary objectSummary) {
    S3Dto dto = new S3Dto();

    // Default value setting
    dto.setBucketName(bucketName);
    dto.setLastModified(date2String(objectSummary.getLastModified(), "yyyy/MM/dd a KK:mm"));
    dto.setSize((objectSummary.getSize() / 1024) + "K");
    dto.setDataType(checkDataType(objectSummary.getKey()));

    // Calculate position
    String current = "";
    String dataType = "file";
    String parent = "";
    String key = objectSummary.getKey();
    dto.setUrl(presignedUrl(bucketName, key).toString());

    // 1. lastIndexOf("/") == -1 is a file in the root directory
    int pos = key.lastIndexOf("/");
    if (pos == -1) {
        // root file
    } else {
        // This is a directory or file. Apply filter
        current = key.substring(0, pos);
        key = key.substring(pos + 1);
        if (key.equals("")) {
            key = "..";
            dataType = "folder";
        }
        if (parent.length() != 0)
            parent = current.substring(0, current.lastIndexOf("/"));
    }
    dto.setKey(key);
    dto.setDataType(dataType);
    dto.setParent(parent);

    return dto;
}
From source file:com.atlassian.localstack.sample.S3Sample.java
License:Open Source License
public static void runTest(AWSCredentials credentials) throws IOException {
    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    s3.setEndpoint(LocalstackTestRunner.getEndpointS3());

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    /*
     * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
     * so once a bucket name has been taken by any user, you can't create
     * another bucket with that same name.
     *
     * You can optionally specify a location for your bucket if you want to
     * keep your data closer to your applications or users.
     */
    System.out.println("Creating bucket " + bucketName + "\n");
    s3.createBucket(bucketName);

    /*
     * List the buckets in your account
     */
    System.out.println("Listing buckets");
    for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
    }
    System.out.println();

    /*
     * Upload an object to your bucket - You can easily upload a file to
     * S3, or upload directly an InputStream if you know the length of
     * the data in the stream. You can also specify your own metadata
     * when uploading to S3, which allows you to set a variety of options
     * like content-type and content-encoding, plus additional metadata
     * specific to your applications.
     */
    System.out.println("Uploading a new object to S3 from a file\n");
    s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

    /*
     * Download an object - When you download an object, you get all of
     * the object's metadata and a stream from which to read the contents.
     * It's important to read the contents of the stream as quickly as
     * possible since the data is streamed directly from Amazon S3 and your
     * network connection will remain open until you read all the data or
     * close the input stream.
     *
     * GetObjectRequest also supports several other options, including
     * conditional downloading of objects based on modification times,
     * ETags, and selectively downloading a range of an object.
     */
    System.out.println("Downloading an object");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
    System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
    displayTextInputStream(object.getObjectContent());

    /*
     * List objects in your bucket by prefix - There are many options for
     * listing the objects in your bucket. Keep in mind that buckets with
     * many objects might truncate their results when listing their objects,
     * so be sure to check if the returned object listing is truncated, and
     * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
     * additional results.
     */
    System.out.println("Listing objects");
    ObjectListing objectListing = s3
            .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println(" - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
    }
    System.out.println();

    /*
     * Delete an object - Unless versioning has been turned on for your bucket,
     * there is no way to undelete an object, so use caution when deleting objects.
     */
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);

    /*
     * Delete a bucket - A bucket must be completely empty before it can be
     * deleted, so remember to delete any objects from your buckets before
     * you try to delete them.
     */
    System.out.println("Deleting bucket " + bucketName + "\n");
    s3.deleteBucket(bucketName);
}
From source file:com.att.aro.core.cloud.aws.AwsRepository.java
License:Apache License
public List<S3ObjectSummary> getlist() {
    List<S3ObjectSummary> objects = null;
    if (s3Client != null) {
        ObjectListing objectListing = null;
        try {
            objectListing = s3Client.listObjects(bucketName);
            objects = objectListing.getObjectSummaries();
            for (S3ObjectSummary objectSummary : objects) {
                LOGGER.debug(" - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
            }
        } catch (Exception exc) {
            LOGGER.error("Error Message: " + exc.getMessage());
        }
        return objects;
    }
    return objects;
}
From source file:com.cirrus.server.osgi.service.amazon.s3.AmazonS3StorageService.java
License:Apache License
@Override
public List<ICirrusData> list(final String path) throws ServiceRequestFailedException {
    final List<ICirrusData> content = new ArrayList<>();
    final ListObjectsRequest listObjectsRequest = this.buildObjectRequest(path);
    final AtomicReference<ObjectListing> objectListing = new AtomicReference<>();
    do {
        objectListing.set(this.amazonS3Client.listObjects(listObjectsRequest));
        for (final S3ObjectSummary objectSummary : objectListing.get().getObjectSummaries()) {
            final String key = objectSummary.getKey();
            if (path.equals(SEPARATOR)) {
                // root directory
                if (!key.contains(SEPARATOR)) {
                    content.add(new CirrusFileData(SEPARATOR + key, objectSummary.getSize()));
                } else {
                    if (key.indexOf(SEPARATOR) == key.length() - 1) {
                        content.add(new CirrusFolderData(key));
                    }
                }
            } else {
                final int beginIndex = key.indexOf(SEPARATOR);
                final String substring = key.substring(beginIndex + 1, key.length());
                if (!substring.isEmpty()) {
                    final ICirrusData cirrusData;
                    if (substring.endsWith(SEPARATOR)) {
                        cirrusData = new CirrusFolderData(substring);
                    } else {
                        cirrusData = new CirrusFileData(SEPARATOR + substring, objectSummary.getSize());
                    }
                    content.add(cirrusData);
                }
            }
        }
        listObjectsRequest.setMarker(objectListing.get().getNextMarker());
    } while (objectListing.get().isTruncated());
    return content;
}
From source file:com.climate.oada.dao.impl.S3ResourceDAO.java
License:Open Source License
@Override public List<FileResource> getFileUrls(Long userId, String type) { List<FileResource> retval = new ArrayList<FileResource>(); long validfor = new Long(validHours).longValue() * HOURS_TO_MILLISECONDS; try {/*www .j a v a 2s.c o m*/ AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider()); String prefix = userId.toString() + S3_SEPARATOR + type; LOG.debug("Listing objects from bucket " + bucketName + " with prefix " + prefix); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName) .withPrefix(prefix); ObjectListing objectListing; do { objectListing = s3client.listObjects(listObjectsRequest); for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) { LOG.debug(" - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")"); Date expiration = new Date(); long milliSeconds = expiration.getTime(); milliSeconds += validfor; expiration.setTime(milliSeconds); GeneratePresignedUrlRequest generatePresignedUrlRequest = new GeneratePresignedUrlRequest( bucketName, objectSummary.getKey()); generatePresignedUrlRequest.setMethod(HttpMethod.GET); generatePresignedUrlRequest.setExpiration(expiration); FileResource res = new FileResource(); res.setFileURL(s3client.generatePresignedUrl(generatePresignedUrlRequest)); retval.add(res); } listObjectsRequest.setMarker(objectListing.getNextMarker()); } while (objectListing.isTruncated()); } catch (AmazonServiceException ase) { logAWSServiceException(ase); } catch (AmazonClientException ace) { logAWSClientException(ace); } catch (Exception e) { LOG.error("Unable to retrieve S3 file URLs " + e.getMessage()); } return retval; }