List of usage examples for com.amazonaws.services.s3.model.ObjectListing.getObjectSummaries()
public List<S3ObjectSummary> getObjectSummaries()
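Before the source-file examples below, a minimal sketch of the typical pattern: list a bucket, read each page's summaries, and keep paging while the listing is truncated. This assumes the AWS SDK for Java 1.x, credentials available through the default provider chain, and an existing bucket; the class name ListAllSummaries and the bucket name "my-bucket" are illustrative placeholders, not part of any example below.

import java.util.List;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListAllSummaries {

    public static void main(String[] args) {
        // Assumes credentials are picked up from the default provider chain.
        AmazonS3 s3 = new AmazonS3Client();

        // "my-bucket" is a placeholder; substitute an existing bucket name.
        ObjectListing listing = s3.listObjects("my-bucket");
        while (true) {
            List<S3ObjectSummary> summaries = listing.getObjectSummaries();
            for (S3ObjectSummary summary : summaries) {
                System.out.println(summary.getKey() + " (" + summary.getSize() + " bytes)");
            }
            if (!listing.isTruncated()) {
                break;
            }
            // Each page holds at most 1,000 keys by default; fetch the next one.
            listing = s3.listNextBatchOfObjects(listing);
        }
    }
}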
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
public static void deleteBucket(String bucketName, String awsAccessKey, String awsSecretKey) {
    try {
        System.out.println("");
        System.out.print("Deleting Bucket [" + bucketName + "]");
        AWSCredentials bawsCredentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey);
        AmazonS3Client bs3Service = new AmazonS3Client(bawsCredentials);
        ObjectListing ls = bs3Service.listObjects(bucketName);
        for (S3ObjectSummary objectSummary : ls.getObjectSummaries()) {
            bs3Service.deleteObject(bucketName, objectSummary.getKey());
            System.out.print(".");
        }
        bs3Service.deleteBucket(bucketName);
        SDFSLogger.getLog().info("Bucket [" + bucketName + "] deleted");
        System.out.println("Bucket [" + bucketName + "] deleted");
    } catch (Exception e) {
        SDFSLogger.getLog().warn("Unable to delete bucket " + bucketName, e);
    }
}
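Note: this helper issues a single listObjects call, which returns at most one page of results (up to 1,000 keys by default), so the final deleteBucket call would fail on buckets holding more objects than fit in one page. The truncation-aware loop shown in the sketch at the top of this page, and in the BucketRepositoryAWSImpl example further down, covers that case.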
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
private int verifyDelete(long id) throws IOException, Exception {
    String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled);
    ObjectMetadata om = null;
    S3Object kobj = null;
    int claims = 0;
    this.s3clientLock.readLock().lock();
    try {
        kobj = s3Service.getObject(this.name, "keys/" + haName);
        claims = this.getClaimedObjects(kobj, id);
        Map<String, String> mp = this.getUserMetaData(om);
        if (claims > 0) {
            if (this.clustered)
                om = this.getClaimMetaData(id);
            else {
                om = s3Service.getObjectMetadata(this.name, "keys/" + haName);
            }
            int delobj = 0;
            if (mp.containsKey("deleted-objects")) {
                delobj = Integer.parseInt((String) mp.get("deleted-objects")) - claims;
                if (delobj < 0)
                    delobj = 0;
            }
            mp.remove("deleted");
            mp.put("deleted-objects", Integer.toString(delobj));
            mp.put("suspect", "true");
            om.setUserMetadata(mp);
            String kn = null;
            if (this.clustered)
                kn = this.getClaimName(id);
            else
                kn = "keys/" + haName;
            this.updateObject(kn, om);
            SDFSLogger.getLog().warn("Reclaimed [" + claims + "] blocks marked for deletion");
        }
        if (claims == 0) {
            if (!clustered) {
                s3Service.deleteObject(this.name, "blocks/" + haName);
                s3Service.deleteObject(this.name, "keys/" + haName);
                SDFSLogger.getLog().debug("deleted block " + "blocks/" + haName + " id " + id);
            } else {
                s3Service.deleteObject(this.name, this.getClaimName(id));
                int _size = Integer.parseInt((String) mp.get("size"));
                int _compressedSize = Integer.parseInt((String) mp.get("compressedsize"));
                HashBlobArchive.currentLength.addAndGet(-1 * _size);
                HashBlobArchive.compressedLength.addAndGet(-1 * _compressedSize);
                ObjectListing ol = s3Service.listObjects(this.getName(), "claims/keys/" + haName);
                if (ol.getObjectSummaries().size() == 0) {
                    s3Service.deleteObject(this.name, "blocks/" + haName);
                    s3Service.deleteObject(this.name, "keys/" + haName);
                    SDFSLogger.getLog().debug("deleted block " + "blocks/" + haName + " id " + id);
                }
            }
        }
    } finally {
        try {
            kobj.close();
        } catch (Exception e) {
        }
        this.s3clientLock.readLock().unlock();
    }
    return claims;
}
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
@Override
public void deleteFile(String nm, String pp) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        while (nm.startsWith(File.separator))
            nm = nm.substring(1);
        try {
            if (this.isClustered()) {
                String haName = pp + "/" + EncyptUtils.encString(nm, Main.chunkStoreEncryptionEnabled);
                SDFSLogger.getLog().info("deleting " + haName);
                if (s3Service.doesObjectExist(this.name, haName)) {
                    String blb = "claims/" + haName + "/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
                    s3Service.deleteObject(this.name, blb);
                    SDFSLogger.getLog().info("deleted " + "claims/" + haName + "/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled));
                    ObjectListing ol = s3Service.listObjects(this.getName(), "claims/" + haName + "/");
                    String vid = "claims/volumes/"
                            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled)
                            + "/" + haName;
                    s3Service.deleteObject(this.name, vid);
                    if (ol.getObjectSummaries().size() == 0) {
                        s3Service.deleteObject(this.name, haName);
                        SDFSLogger.getLog().info("deleted " + haName);
                    } else {
                        SDFSLogger.getLog().info("not deleting " + haName);
                    }
                }
            } else {
                String haName = EncyptUtils.encString(nm, Main.chunkStoreEncryptionEnabled);
                s3Service.deleteObject(this.name, pp + "/" + haName);
            }
        } catch (Exception e1) {
            throw new IOException(e1);
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
@Override
public RemoteVolumeInfo[] getConnectedVolumes() throws IOException {
    if (this.clustered) {
        ObjectListing idol = this.s3Service.listObjects(this.getName(), "bucketinfo/");
        Iterator<S3ObjectSummary> iter = idol.getObjectSummaries().iterator();
        ArrayList<RemoteVolumeInfo> al = new ArrayList<RemoteVolumeInfo>();
        while (iter.hasNext()) {
            try {
                String key = iter.next().getKey();
                SDFSLogger.getLog().info("key=" + key);
                String vid = key.substring("bucketinfo/".length());
                if (vid.length() > 0) {
                    ObjectMetadata om = s3Service.getObjectMetadata(this.name, key);
                    Map<String, String> md = this.getUserMetaData(om);
                    long id = EncyptUtils.decHashArchiveName(vid, Main.chunkStoreEncryptionEnabled);
                    RemoteVolumeInfo info = new RemoteVolumeInfo();
                    info.id = id;
                    info.hostname = md.get("hostname");
                    info.port = Integer.parseInt(md.get("port"));
                    info.compressed = Long.parseLong(md.get("currentcompressedsize"));
                    info.data = Long.parseLong(md.get("currentsize"));
                    info.lastupdated = Long.parseLong(md.get("lastupdate"));
                    al.add(info);
                }
            } catch (Exception e) {
                SDFSLogger.getLog().error("unable to get volume metadata", e);
                throw new IOException(e);
            }
        }
        RemoteVolumeInfo[] ids = new RemoteVolumeInfo[al.size()];
        for (int i = 0; i < al.size(); i++) {
            ids[i] = al.get(i);
        }
        return ids;
    } else {
        RemoteVolumeInfo info = new RemoteVolumeInfo();
        info.id = Main.DSEID;
        info.port = Main.sdfsCliPort;
        info.hostname = InetAddress.getLocalHost().getHostName();
        info.compressed = this.compressedSize();
        info.data = this.size();
        RemoteVolumeInfo[] ninfo = { info };
        return ninfo;
    }
}
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
@Override
public void removeVolume(long volumeID) throws IOException {
    if (volumeID == Main.DSEID)
        throw new IOException("volume can not remove its self");
    String vid = EncyptUtils.encHashArchiveName(volumeID, Main.chunkStoreEncryptionEnabled);
    Map<String, String> obj = null;
    ObjectMetadata omd = null;
    try {
        omd = s3Service.getObjectMetadata(this.name, "bucketinfo/" + vid);
        obj = this.getUserMetaData(omd);
        long tm = Long.parseLong(obj.get("lastupdate"));
        long dur = System.currentTimeMillis() - tm;
        if (dur < (60000 * 2)) {
            throw new IOException("Volume [" + volumeID + "] is currently mounted");
        }
    } catch (Exception e) {
        omd = null;
        SDFSLogger.getLog().debug("unable to find bucketinfo object", e);
    }
    ck = null;
    String suffix = "/" + vid;
    String prefix = "claims/";
    Iterator<String> iter = this.getNextObjectList("claims/");
    while (iter != null) {
        while (iter.hasNext()) {
            String nm = iter.next();
            if (nm.endsWith(suffix)) {
                s3Service.deleteObject(this.name, nm);
                String fldr = nm.substring(0, nm.length() - suffix.length());
                SDFSLogger.getLog().debug("deleted " + fldr);
                ObjectListing ol = s3Service.listObjects(this.getName(), fldr + "/");
                if (ol.getObjectSummaries().size() == 0) {
                    String fl = fldr.substring(prefix.length());
                    s3Service.deleteObject(this.name, fl);
                    SDFSLogger.getLog().debug("deleted " + fl);
                }
            }
        }
        iter = null;
        iter = this.getNextObjectList("claims/");
        if (!iter.hasNext())
            iter = null;
    }
    s3Service.deleteObject(this.name, "bucketinfo/" + vid);
    SDFSLogger.getLog().info("Deleted " + volumeID);
}
From source file:org.openflamingo.fs.s3.S3ObjectProvider.java
License:Apache License
public List<FileInfo> getFiles(String path) {
    String bucket = null;
    if (!"/".equals(path)) {
        bucket = S3Utils.getBucket(path + "/");
    }
    String relativePath = S3Utils.getObjectKey(path);

    List<FileInfo> filesList = new ArrayList<FileInfo>();
    if ("".equals(relativePath)) {
        return filesList;
    }

    try {
        ObjectListing objectListing = awsClient.listObjects(
                new ListObjectsRequest().withBucketName(bucket).withPrefix(relativePath).withDelimiter("/"));
        while (true) {
            List<S3ObjectSummary> summaries = objectListing.getObjectSummaries();
            for (S3ObjectSummary objectSummary : summaries) {
                if (!objectSummary.getKey().endsWith("/")) {
                    long size = objectSummary.getSize();
                    String filename = FileUtils.getFilename(objectSummary.getKey());
                    String bucketName = objectSummary.getBucketName();
                    long modified = objectSummary.getLastModified().getTime();
                    S3ObjectInfo info = new S3ObjectInfo(bucketName, objectSummary.getKey(), filename,
                            modified, size);
                    filesList.add(info);
                }
            }
            if (!objectListing.isTruncated()) {
                break;
            }
            objectListing = awsClient.listNextBatchOfObjects(objectListing);
        }
        return filesList;
    } catch (Exception ase) {
        throw new FileSystemException("An error has occurred.", ase);
    }
}
From source file:org.openflamingo.fs.s3.S3ObjectProvider.java
License:Apache License
@Override
public boolean delete(String path) {
    Assert.hasLength(path, "Please enter the file path.");

    if (S3Utils.isDirectory(path)) {
        ObjectListing objectListing = awsClient.listObjects(new ListObjectsRequest()
                .withBucketName(S3Utils.getBucket(path)).withPrefix(S3Utils.getObjectKey(path)));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            awsClient.deleteObject(objectSummary.getBucketName(), objectSummary.getKey());
        }
    } else {
        String bucket = S3Utils.getBucket(path);
        String relativePath = StringUtils.remove(path, "/" + bucket + "/");
        awsClient.deleteObject(bucket, relativePath);
    }

    // auditService.delete(FileSystemType.S3, username, path);
    return true;
}
From source file:org.openflamingo.fs.s3.S3Utils.java
License:Apache License
/**
 * @param client     Amazon S3 Client
 * @param bucketName Bucket Name
 * @param objectKey  Object Key
 */
public static Map<String, String> getDirectory(AmazonS3Client client, String bucketName, String objectKey) {
    S3Object object = client.getObject(bucketName, objectKey);
    ObjectMetadata objectMetadata = object.getObjectMetadata();

    List<FileInfo> filesList = new ArrayList<FileInfo>();
    ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(object.getBucketName())
            .withPrefix(objectKey).withDelimiter("/");
    ObjectListing objectListing = null;
    do {
        objectListing = client.listObjects(listObjectsRequest);
        List<String> commonPrefixes = objectListing.getCommonPrefixes();
        List<S3ObjectSummary> summary = objectListing.getObjectSummaries();
        listObjectsRequest.setMarker(objectListing.getNextMarker());
    } while (objectListing.isTruncated());

    Map<String, String> map = new HashMap<String, String>();
    map.put("bucketName", object.getBucketName());
    map.put("name", object.getKey());
    map.put("redirectionLocation", object.getRedirectLocation());
    setValue("version", objectMetadata.getVersionId(), map);
    setValue("contentDisposition", objectMetadata.getContentDisposition(), map);
    setValue("contentType", objectMetadata.getContentType(), map);
    setValue("etag", objectMetadata.getETag(), map);
    setValue("contentEncoding", objectMetadata.getContentEncoding(), map);
    setValue("contentLength", objectMetadata.getContentLength(), map);
    setValue("lastModified", objectMetadata.getLastModified(), map);
    return map;
}
From source file:org.openinfinity.cloud.domain.repository.deployer.BucketRepositoryAWSImpl.java
License:Apache License
public void deleteBucketAndObjects(String bucketName) {
    try {
        // list and delete objects in a bucket
        LOGGER.debug("deleteBucketAndObjects called for bucket: <" + bucketName + ">.");
        ObjectListing objects = simpleStorageService.listObjects(bucketName);
        do {
            for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
                LOGGER.debug("Trying to delete object <" + objectSummary.getKey() + "> from bucket <: "
                        + objectSummary.getBucketName() + ">.");
                simpleStorageService.deleteObject(bucketName, objectSummary.getKey());
            }
            objects = simpleStorageService.listNextBatchOfObjects(objects);
        } while (objects.isTruncated());

        // Now that the bucket is empty, you can delete it. If you try to delete your bucket
        // before it is empty, it will fail.
        LOGGER.debug("Trying to delete bucket <: " + bucketName + ">.");
        simpleStorageService.deleteBucket(bucketName);
        //System.out.println("Deleted bucket " + testBucket.getName());
    } catch (Exception e) {
        // TODO: handle exception
        LOGGER.warn("Error in deleting objects and bucket: " + e + " -- " + e.getStackTrace().toString());
        e.printStackTrace();
        ExceptionUtil.throwSystemException(e.getMessage(), ExceptionLevel.ERROR,
                BucketRepository.EXCEPTION_MESSAGE_CONNECTION_FAILURE);
    }
}
From source file:org.p365.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "mynewbuket";
    String key = "Myobj/sd.jpg";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        if (!s3.doesBucketExist(bucketName)) {
            s3.createBucket(bucketName);
        }

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        String pathname = "D:\\Program Files\\apache-tomcat-7.0.42\\webapps\\WorkerForP365\\src\\AAA_1465.jpg";
        File file = new File(pathname);
        s3.putObject(new PutObjectRequest(bucketName, key, file)
                .withCannedAcl(CannedAccessControlList.PublicRead));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " "
                    + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}