Java tutorial
/*
 * Copyright 2010-2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */
package com.moxtra;

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.*;

import java.io.*;
import java.net.URL;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;

//import com.xebew.api.service.S3ClientDetailsService;

/**
 * This is a class for storage of any kind of data on S3. There is some functionality included in
 * this class that is not used in the TravelLog application but should serve to illustrate
 * additional capabilities of S3.
 */
public class S3StorageManager {

    private Date lastUpdate;

    /*
     * The S3 client class is thread safe, so we only ever need one static instance.
     * While you can have multiple instances, it is better to have only one because it is
     * a relatively heavyweight class.
     */
    private static AmazonS3Client globalS3Client;

    private final AmazonS3Client s3Client;

    static {
        globalS3Client = createClient();
        String s3Endpoint = Configuration.getInstance().getServiceEndpoint(Configuration.S3_ENDPOINT_KEY);
        if (s3Endpoint != null) {
            globalS3Client.setEndpoint(s3Endpoint);
        }
    }

    /**
     * Returns a new AmazonS3 client using the default endpoint and current credentials.
     */
    public static AmazonS3Client createClient() {
        AWSCredentials creds = new BasicAWSCredentials(getKey(), getSecret());
        //ClientConfiguration cf = new ClientConfiguration();
        return new AmazonS3Client(creds);
    }

    public S3StorageManager() {
        this(globalS3Client);
    }

    /**
     * Creates a new storage manager that uses the given client.
     */
    public S3StorageManager(AmazonS3Client s3Client) {
        this.s3Client = s3Client;
    }

    /**
     * The bucket map keeps track of whether a bucket exists or not. The first time any bucket
     * name is used, it is checked against this map and created if not already available.
     */
    private static Map<String, Boolean> bucketMap = new LinkedHashMap<String, Boolean>();

    public Date getLastUpdate() {
        return lastUpdate;
    }

    private static final Logger logger = Logger.getLogger(S3StorageManager.class.getName());

    /**
     * Stores a given item on S3 with a default content type of text/html.
     *
     * @param bucketname
     * @param key
     * @param data
     */
    public void store(String bucketname, String key, byte[] data) {
        store(bucketname, key, data, null, "text/html");
    }

    /**
     * Stores a given item on S3.
     *
     * @param bucketname
     * @param key
     * @param data
     * @param acl a canned access control list indicating what permissions to store this object
     *            with (can be null to leave it set to default)
     * @param type the content type to store with the object
     */
    public void store(String bucketname, String key, byte[] data, CannedAccessControlList acl, String type) {
        // Make sure the bucket exists before we try to use it
        checkForAndCreateBucket(bucketname);

        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentType(type);
        omd.setContentLength(data.length);

        ByteArrayInputStream is = new ByteArrayInputStream(data);
        PutObjectRequest request = new PutObjectRequest(bucketname, key, is, omd);

        // Check if reduced redundancy is enabled
        // if (reducedRedundancy) {
        //     request.setStorageClass(StorageClass.ReducedRedundancy);
        // }

        s3Client.putObject(request);

        // If we have an ACL, set access permissions for the data on S3
        if (acl != null) {
            s3Client.setObjectAcl(bucketname, key, acl);
        }

        try {
            is.close();
        } catch (Exception e) {
            logger.log(Level.SEVERE, "Cannot close store AWS connection");
        }
    }

    /**
     * This is a convenience method that stores an object as publicly readable.
     *
     * @param bucketname
     * @param key
     * @param data
     * @param reducedRedundancy currently unused; the reduced redundancy option is commented out in store()
     * @param type the content type to store with the object
     */
    public void storePublicRead(String bucketname, String key, byte[] data, boolean reducedRedundancy, String type) {
        store(bucketname, key, data, CannedAccessControlList.PublicRead, type);
    }

    /**
     * This method will call out to S3 to make sure that the specified bucket exists.
     * If it does not exist, it will create it.
     *
     * @param bucketName name of the bucket to be checked/created
     */
    public void checkForAndCreateBucket(String bucketName) {
        // Make sure it's lower case to comply with Amazon S3 recommendations
        bucketName = bucketName.toLowerCase();
        if (bucketMap.get(bucketName) == null) {
            if (s3Client.doesBucketExist(bucketName)) {
                bucketMap.put(bucketName, true);
            } else {
                // Bucket hasn't been created yet, so we create it
                s3Client.createBucket(bucketName);
                bucketMap.put(bucketName, true);
            }
        }
    }

    /**
     * Checks whether the key already exists in the bucket.
     *
     * @param bucketname
     * @param key
     * @return true if the object exists, false otherwise
     */
    public boolean isExist(String bucketname, String key) {
        try {
            // A metadata request is enough to test for existence and avoids opening
            // an object content stream that would otherwise be left unclosed.
            s3Client.getObjectMetadata(bucketname, key);
        } catch (Exception e) {
            // key does not exist
            return false;
        }
        return true;
    }

    /**
     * Loads the raw object data from S3 storage.
     *
     * @param bucketname
     * @param key
     * @return input stream for reading in the raw object
     * @throws IOException
     */
    public InputStream loadStream(String bucketname, String key) throws IOException {
        S3Object obj = s3Client.getObject(bucketname, key);
        return obj.getObjectContent();
    }

    /**
     * Loads the raw object data from S3 storage.
     *
     * @param bucketname
     * @param key
     * @return byte array for reading in the raw object
     * @throws Exception
     */
    public byte[] getData(String bucketname, String key) throws Exception {
        InputStream is = loadStream(bucketname, key);
        return convertStreamToBytes(is);
    }

    /**
     * Deletes the specified S3 object from the S3 storage service. If a storage path is passed
     * in that has child S3 objects, it will recursively delete the underlying objects.
     *
     * @param bucketname
     * @param key
     */
    public void delete(String bucketname, String key) {
        if (key == null || key.equals("")) {
            logger.log(Level.WARNING, "Empty storage path passed to delete method");
            return; // We don't want to delete everything in a path
        }
        try {
            // Go through the store structure and delete child objects
            ObjectListing listing = s3Client.listObjects(bucketname, key);
            while (true) {
                List<S3ObjectSummary> objectList = listing.getObjectSummaries();
                for (S3ObjectSummary summary : objectList) {
                    s3Client.deleteObject(bucketname, summary.getKey());
                }
                if (listing.isTruncated()) {
                    listing = s3Client.listNextBatchOfObjects(listing);
                } else {
                    break;
                }
            }
        } catch (Exception e) {
            // unable to remove item
            logger.log(Level.FINEST, "Unable to remove: " + bucketname + "/" + key);
        }
    }

    /**
     * Lists the contents of all objects whose keys start with the given prefix.
     *
     * @param bucketname
     * @param prefix
     * @return the raw data of each matching object
     * @throws Exception
     */
    public List<byte[]> listObjects(String bucketname, String prefix) throws Exception {
        List<byte[]> list = new ArrayList<byte[]>();
        ObjectListing listing = s3Client.listObjects(bucketname, prefix);
        while (true) {
            List<S3ObjectSummary> objectList = listing.getObjectSummaries();
            for (S3ObjectSummary summary : objectList) {
                byte[] bytes = this.getData(bucketname, summary.getKey());
                list.add(bytes);
            }
            if (listing.isTruncated()) {
                listing = s3Client.listNextBatchOfObjects(listing);
            } else {
                break;
            }
        }
        return list;
    }

    public String convertStreamToString(InputStream is) throws Exception {
        BufferedReader reader = new BufferedReader(new InputStreamReader(is));
        StringBuilder sb = new StringBuilder();
        String line = null;
        while ((line = reader.readLine()) != null) {
            sb.append(line + "\n");
        }
        is.close();
        return sb.toString();
    }

    public byte[] convertStreamToBytes(InputStream in) throws Exception {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        int next = in.read();
        while (next > -1) {
            bos.write(next);
            next = in.read();
        }
        bos.flush();
        in.close();
        byte[] result = bos.toByteArray();
        bos.close();
        return result;
    }

    /**
     * This method will obtain a presigned URL that will expire on the given date.
     *
     * @param bucketname
     * @param key
     * @param expirationDate date when the presigned URL should expire
     * @return the signed URL
     */
    public URL getSignedUrl(String bucketname, String key, Date expirationDate) {
        logger.log(Level.FINEST, "PRESIGNED URL: " + bucketname + "/" + key);
        return s3Client.generatePresignedUrl(bucketname, key, expirationDate);
    }

    public String getResourceUrl(String bucket, String key) {
        return s3Client.getResourceUrl(bucket, key);
    }

    public void makePublic(String bucket, String key) {
        s3Client.setObjectAcl(bucket, key, CannedAccessControlList.PublicRead);
    }

    public static String getKey() {
        Configuration config = Configuration.getInstance();
        return config.getProperty("accessKey");
    }

    public static String getSecret() {
        Configuration config = Configuration.getInstance();
        return config.getProperty("secretKey");
    }
}
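The class relies on an application-specific Configuration singleton for the AWS access key, secret key, and optional S3 endpoint, so it is not runnable entirely on its own. Below is a minimal usage sketch, assuming the example is compiled alongside S3StorageManager in the com.moxtra package and that Configuration supplies valid credentials; the bucket name and object key are hypothetical and chosen only for illustration.

package com.moxtra;

import java.util.Date;

public class S3StorageManagerExample {
    public static void main(String[] args) throws Exception {
        // Uses the shared, statically configured AmazonS3Client
        S3StorageManager manager = new S3StorageManager();

        // Store a small HTML fragment as a publicly readable object
        byte[] page = "<html><body>Hello from S3</body></html>".getBytes("UTF-8");
        manager.storePublicRead("example-travellog-bucket", "pages/hello.html", page, false, "text/html");

        // Read it back if it was stored successfully
        if (manager.isExist("example-travellog-bucket", "pages/hello.html")) {
            byte[] data = manager.getData("example-travellog-bucket", "pages/hello.html");
            System.out.println(new String(data, "UTF-8"));
        }

        // Generate a presigned URL that expires in one hour, then clean up
        Date expiry = new Date(System.currentTimeMillis() + 60L * 60L * 1000L);
        System.out.println(manager.getSignedUrl("example-travellog-bucket", "pages/hello.html", expiry));
        manager.delete("example-travellog-bucket", "pages/hello.html");
    }
}

Because storePublicRead delegates to store with CannedAccessControlList.PublicRead, the object above is world readable at its resource URL; use the plain store overloads if the data should stay private.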