List of usage examples for com.amazonaws AmazonServiceException getMessage
@Override
public String getMessage()
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
@Override public ListAllMyBucketsResponseType listAllMyBuckets(ListAllMyBucketsType request) throws S3Exception { ListAllMyBucketsResponseType reply = request.getReply(); OsgInternalS3Client internalS3Client = null; User requestUser = null;//w w w.java2 s . c o m try { requestUser = getRequestUser(request); //The euca-types ListAllMyBucketsList myBucketList = new ListAllMyBucketsList(); myBucketList.setBuckets(new ArrayList<BucketListEntry>()); //The s3 client types internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); ListBucketsRequest listRequest = new ListBucketsRequest(); //Map s3 client result to euca response message List<Bucket> result = s3Client.listBuckets(listRequest); for (Bucket b : result) { myBucketList.getBuckets().add(new BucketListEntry(b.getName(), DateFormatter.dateToHeaderFormattedString(b.getCreationDate()))); } reply.setBucketList(myBucketList); } catch (AmazonServiceException ex) { LOG.debug("Got service error from backend: " + ex.getMessage(), ex); throw S3ExceptionMapper.fromAWSJavaSDK(ex); } return reply; }
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
/** * Handles a HEAD request to the bucket. Just returns 200ok if bucket exists and user has access. Otherwise * returns 404 if not found or 403 if no accesss. * @param request/*from w w w .j av a 2 s.c o m*/ * @return * @throws S3Exception */ @Override public HeadBucketResponseType headBucket(HeadBucketType request) throws S3Exception { HeadBucketResponseType reply = request.getReply(); User requestUser = getRequestUser(request); OsgInternalS3Client internalS3Client = null; // call the storage manager to save the bucket to disk try { internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); com.amazonaws.services.s3.model.AccessControlList responseList = s3Client .getBucketAcl(request.getBucket()); reply.setBucket(request.getBucket()); } catch (AmazonServiceException ex) { LOG.debug("Got service error from backend: " + ex.getMessage(), ex); throw S3ExceptionMapper.fromAWSJavaSDK(ex); } return reply; }
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
@Override public CreateBucketResponseType createBucket(CreateBucketType request) throws S3Exception { CreateBucketResponseType reply = request.getReply(); User requestUser = getRequestUser(request); OsgInternalS3Client internalS3Client = null; try {/*from w w w . j av a2 s.co m*/ internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); Bucket responseBucket = s3Client.createBucket(request.getBucket()); //Save the owner info in response? reply.setBucket(request.getBucket()); reply.setStatus(HttpResponseStatus.OK); reply.setStatusMessage("OK"); } catch (AmazonServiceException ex) { LOG.debug("Got service error from backend: " + ex.getMessage(), ex); throw S3ExceptionMapper.fromAWSJavaSDK(ex); } return reply; }
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
@Override public DeleteBucketResponseType deleteBucket(DeleteBucketType request) throws S3Exception { DeleteBucketResponseType reply = request.getReply(); User requestUser = getRequestUser(request); OsgInternalS3Client internalS3Client = null; // call the storage manager to save the bucket to disk try {// w w w . ja v a2 s . c om internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); s3Client.deleteBucket(request.getBucket()); } catch (AmazonServiceException ex) { LOG.debug("Got service error from backend: " + ex.getMessage(), ex); throw S3ExceptionMapper.fromAWSJavaSDK(ex); } return reply; }
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
@Override public GetBucketAccessControlPolicyResponseType getBucketAccessControlPolicy( GetBucketAccessControlPolicyType request) throws S3Exception { GetBucketAccessControlPolicyResponseType reply = request.getReply(); User requestUser = getRequestUser(request); OsgInternalS3Client internalS3Client = null; try {//from www. j av a 2 s. com internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); com.amazonaws.services.s3.model.AccessControlList acl = s3Client.getBucketAcl(request.getBucket()); reply.setAccessControlPolicy(sdkAclToEucaAcl(acl)); } catch (AmazonServiceException ex) { LOG.debug("Got service error from backend: " + ex.getMessage(), ex); throw S3ExceptionMapper.fromAWSJavaSDK(ex); } return reply; }
From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License:Open Source License
@Override public ListPartsResponseType listParts(ListPartsType request) throws S3Exception { ListPartsResponseType reply = request.getReply(); User requestUser = getRequestUser(request); OsgInternalS3Client internalS3Client = null; String bucketName = request.getBucket(); String key = request.getKey(); String uploadId = request.getUploadId(); ListPartsRequest listPartsRequest = new ListPartsRequest(bucketName, key, uploadId); if (request.getMaxParts() != null) { listPartsRequest.setMaxParts(request.getMaxParts()); }// w ww . j ava 2s .c o m if (request.getPartNumberMarker() != null) { listPartsRequest.setPartNumberMarker(request.getPartNumberMarker()); } try { internalS3Client = getS3Client(requestUser); AmazonS3Client s3Client = internalS3Client.getS3Client(); PartListing listing = s3Client.listParts(listPartsRequest); reply.setBucket(bucketName); reply.setKey(key); reply.setUploadId(uploadId); Initiator initiator = new Initiator(listing.getInitiator().getId(), listing.getInitiator().getDisplayName()); reply.setInitiator(initiator); CanonicalUser owner = new CanonicalUser(listing.getOwner().getId(), listing.getOwner().getDisplayName()); reply.setOwner(owner); reply.setStorageClass(listing.getStorageClass()); reply.setPartNumberMarker(listing.getPartNumberMarker()); reply.setNextPartNumberMarker(listing.getNextPartNumberMarker()); reply.setMaxParts(listing.getMaxParts()); reply.setIsTruncated(listing.isTruncated()); List<PartSummary> parts = listing.getParts(); List<Part> replyParts = reply.getParts(); for (PartSummary part : parts) { replyParts.add( new Part(part.getPartNumber(), part.getETag(), part.getLastModified(), part.getSize())); } } catch (AmazonServiceException ex) { LOG.debug("Got service error from backend: " + ex.getMessage(), ex); throw S3ExceptionMapper.fromAWSJavaSDK(ex); } return reply; }
From source file:com.example.S3Sample02.java
License:Open Source License
/**
 * Demonstrates basic Amazon S3 usage: upload a sample object, download it back, and
 * list objects by prefix, against a fixed pre-existing test bucket
 * ("imos-test-data-1"). Bucket create/delete and object delete steps are kept as
 * commented-out reference code.
 *
 * @param args unused
 * @throws IOException if creating or reading the sample file fails
 */
public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);

    // Region selection is left at the SDK default; enable these lines to pin one.
    // AP_SOUTHEAST_2
    // Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_2 );
    // s3.setRegion(usWest2);

    // A pre-existing bucket is used instead of creating a uniquely-named one.
    // String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String bucketName = "imos-test-data-1";
    // Random suffix keeps repeated runs from colliding on the same object key.
    String key = "MyObjectKey" + UUID.randomUUID();

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name. Disabled: the test bucket exists.
         */
        // System.out.println("Creating bucket " + bucketName + "\n");
        // s3.createBucket(bucketName);

        /*
         * List the buckets in your account (disabled).
         */
        /*
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();
        */

        /*
         * Upload an object to your bucket - You can easily upload a file to S3,
         * or upload directly an InputStream if you know the length of the data
         * in the stream. You can also specify your own metadata when uploading
         * to S3, which allows you set a variety of options like content-type
         * and content-encoding, plus additional metadata specific to your
         * applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));
        System.out.println("done\n");

        /*
         * Download an object - When you download an object, you get all of the
         * object's metadata and a stream from which to read the contents. It's
         * important to read the contents of the stream as quickly as possibly
         * since the data is streamed directly from Amazon S3 and your network
         * connection will remain open until you read all the data or close the
         * input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());
        System.out.println("done\n");

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();
        System.out.println("done\n");

        /*
         * Delete an object - Unless versioning has been turned on for your
         * bucket, there is no way to undelete an object, so use caution when
         * deleting objects. Disabled so the uploaded object remains.
         */
        // System.out.println("Deleting an object\n");
        // s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them. Disabled here.
         */
        /*
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
        */
    } catch (AmazonServiceException ase) {
        // The request reached S3 but was rejected; dump the error details.
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // The client failed before or without reaching S3 (e.g. network problems).
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.exedosoft.plat.storage.s3.S3Sample.java
License:Open Source License
/**
 * End-to-end Amazon S3 getting-started sample: creates a uniquely-named bucket in
 * us-west-2, lists the account's buckets, uploads a sample object, downloads it back,
 * and lists objects by prefix. The cleanup steps (object and bucket deletion) are
 * kept as commented-out reference code.
 *
 * @param args unused
 * @throws IOException if creating or reading the sample file fails
 */
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    // Random suffix makes the bucket name globally unique across runs.
    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to S3,
         * or upload directly an InputStream if you know the length of the data
         * in the stream. You can also specify your own metadata when uploading
         * to S3, which allows you set a variety of options like content-type
         * and content-encoding, plus additional metadata specific to your
         * applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of the
         * object's metadata and a stream from which to read the contents. It's
         * important to read the contents of the stream as quickly as possibly
         * since the data is streamed directly from Amazon S3 and your network
         * connection will remain open until you read all the data or close the
         * input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your
         * bucket, there is no way to undelete an object, so use caution when
         * deleting objects. Disabled here.
         */
        // System.out.println("Deleting an object\n");
        // s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them. Disabled here.
         */
        // System.out.println("Deleting bucket " + bucketName + "\n");
        // s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        // The request reached S3 but was rejected; dump the error details.
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // The client failed before or without reaching S3 (e.g. network problems).
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.facebook.presto.kinesis.s3config.S3TableConfigClient.java
License:Apache License
/** * Call S3 to get the most recent object list. * * This is an object list request to AWS in the given "directory". * * @return/*from w w w . j ava2 s.c o m*/ */ protected List<S3ObjectSummary> getObjectSummaries() { AmazonS3Client s3client = this.clientManager.getS3Client(); AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl); ArrayList<S3ObjectSummary> returnList = new ArrayList<S3ObjectSummary>(); try { log.info("Getting the listing of objects in the S3 table config directory: bucket %s prefix %s :", directoryURI.getBucket(), directoryURI.getKey()); ListObjectsRequest req = new ListObjectsRequest().withBucketName(directoryURI.getBucket()) .withPrefix(directoryURI.getKey() + "/").withDelimiter("/").withMaxKeys(25); ObjectListing result; do { result = s3client.listObjects(req); returnList.addAll(result.getObjectSummaries()); req.setMarker(result.getNextMarker()); } while (result.isTruncated()); log.info("Completed getting S3 object listing."); } catch (AmazonServiceException ase) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonServiceException, which means your request made it "); sb.append("to Amazon S3, but was rejected with an error response for some reason.\n"); sb.append("Error Message: " + ase.getMessage()); sb.append("HTTP Status Code: " + ase.getStatusCode()); sb.append("AWS Error Code: " + ase.getErrorCode()); sb.append("Error Type: " + ase.getErrorType()); sb.append("Request ID: " + ase.getRequestId()); log.error(sb.toString(), ase); } catch (AmazonClientException ace) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonClientException, " + "which means the client encountered " + "an internal error while trying to communicate" + " with S3, " + "such as not being able to access the network."); sb.append("Error Message: " + ace.getMessage()); log.error(sb.toString(), ace); } return returnList; }
From source file:com.facebook.presto.kinesis.s3config.S3TableConfigClient.java
License:Apache License
/** * Connect to S3 directory to look for new or updated table definitions and then * update the map./*from w ww .j a v a 2s . c o m*/ */ protected void updateTablesFromS3() { long now = System.currentTimeMillis(); List<S3ObjectSummary> objectList = this.getObjectSummaries(); AmazonS3Client s3client = this.clientManager.getS3Client(); AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl); // Build map of "deltas" which in the end contains new definitions and deleted tables HashMap<String, KinesisStreamDescription> deltasMap = new HashMap<String, KinesisStreamDescription>(); internalMapLock.readLock().lock(); try { Iterator<String> keysIter = this.internalMap.keySet().iterator(); while (keysIter.hasNext()) { deltasMap.put(keysIter.next(), dummyStreamDesc); } } finally { internalMapLock.readLock().unlock(); } for (S3ObjectSummary objInfo : objectList) { if (!deltasMap.containsKey(objInfo.getKey()) || objInfo.getLastModified().getTime() >= this.lastCheck) { // New or updated file, so we must read from AWS try { if (objInfo.getKey().endsWith("/")) { continue; } log.info("Getting : %s - %s", objInfo.getBucketName(), objInfo.getKey()); S3Object object = s3client .getObject(new GetObjectRequest(objInfo.getBucketName(), objInfo.getKey())); StringBuilder resultStr = new StringBuilder(""); try (BufferedReader reader = new BufferedReader( new InputStreamReader(object.getObjectContent()))) { boolean hasMore = true; while (hasMore) { String line = reader.readLine(); if (line != null) { resultStr.append(line); } else { hasMore = false; } } KinesisStreamDescription table = streamDescriptionCodec.fromJson(resultStr.toString()); deltasMap.put(objInfo.getKey(), table); log.info("Put table description into the map from %s : %s.%s", objInfo.getKey(), table.getSchemaName(), table.getTableName()); } catch (IOException iox) { log.error("Problem reading input stream from object.", iox); } catch (IllegalArgumentException iax) { // Note: this gets thrown by airlift json library when 
the input is malformed. log.error("Invalid JSON table description.", iax); } } catch (AmazonServiceException ase) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonServiceException, which means your request made it "); sb.append("to Amazon S3, but was rejected with an error response for some reason.\n"); sb.append("Error Message: " + ase.getMessage()); sb.append("HTTP Status Code: " + ase.getStatusCode()); sb.append("AWS Error Code: " + ase.getErrorCode()); sb.append("Error Type: " + ase.getErrorType()); sb.append("Request ID: " + ase.getRequestId()); log.error(sb.toString(), ase); } catch (AmazonClientException ace) { StringBuilder sb = new StringBuilder(); sb.append("Caught an AmazonClientException, " + "which means the client encountered " + "an internal error while trying to communicate" + " with S3, " + "such as not being able to access the network."); sb.append("Error Message: " + ace.getMessage()); log.error(sb.toString(), ace); } } else if (deltasMap.containsKey(objInfo.getKey())) { deltasMap.remove(objInfo.getKey()); } } // end loop through object descriptions // Deltas: key pointing to dummy means delete, key pointing to other object means update. // This approach lets us delete and update while shortening the locked critical section. Iterator<Map.Entry<String, KinesisStreamDescription>> deltasIter = deltasMap.entrySet().iterator(); internalMapLock.writeLock().lock(); try { while (deltasIter.hasNext()) { Map.Entry<String, KinesisStreamDescription> entry = deltasIter.next(); if (entry.getValue().getTableName().equals("__DUMMY__")) { this.internalMap.remove(entry.getKey()); } else { this.internalMap.put(entry.getKey(), entry.getValue()); } } } finally { internalMapLock.writeLock().unlock(); } log.info("Completed updating table definitions from S3."); this.lastCheck = now; return; }