List of usage examples for com.amazonaws.AmazonServiceException.getErrorCode()
public String getErrorCode()
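getErrorCode() returns the service-defined error code string (for example "NoSuchBucket" or "InvalidInternetGatewayID.NotFound") carried in the error response of a request that reached AWS but was rejected. The examples below all follow the same pattern: catch AmazonServiceException, log or inspect getErrorCode(), and decide whether the failure is recoverable. The following minimal sketch illustrates that pattern on its own; the bucket name and class name are placeholders invented for this illustration, not taken from any of the source files listed below.

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class GetErrorCodeSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client(); // uses the default credential chain
        try {
            // hypothetical bucket name, expected not to exist
            s3.listObjects("some-bucket-that-does-not-exist-" + System.nanoTime());
        } catch (AmazonServiceException ase) {
            // The request reached S3 but was rejected; getErrorCode() returns the
            // service-defined code string, e.g. "NoSuchBucket" or "AccessDenied".
            if ("NoSuchBucket".equals(ase.getErrorCode())) {
                System.out.println("Bucket does not exist: " + ase.getErrorMessage());
            } else {
                throw ase; // anything else is unexpected here, so rethrow
            }
        }
    }
}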
From source file:com.topera.epoch.service.S3Util.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials
     * (C:\Users\USER_NAME\.aws\credentials for Windows users)
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */
    AWSCredentials creds = new AWSCredentials() {
        public String getAWSSecretKey() {
            return "5VVtmI7vcecuVbw8JsG4uo2O1/9RwwLHrTT01Itz";
        }

        public String getAWSAccessKeyId() {
            return "AKIAJCMYALI46A2DIPRQ";
        }
    };
    AmazonS3 s3 = new AmazonS3Client(creds);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.tweettrends.pravar.FilterStreamExample.java
License:Apache License
public static void main(String[] args) {
    final SimpleQueueService simpleQueueService = new SimpleQueueService();
    final List<Future<String>> results = new ArrayList<Future<String>>();
    final HashMap<Future<String>, Message> map = new HashMap<Future<String>, Message>(100);

    SimpleNotificationService simpleNotificationService = new SimpleNotificationService();
    simpleNotificationService.subscribeToTopic();

    try {
        //simpleQueueService.createQueue();
        simpleQueueService.listQueues();
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SQS, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SQS, such as not "
                + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                FilterStreamExample.run("9sB7Y7zyxFTgEpk87ZwuZMFZR",
                        "TPvVJJ09FhQeduDR10xJw8t5LJ4i75uu6GYQefVtHt7ebUTgZi",
                        "840399362987560960-MTKPBj2U67boTVP4ug6LWiUdvksF0gO",
                        "adanfdOhMgPmil1TsWpD1vKvfdY6ErRVX2xCqPS6NgaEF", simpleQueueService);
            } catch (InterruptedException e) {
                System.out.println(e);
            }
        }
    }).start();

    ExecutorService pool = Executors.newCachedThreadPool();

    Thread thread = new Thread(new Runnable() {
        @Override
        public void run() {
            System.out.println("Entered runnable");
            while (true) {
                Iterator<Future<String>> iterator = results.iterator();
                try {
                    sharedSemaphore.acquire();
                    while (iterator.hasNext()) {
                        Future<String> result = iterator.next();
                        String sentiment = result.get();
                        Message message = map.get(result);
                        // send sentiment and message to SNS
                        System.out.println("Notifying SNS");
                        //simpleNotificationService.publishToTopic(message.getBody());
                        iterator.remove();
                    }
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } catch (ExecutionException e) {
                    e.printStackTrace();
                } finally {
                    sharedSemaphore.release();
                }
            }
        }
    });
    thread.start();

    while (true) {
        List<Message> messages = simpleQueueService.receiveMessages();
        if (messages != null) {
            System.out.println("Received Messages!");
            for (Message msg : messages) {
                System.out.println("Message body : " + msg.getBody());
            }
            simpleQueueService.deleteMessages(messages);
            for (Message message : messages) {
                Worker worker = new Worker(message);
                Future<String> result = pool.submit(worker);
                try {
                    sharedSemaphore.acquire();
                    results.add(result);
                    map.put(result, message);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    sharedSemaphore.release();
                }
            }
        }
    }
}
From source file:com.ub.ml.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials
     * (C:\Users\USER_NAME\.aws\credentials for Windows users)
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */
    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_EAST_1);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.uiintl.backup.agent.AwsBackupAgent.java
License:Open Source License
void handleAwsException(AmazonClientException ace) {
    if (ace instanceof AmazonServiceException) {
        AmazonServiceException ase = (AmazonServiceException) ace;
        logger.error(
                "Caught an AmazonServiceException, which means your request made it to Amazon S3, but was rejected with an error response for some reason.");
        logger.error("Error Message: {}", ase.getMessage());
        logger.error("HTTP Status Code: {}", ase.getStatusCode());
        logger.error("AWS Error Code: {}", ase.getErrorCode());
        logger.error("Error Type: {}", ase.getErrorType());
        logger.error("Request ID: {}", ase.getRequestId());
        logger.error("Stacktrace: ", ase);
    } else {
        logger.error(
                "Caught an AmazonClientException, which means the client encountered a serious internal problem while trying to communicate with S3, such as not being able to access the network.");
        logger.error("Error Message: {}", ace.getMessage());
    }
}
From source file:com.uiintl.backup.agent.samples.S3Sample.java
License:Open Source License
public static void main2(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.AP_SOUTHEAST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.urbancode.terraform.tasks.aws.helpers.AWSHelper.java
License:Apache License
/**
 * Detaches the given internet gateway from the given vpc
 *
 * @param gatewayId
 * @param vpcId
 * @param ec2Client
 */
public void detachGateway(String gatewayId, String vpcId, AmazonEC2 ec2Client) {
    try {
        DetachInternetGatewayRequest request = new DetachInternetGatewayRequest()
                .withInternetGatewayId(gatewayId).withVpcId(vpcId);
        ec2Client.detachInternetGateway(request);
    } catch (AmazonServiceException e) {
        log.error("Failed to detach Internet Gateway", e);
        if (!"InvalidInternetGatewayID.NotFound".equalsIgnoreCase(e.getErrorCode())) {
            // only swallow the exception if the gateway id was not found
            throw e;
        }
    }
}
From source file:com.urbancode.terraform.tasks.aws.helpers.AWSHelper.java
License:Apache License
/**
 * Retrieves the AssociationId (given when you associate an elastic IP with an instance)
 * from the AllocationId (given when you request an elastic IP)
 *
 * @param allocId
 * @param ec2Client
 * @return
 */
public String getAssociationIdForAllocationId(String allocId, AmazonEC2 ec2Client) {
    String assocId = null;
    try {
        DescribeAddressesRequest request = new DescribeAddressesRequest().withAllocationIds(allocId);
        DescribeAddressesResult result = ec2Client.describeAddresses(request);
        List<Address> addresses = result.getAddresses();
        if (addresses != null && !addresses.isEmpty()) {
            if (addresses.size() > 1) {
                log.error("Found more than one Address for allocationId ( " + allocId + " ) !");
            }
            assocId = addresses.get(0).getAssociationId();
        }
    } catch (AmazonServiceException e) {
        log.error("AmazonServiceException caught while trying to get Association Id", e);
        if (!"InvalidAllocationID.NotFound".equals(e.getErrorCode())) {
            throw e;
        }
    }
    return assocId;
}
From source file:com.urbancode.terraform.tasks.aws.helpers.AWSHelper.java
License:Apache License
/**
 * Deletes the EBS volume with the given id.
 *
 * @param volumeId
 * @param ec2Client
 */
public void deleteEbsVolume(String volumeId, AmazonEC2 ec2Client) {
    try {
        log.info("Deleting EBS Volume (" + volumeId + ")");
        DeleteVolumeRequest request = new DeleteVolumeRequest().withVolumeId(volumeId);
        ec2Client.deleteVolume(request);
    } catch (AmazonServiceException e) {
        log.error("Failed to delete EBS Volume", e);
        if (!"InvalidVolume.NotFound".equalsIgnoreCase(e.getErrorCode())) {
            throw e;
        }
    }
}
From source file:com.urbancode.terraform.tasks.aws.helpers.AWSHelper.java
License:Apache License
/**
 * Deletes the route corresponding to the given info
 *
 * @param routeTableId
 * @param destCidr
 * @param ec2Client
 */
public void deleteRoute(String routeTableId, String destCidr, AmazonEC2 ec2Client) {
    try {
        DeleteRouteRequest request = new DeleteRouteRequest().withDestinationCidrBlock(destCidr)
                .withRouteTableId(routeTableId);
        ec2Client.deleteRoute(request);
    } catch (AmazonServiceException e) {
        log.error("Failed to delete Route: "
                + "\n\tRouteTableId: " + routeTableId
                + "\n\tDestination CIDR: " + destCidr, e);
        if (!"InvalidRouteTableID.NotFound".equals(e.getErrorCode())) {
            throw e;
        }
    }
}
From source file:com.urbancode.terraform.tasks.aws.helpers.AWSHelper.java
License:Apache License
/**
 * Creates a route with the given info. The attach id can be either an instance (as a NAT) or a
 * gateway. If the attachId is an instance (starts with "i-"), then the src/destination check
 * on the instance will be disabled to allow it to act as a NAT.
 *
 * @param routeTableId the id of the route table to add this route to
 * @param destCidr
 * @param attachId of the instance or internet gateway
 * @param ec2Client AmazonEC2 connection
 */
public void createRoute(String routeTableId, String destCidr, String attachId, AmazonEC2 ec2Client) {
    try {
        CreateRouteRequest request = new CreateRouteRequest().withDestinationCidrBlock(destCidr)
                .withRouteTableId(routeTableId);
        if (attachId.startsWith("i-")) {
            request = request.withInstanceId(attachId);
            // disable src/dest check to allow instance to run as NAT
            setSrcDestCheck(false, attachId, ec2Client);
        } else if (attachId.startsWith("igw-")) {
            request = request.withGatewayId(attachId);
        }
        ec2Client.createRoute(request);
    } catch (AmazonServiceException e) {
        log.error("Failed to create Route in Route Table " + routeTableId, e);
        if (!"InvalidRouteTableID.NotFound".equalsIgnoreCase(e.getErrorCode())) {
            // only swallow the exception if the route table id was not found
            throw e;
        }
    }
}
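All of the AWSHelper methods above repeat one idiom: treat a specific "*.NotFound" error code as harmless (the resource is already gone) and rethrow everything else. If that check recurs often, it can be pulled into a small helper. The sketch below is not code from the project above; the class and method names (ErrorCodes, ignoreIfErrorCode) are invented here purely for illustration.

import com.amazonaws.AmazonServiceException;

final class ErrorCodes {
    private ErrorCodes() {
    }

    /**
     * Rethrows the exception unless its error code matches the one we consider
     * harmless (typically a "*.NotFound" code meaning the resource is already gone).
     */
    static void ignoreIfErrorCode(AmazonServiceException e, String harmlessCode) {
        if (!harmlessCode.equalsIgnoreCase(e.getErrorCode())) {
            throw e;
        }
    }
}

// Hypothetical usage, e.g. inside deleteEbsVolume:
//     } catch (AmazonServiceException e) {
//         log.error("Failed to delete EBS Volume", e);
//         ErrorCodes.ignoreIfErrorCode(e, "InvalidVolume.NotFound");
//     }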