List of usage examples for com.amazonaws AmazonServiceException getErrorCode
public String getErrorCode()
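A minimal, self-contained sketch of the usual pattern before the examples below (not taken from any of the source files listed here; the client setup and bucket name are placeholders): catch AmazonServiceException around an SDK call and branch on the string returned by getErrorCode().

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class GetErrorCodeSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client(); // uses the default credential provider chain
        try {
            s3.listObjects("example-bucket"); // placeholder bucket name
        } catch (AmazonServiceException ase) {
            // getErrorCode() returns the AWS error code string from the error response,
            // e.g. "NoSuchBucket" or "AccessDenied" for S3
            if ("NoSuchBucket".equals(ase.getErrorCode())) {
                System.err.println("Bucket does not exist: " + ase.getErrorMessage());
            } else {
                throw ase; // anything else is unexpected here
            }
        }
    }
}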
From source file:com.cloudera.director.aws.ec2.EC2InstanceTemplateConfigurationValidator.java
License:Apache License
/**
 * Validates the configured security group IDs.
 *
 * @param client the EC2 client
 * @param configuration the configuration to be validated
 * @param accumulator the exception condition accumulator
 * @param localizationContext the localization context
 */
@VisibleForTesting
void checkSecurityGroupIds(AmazonEC2Client client, Configured configuration,
        PluginExceptionConditionAccumulator accumulator, LocalizationContext localizationContext) {

    List<String> securityGroupsIds = EC2InstanceTemplate.CSV_SPLITTER
            .splitToList(configuration.getConfigurationValue(SECURITY_GROUP_IDS, localizationContext));

    for (String securityGroupId : securityGroupsIds) {
        LOG.info(">> Describing security group '{}'", securityGroupId);

        try {
            DescribeSecurityGroupsResult result = client
                    .describeSecurityGroups(new DescribeSecurityGroupsRequest().withGroupIds(securityGroupId));
            checkCount(accumulator, SECURITY_GROUP_IDS, localizationContext, securityGroupId,
                    result.getSecurityGroups());
        } catch (AmazonServiceException e) {
            if (e.getErrorCode().startsWith(INVALID_SECURITY_GROUP)) {
                addError(accumulator, SECURITY_GROUP_IDS, localizationContext, null,
                        INVALID_SECURITY_GROUP_MSG, securityGroupId);
            } else {
                throw Throwables.propagate(e);
            }
        }
    }
}
From source file:com.cloudera.director.aws.ec2.EC2InstanceTemplateConfigurationValidator.java
License:Apache License
/**
 * Validates the EC2 key name.
 *
 * @param client the EC2 client
 * @param accumulator the exception condition accumulator
 * @param localizationContext the localization context
 */
@VisibleForTesting
void checkKeyName(AmazonEC2Client client, Configured configuration,
        PluginExceptionConditionAccumulator accumulator, LocalizationContext localizationContext) {

    String keyName = configuration.getConfigurationValue(KEY_NAME, localizationContext);

    if (keyName != null) {
        LOG.info(">> Describing key pair");
        try {
            DescribeKeyPairsResult result = client
                    .describeKeyPairs(new DescribeKeyPairsRequest().withKeyNames(keyName));
            // TODO Should this be REDACTED instead of NotDisplayed?
            checkCount(accumulator, KEY_NAME, localizationContext, "NotDisplayed", result.getKeyPairs());
        } catch (AmazonServiceException e) {
            if (e.getErrorCode().startsWith(INVALID_KEY_PAIR)) {
                addError(accumulator, KEY_NAME, localizationContext, null, INVALID_KEY_NAME_MSG, keyName);
            } else {
                throw Throwables.propagate(e);
            }
        }
    }
}
From source file:com.cloudera.director.aws.ec2.EC2Provider.java
License:Apache License
/**
 * Atomically allocates multiple regular EC2 instances with the specified identifiers based on a
 * single instance template. If not all the instances can be allocated, the number of instances
 * allocated must be at least the specified minimum or the method must fail cleanly with no
 * billing implications.
 *
 * @param template the instance template
 * @param virtualInstanceIds the unique identifiers for the instances
 * @param minCount the minimum number of instances to allocate if not all resources can
 *                 be allocated
 * @return the virtual instance ids of the instances that were allocated
 * @throws InterruptedException if the operation is interrupted
 */
public Collection<String> allocateOnDemandInstances(EC2InstanceTemplate template,
        Collection<String> virtualInstanceIds, int minCount) throws InterruptedException {

    int instanceCount = virtualInstanceIds.size();

    LOG.info(">> Requesting {} instances for {}", instanceCount, template);

    // EC2 client doesn't accept a min count of 0. Readjust the requested
    // value to 1 to allow submitting the request.
    int normalizedMinCount = (minCount == 0) ? 1 : minCount;

    RunInstancesResult runInstancesResult;
    try {
        runInstancesResult = client
                .runInstances(newRunInstancesRequest(template, virtualInstanceIds, normalizedMinCount));
    } catch (AmazonServiceException e) {
        AWSExceptions.propagateIfUnrecoverable(e);

        // As documented at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-capacity.html
        if ("InsufficientInstanceCapacity".equals(e.getErrorCode())
                || "InstanceLimitExceeded".equals(e.getErrorCode())) {
            if (minCount == 0) {
                LOG.warn("Ignoring insufficient capacity exception due to min count being zero", e);
                return Collections.emptyList();
            } else {
                // fail fast on insufficient instance capacity because we expect it will take
                // a fair amount of time for AWS to bring more capacity online in a zone or it will
                // take some time for customers to request a limit increase
                throw new UnrecoverableProviderException(e.getErrorMessage(), e);
            }
        } else {
            throw AWSExceptions.propagate(e);
        }
    }

    if (LOG.isInfoEnabled()) {
        LOG.info("<< Reservation {} with {}", runInstancesResult.getReservation().getReservationId(),
                summarizeReservationForLogging(runInstancesResult.getReservation()));
    }

    // Tag all the new instances so that we can easily find them later on.
    // Determine which do not yet have a private IP address.
    List<Tag> userDefinedTags = getUserDefinedTags(template);

    Set<String> instancesWithNoPrivateIp = Sets.newHashSet();
    List<Instance> instances = runInstancesResult.getReservation().getInstances();

    // Limit the number of virtual instance ids used for tagging to the
    // number of instances that we managed to reserve.
    List<String> virtualInstanceIdsAllocated = FluentIterable.from(virtualInstanceIds).limit(instances.size())
            .toList();

    List<String> successfulVirtualInstanceIds = Lists.newArrayList();
    for (Map.Entry<String, Instance> entry : zipWith(virtualInstanceIdsAllocated, instances)) {
        String virtualInstanceId = entry.getKey();
        Instance instance = entry.getValue();
        String ec2InstanceId = instance.getInstanceId();

        if (tagInstance(template, userDefinedTags, virtualInstanceId, ec2InstanceId)) {
            successfulVirtualInstanceIds.add(virtualInstanceId);

            if (instance.getPrivateIpAddress() == null) {
                instancesWithNoPrivateIp.add(ec2InstanceId);
            } else {
                LOG.info("<< Instance {} got IP {}", ec2InstanceId, instance.getPrivateIpAddress());
            }
        } else {
            LOG.info("<< Instance {} could not be tagged.", ec2InstanceId);
        }
    }

    int numInstancesTagged = successfulVirtualInstanceIds.size();
    if (numInstancesTagged < minCount) {
        LOG.error("Number of instances tagged ({}) is less than the minimum count ({}). One or more instances "
                + "may have unexpectedly terminated prior to tagging.", numInstancesTagged, minCount);
        delete(template, successfulVirtualInstanceIds);
        throw new UnrecoverableProviderException(
                String.format(COUNT_BELOW_THRESHOLD, numInstancesTagged, minCount));
    }

    // Wait until all of them have a private IP (it should be pretty fast)
    waitForPrivateIpAddresses(instancesWithNoPrivateIp);

    return virtualInstanceIdsAllocated;
}
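The allocation example above drives its recovery strategy off two specific EC2 error code strings. As a standalone illustration of that classification step (the class and method names here are made up for the sketch; the error codes are the documented EC2 ones used above), the check can be factored into a small predicate:

import com.amazonaws.AmazonServiceException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

final class Ec2CapacityErrors {
    // EC2 error codes that indicate a capacity or account-limit problem rather than a bad request
    private static final Set<String> CAPACITY_ERROR_CODES = new HashSet<>(
            Arrays.asList("InsufficientInstanceCapacity", "InstanceLimitExceeded"));

    private Ec2CapacityErrors() {
    }

    /** Returns true if the exception reports an EC2 capacity/limit error code. */
    static boolean isCapacityError(AmazonServiceException e) {
        return CAPACITY_ERROR_CODES.contains(e.getErrorCode());
    }
}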
From source file:com.cloudhub.aws.extractor.AWSCSVExtractor.java
License:Apache License
/**
 * Requests billing information from Amazon S3.
 * This method may spawn multiple threads as needed to complete the task.
 */
@Override
public String getTotalCost() {
    String totalCost = null;
    try {
        log.debug("Listing objects ...");
        final ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
        ObjectListing objectListing;
        do {
            objectListing = s3client.listObjects(listObjectsRequest);
            for (final S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                log.debug(" - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
                if (objectSummary.getKey().contains(Constants.MATCHER_BILLING_CSV.getKeyPattern())) {
                    totalCost = persist(Constants.MATCHER_BILLING_CSV, objectSummary);
                } else if (objectSummary.getKey().contains(Constants.MATCHER_COST_ALLOCATION.getKeyPattern())) {
                    totalCost = persist(Constants.MATCHER_COST_ALLOCATION, objectSummary);
                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());
    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        log.error("Error Message: " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code: " + ase.getErrorCode());
        log.error("Error Type: " + ase.getErrorType());
        log.error("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    } catch (IOException ioe) {
        log.error("Caught an IOException while writing to disk.");
        log.error("Error Message: " + ioe.getMessage());
    }
    return totalCost;
}
From source file:com.crickdata.upload.s3.UploadLiveData.java
License:Open Source License
public Map<String, Date> uploadToS3(String fileName, boolean type) throws IOException {
    Statistics statistics = new Statistics();
    Map<String, Date> perfMap = new HashMap<String, Date>();
    AWSCredentials credentials = null;
    try {
        credentials = new BasicAWSCredentials("AKIAI6QKTRAQE7MXQOIQ",
                "wIG6u1yI5ZaseeJbvYSUmD98qelIJNSCVBzt5k2q");
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\bssan_000\\.aws\\credentials), and is in valid format.", e);
    }
    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    String bucketName;
    if (!type)
        bucketName = "cricmatchinfo";
    else
        bucketName = "cricmatchinfoseries";
    String key = fileName.replace(".json", "").trim();
    try {
        perfMap.put("S3INSERTREQ", new Date());
        statistics.setS3Req(new Date());
        File f = readMatchFile(fileName);
        double bytes = f.length();
        double kilobytes = (bytes / 1024);
        System.out.println("Details :" + kilobytes);
        s3.putObject(new PutObjectRequest(bucketName, key, f));
        statistics.setSize(String.valueOf(kilobytes));
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        perfMap.put("S3SAVERES", object.getObjectMetadata().getLastModified());
        statistics.setKey(key);
        statistics.setS3Res(object.getObjectMetadata().getLastModified());
        MyUI.stats.add(statistics);
        displayTextInputStream(object.getObjectContent());
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
    return perfMap;
}
From source file:com.davidgildeh.hadoop.input.simpledb.SimpleDBDAO.java
License:Apache License
/**
 * Does a paged query in SimpleDB
 *
 * @param query The Select Query
 * @param nextToken If there is a paging token to start from, or null if none
 * @return The SelectResult from the query
 */
private SelectResult doQuery(String query, String nextToken) {
    SelectResult results = null;
    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Running Query: " + query);
        }
        SelectRequest selectRequest = new SelectRequest(query);
        if (nextToken != null) {
            selectRequest.setNextToken(nextToken);
        }
        results = sdb.select(selectRequest);
    } catch (AmazonServiceException ase) {
        LOG.error("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SimpleDB, but was rejected with an error response for some reason.");
        LOG.error("Select Query: " + query);
        LOG.error("Error Message: " + ase.getMessage());
        LOG.error("HTTP Status Code: " + ase.getStatusCode());
        LOG.error("AWS Error Code: " + ase.getErrorCode());
        LOG.error("Error Type: " + ase.getErrorType());
        LOG.error("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        LOG.error("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SimpleDB, "
                + "such as not being able to access the network.");
        LOG.error("Error Message: " + ace.getMessage());
    } finally {
        return results;
    }
}
From source file:com.deploymentio.cfnstacker.CloudFormationClient.java
License:Apache License
/**
 * Gets all non-progress events for a stack that were generated after a
 * certain time. This method will ignore any "throttling" error from AWS and
 * return empty results.
 *
 * @param stackId unique ID for the stack
 * @param startDate only events after this time are considered
 * @return a list of stack events
 */
public List<StackEvent> getStackEvents(String stackId, Date startDate, OperationTracker tracker,
        int checkIntervalSeconds) {

    ArrayList<StackEvent> events = new ArrayList<StackEvent>();
    DescribeStackEventsResult result = null;
    String nextToken = null;

    doLoop: do {
        try {
            result = client.describeStackEvents(new DescribeStackEventsRequest().withStackName(stackId));
        } catch (AmazonServiceException ase) {
            if ("Throttling".equals(ase.getErrorCode())) {
                logger.warn("Got a throttling error from AWS while calling describeStackEvents()");
                break;
            } else {
                throw ase;
            }
        }

        nextToken = result.getNextToken();
        for (StackEvent evt : result.getStackEvents()) {

            // break out if we start seeing events older than our start date
            if (!evt.getTimestamp().after(startDate)) {
                if (logger.isTraceEnabled()) {
                    logger.trace(createStackEventLogMessage(evt, startDate, "Saw event older than startdate"));
                }
                break doLoop;
            }

            // mark that an event was generated
            if (tracker != null) {
                tracker.markEventsGenerated(stackId);
            }

            // ignore IN_PROGRESS events
            if (!evt.getResourceStatus().endsWith("_IN_PROGRESS")) {
                if (logger.isTraceEnabled()) {
                    logger.trace(createStackEventLogMessage(evt, startDate, "Adding event"));
                }
                events.add(evt);
            } else {
                if (logger.isTraceEnabled()) {
                    logger.trace(createStackEventLogMessage(evt, startDate, "Ignoring event"));
                }
            }

            // start tracking a sub-stack if we come across one
            if (tracker != null && evt.getResourceType().equals("AWS::CloudFormation::Stack")
                    && !evt.getPhysicalResourceId().equals(stackId)) {
                tracker.track(this, evt.getLogicalResourceId(), evt.getPhysicalResourceId(),
                        checkIntervalSeconds);
            }
        }
    } while (!StringUtils.isEmpty(nextToken));

    // sort the events
    Collections.sort(events, new Comparator<StackEvent>() {
        @Override
        public int compare(StackEvent e1, StackEvent e2) {
            return e1.getTimestamp().compareTo(e2.getTimestamp());
        }
    });

    return events;
}
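The example above simply logs and stops paging when getErrorCode() reports "Throttling". A common alternative, sketched here under the assumption that the call is safe to repeat (the class name, method name, and delay values are illustrative only), is to back off and retry a bounded number of times:

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.cloudformation.AmazonCloudFormationClient;
import com.amazonaws.services.cloudformation.model.DescribeStacksRequest;
import com.amazonaws.services.cloudformation.model.DescribeStacksResult;

final class ThrottlingRetrySketch {
    /** Retries describeStacks up to maxAttempts times when AWS returns a "Throttling" error code. */
    static DescribeStacksResult describeWithRetry(AmazonCloudFormationClient client, String stackName,
            int maxAttempts) throws InterruptedException {
        long delayMillis = 1000L;
        for (int attempt = 1; ; attempt++) {
            try {
                return client.describeStacks(new DescribeStacksRequest().withStackName(stackName));
            } catch (AmazonServiceException ase) {
                if (!"Throttling".equals(ase.getErrorCode()) || attempt >= maxAttempts) {
                    throw ase; // not a throttle, or out of attempts
                }
                Thread.sleep(delayMillis); // simple exponential backoff
                delayMillis *= 2;
            }
        }
    }
}

Note that the SDK applies its own default retry policy before the exception ever reaches application code; this sketch only shows how getErrorCode() can drive an additional application-level retry decision.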
From source file:com.dev.appx.sns.SNSMobilePush.java
License:Open Source License
public static void igniteSNS() throws IOException {
    /*
     * TODO: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonSNS sns = new AmazonSNSClient(
            new PropertiesCredentials(SNSMobilePush.class.getResourceAsStream("/AwsCredentials.properties")));

    sns.setEndpoint("https://sns.us-west-2.amazonaws.com");

    System.out.println("===========================================\n");
    System.out.println("Getting Started with Amazon SNS");
    System.out.println("===========================================\n");

    try {
        SNSMobilePush sample = new SNSMobilePush(sns);
        /* TODO: Uncomment the services you wish to use. */
        //sample.demoAndroidAppNotification();
        // sample.demoKindleAppNotification();
        // sample.demoAppleAppNotification();
        // sample.demoAppleSandboxAppNotification();
        // sample.demoBaiduAppNotification();
        // sample.demoWNSAppNotification();
        // sample.demoMPNSAppNotification();
        sample.createTopic("test");
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SNS, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SNS, such as not "
                + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.digitaslbi.helios.mock.utils.ConnectionHelper.java
public static void createFolder(String bucketName, String folderName) {
    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    // create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
            folderName + MocksConstants.AWS_PARENT_DELIMITER.getValue(), emptyContent, metadata);

    // send request to S3 to create folder
    try {
        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        log.error("Error Message: " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code: " + ase.getErrorCode());
        log.error("Error Type: " + ase.getErrorType());
        log.error("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}
From source file:com.digitaslbi.helios.mock.utils.ConnectionHelper.java
public static void uploadFile(String bucketName, String folderName, String filePath) {
    try {
        log.debug("Uploading a new object to S3 from a file\n");
        java.io.File file = new java.io.File(filePath);
        s3Client.putObject(new PutObjectRequest(bucketName, folderName, file)
                .withCannedAcl(CannedAccessControlList.PublicRead));
    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        log.error("Error Message: " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code: " + ase.getErrorCode());
        log.error("Error Type: " + ase.getErrorType());
        log.error("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}
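Several of the examples above repeat the same block of log statements for an AmazonServiceException. As a closing illustration (the class name, method name, and formatting are made up for this sketch), that boilerplate can be collected into a single helper:

import com.amazonaws.AmazonServiceException;

final class ServiceExceptionLog {
    private ServiceExceptionLog() {
    }

    /** Formats the standard AmazonServiceException diagnostics into one log-friendly string. */
    static String describe(AmazonServiceException ase) {
        return "Error Message: " + ase.getMessage()
                + "; HTTP Status Code: " + ase.getStatusCode()
                + "; AWS Error Code: " + ase.getErrorCode()
                + "; Error Type: " + ase.getErrorType()
                + "; Request ID: " + ase.getRequestId();
    }
}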