List of usage examples for com.amazonaws.AmazonServiceException#getRequestId()
public String getRequestId()
From source file:net.solarnetwork.node.backup.s3.SdkS3Client.java
License:Open Source License
@Override public String getObjectAsString(String key) throws IOException { AmazonS3 client = getClient();// w ww . j a v a2 s .com try { return client.getObjectAsString(bucketName, key); } catch (AmazonServiceException e) { log.warn("AWS error: {}; HTTP code {}; AWS code {}; type {}; request ID {}", e.getMessage(), e.getStatusCode(), e.getErrorCode(), e.getErrorType(), e.getRequestId()); throw new RemoteServiceException("Error getting S3 object at " + key, e); } catch (AmazonClientException e) { log.debug("Error communicating with AWS: {}", e.getMessage()); throw new IOException("Error communicating with AWS", e); } }
From source file:net.solarnetwork.node.backup.s3.SdkS3Client.java
License:Open Source License
/**
 * Fetch an S3 object from the configured bucket.
 *
 * @param key the key of the object to get
 * @return the S3 object
 * @throws IOException if communication with AWS fails
 * @throws RemoteServiceException if AWS rejects the request
 */
@Override
public S3Object getObject(String key) throws IOException {
    AmazonS3 s3 = getClient();
    try {
        return s3.getObject(bucketName, key);
    } catch (AmazonServiceException e) {
        // Request reached AWS but was rejected; log full diagnostic detail.
        log.warn("AWS error: {}; HTTP code {}; AWS code {}; type {}; request ID {}", e.getMessage(),
                e.getStatusCode(), e.getErrorCode(), e.getErrorType(), e.getRequestId());
        throw new RemoteServiceException("Error getting S3 object at " + key, e);
    } catch (AmazonClientException e) {
        // Client-side failure (e.g. network); surface as an IOException.
        log.debug("Error communicating with AWS: {}", e.getMessage());
        throw new IOException("Error communicating with AWS", e);
    }
}
From source file:net.solarnetwork.node.backup.s3.SdkS3Client.java
License:Open Source License
/**
 * Store an object in the configured S3 bucket.
 *
 * @param key the key to store the object under
 * @param in the object content
 * @param objectMetadata metadata (content length, last-modified date) for the object
 * @return a reference describing the stored object
 * @throws IOException if communication with AWS fails
 * @throws RemoteServiceException if AWS rejects the request
 */
@Override
public S3ObjectReference putObject(String key, InputStream in, ObjectMetadata objectMetadata)
        throws IOException {
    AmazonS3 s3 = getClient();
    try {
        s3.putObject(new PutObjectRequest(bucketName, key, in, objectMetadata));
        return new S3ObjectReference(key, objectMetadata.getContentLength(),
                objectMetadata.getLastModified());
    } catch (AmazonServiceException e) {
        // Request reached AWS but was rejected; log full diagnostic detail.
        log.warn("AWS error: {}; HTTP code {}; AWS code {}; type {}; request ID {}", e.getMessage(),
                e.getStatusCode(), e.getErrorCode(), e.getErrorType(), e.getRequestId());
        throw new RemoteServiceException("Error putting S3 object at " + key, e);
    } catch (AmazonClientException e) {
        // Client-side failure (e.g. network); surface as an IOException.
        log.debug("Error communicating with AWS: {}", e.getMessage());
        throw new IOException("Error communicating with AWS", e);
    }
}
From source file:net.solarnetwork.node.backup.s3.SdkS3Client.java
License:Open Source License
/**
 * Delete a set of objects from the configured S3 bucket in a single batch request.
 *
 * @param keys the keys of the objects to delete
 * @throws IOException if communication with AWS fails
 * @throws RemoteServiceException if AWS rejects the request
 */
@Override
public void deleteObjects(Set<String> keys) throws IOException {
    AmazonS3 s3 = getClient();
    try {
        DeleteObjectsRequest request = new DeleteObjectsRequest(bucketName)
                .withKeys(keys.stream().map(KeyVersion::new).collect(Collectors.toList()));
        s3.deleteObjects(request);
    } catch (AmazonServiceException e) {
        // Request reached AWS but was rejected; log full diagnostic detail.
        log.warn("AWS error: {}; HTTP code {}; AWS code {}; type {}; request ID {}", e.getMessage(),
                e.getStatusCode(), e.getErrorCode(), e.getErrorType(), e.getRequestId());
        throw new RemoteServiceException("Error deleting S3 objects " + keys, e);
    } catch (AmazonClientException e) {
        // Client-side failure (e.g. network); surface as an IOException.
        log.debug("Error communicating with AWS: {}", e.getMessage());
        throw new IOException("Error communicating with AWS", e);
    }
}
From source file:org.akvo.flow.deploy.Deploy.java
License:Open Source License
public static void main(String[] args) throws IOException { if (args.length != 7) { System.err.println("Missing argument, please provide S3 access key, S3 secret key, " + "instanceId , apkPath, version, GAE username and GAE password"); return;//from w ww . j a va 2s. co m } File file = new File(args[APK_PATH]); if (!file.exists()) { System.err.println("Can't find apk at " + args[APK_PATH]); return; } final String accessKey = args[S3_ACCESS_KEY]; final String secretKey = args[S3_SECRET_KEY]; final String instance = args[INSTANCE_ID]; final String accountId = args[ACCOUNT_ID]; final String accountSecret = args[ACCOUNT_SECRET]; final String version = args[VERSION]; final String s3Path = "apk/" + instance + "/" + file.getName(); final String s3Url = "http://akvoflow.s3.amazonaws.com/apk/" + instance + '/' + file.getName(); final String host = instance + ".appspot.com"; try { uploadS3(accessKey, secretKey, s3Path, file); updateVersion(host, accountId, accountSecret, s3Url, version, getMD5Checksum(file)); } catch (AmazonServiceException ase) { System.err.println("Caught an AmazonServiceException, which means your request made it " + "to Amazon S3, but was rejected with an error response for some reason."); System.err.println("Error Message: " + ase.getMessage()); System.err.println("HTTP Status Code: " + ase.getStatusCode()); System.err.println("AWS Error Code: " + ase.getErrorCode()); System.err.println("Error Type: " + ase.getErrorType()); System.err.println("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { System.err.println("Caught an AmazonClientException, which means the client encountered " + "a serious internal problem while trying to communicate with S3, " + "such as not being able to access the network."); System.err.println("Error Message: " + ace.getMessage()); } catch (IOException e) { System.err.println("Error updating APK version in GAE"); e.printStackTrace(); } }
From source file:org.apache.airavata.gfac.ec2.AmazonInstanceScheduler.java
License:Apache License
/**
 * Monitors the CPU Utilization of an instance over the last 24 hours using Amazon
 * Cloud Watch. In order to monitor the instance, Cloud Watch Monitoring should be
 * enabled for the running instance.
 *
 * @param credential EC2 credentials
 * @param instanceId instance id
 * @return the CPU utilization taken from the datapoints (see note below), or 0 when
 *         no datapoints are available or the CloudWatch request fails
 */
public static double monitorInstance(AWSCredentials credential, String instanceId) {
    try {
        AmazonCloudWatchClient cw = new AmazonCloudWatchClient(credential);
        // Look back over the last 24 hours, aggregated in one-hour periods.
        long offsetInMilliseconds = 1000 * 60 * 60 * 24;
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest()
                .withStartTime(new Date(new Date().getTime() - offsetInMilliseconds))
                .withNamespace("AWS/EC2")
                .withPeriod(60 * 60)
                .withDimensions(new Dimension().withName("InstanceId").withValue(instanceId))
                .withMetricName("CPUUtilization")
                .withStatistics("Average", "Maximum")
                .withEndTime(new Date());
        GetMetricStatisticsResult getMetricStatisticsResult = cw.getMetricStatistics(request);

        double avgCPUUtilization = 0;
        // Parameterized type replaces the original raw List + explicit cast.
        List<Datapoint> datapoints = getMetricStatisticsResult.getDatapoints();
        for (Datapoint dp : datapoints) {
            // NOTE(review): each iteration overwrites the previous value, so the
            // returned figure is the last iterated datapoint's average, not an
            // aggregate over all datapoints — confirm this is intended.
            avgCPUUtilization = dp.getAverage();
            log.info(instanceId + " instance's average CPU utilization : " + dp.getAverage());
        }
        return avgCPUUtilization;
    } catch (AmazonServiceException ase) {
        // Request reached EC2/CloudWatch but was rejected; report full detail.
        log.error("Caught an AmazonServiceException, which means the request was made "
                + "to Amazon EC2, but was rejected with an error response for some reason.");
        log.error("Error Message: " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code: " + ase.getErrorCode());
        log.error("Error Type: " + ase.getErrorType());
        log.error("Request ID: " + ase.getRequestId());
    }
    return 0;
}
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/**
 * Logs the details of an AmazonServiceException: the request reached Amazon S3
 * but was rejected with an error response.
 *
 * @param e the service exception to report
 */
private void printAmazonServiceException(AmazonServiceException e) {
    LOG.info("Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason.");
    LOG.info("Error Message: " + e.getMessage());
    LOG.info("HTTP Status Code: " + e.getStatusCode());
    LOG.info("AWS Error Code: " + e.getErrorCode());
    LOG.info("Error Type: " + e.getErrorType());
    LOG.info("Request ID: " + e.getRequestId());
    LOG.info("Class Name: " + e.getClass().getName());
}
From source file:org.apache.nifi.processors.aws.dynamodb.AbstractDynamoDBProcessor.java
License:Apache License
/**
 * Annotates each flow file with the details of an {@link AmazonServiceException}
 * so downstream processors can inspect the failure, and returns the annotated
 * files as the failed set.
 *
 * @param session the process session used to update flow file attributes
 * @param flowFiles the flow files affected by the failure
 * @param exception the service exception to record
 * @return the flow files with error attributes applied
 */
protected List<FlowFile> processServiceException(final ProcessSession session, List<FlowFile> flowFiles,
        AmazonServiceException exception) {
    List<FlowFile> failedFlowFiles = new ArrayList<>();
    for (FlowFile flowFile : flowFiles) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put(DYNAMODB_ERROR_EXCEPTION_MESSAGE, exception.getMessage());
        attributes.put(DYNAMODB_ERROR_CODE, exception.getErrorCode());
        attributes.put(DYNAMODB_ERROR_MESSAGE, exception.getErrorMessage());
        attributes.put(DYNAMODB_ERROR_TYPE, exception.getErrorType().name());
        attributes.put(DYNAMODB_ERROR_SERVICE, exception.getServiceName());
        attributes.put(DYNAMODB_ERROR_RETRYABLE, Boolean.toString(exception.isRetryable()));
        attributes.put(DYNAMODB_ERROR_REQUEST_ID, exception.getRequestId());
        attributes.put(DYNAMODB_ERROR_STATUS_CODE, Integer.toString(exception.getStatusCode()));
        // Redundant second puts of DYNAMODB_ERROR_EXCEPTION_MESSAGE and
        // DYNAMODB_ERROR_RETRYABLE were removed; they re-stored identical values.
        flowFile = session.putAllAttributes(flowFile, attributes);
        failedFlowFiles.add(flowFile);
    }
    return failedFlowFiles;
}
From source file:org.apache.nifi.processors.aws.lambda.PutLambda.java
License:Apache License
/**
 * Populate exception attributes in the flow file.
 *
 * @param session process session
 * @param flowFile the flow file
 * @param exception exception thrown during invocation
 * @return FlowFile the updated flow file
 */
private FlowFile populateExceptionAttributes(final ProcessSession session, FlowFile flowFile,
        final AmazonServiceException exception) {
    Map<String, String> attributes = new HashMap<>();
    attributes.put(AWS_LAMBDA_EXCEPTION_MESSAGE, exception.getErrorMessage());
    attributes.put(AWS_LAMBDA_EXCEPTION_ERROR_CODE, exception.getErrorCode());
    attributes.put(AWS_LAMBDA_EXCEPTION_REQUEST_ID, exception.getRequestId());
    attributes.put(AWS_LAMBDA_EXCEPTION_STATUS_CODE, Integer.toString(exception.getStatusCode()));
    // Only record a cause when one exists; braces added to the original's bare if.
    if (exception.getCause() != null) {
        attributes.put(AWS_LAMBDA_EXCEPTION_CAUSE, exception.getCause().getMessage());
    }
    attributes.put(AWS_LAMBDA_EXCEPTION_ERROR_TYPE, exception.getErrorType().toString());
    // A redundant trailing put of AWS_LAMBDA_EXCEPTION_MESSAGE with the identical
    // getErrorMessage() value was removed from the original.
    flowFile = session.putAllAttributes(flowFile, attributes);
    return flowFile;
}
From source file:org.apache.s4.serializer.dynamodb.EventCountAndReportPE.java
License:Apache License
/**
 * Handles a TopicEvent: accumulates the event count and — inside a currently
 * disabled {@code if (false)} block — would, on the first insert, load data-fusion
 * properties, parse app/user ids out of this PE's id, query a local SQLite user
 * database, and write an event record plus data-quality metrics to DynamoDB.
 *
 * NOTE(review): everything inside the {@code if (false)} block is dead code as
 * written and never executes; the review notes below apply only if re-enabled.
 */
public void onEvent(TopicEvent event) {
    if (firstEvent) {
        logger.info("Handling new Event [{}]", getId());
        firstEvent = false;
        firstInsert = true;
    }
    count += event.getCount();
    // countUsedEvents++; // SB
    // logger.info("Used Data Events counter [{}]", countUsedEvents); // SB
    if (false) { // BEGINNING OF THE BLOCK!!!!!!!!!!!
        if (firstInsert) {
            firstInsert = false;
            try {
                // Data fusion config file:
                try {
                    // File fusionPropsFile = new File(System.getProperty("user.home") + "/DataFusion.properties");
                    File fusionPropsFile = new File("/home/ec2-user/DataFusion.properties");
                    if (!fusionPropsFile.exists()) {
                        // Fall back to the user's home directory when the EC2 path is absent.
                        fusionPropsFile = new File(System.getProperty("user.home") + "/DataFusion.properties");
                        if (!fusionPropsFile.exists()) {
                            logger.error(
                                    "Cannot find Data fusion properties file in this location :[{}]. Make sure it is available at this place and includes AWS credentials (accessKey, secretKey)",
                                    fusionPropsFile.getAbsolutePath());
                        }
                    }
                    fusionProperties.load(new FileInputStream(fusionPropsFile));
                    accuracy = Double.parseDouble(fusionProperties.getProperty("accuracy"));
                    confidence = Double.parseDouble(fusionProperties.getProperty("confidence"));
                } catch (Exception e) {
                    logger.error("Cannot find Data fusion config file", e);
                }
                // Create and configure DynamoDB client
                AWSCredentials credentials = new BasicAWSCredentials(awsProperties.getProperty("accessKey"),
                        awsProperties.getProperty("secretKey"));
                AmazonDynamoDBClient dynamoDBClient = new AmazonDynamoDBClient(credentials);
                logger.info("Create DynamoDB client");
                dynamoDBClient.setEndpoint("dynamodb.eu-west-1.amazonaws.com");
                logger.info("DynamoDB client credentials are accepted and endpoint selected");
                // try {
                // Extracted context, e.g query, activity
                String searchQueryAPI = "Test KnowledgeDiscovery API Query";
                String object = "Object detected";
                Map<String, AttributeValue> itemRT = new HashMap<String, AttributeValue>();
                Map<String, AttributeValue> itemDQ = new HashMap<String, AttributeValue>();
                // Tokenize the PE id: the last token becomes receivedMsgs, the token two
                // before last becomes receivedAppID, the one before last receivedUserID.
                Iterable<String> dataSplit = Splitter.on(' ').omitEmptyStrings().trimResults().split(getId());
                // List<String> dataList = Lists.newArrayList(Elements.getElements(dataSplit));
                // String receivedMsgs = dataList.get(dataList.size()-1);
                // countReceivedMsgs = Integer.parseInt(receivedMsgs);;
                int i = 0;
                for (String token : dataSplit) {
                    i++;
                    receivedMsgs = token;
                }
                int k = 0;
                for (String token : dataSplit) {
                    k++;
                    if (k == (i - 2)) {
                        receivedAppID = token;
                    } else if (k == (i - 1)) {
                        receivedUserID = token;
                    }
                }
                appID = Double.parseDouble(receivedAppID);
                userID = Double.parseDouble(receivedUserID);
                // Select per-application table names, SQLite file and message counters.
                // STUPID HARDCODE but fast for prototype, should change to class later :)
                if (appID == 0 && userID > 0) { // CV app and serialization table
                    rtEventsTableName = "TableEventVector_CV";
                    tableDataQuality = "EventVectorQuality_CV";
                    db_orig = db_base_dir + "/cv.db";
                    countReceivedMsgs_CV = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_CV;
                    countReceivedMsgsPrev_CV = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_CV++;
                    countReceivedMsgs = countReceivedMsgs_CV;
                    countUsedMsgs = countUsedMsgs_CV;
                } else if (appID == 1 && userID > 0) { // NLP
                    rtEventsTableName = "TableEventVector_NLP";
                    tableDataQuality = "EventVectorSetQuality_NLP";
                    db_orig = db_base_dir + "/nlp.db";
                    countReceivedMsgs_NLP = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_NLP;
                    countReceivedMsgsPrev_NLP = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_NLP++;
                    countReceivedMsgs = countReceivedMsgs_NLP;
                    countUsedMsgs = countUsedMsgs_NLP;
                } else if (appID == 2 && userID > 0) { // Audio
                    rtEventsTableName = "TableEventVector_Audio";
                    tableDataQuality = "EventVectorQuality_Audio";
                    db_orig = db_base_dir + "/audio.db";
                    countReceivedMsgs_Audio = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_Audio;
                    countReceivedMsgsPrev_Audio = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_Audio++;
                    countReceivedMsgs = countReceivedMsgs_Audio;
                    countUsedMsgs = countUsedMsgs_Audio;
                } else { // all others Events available in DB
                    rtEventsTableName = "TableEventVector";
                    tableDataQuality = "EventVectorQuality";
                    countReceivedMsgs = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev;
                    countReceivedMsgsPrev = Integer.parseInt(receivedMsgs);
                    countUsedMsgs++;
                }
                try {
                    // Users database connection
                    db_conn = DriverManager.getConnection("jdbc:sqlite:" + db_orig);
                    //Actual invocation of Users DB without "rating" field
                    db_stmt = db_conn.prepareStatement(
                            "SELECT id, title, country, name, surname FROM user WHERE appID = ? AND userID = ?");
                    // NOTE(review): parameters look swapped relative to the SQL — the first
                    // placeholder is appID but is bound with userID; confirm.
                    db_stmt.setDouble(1, userID);
                    db_stmt.setDouble(2, appID);
                    rs = db_stmt.executeQuery();
                    // Index updates/inserts
                    // NOTE(review): columns are read without first calling rs.next(); verify
                    // this works with the SQLite driver in use.
                    String ID = rs.getString(1);
                    String location = rs.getString(2);
                    String country = rs.getString(3);
                    String name = rs.getString(4);
                    String surname = rs.getString(5);
                    // resultSet adjustment according to the Accuracy and Confidence levels (1 / number of results and multiplied by 100%)
                    // NOTE(review): integer division — (1 / fetchSize) is 0 unless fetchSize
                    // is 1, and getFetchSize() is a driver hint, not a row count; confirm.
                    accuracyRT = (1 / rs.getFetchSize()) * 100;
                    confidence = sqrt(accuracyRT * accuracyRT + accuracy * accuracy);
                    // Collect to DynamoDB items (CandidateSet and CandidateSetQuality)
                    // NOTE(review): "name"/"surname" are populated from lat/lon, and "id"
                    // from placesID rather than the ID read above — looks like a copy/paste
                    // mismatch; confirm intended mapping.
                    itemRT.put("id", new AttributeValue().withS(placesID));
                    itemRT.put("country", new AttributeValue().withS(country));
                    itemRT.put("name", new AttributeValue().withS(String.valueOf(lat)));
                    itemRT.put("surname", new AttributeValue().withS(String.valueOf(lon)));
                    itemRT.put("query", new AttributeValue().withS(searchQueryAPI));
                    itemRT.put("rating", new AttributeValue().withN(String.valueOf(count)));
                    itemRT.put("title", new AttributeValue().withS(location));
                    itemRT.put("topic", new AttributeValue().withS(getId()));
                    itemRT.put("event", new AttributeValue().withS(activity));
                    itemRT.put("ts", new AttributeValue().withS(dateFormatter.format(new Date())));
                    itemDQ.put("TimeStamp", new AttributeValue().withS(dateFormatter.format(new Date())));
                    itemDQ.put("ReceivedMsgs", new AttributeValue().withN(String.valueOf(countReceivedMsgs)));
                    itemDQ.put("UsedMsgs", new AttributeValue().withN(String.valueOf(countUsedMsgs)));
                    // NOTE(review): several quality metrics below all reuse the event count
                    // as their value; confirm that is intentional.
                    itemDQ.put("Accuracy", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Timeliness", new AttributeValue().withS(dateFormatter.format(new Date())));
                    itemDQ.put("Completeness", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Consistency", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Confidence", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Privacy", new AttributeValue().withS("anonymised"));
                    PutItemRequest itemRequestRT = new PutItemRequest().withTableName(rtEventsTableName)
                            .withItem(itemRT);
                    PutItemRequest itemRequestDQ = new PutItemRequest().withTableName(tableDataQuality)
                            .withItem(itemDQ);
                    dynamoDBClient.putItem(itemRequestRT);
                    dynamoDBClient.putItem(itemRequestDQ);
                    itemRT.clear();
                    itemDQ.clear();
                    logger.info("TableEvent set size [{}], last known size [{}] ", countReceivedMsgs,
                            countReceivedMsgsPrev);
                    logger.info("Wrote EventVector to DynamoDB [{}] ", rtEventsTableName);
                    logger.info("Wrote EventVector Quality measurements to DynamoDB [{}] ", tableDataQuality);
                    // Closing second "try"
                } catch (Exception e) {
                    // NOTE(review): exception is silently swallowed here; the logging call
                    // below was commented out in the original.
                    // logger.error("Cannot close DB file", e);
                } finally {
                    // Close JDBC resources individually so one failure does not block the rest.
                    try {
                        rs.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close ResultSet", e);
                    }
                    try {
                        db_stmt.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close Statement", e);
                    }
                    try {
                        db_conn.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close DB file", e);
                    }
                }
                // Closing first "try"
            } catch (AmazonServiceException ase) {
                // Request reached AWS but was rejected; report full detail.
                logger.error(
                        "Caught an AmazonServiceException, which means your request made it to AWS, but was rejected with an error response for some reason.");
                logger.error("Error Message: " + ase.getMessage());
                logger.error("HTTP Status Code: " + ase.getStatusCode());
                logger.error("AWS Error Code: " + ase.getErrorCode());
                logger.error("Error Type: " + ase.getErrorType());
                logger.error("Request ID: " + ase.getRequestId());
            }
        } // end of if (count == 1)
    } // END OF THE BLOCK !!!!!!!!!!!!!!!
}