List of usage examples for com.amazonaws.regions.Region.getName()
public String getName()
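For orientation, here is a minimal sketch of the call, assuming the AWS SDK for Java v1 is on the classpath; the class name RegionNameExample and the choice of US_EAST_1 are illustrative only. Region.getRegion(Regions) resolves a Region from the SDK's Regions enum, and getName() returns its canonical name such as "us-east-1", which the examples below pass to client builders and ARN templates.

import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;

public class RegionNameExample {
    public static void main(String[] args) {
        // Resolve a Region from the Regions enum, then read its canonical name, e.g. "us-east-1"
        Region region = Region.getRegion(Regions.US_EAST_1);
        System.out.println(region.getName());
    }
}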
From source file:com.remediatetheflag.global.utils.AWSHelper.java
License:Apache License
public Integer getClusterContainerInstances(Region region) {
    AmazonECS client = AmazonECSClientBuilder.standard().withRegion(region.getName())
            .withCredentials(new DefaultAWSCredentialsProviderChain()).build();
    DescribeClustersRequest request = new DescribeClustersRequest();
    request.withClusters(RTFConfig.getExercisesCluster());
    logger.debug("Requesting number of cluster running instances for region " + region.getName()
            + " cluster " + RTFConfig.getExercisesCluster());
    try {
        DescribeClustersResult response = client.describeClusters(request);
        return response.getClusters().get(0).getRegisteredContainerInstancesCount();
    } catch (Exception e) {
        logger.error("Error getClusterContainerInstances for region " + region.getName()
                + " due to:\n" + e.getMessage());
        return 0;
    }
}
From source file:com.remediatetheflag.global.utils.AWSHelper.java
License:Apache License
public Double getClusterMemoryReservation(Region region) {
    AmazonCloudWatch client = AmazonCloudWatchClientBuilder.standard().withRegion(region.getName())
            .withCredentials(new DefaultAWSCredentialsProviderChain()).build();
    Dimension dimension = new Dimension();
    dimension.setName("ClusterName");
    dimension.setValue(RTFConfig.getExercisesCluster());
    Date date = new Date();
    Calendar cal = Calendar.getInstance();
    cal.setTime(date);
    cal.add(Calendar.MINUTE, -5);
    GetMetricStatisticsRequest request = new GetMetricStatisticsRequest().withMetricName("MemoryReservation")
            .withDimensions(dimension).withPeriod(60).withStartTime(cal.getTime()).withEndTime(date)
            .withStatistics("Average").withNamespace("AWS/ECS");
    try {
        logger.debug("Requesting memory reservation for region " + region.getName() + " cluster "
                + RTFConfig.getExercisesCluster());
        GetMetricStatisticsResult response = client.getMetricStatistics(request);
        if (response.getDatapoints().isEmpty())
            return 0.0;
        return response.getDatapoints().get(0).getAverage();
    } catch (Exception e) {
        logger.error("Error getClusterMemoryReservation for region " + region.getName()
                + " due to:\n" + e.getMessage());
        return 0.0;
    }
}
From source file:com.remediatetheflag.global.utils.AWSHelper.java
License:Apache License
public List<String> getRunningECSTasks(List<Region> activeRegions) {
    LinkedList<String> list = new LinkedList<String>();
    for (Region region : activeRegions) {
        logger.debug("Enumerating running tasks on cluster " + RTFConfig.getExercisesCluster()
                + " for region " + region.getName());
        AmazonECS client = AmazonECSClientBuilder.standard().withRegion(Regions.fromName(region.getName()))
                .withCredentials(new DefaultAWSCredentialsProviderChain()).build();
        ListTasksRequest request = new ListTasksRequest().withCluster(RTFConfig.getExercisesCluster());
        try {
            ListTasksResult response = client.listTasks(request);
            list.addAll(response.getTaskArns());
        } catch (Exception e) {
            logger.error("Error getRunningECSTasks for region " + region + " due to:\n" + e.getMessage());
        }
    }
    return list;
}
From source file:eu.roschi.obdkinesis.HttpReferrerCounterApplication.java
License:Open Source License
/**
 * Start the Kinesis Client application.
 *
 * @param args Optional 2 arguments: the range (in milliseconds) over which counts are computed, and the
 *             interval (in milliseconds) between computations. Default values are used when the arguments
 *             are not supplied. The application name, stream name, DynamoDB table name, and AWS region are
 *             hard-coded below.
 */
public static void main(String[] args) throws UnknownHostException {
    if (args.length != 2) {
        System.err.println("Using default values");
        COMPUTE_RANGE_FOR_COUNTS_IN_MILLIS = 30000;
        COMPUTE_INTERVAL_IN_MILLIS = 2000;
    } else {
        COMPUTE_RANGE_FOR_COUNTS_IN_MILLIS = Integer.parseInt(args[0]);
        COMPUTE_INTERVAL_IN_MILLIS = Integer.parseInt(args[1]);
        System.err.println(
                "Using values " + Integer.parseInt(args[0]) + " width " + Integer.parseInt(args[1]) + " rate");
    }
    String applicationName = "obd_kinesis";
    String streamName = "obd_input_stream";
    String countsTableName = "obd_kinesis_count";
    Region region = SampleUtils.parseRegion("us-west-2");

    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    ClientConfiguration clientConfig = SampleUtils.configureUserAgentForSample(new ClientConfiguration());
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, clientConfig);
    kinesis.setRegion(region);
    AmazonDynamoDB dynamoDB = new AmazonDynamoDBClient(credentialsProvider, clientConfig);
    dynamoDB.setRegion(region);

    // Creates a stream to write to, if it doesn't exist
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStreamIfNotExists(streamName, 2);
    LOG.info(String.format("%s stream is ready for use", streamName));

    DynamoDBUtils dynamoDBUtils = new DynamoDBUtils(dynamoDB);
    dynamoDBUtils.createCountTableIfNotExists(countsTableName);
    LOG.info(String.format("%s DynamoDB table is ready for use", countsTableName));

    String workerId = String.valueOf(UUID.randomUUID());
    LOG.info(String.format("Using working id: %s", workerId));
    KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
            credentialsProvider, workerId);
    kclConfig.withCommonClientConfig(clientConfig);
    kclConfig.withRegionName(region.getName());
    kclConfig.withInitialPositionInStream(InitialPositionInStream.LATEST);

    // Persist counts to DynamoDB
    DynamoDBCountPersister persister = new DynamoDBCountPersister(
            dynamoDBUtils.createMapperForTable(countsTableName));

    IRecordProcessorFactory recordProcessor = new CountingRecordProcessorFactory<HttpReferrerPair>(
            HttpReferrerPair.class, persister, COMPUTE_RANGE_FOR_COUNTS_IN_MILLIS, COMPUTE_INTERVAL_IN_MILLIS);

    Worker worker = new Worker(recordProcessor, kclConfig);

    int exitCode = 0;
    try {
        worker.run();
    } catch (Throwable t) {
        LOG.error("Caught throwable while processing data.", t);
        exitCode = 1;
    }
    System.exit(exitCode);
}
From source file:io.macgyver.plugin.cloud.aws.AWSServiceClientImpl.java
License:Apache License
@Override
public void scanRegion(Region region) {
    scanRegion(Regions.fromName(region.getName()));
}
From source file:io.macgyver.plugin.cloud.aws.scanner.ASGScanner.java
License:Apache License
@Override
public void scan(Region region) {
    GraphNodeGarbageCollector gc = newGarbageCollector().label("AwsAsg").region(region);

    forEachAsg(region, asg -> {
        ObjectNode n = convertAwsObject(asg, region);
        String asgArn = n.path("aws_arn").asText();
        String cypher = "merge (x:AwsAsg {aws_arn:{aws_arn}}) set x+={props}, x.updateTs=timestamp() return x";
        Preconditions.checkNotNull(getNeoRxClient());
        getNeoRxClient().execCypher(cypher, "aws_arn", asgArn, "props", n).forEach(gc.MERGE_ACTION);
        mapAsgRelationships(asg, asgArn, region.getName());
    });

    if (targetAutoScalingGroupNames == null) {
        // only invoke if scanned all
        gc.invoke();
    }
}
From source file:io.macgyver.plugin.cloud.aws.scanner.AWSScannerGroup.java
License:Apache License
@Override
public void scan(Region region) {
    logger.info("scanning account:{} region:{}", getAWSServiceClient().getAccountId(), region.getName());
    scannerList.forEach(it -> {
        logger.debug("{} scanning region {}", it, region);
        try {
            it.scan(region);
        } catch (RuntimeException e) {
            logger.error("problem scanning region " + region, e);
        }
    });
}
From source file:io.macgyver.plugin.cloud.aws.scanner.AWSServiceScanner.java
License:Apache License
public ObjectNode convertAwsObject(Object x, Region region) {
    ObjectNode n = mapper.valueToTree(x);
    n.put("region", region.getName());
    n.put("account", getAccountId());
    n = flatten(n);
    return n;
}
From source file:io.macgyver.plugin.cloud.aws.scanner.EC2InstanceScanner.java
License:Apache License
@Override
public void scan(Region region) {
    GraphNodeGarbageCollector gc = new GraphNodeGarbageCollector().neo4j(getNeoRxClient())
            .account(getAccountId()).label("AwsEc2Instance").region(region.getName());

    forEachInstance(region, instance -> {
        try {
            if (instance.getState().getName().equals("terminated")) {
                // instance is terminated
                // we may want to take the opportunity to delete it right here
            } else {
                JsonNode n = convertAwsObject(instance, region);
                NeoRxClient neoRx = getNeoRxClient();

                String subnetId = n.path("aws_subnetId").asText(null);
                String instanceArn = n.path("aws_arn").asText(null);
                String account = n.path("aws_account").asText(null);
                String imageId = n.path("aws_imageId").asText(null);

                Preconditions.checkNotNull(neoRx);
                Preconditions.checkState(!Strings.isNullOrEmpty(instanceArn), "aws_arn must not be null");
                Preconditions.checkState(!Strings.isNullOrEmpty(account), "aws_account must not be null");

                String createInstanceCypher = "merge (x:AwsEc2Instance {aws_arn:{instanceArn}}) set x+={props}, x.updateTs=timestamp() return x";
                neoRx.execCypher(createInstanceCypher, "instanceArn", instanceArn, "props", n)
                        .forEach(gc.MERGE_ACTION);

                if (!Strings.isNullOrEmpty(imageId)) {
                    String amiArn = String.format("arn:aws:ec2:%s::image/%s", region.getName(), imageId);
                    String mapToImageCypher = "match (x:AwsAmi {aws_arn:{amiArn}}), "
                            + "(y:AwsEc2Instance {aws_arn:{instanceArn}}) "
                            + "merge (y)-[r:USES]-(x) set r.updateTs=timestamp()";
                    neoRx.execCypher(mapToImageCypher, "amiArn", amiArn, "instanceArn", instanceArn);
                }
                if (!Strings.isNullOrEmpty(subnetId)) {
                    String subnetArn = String.format("arn:aws:ec2:%s:%s:subnet/%s", region.getName(), account,
                            subnetId);
                    String mapToSubnetCypher = "match (x:AwsSubnet {aws_arn:{subnetArn}}), "
                            + "(y:AwsEc2Instance {aws_arn:{instanceArn}}) "
                            + "merge (y)-[r:RESIDES_IN]->(x) set r.updateTs=timestamp()";
                    neoRx.execCypher(mapToSubnetCypher, "subnetArn", subnetArn, "instanceArn", instanceArn);
                }
            }
        } catch (RuntimeException e) {
            logger.warn("problem scanning EC2 instance", e);
        }
    });
    gc.invoke();
}
From source file:io.macgyver.plugin.cloud.aws.scanner.ELBScanner.java
License:Apache License
@Override
public void scan(Region region) {
    GraphNodeGarbageCollector gc = newGarbageCollector().label("AwsElb").region(region);

    forEachElb(region, elb -> {
        try {
            ObjectNode n = convertAwsObject(elb, region);
            String elbArn = n.path("aws_arn").asText();

            String cypher = "merge (x:AwsElb {aws_arn:{aws_arn}}) set x+={props} set x.updateTs=timestamp() return x";
            Preconditions.checkNotNull(neoRx);
            neoRx.execCypher(cypher, "aws_arn", elbArn, "props", n).forEach(gc.MERGE_ACTION);
            mapElbRelationships(elb, elbArn, region.getName());
        } catch (RuntimeException e) {
            logger.warn("problem scanning ELBs", e);
        }
    });

    if (targetLoadBalancerNames == null) {
        // gc only if we scan all load balancers
        gc.invoke();
    }
}