List of usage examples for the com.amazonaws.auth.DefaultAWSCredentialsProviderChain constructor
public DefaultAWSCredentialsProviderChain()
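The no-argument constructor builds the SDK's standard credential lookup chain, which checks (among other sources) environment variables, Java system properties, the shared credentials file, and container or instance profile metadata on AWS infrastructure. Before the examples below, a minimal sketch of typical usage; the S3 client and the us-east-1 region are illustrative assumptions, not taken from any example on this page.

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class DefaultChainExample {
    public static void main(String[] args) {
        // Resolves credentials from the standard chain: environment variables,
        // system properties, ~/.aws/credentials, then container/instance metadata.
        AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();

        // Client builders accept the provider directly; the region is a placeholder.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withRegion("us-east-1")
                .withCredentials(provider)
                .build();
        System.out.println("Resolved access key: " + provider.getCredentials().getAWSAccessKeyId());
    }
}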
From source file: com.remediatetheflag.global.utils.AWSHelper.java
License: Apache License
public Date getRunningECSTaskStartTime(String taskArn) {
    String regionFromArn = taskArn.split(":")[3];
    AmazonECS client = AmazonECSClientBuilder.standard().withRegion(regionFromArn)
            .withCredentials(new DefaultAWSCredentialsProviderChain()).build();
    DescribeTasksRequest request = new DescribeTasksRequest().withCluster(RTFConfig.getExercisesCluster())
            .withTasks(taskArn);
    try {
        DescribeTasksResult response = client.describeTasks(request);
        return response.getTasks().get(0).getCreatedAt();
    } catch (Exception e) {
        logger.error("Could not get creation time for task arn " + taskArn);
        return null;
    }
}
From source file: com.remediatetheflag.global.utils.AWSHelper.java
License: Apache License
public RTFInstanceReservation pollReservation(RTFInstanceReservation reservation) {
    if (null == reservation.getEcs()) {
        reservation.setError(true);
        reservation.setFulfilled(false);
        reservation.setWaitSeconds(0);
        return reservation;
    }
    AmazonECS client = AmazonECSClientBuilder.standard().withRegion(reservation.getEcs().getRegion())
            .withCredentials(new DefaultAWSCredentialsProviderChain()).build();
    DescribeTasksRequest request = new DescribeTasksRequest().withCluster(RTFConfig.getExercisesCluster())
            .withTasks(reservation.getEcs().getTaskArn());
    try {
        DescribeTasksResult response = client.describeTasks(request);
        if (response.getTasks().isEmpty()) {
            reservation.setError(true);
            reservation.setFulfilled(true);
            reservation.setWaitSeconds(0);
            return reservation;
        }
        Integer rdpPort = -1;
        Integer httpPort = -1;
        Task task = response.getTasks().get(0);
        if (task.getLastStatus().equalsIgnoreCase(Constants.AWS_ECS_STATUS_STOPPED)) {
            reservation.setError(true);
            reservation.setFulfilled(true);
            reservation.setWaitSeconds(0);
            return reservation;
        }
        List<NetworkBinding> nb = task.getContainers().get(0).getNetworkBindings();
        if (nb.size() > 2) {
            logger.warn("More than two port bindings, only RDP 3389 and HTTP 8080 will be mapped");
        }
        for (NetworkBinding b : nb) {
            if (b.getContainerPort().equals(3389)) {
                rdpPort = b.getHostPort();
            } else if (b.getContainerPort().equals(8080)) {
                httpPort = b.getHostPort();
            }
        }
        if (rdpPort == -1 || httpPort == -1) {
            reservation.setFulfilled(false);
            reservation.setError(false);
            reservation.setWaitSeconds(10);
            return reservation;
        }
        reservation.getEcs().setHttpPort(httpPort);
        reservation.getEcs().setRdpPort(rdpPort);
        String containerInstanceId = task.getContainerInstanceArn();
        DescribeContainerInstancesRequest containerRequest = new DescribeContainerInstancesRequest()
                .withCluster(RTFConfig.getExercisesCluster()).withContainerInstances(containerInstanceId);
        DescribeContainerInstancesResult containerResponse = client.describeContainerInstances(containerRequest);
        if (containerResponse.getContainerInstances().isEmpty()) {
            reservation.setFulfilled(false);
            reservation.setError(true);
            reservation.setWaitSeconds(0);
            return reservation;
        }
        String ec2InstanceId = containerResponse.getContainerInstances().get(0).getEc2InstanceId();
        AmazonEC2 ec2 = AmazonEC2ClientBuilder.standard().withRegion(reservation.getEcs().getRegion())
                .withCredentials(new DefaultAWSCredentialsProviderChain()).build();
        DescribeInstancesRequest instanceRequest = new DescribeInstancesRequest().withInstanceIds(ec2InstanceId);
        DescribeInstancesResult instanceInstances = ec2.describeInstances(instanceRequest);
        if (instanceInstances.getReservations().isEmpty()
                || instanceInstances.getReservations().get(0).getInstances().isEmpty()) {
            reservation.setFulfilled(false);
            reservation.setError(true);
            reservation.setWaitSeconds(0);
            return reservation;
        }
        String ipAddress = instanceInstances.getReservations().get(0).getInstances().get(0).getPrivateIpAddress();
        reservation.getEcs().setIpAddress(ipAddress);
        reservation.setFulfilled(true);
        reservation.setError(false);
        reservation.setWaitSeconds(0);
        return reservation;
    } catch (Exception e) {
        logger.error("Error pollReservation " + e.getMessage());
        reservation.setError(true);
        reservation.setFulfilled(false);
        reservation.setWaitSeconds(0);
        return reservation;
    }
}
From source file: com.remediatetheflag.global.utils.AWSHelper.java
License: Apache License
public void createExerciseLogGroup(Regions region, String exerciseLogGroup) {
    AWSLogs logs = AWSLogsClientBuilder.standard().withRegion(region)
            .withCredentials(new DefaultAWSCredentialsProviderChain()).build();
    try {
        CreateLogGroupRequest logGroupRequest = new CreateLogGroupRequest(exerciseLogGroup);
        logs.createLogGroup(logGroupRequest);
        logger.debug("Log group created");
    } catch (Exception e) {
        logger.debug("Log group already exists.");
    }
}
From source file: com.spotify.docker.BuildMojo.java
License: Apache License
private AciRepository getAciRepository() throws MojoExecutionException {
    if (Strings.isNullOrEmpty(this.repository)) {
        throw new MojoExecutionException("Must set repository to push to");
    }
    URI uri = URI.create(this.repository);
    String scheme = uri.getScheme();
    if (scheme.equals("s3")) {
        AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();
        AmazonS3Client amazonS3Client = new AmazonS3Client(provider);
        String bucketName = uri.getHost();
        String prefix = uri.getPath();
        return new S3AciRepository(amazonS3Client, bucketName, prefix);
    } else {
        throw new MojoExecutionException("Unknown repository scheme: " + scheme);
    }
}
From source file: com.streamsets.pipeline.lib.aws.AwsUtil.java
License: Apache License
public static AWSCredentialsProvider getCredentialsProvider(CredentialValue accessKeyId,
        CredentialValue secretKey) throws StageException {
    AWSCredentialsProvider credentialsProvider;
    if (accessKeyId != null && secretKey != null && !accessKeyId.get().isEmpty() && !secretKey.get().isEmpty()) {
        credentialsProvider = new AWSStaticCredentialsProvider(
                new BasicAWSCredentials(accessKeyId.get(), secretKey.get()));
    } else {
        credentialsProvider = new DefaultAWSCredentialsProviderChain();
    }
    return credentialsProvider;
}
From source file: com.streamsets.pipeline.stage.lib.aws.AWSUtil.java
License: Apache License
public static AWSCredentialsProvider getCredentialsProvider(AWSConfig config) {
    AWSCredentialsProvider credentialsProvider;
    if (!config.awsAccessKeyId.isEmpty() && !config.awsSecretAccessKey.isEmpty()) {
        credentialsProvider = new StaticCredentialsProvider(
                new BasicAWSCredentials(config.awsAccessKeyId, config.awsSecretAccessKey));
    } else {
        credentialsProvider = new DefaultAWSCredentialsProviderChain();
    }
    return credentialsProvider;
}
From source file: com.streamsets.pipeline.stage.origin.kinesis.KinesisSource.java
License: Apache License
@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();
    checkStreamExists(issues);

    if (issues.isEmpty()) {
        batchQueue = new LinkedTransferQueue<>();

        DataParserFactoryBuilder builder = new DataParserFactoryBuilder(getContext(),
                dataFormat.getParserFormat()).setMaxDataLen(50 * 1024); // Max Message for Kinesis is 50KiB

        switch (dataFormat) {
        case SDC_JSON:
            break;
        case JSON:
            builder.setMode(JsonMode.MULTIPLE_OBJECTS);
            break;
        }

        parserFactory = builder.build();

        executorService = Executors.newFixedThreadPool(1);

        IRecordProcessorFactory recordProcessorFactory = new StreamSetsRecordProcessorFactory(batchQueue);

        // Create the KCL worker with the StreamSets record processor factory
        KinesisClientLibConfiguration kclConfig = new KinesisClientLibConfiguration(applicationName, streamName,
                new DefaultAWSCredentialsProviderChain(), UUID.randomUUID().toString());
        kclConfig.withRegionName(region.getName()).withMaxRecords(maxBatchSize)
                .withIdleTimeBetweenReadsInMillis(idleTimeBetweenReads)
                .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON); // Configurable?

        worker = new Worker.Builder().recordProcessorFactory(recordProcessorFactory).config(kclConfig).build();

        executorService.execute(worker);
        LOG.info("Launched KCL Worker");
    }
    return issues;
}
From source file: com.thinkbiganalytics.jms.sqs.SqsConfig.java
License: Apache License
@Bean
public SQSConnectionFactory connectionFactory() {
    String regionName = env.getProperty("sqs.region.name");
    SQSConnectionFactory factory = SQSConnectionFactory.builder().withRegionName(regionName)
            .withEndpoint("sqs." + regionName + ".amazonaws.com")
            .withAWSCredentialsProvider(new DefaultAWSCredentialsProviderChain()).build();
    LOG.info("Setup Amazon SQS ConnectionFactory for " + regionName);
    return factory;
}
From source file: com.trulia.stail.Stail.java
License: Apache License
public static void main(String[] args) {
    final Stail stail = new Stail();
    JCommander jct = new JCommander(stail);
    jct.setProgramName("stail");
    try {
        jct.parse(args);

        AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
        if (stail.profile != null) {
            credentialsProvider = new ProfileCredentialsProvider(stail.profile);
        }
        if (stail.role != null) {
            credentialsProvider = new STSAssumeRoleSessionCredentialsProvider.Builder(stail.role, "stail")
                    .withStsClient(AWSSecurityTokenServiceClientBuilder.standard()
                            .withCredentials(credentialsProvider).build())
                    .build();
        }

        AmazonKinesis client = AmazonKinesisClientBuilder.standard().withRegion(stail.region)
                .withCredentials(credentialsProvider).build();

        // prepare the initial shard iterators at the LATEST position
        Map<Shard, String> shardIterators = getShardIterators(client, stail.stream, stail.start);

        IRecordProcessor processor = stail.json ? new JSONRecordProcessor() : new RawRecordProcessor();

        Map<Shard, RateLimiter> rateLimiters = new HashMap<>();
        shardIterators.keySet().forEach(shard -> rateLimiters.put(shard, RateLimiter.create(MAX_SHARD_THROUGHPUT)));

        long end = Strings.isNullOrEmpty(stail.duration) ? Long.MAX_VALUE
                : System.currentTimeMillis() + Duration.parse(stail.duration).toMillis();

        Set<String> reshardedShards = new HashSet<>();
        Map<Shard, String> sequenceNumbers = new HashMap<>();

        while (System.currentTimeMillis() < end) {
            if (!reshardedShards.isEmpty()) {
                // get the new list of shards
                List<Shard> shards = getShards(client, stail.stream);
                for (Shard shard : shards) {
                    if (!Strings.isNullOrEmpty(shard.getParentShardId())
                            && reshardedShards.contains(shard.getParentShardId())) {
                        // the old shard was split, so we need to consume this new shard from the beginning
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    } else if (!Strings.isNullOrEmpty(shard.getAdjacentParentShardId())
                            && reshardedShards.contains(shard.getAdjacentParentShardId())) {
                        // the old shards were merged into a new shard
                        shardIterators.put(shard, getOldestShardIterator(client, stail.stream, shard));
                    }
                }
                reshardedShards.clear();
            }

            for (Shard shard : Lists.newArrayList(shardIterators.keySet())) {
                String shardIterator = shardIterators.remove(shard);
                GetRecordsRequest getRecordsRequest = new GetRecordsRequest();
                getRecordsRequest.setShardIterator(shardIterator);
                getRecordsRequest.setLimit(BATCH_SIZE);
                try {
                    GetRecordsResult getRecordsResult = client.getRecords(getRecordsRequest);
                    List<Record> records = getRecordsResult.getRecords();
                    processor.processRecords(records, null);
                    shardIterator = getRecordsResult.getNextShardIterator();
                    if (records.size() <= 0) {
                        // nothing on the stream yet, so lets wait a bit to see if something appears
                        TimeUnit.SECONDS.sleep(1);
                    } else {
                        int bytesRead = records.stream().map(record -> record.getData().position())
                                .reduce((_1, _2) -> _1 + _2).get();
                        sequenceNumbers.put(shard, records.get(records.size() - 1).getSequenceNumber());
                        // optionally sleep if we have hit the limit for this shard
                        rateLimiters.get(shard).acquire(bytesRead);
                    }
                    if (!Strings.isNullOrEmpty(shardIterator)) {
                        shardIterators.put(shard, shardIterator);
                    } else {
                        reshardedShards.add(shard.getShardId());
                    }
                } catch (ProvisionedThroughputExceededException e) {
                    logger.warn("tripped the max throughput. Backing off: {}", e.getMessage());
                    TimeUnit.SECONDS.sleep(6); // we tripped the max throughput. Back off
                    // add the original iterator back into the map so we can try it again
                    shardIterators.put(shard, shardIterator);
                } catch (ExpiredIteratorException e) {
                    logger.debug("Iterator expired", e);
                    String sequenceNumber = sequenceNumbers.get(shard);
                    if (sequenceNumber == null) {
                        logger.warn("No previously known sequence number for {}. Moving to LATEST",
                                shard.getShardId());
                        shardIterators.put(shard, getShardIterator(client, stail.stream, shard, null));
                    } else {
                        shardIterators.put(shard,
                                getShardIteratorAtSequenceNumber(client, stail.stream, shard, sequenceNumber));
                    }
                }
            }
        }
    } catch (ParameterException e) {
        jct.usage();
        System.exit(1);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        System.exit(2);
    }
}
From source file: com.yrashk.etcetera.S3ConfigBackend.java
License: Mozilla Public License
public S3ConfigBackend(String bucket, String endpoint, String name, int order) {
    this.bucket = bucket;
    this.endpoint = endpoint;
    this.name = name;
    this.order = order;
    client = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());
    if (endpoint != null) {
        client.setEndpoint(endpoint);
    }
}