Example usage for com.amazonaws.auth BasicAWSCredentials BasicAWSCredentials

Introduction

On this page you can find example usage of the BasicAWSCredentials(String accessKey, String secretKey) constructor from com.amazonaws.auth.

Prototype

public BasicAWSCredentials(String accessKey, String secretKey) 

Document

Constructs a new BasicAWSCredentials object, with the specified AWS access key and AWS secret key.
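
Before the project snippets below, here is a minimal, self-contained sketch of the constructor in use. It is not taken from any of the listed sources, the key strings are placeholders, and it assumes an AWS SDK for Java 1.11.x release that ships the client builders and AWSStaticCredentialsProvider.

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class BasicAWSCredentialsExample {
    public static void main(String[] args) {
        // Placeholder keys; real code should load these from configuration or the environment,
        // or skip BasicAWSCredentials entirely and rely on the default provider chain.
        BasicAWSCredentials credentials = new BasicAWSCredentials("ACCESS_KEY_ID", "SECRET_ACCESS_KEY");

        // Wrap the fixed key pair in a provider and hand it to a client builder.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .withRegion("us-east-1")
                .build();

        s3.listBuckets().forEach(bucket -> System.out.println(bucket.getName()));
    }
}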

Usage

From source file: com.pinterest.secor.uploader.S3UploadManager.java

License: Apache License

public S3UploadManager(SecorConfig config) {
    super(config);

    final String accessKey = mConfig.getAwsAccessKey();
    final String secretKey = mConfig.getAwsSecretKey();
    final String endpoint = mConfig.getAwsEndpoint();
    final String region = mConfig.getAwsRegion();
    final String awsRole = mConfig.getAwsRole();

    s3Path = mConfig.getS3Path();

    AmazonS3 client;
    AWSCredentialsProvider provider;

    ClientConfiguration clientConfiguration = new ClientConfiguration();
    boolean isHttpProxyEnabled = mConfig.getAwsProxyEnabled();

    //proxy settings
    if (isHttpProxyEnabled) {
        LOG.info("Http Proxy Enabled for S3UploadManager");
        String httpProxyHost = mConfig.getAwsProxyHttpHost();
        int httpProxyPort = mConfig.getAwsProxyHttpPort();
        clientConfiguration.setProxyHost(httpProxyHost);
        clientConfiguration.setProxyPort(httpProxyPort);
    }

    if (accessKey.isEmpty() || secretKey.isEmpty()) {
        provider = new DefaultAWSCredentialsProviderChain();
    } else {
        provider = new AWSCredentialsProvider() {
            public AWSCredentials getCredentials() {
                return new BasicAWSCredentials(accessKey, secretKey);
            }

            public void refresh() {
            }
        };
    }

    if (!awsRole.isEmpty()) {
        provider = new STSAssumeRoleSessionCredentialsProvider(provider, awsRole, "secor");
    }

    client = new AmazonS3Client(provider, clientConfiguration);

    if (!endpoint.isEmpty()) {
        client.setEndpoint(endpoint);
    } else if (!region.isEmpty()) {
        client.setRegion(Region.getRegion(Regions.fromName(region)));
    }

    mManager = new TransferManager(client);
}
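
On SDK releases that include AWSStaticCredentialsProvider (1.11.x and later), the anonymous AWSCredentialsProvider above can be collapsed into a one-liner. A sketch, reusing the same accessKey and secretKey variables:

// Equivalent to the anonymous provider above on SDKs that ship AWSStaticCredentialsProvider.
provider = new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));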

From source file: com.pinterest.teletraan.config.AWSFactory.java

License: Apache License

public AwsAutoScaleGroupManager buildAwsAutoScalingManager() {
    AmazonAutoScalingClient aasClient;
    if (StringUtils.isNotEmpty(id) && StringUtils.isNotEmpty(key)) {
        AWSCredentials myCredentials = new BasicAWSCredentials(id, key);
        aasClient = new AmazonAutoScalingClient(myCredentials);
    } else {
        LOG.info(
                "AWS credential is missing for creating auto scaling client. Assuming to use IAM role for authentication.");
        aasClient = new AmazonAutoScalingClient();
    }
    return new AwsAutoScaleGroupManager(sns_arn, role_arn, aasClient);
}

From source file: com.pinterest.teletraan.config.AWSFactory.java

License: Apache License

public AwsAlarmManager buildAwsAlarmManager() {
    AmazonCloudWatchClient cloudWatcherClient;
    if (StringUtils.isNotEmpty(id) && StringUtils.isNotEmpty(key)) {
        AWSCredentials myCredentials = new BasicAWSCredentials(id, key);
        cloudWatcherClient = new AmazonCloudWatchClient(myCredentials);
    } else {
        LOG.info(
                "AWS credential is missing for creating cloudwatch client. Assuming to use IAM role for authentication.");
        cloudWatcherClient = new AmazonCloudWatchClient();
    }
    return new AwsAlarmManager(cloudWatcherClient);
}

From source file: com.pinterest.teletraan.config.AWSFactory.java

License: Apache License

public AmazonEC2Client buildEC2Client() {
    if (StringUtils.isNotEmpty(id) && StringUtils.isNotEmpty(key)) {
        AWSCredentials myCredentials = new BasicAWSCredentials(id, key);
        return new AmazonEC2Client(myCredentials);
    } else {
        LOG.info(
                "AWS credential is missing for creating ec2 client. Assuming to use IAM role for authentication.");
        return new AmazonEC2Client();
    }
}
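
The three AWSFactory builders above repeat the same credentials branch. A sketch of a helper they could share, with a hypothetical method name, assuming the same id and key fields and an SDK that provides AWSStaticCredentialsProvider:

private AWSCredentialsProvider credentialsProvider() {
    if (StringUtils.isNotEmpty(id) && StringUtils.isNotEmpty(key)) {
        // Explicit keys supplied through configuration.
        return new AWSStaticCredentialsProvider(new BasicAWSCredentials(id, key));
    }
    // Fall back to the default chain, which includes instance-profile (IAM role) credentials.
    return new DefaultAWSCredentialsProviderChain();
}

Each SDK client also has a constructor that accepts an AWSCredentialsProvider, so the builders could pass the result straight through, e.g. new AmazonEC2Client(credentialsProvider()).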

From source file: com.pinterest.terrapin.hadoop.S3Uploader.java

License: Apache License

@Override
List<Pair<Path, Long>> getFileList() {
    return TerrapinUtil.getS3FileList(
            new BasicAWSCredentials(conf.get("fs.s3n.awsAccessKeyId"), conf.get("fs.s3n.awsSecretAccessKey")),
            s3Bucket, s3KeyPrefix);
}

From source file: com.plumbee.flume.source.sqs.SQSSource.java

License: Apache License

@Override
public void configure(Context context) {

    // Mandatory configuration parameters.
    queueURL = context.getString(ConfigurationConstants.CONFIG_QUEUE_URL);
    Preconditions.checkArgument(StringUtils.isNotBlank(queueURL), ErrorMessages.MISSING_MANDATORY_PARAMETER,
            ConfigurationConstants.CONFIG_QUEUE_URL);

    // Optional configuration parameters.
    queueRecvBatchSize = context.getInteger(ConfigurationConstants.CONFIG_RECV_BATCH_SIZE,
            ConfigurationConstants.DEFAULT_RECV_BATCH_SIZE);
    Preconditions.checkArgument(queueRecvBatchSize > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_RECV_BATCH_SIZE);

    queueDeleteBatchSize = context.getInteger(ConfigurationConstants.CONFIG_DELETE_BATCH_SIZE,
            ConfigurationConstants.DEFAULT_DELETE_BATCH_SIZE);
    Preconditions.checkArgument(queueDeleteBatchSize > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_DELETE_BATCH_SIZE);

    queueRecvPollingTimeout = context.getInteger(ConfigurationConstants.CONFIG_RECV_TIMEOUT,
            ConfigurationConstants.DEFAULT_RECV_TIMEOUT);
    Preconditions.checkArgument(queueRecvPollingTimeout > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_RECV_TIMEOUT);

    queueRecvVisabilityTimeout = context.getInteger(ConfigurationConstants.CONFIG_RECV_VISTIMEOUT,
            ConfigurationConstants.DEFAULT_RECV_VISTIMEOUT);
    Preconditions.checkArgument(queueRecvVisabilityTimeout > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_RECV_VISTIMEOUT);

    batchSize = context.getInteger(ConfigurationConstants.CONFIG_BATCH_SIZE,
            ConfigurationConstants.DEFAULT_BATCH_SIZE);
    Preconditions.checkArgument(batchSize > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_BATCH_SIZE);

    nbThreads = context.getInteger(ConfigurationConstants.CONFIG_NB_CONSUMER_THREADS,
            ConfigurationConstants.DEFAULT_NB_CONSUMER_THREADS);
    Preconditions.checkArgument(nbThreads > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_NB_CONSUMER_THREADS);
    Preconditions.checkArgument(nbThreads <= ClientConfiguration.DEFAULT_MAX_CONNECTIONS,
            "%s cannot exceed %s " + "(Default Amazon client connection pool size)",
            ConfigurationConstants.CONFIG_NB_CONSUMER_THREADS, ClientConfiguration.DEFAULT_MAX_CONNECTIONS);

    // Don't let the number of messages to be polled from SQS using one
    // call exceed the transaction batchSize for the downstream channel.
    Preconditions.checkArgument(queueRecvBatchSize <= batchSize, "%s must be smaller than or equal to the %s",
            ConfigurationConstants.CONFIG_RECV_BATCH_SIZE, ConfigurationConstants.CONFIG_BATCH_SIZE);

    flushInterval = context.getLong(ConfigurationConstants.CONFIG_FLUSH_INTERVAL,
            ConfigurationConstants.DEFAULT_FLUSH_INTERVAL);
    Preconditions.checkArgument(flushInterval > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_FLUSH_INTERVAL);
    flushInterval = TimeUnit.SECONDS.toMillis(flushInterval);

    // Runner backoff configuration.
    maxBackOffSleep = context.getLong(ConfigurationConstants.CONFIG_MAX_BACKOFF_SLEEP,
            ConfigurationConstants.DEFAULT_MAX_BACKOFF_SLEEP);
    Preconditions.checkArgument(maxBackOffSleep > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_MAX_BACKOFF_SLEEP);

    backOffSleepIncrement = context.getLong(ConfigurationConstants.CONFIG_BACKOFF_SLEEP_INCREMENT,
            ConfigurationConstants.DEFAULT_BACKOFF_SLEEP_INCREMENT);
    Preconditions.checkArgument(backOffSleepIncrement > 0, ErrorMessages.NEGATIVE_PARAMETER_VALUE,
            ConfigurationConstants.CONFIG_BACKOFF_SLEEP_INCREMENT);

    Preconditions.checkArgument(flushInterval > maxBackOffSleep, "%s too high, %s cannot be respected",
            ConfigurationConstants.CONFIG_MAX_BACKOFF_SLEEP, ConfigurationConstants.CONFIG_FLUSH_INTERVAL);

    // Log a warning if the flushInterval plus maxBackOffSleep exceed
    // the queueRecvVisabilityTimeout of messages. On queues with
    // low levels of throughput this can cause message duplication!
    if ((flushInterval + maxBackOffSleep) > TimeUnit.SECONDS.toMillis(queueRecvVisabilityTimeout)) {
        LOGGER.warn("{} too low, potential for message duplication",
                ConfigurationConstants.CONFIG_FLUSH_INTERVAL);
    }

    // The following configuration options allows credentials to be
    // provided via the configuration context.
    String awsAccessKeyId = context.getString(ConfigurationConstants.CONFIG_AWS_ACCESS_KEY_ID);
    String awsSecretKey = context.getString(ConfigurationConstants.CONFIG_AWS_SECRET_KEY);

    if (StringUtils.isNotBlank(awsAccessKeyId) && StringUtils.isNotBlank(awsSecretKey)) {
        if (client == null) {
            // Create the AmazonSQSClient using BasicAWSCredentials
            client = new AmazonSQSClient(new BasicAWSCredentials(awsAccessKeyId, awsSecretKey),
                    new ClientConfiguration().withMaxConnections(nbThreads));
        } else {
            LOGGER.warn("Cannot set AWS credentials for AmazonSQSClient, " + "client already initialized");
        }
    }

    // Default to the DefaultAWSCredentialsProviderChain.
    if (client == null) {
        client = new AmazonSQSClient(new ClientConfiguration().withMaxConnections(nbThreads));
    }
}
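
The fallback constructor above, AmazonSQSClient(ClientConfiguration), resolves credentials through DefaultAWSCredentialsProviderChain (environment variables, system properties, profile files, then instance-profile credentials). A sketch that spells the same fallback out explicitly, reusing nbThreads:

// Same behaviour as the fallback branch above, with the provider chain made explicit.
client = new AmazonSQSClient(new DefaultAWSCredentialsProviderChain(),
        new ClientConfiguration().withMaxConnections(nbThreads));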

From source file: com.pocketdealhunter.HotDealsMessagesUtil.java

License: Open Source License

public HotDealsMessagesUtil() {
    AWSCredentials credentials = new BasicAWSCredentials(Constants.ACCESS_KEY_ID, Constants.SECRET_KEY);
    Region region = Region.getRegion(Regions.US_WEST_2);

    this.snsClient = new AmazonSNSClient(credentials);
    this.snsClient.setRegion(region);

    this.sqsClient = new AmazonSQSClient(credentials);
    this.sqsClient.setRegion(region);

    // Find the Topic for this App or create one.
    this.topicARN = this.findTopicArn();
    if (topicARN == null) {
        this.topicARN = this.createTopic();
    }

    // Find the Queue for this App or create one.
    this.queueUrl = this.findQueueUrl();
    if (this.queueUrl == null) {
        this.queueUrl = this.createQueue();

        // Allow time for the queue to be created.
        try {
            Thread.sleep(4 * 1000);
        } catch (Exception exception) {
            // Best-effort wait; an interruption here is safe to ignore.
        }

        this.subscribeQueue();
    }
}

From source file: com.projectlaver.batch.FacebookListingPostingItemProcessor.java

License: Open Source License

void copyS3ObjectToTempFile(File tempFile, String filename) throws IOException, FileNotFoundException {
    AWSCredentials myCredentials = new BasicAWSCredentials(this.s3accessKey, this.s3secretKey);
    AmazonS3 s3 = new AmazonS3Client(myCredentials);
    S3Object object = s3.getObject(this.s3publicBucketName, filename);

    IOUtils.copy(object.getObjectContent(), new FileOutputStream(tempFile));
}
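
One caveat with the snippet above: neither the S3ObjectInputStream nor the FileOutputStream is closed, which can leak the underlying HTTP connection. A sketch of the same copy using try-with-resources, assuming the same fields and a 1.x SDK in which S3Object implements Closeable:

void copyS3ObjectToTempFile(File tempFile, String filename) throws IOException {
    AWSCredentials myCredentials = new BasicAWSCredentials(this.s3accessKey, this.s3secretKey);
    AmazonS3 s3 = new AmazonS3Client(myCredentials);
    // Closing the S3Object closes its content stream and releases the HTTP connection.
    try (S3Object object = s3.getObject(this.s3publicBucketName, filename);
            FileOutputStream out = new FileOutputStream(tempFile)) {
        IOUtils.copy(object.getObjectContent(), out);
    }
}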

From source file: com.projectlaver.service.ListingService.java

License: Open Source License

/**
 * Public methods
 */

@Transactional(readOnly = false)
public Listing create(Listing listing) throws Exception {

    // set the expiration for one year from now if unset
    if (listing.getExpires() == null) {
        listing.setExpires(this.addDays(new Date(), 365));
    }

    // merge the user (reattach to DB)
    User user = listing.getSeller();
    User mergedUser = this.em.merge(user);
    listing.setSeller(mergedUser);

    if (listing.getImageAsFile() != null) {
        // upload the image preview to the public S3 bucket
        AWSCredentials myCredentials = new BasicAWSCredentials(this.s3accessKey, this.s3secretKey);
        TransferManager tx = new TransferManager(myCredentials);
        Upload myUpload = tx.upload(this.s3publicBucketName, listing.getImageFilename(),
                listing.getImageAsFile());
        myUpload.waitForCompletion();
    }

    if (listing.getContentFiles() != null && listing.getContentFiles().size() > 0) {

        Set<ContentFile> contentFiles = listing.getContentFiles();

        AWSCredentials myCredentials = new BasicAWSCredentials(this.s3accessKey, this.s3secretKey);
        TransferManager tx = new TransferManager(myCredentials);

        for (ContentFile file : contentFiles) {
            // upload the digital content to the private S3 bucket
            Upload myUpload = tx.upload(this.s3privateBucketName, file.getContentFilename(),
                    file.getDigitalContentAsFile());
            myUpload.waitForCompletion();
        }
    }

    return this.listingRepository.save(listing);
}
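
A follow-up note on create above: each call constructs one or two TransferManager instances and never shuts them down, so their internal thread pools linger. Reusing a single TransferManager field is cheaper; failing that, a sketch of one upload with an explicit shutdown, under the same field assumptions:

TransferManager tx = new TransferManager(new BasicAWSCredentials(this.s3accessKey, this.s3secretKey));
try {
    Upload upload = tx.upload(this.s3publicBucketName, listing.getImageFilename(),
            listing.getImageAsFile());
    upload.waitForCompletion();
} finally {
    // Pass false to keep the wrapped AmazonS3 client usable if it is shared elsewhere.
    tx.shutdownNow(false);
}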

From source file: com.projectlaver.service.ListingService.java

License: Open Source License

void streamAwsContentToResponse(String contentFilename, String bucketName, OutputStream outputStream)
        throws IOException {
    AWSCredentials myCredentials = new BasicAWSCredentials(this.s3accessKey, this.s3secretKey);
    AmazonS3 s3 = new AmazonS3Client(myCredentials);
    S3Object object = s3.getObject(bucketName, contentFilename);

    FileCopyUtils.copy(object.getObjectContent(), outputStream);
}