Example usage for com.amazonaws.auth DefaultAWSCredentialsProviderChain DefaultAWSCredentialsProviderChain

List of usage examples for com.amazonaws.auth DefaultAWSCredentialsProviderChain DefaultAWSCredentialsProviderChain

Introduction

On this page you can find example usage for com.amazonaws.auth DefaultAWSCredentialsProviderChain DefaultAWSCredentialsProviderChain.

Prototype

public DefaultAWSCredentialsProviderChain() 

Source Link

Usage

From source file:com.github.sjones4.youcan.youtwo.YouTwoClient.java

License:Open Source License

/**
 * Creates a client that resolves credentials via the default AWS credentials
 * provider chain and uses a default {@link ClientConfiguration}.
 */
public YouTwoClient() {
    this(new DefaultAWSCredentialsProviderChain(), new ClientConfiguration());
}

From source file:com.github.sjones4.youcan.youtwo.YouTwoClient.java

License:Open Source License

/**
 * Creates a client with the supplied client configuration, resolving
 * credentials via the default AWS credentials provider chain.
 *
 * @param clientConfiguration HTTP/client settings to use for this client
 */
public YouTwoClient(final ClientConfiguration clientConfiguration) {
    this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
}

From source file:com.google.crypto.tink.integration.awskms.AwsKmsClient.java

License:Apache License

/**
 * Loads default AWS credentials.
 *
 * <p>AWS credentials provider chain that looks for credentials in this order:
 *
 * <ul>
 *   <li>Environment Variables - AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
 *   <li>Java System Properties - aws.accessKeyId and aws.secretKey
 *   <li>Credential profiles file at the default location (~/.aws/credentials)
 *   <li>Instance profile credentials delivered through the Amazon EC2 metadata service
 * </ul>
 *
 * @throws GeneralSecurityException if the client initialization fails
 */
@Override
public KmsClient withDefaultCredentials() throws GeneralSecurityException {
    try {
        return withCredentialsProvider(new DefaultAWSCredentialsProviderChain());
    } catch (AmazonServiceException e) {
        // Wrap the AWS failure in the checked exception type declared by the
        // KmsClient interface; the cause is preserved for diagnostics.
        throw new GeneralSecurityException("cannot load default credentials", e);
    }
}

From source file:com.infinitechaos.vpcviewer.service.impl.VpcServiceImpl.java

License:Open Source License

private synchronized AmazonEC2 getClientForRegion(final String regionName) {
    return clients.computeIfAbsent(Regions.fromName(regionName), region -> {
        LOG.info("Creating client for region {}", region);
        return Region.getRegion(region).createClient(AmazonEC2Client.class,
                new DefaultAWSCredentialsProviderChain(), new ClientConfiguration());
    });/*from  w  w  w  . j  a v a 2  s.  co  m*/
}

From source file:com.innoq.hagmans.bachelor.TemperatureConsumer.java

License:Open Source License

/**
 * Entry point for the temperature consumer.
 *
 * <p>Optionally overrides the stream and lease-table names from the command
 * line, waits for the Kinesis stream to become active, recreates the
 * temperature DynamoDB table, and then runs a KCL worker using this class as
 * the record-processor factory.
 *
 * @param args optional: args[0] = Kinesis stream name, args[1] = KCL
 *             application / DynamoDB lease table name
 * @throws InterruptedException if interrupted while waiting
 */
public static void main(String[] args) throws InterruptedException {
    if (args.length == 2) {
        streamName = args[0];
        db_name = args[1];
    }

    // Initialize Utils
    // KCL configuration: read the stream from its oldest available record
    // (TRIM_HORIZON) in the producer's region.
    KinesisClientLibConfiguration config = new KinesisClientLibConfiguration(db_name, streamName,
            new DefaultAWSCredentialsProviderChain(), "KinesisProducerLibSampleConsumer")
                    .withRegionName(TemperatureProducer.REGION)
                    .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    Region region = RegionUtils.getRegion(TemperatureProducer.REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    // NOTE(review): two DynamoDB clients are built from the same credentials;
    // both are handed to DynamoDBUtils below — presumably one is used for the
    // document API and one for the low-level API. Verify in DynamoDBUtils.
    AmazonDynamoDB amazonDynamoDB = new AmazonDynamoDBClient(credentialsProvider, new ClientConfiguration());
    AmazonDynamoDBClient client = new AmazonDynamoDBClient(credentialsProvider);
    client.setRegion(region);
    DynamoDB dynamoDB = new DynamoDB(client);
    amazonDynamoDB.setRegion(region);
    DynamoDBUtils dbUtils = new DynamoDBUtils(dynamoDB, amazonDynamoDB, client);
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    try {
        if (!streamUtils.isActive(kinesis.describeStream(streamName))) {
            log.info("Stream is not active. Waiting for Stream to become active....");
            streamUtils.waitForStreamToBecomeActive(streamName);
        }
    } catch (ResourceNotFoundException e) {
        // Stream does not exist yet (the producer may not have created it);
        // block until it appears and becomes active.
        log.info("Stream is not created right now. Waiting for stream to get created and become active....");
        streamUtils.waitForStreamToBecomeActive(streamName);
    }
    // Start from a clean slate: drop the old lease table, then (re)create the
    // temperature table if needed.
    dbUtils.deleteTable(db_name);
    dbUtils.createTemperatureTableIfNotExists(tableName);

    Thread.sleep(1000);

    final TemperatureConsumer consumer = new TemperatureConsumer();

    // run() blocks for the lifetime of the worker.
    new Worker.Builder().recordProcessorFactory(consumer).config(config).build().run();
}

From source file:com.innoq.hagmans.bachelor.TemperatureProducer.java

License:Open Source License

/**
 * Here we'll walk through some of the config options and create an instance
 * of KinesisProducer, which will be used to put records.
 *
 * @return KinesisProducer instance used to put records.
 */
public static KinesisProducer getKinesisProducer() {
    // There are many configurable parameters in the KPL. See the javadocs
    // on each set method for details.
    KinesisProducerConfiguration config = new KinesisProducerConfiguration();

    // You can also load config from file. A sample properties file is
    // included in the project folder.
    // KinesisProducerConfiguration config =
    // KinesisProducerConfiguration.fromPropertiesFile("default_config.properties");

    // If you're running in EC2 and want to use the same Kinesis region as
    // the one your instance is in, you can simply leave out the region
    // configuration; the KPL will retrieve it from EC2 metadata.
    config.setRegion(REGION);

    // You can pass credentials programmatically through the configuration,
    // similar to the AWS SDK. DefaultAWSCredentialsProviderChain is used
    // by default, so this configuration can be omitted if that is all
    // that is needed.
    config.setCredentialsProvider(new DefaultAWSCredentialsProviderChain());

    // The maxConnections parameter can be used to control the degree of
    // parallelism when making HTTP requests. We're going to use only 1 here
    // since our throughput is fairly low. Using a high number will cause a
    // bunch of broken pipe errors to show up in the logs. This is due to
    // idle connections being closed by the server. Setting this value too
    // large may also cause request timeouts if you do not have enough
    // bandwidth.
    config.setMaxConnections(1);

    // Set a more generous timeout in case we're on a slow connection.
    config.setRequestTimeout(60000);

    // RecordMaxBufferedTime controls how long records are allowed to wait
    // in the KPL's buffers before being sent. Larger values increase
    // aggregation and reduces the number of Kinesis records put, which can
    // be helpful if you're getting throttled because of the records per
    // second limit on a shard. The default value is set very low to
    // minimize propagation delay, so we'll increase it here to get more
    // aggregation.
    config.setRecordMaxBufferedTime(15000);

    // If you have built the native binary yourself, you can point the Java
    // wrapper to it with the NativeExecutable option. If you want to pass
    // environment variables to the executable, you can either use a wrapper
    // shell script, or set them for the Java process, which will then pass
    // them on to the child process.
    // config.setNativeExecutable("my_directory/kinesis_producer");

    // If you end up using the default configuration (a Configuration
    // instance
    // without any calls to set*), you can just leave the config argument
    // out.
    //
    // Note that if you do pass a Configuration instance, mutating that
    // instance after initializing KinesisProducer has no effect. We do not
    // support dynamic re-configuration at the moment.
    KinesisProducer producer = new KinesisProducer(config);

    return producer;
}

From source file:com.innoq.hagmans.bachelor.TemperatureProducer.java

License:Open Source License

/**
 * Entry point for the temperature producer.
 *
 * <p>Optionally overrides stream name, sensor name, run duration and put
 * rate from the command line; ensures the Kinesis stream exists; then puts
 * records at the target rate via the KPL, logging progress once per second,
 * and finally flushes and destroys the producer.
 *
 * @param args optional: args[0] = stream name, args[1] = sensor name,
 *             args[2] = seconds to run, args[3] = records per second
 * @throws Exception on any unrecoverable failure during setup or teardown
 */
public static void main(String[] args) throws Exception {

    if (args.length == 4) {
        streamName = args[0];
        sensorName = args[1];
        secondsToRun = Integer.parseInt(args[2]);
        recordsPerSecond = Integer.parseInt(args[3]);
    }

    // Create a new stream if it doesn't already exists
    Region region = RegionUtils.getRegion(REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStream(streamName, NUMBER_OF_SHARDS);

    final KinesisProducer producer = getKinesisProducer();

    // The monotonically increasing sequence number we will put in the data
    // of each record
    final AtomicLong sequenceNumber = new AtomicLong(0);

    // The number of records that have finished (either successfully put, or
    // failed)
    final AtomicLong completed = new AtomicLong(0);

    // KinesisProducer.addUserRecord is asynchronous. A callback can be used
    // to receive the results.
    final FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>() {
        @Override
        public void onFailure(Throwable t) {
            // We don't expect any failures during this sample. If it
            // happens, we will log the first one and exit.
            if (t instanceof UserRecordFailedException) {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                log.error(String.format("Record failed to put - %s : %s", last.getErrorCode(),
                        last.getErrorMessage()));
            }
            log.error("Exception during put", t);
            System.exit(1);
        }

        @Override
        public void onSuccess(UserRecordResult result) {
            // Advance the simulated temperature and count the completion.
            temperature = Utils.getNextTemperature(temperature);
            completed.getAndIncrement();
        }
    };

    // The lines within run() are the essence of the KPL API.
    final Runnable putOneRecord = new Runnable() {
        @Override
        public void run() {
            ByteBuffer data = Utils.generateData(temperature, sensorName, DATA_SIZE);
            // TIMESTAMP is our partition key
            ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName, TIMESTAMP,
                    Utils.randomExplicitHashKey(), data);
            Futures.addCallback(f, callback);
        }
    };

    // This gives us progress updates
    EXECUTOR.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            long put = sequenceNumber.get();
            long total = recordsPerSecond * secondsToRun;
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            log.info(String.format("Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)", put, total,
                    putPercent, done, donePercent));
        }
    }, 1, 1, TimeUnit.SECONDS);

    // Kick off the puts
    log.info(String.format("Starting puts... will run for %d seconds at %d records per second", secondsToRun,
            recordsPerSecond));
    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, secondsToRun, recordsPerSecond);

    // Wait for puts to finish. After this statement returns, we have
    // finished all calls to putRecord, but the records may still be
    // in-flight. We will additionally wait for all records to actually
    // finish later.
    EXECUTOR.awaitTermination(secondsToRun + 1, TimeUnit.SECONDS);

    // If you need to shutdown your application, call flushSync() first to
    // send any buffered records. This method will block until all records
    // have finished (either success or fail). There are also asynchronous
    // flush methods available.
    //
    // Records are also automatically flushed by the KPL after a while based
    // on the time limit set with Configuration.setRecordMaxBufferedTime()
    log.info("Waiting for remaining puts to finish...");
    producer.flushSync();
    log.info("All records complete.");

    // This kills the child process and shuts down the threads managing it.
    producer.destroy();
    log.info("Finished.");
}

From source file:com.ipcglobal.awscdh.util.Utils.java

License:Apache License

/**
 * Initializes AWS credentials via the default provider chain (environment
 * variables, system properties, the credential profiles file, or instance
 * profile credentials) and verifies that credentials can actually be fetched.
 *
 * <p><b>Note:</b> if no credentials can be obtained the failure is logged and
 * {@code null} is returned rather than an exception thrown — callers must
 * handle a {@code null} result.
 *
 * @return the AWS credentials provider, or {@code null} if none could be
 *         obtained
 */
public static AWSCredentialsProvider initCredentials() {
    // Get credentials from IMDS. If unsuccessful, get them from the
    // credential profiles file.
    AWSCredentialsProvider credentialsProvider = null;
    try {
        credentialsProvider = new DefaultAWSCredentialsProviderChain();
        // Verify we can fetch credentials from the provider
        credentialsProvider.getCredentials();
        //log.info("Obtained credentials from DefaultAWSCredentialsProviderChain.");
    } catch (AmazonClientException e) {
        log.error("Unable to obtain credentials from DefaultAWSCredentialsProviderChain", e);
    }
    return credentialsProvider;
}

From source file:com.ivona.services.tts.IvonaSpeechCloudClient.java

License:Open Source License

/**
 * Default constructor: uses the default AWS credentials provider chain and a
 * default {@link ClientConfiguration}.
 */
public IvonaSpeechCloudClient() {
    this(new DefaultAWSCredentialsProviderChain(), new ClientConfiguration());
}

From source file:com.ivona.services.tts.IvonaSpeechCloudClient.java

License:Open Source License

/**
 * Constructor which allows a custom ClientConfiguration to be passed;
 * credentials are resolved via the default AWS credentials provider chain.
 *
 * @param clientConfiguration HTTP/client settings to use for this client
 */
public IvonaSpeechCloudClient(ClientConfiguration clientConfiguration) {
    this(new DefaultAWSCredentialsProviderChain(), clientConfiguration);
}