Example usage for com.amazonaws.regions RegionUtils getRegion

Introduction

On this page you can find example usages of com.amazonaws.regions.RegionUtils.getRegion, collected from open-source projects.

Prototype

public static Region getRegion(String regionName) 

Document

Returns the Region with the given regionName, resolved to the proper partition, if it is found in the region metadata.
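
A minimal, self-contained sketch of the call (the region name "us-east-1" and the S3 client below are chosen purely for illustration):

import com.amazonaws.regions.Region;
import com.amazonaws.regions.RegionUtils;
import com.amazonaws.services.s3.AmazonS3Client;

public class RegionUtilsExample {
    public static void main(String[] args) {
        // Resolve the Region metadata for a region name.
        Region region = RegionUtils.getRegion("us-east-1");

        // The Region can then be applied to a legacy (SDK v1) client,
        // which is the same setRegion(...) pattern the examples below use.
        AmazonS3Client s3 = new AmazonS3Client();
        s3.setRegion(region);
    }
}

Note that the examples below typically call getRegion only after checking that the configured region name is non-blank, which is a sensible guard to copy.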

Usage

From source file:com.digitalpebble.stormcrawler.aws.bolt.CloudSearchIndexerBolt.java

License:Apache License

@SuppressWarnings({ "rawtypes", "unchecked" })
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
    super.prepare(conf, context, collector);
    _collector = collector;

    this.eventCounter = context.registerMetric("CloudSearchIndexer", new MultiCountMetric(), 10);

    maxTimeBuffered = ConfUtils.getInt(conf, CloudSearchConstants.MAX_TIME_BUFFERED, 10);

    maxDocsInBatch = ConfUtils.getInt(conf, CloudSearchConstants.MAX_DOCS_BATCH, -1);

    buffer = new StringBuffer(MAX_SIZE_BATCH_BYTES).append('[');

    dumpBatchFilesToTemp = ConfUtils.getBoolean(conf, "cloudsearch.batch.dump", false);

    if (dumpBatchFilesToTemp) {
        // only dumping to local file
        // no more config required
        return;
    }

    String endpoint = ConfUtils.getString(conf, "cloudsearch.endpoint");

    if (StringUtils.isBlank(endpoint)) {
        String message = "Missing CloudSearch endpoint";
        LOG.error(message);
        throw new RuntimeException(message);
    }

    String regionName = ConfUtils.getString(conf, CloudSearchConstants.REGION);

    AmazonCloudSearchClient cl = new AmazonCloudSearchClient();
    if (StringUtils.isNotBlank(regionName)) {
        cl.setRegion(RegionUtils.getRegion(regionName));
    }

    String domainName = null;

    // retrieve the domain name
    DescribeDomainsResult domains = cl.describeDomains(new DescribeDomainsRequest());

    for (DomainStatus ds : domains.getDomainStatusList()) {
        if (ds.getDocService().getEndpoint().equals(endpoint)) {
            domainName = ds.getDomainName();
            break;
        }
    }
    // check domain name
    if (StringUtils.isBlank(domainName)) {
        throw new RuntimeException("No domain name found for CloudSearch endpoint");
    }

    DescribeIndexFieldsResult indexDescription = cl
            .describeIndexFields(new DescribeIndexFieldsRequest().withDomainName(domainName));
    for (IndexFieldStatus ifs : indexDescription.getIndexFields()) {
        String indexname = ifs.getOptions().getIndexFieldName();
        String indextype = ifs.getOptions().getIndexFieldType();
        LOG.info("CloudSearch index name {} of type {}", indexname, indextype);
        csfields.put(indexname, indextype);
    }

    client = new AmazonCloudSearchDomainClient();
    client.setEndpoint(endpoint);
}

From source file:com.digitalpebble.stormcrawler.aws.s3.AbstractS3CacheBolt.java

License:Apache License

/** Returns an S3 client built from the given configuration map. */
public static AmazonS3Client getS3Client(Map conf) {
    AWSCredentialsProvider provider = new DefaultAWSCredentialsProviderChain();
    AWSCredentials credentials = provider.getCredentials();
    ClientConfiguration config = new ClientConfiguration();

    AmazonS3Client client = new AmazonS3Client(credentials, config);

    String regionName = ConfUtils.getString(conf, REGION);
    if (StringUtils.isNotBlank(regionName)) {
        client.setRegion(RegionUtils.getRegion(regionName));
    }

    String endpoint = ConfUtils.getString(conf, ENDPOINT);
    if (StringUtils.isNotBlank(endpoint)) {
        client.setEndpoint(endpoint);
    }
    return client;
}

From source file:com.example.base.StreamSource.java

License:Open Source License

/**
 * Creates a new StreamSource.
 *
 * @param content
 *        Content of the records to put to the stream
 * @param config
 *        Configuration to determine which stream to put records to and get {@link AWSCredentialsProvider}
 * @param inputFile
 *        File containing record data to emit on each line
 * @param loopOverStreamSource
 *        Loop over the stream source to continually put records
 */
public StreamSource(String content, KinesisConnectorConfiguration config, String inputFile,
        boolean loopOverStreamSource) {
    this.config = config;
    this.content = content;
    this.inputFile = inputFile;
    this.loopOverInputFile = loopOverStreamSource;
    this.objectMapper = new ObjectMapper();
    kinesisClient = new AmazonKinesisClient(new AWSCredentialsProvider() {
        @Override
        public AWSCredentials getCredentials() {
            // Never ship hard-coded credentials; placeholders shown here.
            return new AWSCredentials() {
                @Override
                public String getAWSAccessKeyId() {
                    return "YOUR_ACCESS_KEY_ID";
                }

                @Override
                public String getAWSSecretKey() {
                    return "YOUR_SECRET_ACCESS_KEY";
                }
            };
        }

        @Override
        public void refresh() {
            // No-op: these static credentials never expire.
        }
    });
    kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
    if (config.KINESIS_ENDPOINT != null) {
        kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
    }
    KinesisUtils.createInputStream(config);
}

From source file:com.example.utils.KinesisUtils.java

License:Open Source License

/**
 * Creates the Amazon Kinesis stream specified by config.KINESIS_INPUT_STREAM,
 * using the shard count from config.KINESIS_INPUT_STREAM_SHARD_COUNT.
 *
 * @param config
 *        The configuration with the specified input stream name and {@link AWSCredentialsProvider}
 */
public static void createInputStream(KinesisConnectorConfiguration config) {
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
    kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
    if (config.KINESIS_ENDPOINT != null) {
        kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
    }
    createAndWaitForStreamToBecomeAvailable(kinesisClient, config.KINESIS_INPUT_STREAM,
            config.KINESIS_INPUT_STREAM_SHARD_COUNT);
}

From source file:com.example.utils.KinesisUtils.java

License:Open Source License

/**
 * Creates the Amazon Kinesis stream specified by config.KINESIS_OUTPUT_STREAM,
 * using the shard count from config.KINESIS_OUTPUT_STREAM_SHARD_COUNT.
 *
 * @param config
 *        The configuration with the specified output stream name and {@link AWSCredentialsProvider}
 */
public static void createOutputStream(KinesisConnectorConfiguration config) {
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
    kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
    if (config.KINESIS_ENDPOINT != null) {
        kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
    }
    createAndWaitForStreamToBecomeAvailable(kinesisClient, config.KINESIS_OUTPUT_STREAM,
            config.KINESIS_OUTPUT_STREAM_SHARD_COUNT);
}

From source file:com.example.utils.KinesisUtils.java

License:Open Source License

/**
 * Deletes the input stream specified by config.KINESIS_INPUT_STREAM.
 *
 * @param config
 *        The configuration containing the stream name and {@link AWSCredentialsProvider}
 */
public static void deleteInputStream(KinesisConnectorConfiguration config) {
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
    kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
    if (config.KINESIS_ENDPOINT != null) {
        kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
    }
    deleteStream(kinesisClient, config.KINESIS_INPUT_STREAM);
}

From source file:com.example.utils.KinesisUtils.java

License:Open Source License

/**
 * Deletes the output stream specified by config.KINESIS_OUTPUT_STREAM.
 *
 * @param config
 *        The configuration containing the stream name and {@link AWSCredentialsProvider}
 */
public static void deleteOutputStream(KinesisConnectorConfiguration config) {
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(config.AWS_CREDENTIALS_PROVIDER);
    kinesisClient.setRegion(RegionUtils.getRegion(config.REGION_NAME));
    if (config.KINESIS_ENDPOINT != null) {
        kinesisClient.setEndpoint(config.KINESIS_ENDPOINT);
    }
    deleteStream(kinesisClient, config.KINESIS_OUTPUT_STREAM);
}

From source file:com.hangum.tadpole.aws.rds.commons.core.utils.AmazonRDSUtsils.java

License:Open Source License

/**
 * Retrieves the RDS instances in the given region as Tadpole UserDB data.
 *
 * @param accessKey AWS access key
 * @param secretKey AWS secret key
 * @param regionName AWS region name, e.g. "us-east-1"
 * @return the list of RDS instances mapped to {@link AWSRDSUserDBDAO} objects
 * @throws Exception if the RDS API call fails
 */
public static List<AWSRDSUserDBDAO> getDBList(String accessKey, String secretKey, String regionName)
        throws Exception {
    List<AWSRDSUserDBDAO> returnDBList = new ArrayList<AWSRDSUserDBDAO>();

    try {
        BasicAWSCredentials awsCredential = new BasicAWSCredentials(accessKey, secretKey);
        AmazonRDSClient rdsClient = new AmazonRDSClient(awsCredential);
        rdsClient.setRegion(RegionUtils.getRegion(regionName));

        DescribeDBInstancesResult describeDBInstance = rdsClient.describeDBInstances();
        List<DBInstance> listDBInstance = describeDBInstance.getDBInstances();
        for (DBInstance rdsDbInstance : listDBInstance) {
            AWSRDSUserDBDAO rdsUserDB = new AWSRDSUserDBDAO();

            // rds information
            rdsUserDB.setAccessKey(accessKey);
            rdsUserDB.setSecretKey(secretKey);
            rdsUserDB.setEndPoint(regionName);

            // ext information
            rdsUserDB.setExt1(rdsDbInstance.getDBInstanceClass());
            rdsUserDB.setExt2(rdsDbInstance.getAvailabilityZone());

            // db information
            String strDBMStype = rdsDbInstance.getEngine();
            // map the RDS engine name onto a Tadpole DBDefine type
            if (strDBMStype.startsWith("sqlserver")) {
                strDBMStype = DBDefine.MSSQL_DEFAULT.getDBToString();
            } else if (strDBMStype.startsWith("oracle")) {
                strDBMStype = DBDefine.ORACLE_DEFAULT.getDBToString();
            }

            rdsUserDB.setDbms_types(DBDefine.getDBDefine(strDBMStype).getDBToString());
            rdsUserDB.setDisplay_name(
                    rdsDbInstance.getDBInstanceIdentifier() + "." + rdsDbInstance.getAvailabilityZone());
            rdsUserDB.setOperation_type(DBOperationType.DEVELOP.toString());
            rdsUserDB.setDb(rdsDbInstance.getDBInstanceIdentifier());
            rdsUserDB.setHost(rdsDbInstance.getEndpoint().getAddress());
            rdsUserDB.setPort("" + rdsDbInstance.getEndpoint().getPort());
            rdsUserDB.setLocale(
                    rdsDbInstance.getCharacterSetName() == null ? "" : rdsDbInstance.getCharacterSetName());
            rdsUserDB.setUsers(rdsDbInstance.getMasterUsername());
            rdsUserDB.setPasswd("");

            returnDBList.add(rdsUserDB);
        }
    } catch (Exception e) {
        // propagate AWS SDK failures (e.g. bad credentials or region) unchanged
        throw e;
    }

    return returnDBList;
}

From source file:com.innoq.hagmans.bachelor.TemperatureConsumer.java

License:Open Source License

public static void main(String[] args) throws InterruptedException {
    if (args.length == 2) {
        streamName = args[0];
        db_name = args[1];
    }

    // Initialize Utils
    KinesisClientLibConfiguration config = new KinesisClientLibConfiguration(db_name, streamName,
            new DefaultAWSCredentialsProviderChain(), "KinesisProducerLibSampleConsumer")
                    .withRegionName(TemperatureProducer.REGION)
                    .withInitialPositionInStream(InitialPositionInStream.TRIM_HORIZON);

    Region region = RegionUtils.getRegion(TemperatureProducer.REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    AmazonDynamoDB amazonDynamoDB = new AmazonDynamoDBClient(credentialsProvider, new ClientConfiguration());
    AmazonDynamoDBClient client = new AmazonDynamoDBClient(credentialsProvider);
    client.setRegion(region);
    DynamoDB dynamoDB = new DynamoDB(client);
    amazonDynamoDB.setRegion(region);
    DynamoDBUtils dbUtils = new DynamoDBUtils(dynamoDB, amazonDynamoDB, client);
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    try {
        if (!streamUtils.isActive(kinesis.describeStream(streamName))) {
            log.info("Stream is not active. Waiting for Stream to become active....");
            streamUtils.waitForStreamToBecomeActive(streamName);
        }
    } catch (ResourceNotFoundException e) {
        log.info("Stream is not created right now. Waiting for stream to get created and become active....");
        streamUtils.waitForStreamToBecomeActive(streamName);
    }
    dbUtils.deleteTable(db_name);
    dbUtils.createTemperatureTableIfNotExists(tableName);

    Thread.sleep(1000);

    final TemperatureConsumer consumer = new TemperatureConsumer();

    new Worker.Builder().recordProcessorFactory(consumer).config(config).build().run();
}

From source file:com.innoq.hagmans.bachelor.TemperatureProducer.java

License:Open Source License

public static void main(String[] args) throws Exception {

    if (args.length == 4) {
        streamName = args[0];
        sensorName = args[1];
        secondsToRun = Integer.parseInt(args[2]);
        recordsPerSecond = Integer.parseInt(args[3]);
    }

    // Create a new stream if it doesn't already exist
    Region region = RegionUtils.getRegion(REGION);
    AWSCredentialsProvider credentialsProvider = new DefaultAWSCredentialsProviderChain();
    AmazonKinesis kinesis = new AmazonKinesisClient(credentialsProvider, new ClientConfiguration());
    kinesis.setRegion(region);
    StreamUtils streamUtils = new StreamUtils(kinesis);
    streamUtils.createStream(streamName, NUMBER_OF_SHARDS);

    final KinesisProducer producer = getKinesisProducer();

    // The monotonically increasing sequence number we will put in the data
    // of each record
    final AtomicLong sequenceNumber = new AtomicLong(0);

    // The number of records that have finished (either successfully put, or
    // failed)
    final AtomicLong completed = new AtomicLong(0);

    // KinesisProducer.addUserRecord is asynchronous. A callback can be used
    // to receive the results.
    final FutureCallback<UserRecordResult> callback = new FutureCallback<UserRecordResult>() {
        @Override
        public void onFailure(Throwable t) {
            // We don't expect any failures during this sample. If it
            // happens, we will log the first one and exit.
            if (t instanceof UserRecordFailedException) {
                Attempt last = Iterables.getLast(((UserRecordFailedException) t).getResult().getAttempts());
                log.error(String.format("Record failed to put - %s : %s", last.getErrorCode(),
                        last.getErrorMessage()));
            }
            log.error("Exception during put", t);
            System.exit(1);
        }

        @Override
        public void onSuccess(UserRecordResult result) {
            temperature = Utils.getNextTemperature(temperature);
            completed.getAndIncrement();
        }
    };

    // The lines within run() are the essence of the KPL API.
    final Runnable putOneRecord = new Runnable() {
        @Override
        public void run() {
            ByteBuffer data = Utils.generateData(temperature, sensorName, DATA_SIZE);
            // TIMESTAMP is our partition key
            ListenableFuture<UserRecordResult> f = producer.addUserRecord(streamName, TIMESTAMP,
                    Utils.randomExplicitHashKey(), data);
            Futures.addCallback(f, callback);
        }
    };

    // This gives us progress updates
    EXECUTOR.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            long put = sequenceNumber.get();
            long total = (long) recordsPerSecond * secondsToRun; // widen before multiplying to avoid int overflow
            double putPercent = 100.0 * put / total;
            long done = completed.get();
            double donePercent = 100.0 * done / total;
            log.info(String.format("Put %d of %d so far (%.2f %%), %d have completed (%.2f %%)", put, total,
                    putPercent, done, donePercent));
        }
    }, 1, 1, TimeUnit.SECONDS);

    // Kick off the puts
    log.info(String.format("Starting puts... will run for %d seconds at %d records per second", secondsToRun,
            recordsPerSecond));
    executeAtTargetRate(EXECUTOR, putOneRecord, sequenceNumber, secondsToRun, recordsPerSecond);

    // Wait for puts to finish. After this statement returns, we have
    // finished all calls to putRecord, but the records may still be
    // in-flight. We will additionally wait for all records to actually
    // finish later.
    EXECUTOR.awaitTermination(secondsToRun + 1, TimeUnit.SECONDS);

    // If you need to shutdown your application, call flushSync() first to
    // send any buffered records. This method will block until all records
    // have finished (either success or fail). There are also asynchronous
    // flush methods available.
    //
    // Records are also automatically flushed by the KPL after a while based
    // on the time limit set with Configuration.setRecordMaxBufferedTime()
    log.info("Waiting for remaining puts to finish...");
    producer.flushSync();
    log.info("All records complete.");

    // This kills the child process and shuts down the threads managing it.
    producer.destroy();
    log.info("Finished.");
}