Example usage for com.amazonaws.regions Region getRegion

List of usage examples for com.amazonaws.regions Region getRegion

Introduction

On this page you can find example usage for com.amazonaws.regions Region getRegion.

Prototype

public static Region getRegion(Regions region) 

Document

Returns the region with the id given, or null if it cannot be found in the current regions.xml file.
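
As a quick orientation before the full examples, here is a minimal, self-contained sketch of the call; the wrapper class name is illustrative and not part of the SDK:

import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;

public class GetRegionExample {
    public static void main(String[] args) {
        // Look up a Region directly from the Regions enum.
        Region usWest2 = Region.getRegion(Regions.US_WEST_2);
        System.out.println(usWest2.getName()); // "us-west-2"

        // When starting from a plain region name string, convert it first.
        Region fromName = Region.getRegion(Regions.fromName("eu-west-1"));
        System.out.println(fromName.getName()); // "eu-west-1"
    }
}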

Usage

From source file:com.stfciz.aws.ec2.data.EC2InstancesManager.java

/**
 * Lists the EC2 instances visible to the configured AWS credentials.
 *
 * @param region the AWS region name (for example "us-west-2"); if null, the client's current region is kept
 * @return the matching instances, or null when no credentials are configured
 */
public List<EC2Instance> getEC2Instances(String region) {
    if (this.credentials == null) {
        return null;
    }
    List<EC2Instance> ec2Instances = new ArrayList<>();
    if (region != null) {
        this.amazonEC2Client.setRegion(Region.getRegion(Regions.fromName(region)));
    }
    final DescribeInstancesResult describeInstances = this.amazonEC2Client.describeInstances();
    final List<Reservation> reservations = describeInstances.getReservations();
    for (Reservation reservation : reservations) {
        final List<Instance> instances = reservation.getInstances();
        for (Instance instance : instances) {
            EC2Instance ec2Instance = new EC2Instance();
            ec2Instance.setId(instance.getInstanceId());
            ec2Instance.setPublicDnsName(instance.getPublicDnsName());
            List<Tag> tags = instance.getTags();

            if (tags != null && !tags.isEmpty()) {
                // Concatenate all tag values into a single display name.
                StringBuilder name = new StringBuilder();
                for (Tag tag : tags) {
                    if (name.length() > 0) {
                        name.append(", ");
                    }
                    name.append(tag.getValue());
                }
                ec2Instance.setName(name.toString());
            }

            final InstanceState state = instance.getState();
            ec2Instance.setStatus(state.getName());
            ec2Instance.setStatusCode(state.getCode());

            if (this.tagIncludes != null && ec2Instance.getName() != null) {
                for (String tagInclude : this.tagIncludes) {
                    if (ec2Instance.getName().contains(tagInclude)) {
                        ec2Instances.add(ec2Instance);
                        break;
                    }
                }
            } else {
                ec2Instances.add(ec2Instance);
            }
        }
    }

    return ec2Instances;
}

From source file:com.stockcloud.updatestock.java

License:Open Source License

/**
 * The only information needed to create a client is a set of security
 * credentials consisting of the AWS Access Key ID and Secret Access Key. All
 * other configuration, such as the service endpoints, is handled
 * automatically. Client parameters, such as proxies, can be specified in an
 * optional ClientConfiguration object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.PropertiesCredentials
 * @see com.amazonaws.ClientConfiguration
 */
private static void init() throws Exception {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     */
    dynamoDB = new AmazonDynamoDBClient(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    dynamoDB.setRegion(usWest2);

}

From source file:com.streamsets.datacollector.bundles.SupportBundleManager.java

License:Apache License

/**
 * Instead of providing the support bundle directly to the user, upload it to StreamSets backend services.
 */
public void uploadNewBundleFromInstances(List<BundleContentGenerator> generators, BundleType bundleType)
        throws IOException {
    // Generate bundle
    SupportBundle bundle = generateNewBundleFromInstances(generators, bundleType);

    boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
    String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
    String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
    String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
    int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

    if (!enabled) {
        throw new IOException("Uploading support bundles was disabled by administrator.");
    }

    AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(
            new BasicAWSCredentials(accessKey, secretKey));
    AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

    // Object Metadata
    ObjectMetadata s3Metadata = new ObjectMetadata();
    for (Map.Entry<Object, Object> entry : getMetadata(bundleType).entrySet()) {
        s3Metadata.addUserMetadata((String) entry.getKey(), (String) entry.getValue());
    }

    List<PartETag> partETags;
    InitiateMultipartUploadResult initResponse = null;
    try {
        // Uploading part by part
        LOG.info("Initiating multi-part support bundle upload");
        partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket,
                bundle.getBundleKey());
        initRequest.setObjectMetadata(s3Metadata);
        initResponse = s3Client.initiateMultipartUpload(initRequest);
    } catch (AmazonClientException e) {
        LOG.error("Support bundle upload failed: ", e);
        throw new IOException("Support bundle upload failed", e);
    }

    try {
        byte[] buffer = new byte[bufferSize];
        int partId = 1;
        int size = -1;
        while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
            LOG.debug("Uploading part {} of size {}", partId, size);
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(bundle.getBundleKey()).withUploadId(initResponse.getUploadId())
                    .withPartNumber(partId++).withInputStream(new ByteArrayInputStream(buffer))
                    .withPartSize(size);

            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
        }

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucket,
                bundle.getBundleKey(), initResponse.getUploadId(), partETags);

        s3Client.completeMultipartUpload(compRequest);
        LOG.info("Support bundle upload finished");
    } catch (Exception e) {
        LOG.error("Support bundle upload failed", e);
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId()));

        throw new IOException("Can't upload support bundle", e);
    } finally {
        // Close the client
        s3Client.shutdown();
    }
}

From source file:com.streamsets.pipeline.stage.destination.kinesis.KinesisTarget.java

License:Apache License

private void checkStreamExists(List<ConfigIssue> issues) {
    ClientConfiguration kinesisConfiguration = new ClientConfiguration();
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(kinesisConfiguration);
    kinesisClient.setRegion(Region.getRegion(region));

    try {
        DescribeStreamResult result = kinesisClient.describeStream(streamName);
        LOG.info("Connected successfully to stream: {} with description: {}", streamName,
                result.getStreamDescription().toString());
    } catch (Exception e) {
        issues.add(getContext().createConfigIssue(
                com.streamsets.pipeline.stage.origin.kinesis.Groups.KINESIS.name(), "streamName",
                Errors.KINESIS_01, e.toString()));
    } finally {
        kinesisClient.shutdown();
    }
}

From source file:com.streamsets.pipeline.stage.destination.kinesis.KinesisTarget.java

License:Apache License

private void createKinesisClient() {
    kinesisClient = new AmazonKinesisClient(kinesisConfiguration);
    kinesisClient.setRegion(Region.getRegion(region));
}

From source file:com.streamsets.pipeline.stage.lib.kinesis.KinesisUtil.java

License:Apache License

public static long getShardCount(Regions region, AWSConfig awsConfig, String streamName)
        throws AmazonClientException {
    ClientConfiguration kinesisConfiguration = new ClientConfiguration();
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(AWSUtil.getCredentialsProvider(awsConfig),
            kinesisConfiguration);
    kinesisClient.setRegion(Region.getRegion(region));

    try {
        long numShards = 0;
        String lastShardId = null;
        StreamDescription description;
        do {
            if (lastShardId == null) {
                description = kinesisClient.describeStream(streamName).getStreamDescription();
            } else {
                description = kinesisClient.describeStream(streamName, lastShardId).getStreamDescription();
            }

            for (Shard shard : description.getShards()) {
                if (shard.getSequenceNumberRange().getEndingSequenceNumber() == null) {
                    // Then this shard is open, so we should count it. Shards with an ending sequence number
                    // are closed and cannot be written to, so we skip counting them.
                    ++numShards;
                }
            }

            int pageSize = description.getShards().size();
            lastShardId = description.getShards().get(pageSize - 1).getShardId();

        } while (description.getHasMoreShards());

        LOG.debug("Connected successfully to stream: '{}' with '{}' shards.", streamName, numShards);

        return numShards;
    } finally {
        kinesisClient.shutdown();
    }
}

From source file:com.streamsets.pipeline.stage.origin.kinesis.KinesisSource.java

License:Apache License

private void checkStreamExists(List<ConfigIssue> issues) {
    ClientConfiguration kinesisConfiguration = new ClientConfiguration();
    AmazonKinesisClient kinesisClient = new AmazonKinesisClient(kinesisConfiguration);
    kinesisClient.setRegion(Region.getRegion(region));

    try {
        DescribeStreamResult result = kinesisClient.describeStream(streamName);
        LOG.info("Connected successfully to stream: {} with description: {}", streamName,
                result.getStreamDescription().toString());
    } catch (Exception e) {
        issues.add(getContext().createConfigIssue(Groups.KINESIS.name(), "streamName", Errors.KINESIS_01,
                e.toString()));
    } finally {
        kinesisClient.shutdown();
    }
}

From source file:com.streamsets.pipeline.stage.origin.s3.S3Config.java

License:Apache License

private void validateConnection(Stage.Context context, List<Stage.ConfigIssue> issues) {
    // Access key ID - acts as the username (unique within AWS)
    // Secret access key - acts as the password
    AWSCredentials credentials = new BasicAWSCredentials(accessKeyId, secretAccessKey);
    s3Client = new AmazonS3Client(credentials, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    if (endPoint != null && !endPoint.isEmpty()) {
        s3Client.setEndpoint(endPoint);
    } else {
        s3Client.setRegion(Region.getRegion(region));
    }
    try {
        //check if the credentials are right by trying to list buckets
        s3Client.listBuckets();
    } catch (AmazonS3Exception e) {
        issues.add(context.createConfigIssue(Groups.S3.name(), "accessKeyId", Errors.S3_SPOOLDIR_20,
                e.toString()));
    }
}

From source file:com.supprema.utils.S3Sample.java

License:Open Source License

public static void main(String[] args) throws IOException {

    /*
     * The ProfileCredentialsProvider will return your [fabiano-user-s3]
     * credential profile by reading from the credentials file located at
     * (/Users/fabianorodriguesmatias/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("fabiano-user-s3").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. "
                        + "Please make sure that your credentials file is at the correct "
                        + "location (/Users/fabianorodriguesmatias/.aws/credentials), and is in valid format.",
                e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket.  Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + "  " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file:com.tcl.gateway.firehose.log4j.FirehoseAppender.java

License:Open Source License

/**
 * Configures this appender instance and makes it ready for use by the
 * consumers. It validates mandatory parameters and confirms that the
 * configured stream is ready for publishing data.
 *
 * Error details are made available through the fallback handler for this
 * appender.
 * 
 * @throws IllegalStateException
 *           if we encounter issues configuring this appender instance
 */
@Override
public void activateOptions() {
    if (deliveryStreamName == null) {
        initializationFailed = true;
        error("Invalid configuration - streamName cannot be null for appender: " + name);
    }

    if (layout == null) {
        initializationFailed = true;
        error("Invalid configuration - No layout for appender: " + name);
    }

    ClientConfiguration clientConfiguration = new ClientConfiguration();
    clientConfiguration = setProxySettingsFromSystemProperties(clientConfiguration);

    clientConfiguration.setMaxErrorRetry(maxRetries);
    clientConfiguration.setRetryPolicy(new RetryPolicy(PredefinedRetryPolicies.DEFAULT_RETRY_CONDITION,
            PredefinedRetryPolicies.DEFAULT_BACKOFF_STRATEGY, maxRetries, true));
    clientConfiguration.setUserAgent(AppenderConstants.USER_AGENT_STRING);

    final BlockingQueue<Runnable> taskBuffer = new LinkedBlockingDeque<Runnable>(bufferSize);
    ThreadPoolExecutor threadPoolExecutor = new ThreadPoolExecutor(threadCount, threadCount,
            AppenderConstants.DEFAULT_THREAD_KEEP_ALIVE_SEC, TimeUnit.SECONDS, taskBuffer,
            new BlockFastProducerPolicy());
    threadPoolExecutor.prestartAllCoreThreads();
    firehoseClient = new AmazonKinesisFirehoseAsyncClient(new CustomCredentialsProviderChain(),
            clientConfiguration, threadPoolExecutor);

    boolean regionProvided = !Validator.isBlank(region);
    if (!regionProvided) {
        region = AppenderConstants.DEFAULT_REGION;
    }
    if (!Validator.isBlank(endpoint)) {
        if (regionProvided) {
            LOGGER.warn("Received configuration for both region as well as Amazon Kinesis endpoint. ("
                    + endpoint + ") will be used as endpoint instead of default endpoint for region (" + region
                    + ")");
        }
        firehoseClient.setEndpoint(endpoint);
    } else {
        firehoseClient.setRegion(Region.getRegion(Regions.fromName(region)));
    }

    DescribeDeliveryStreamResult describeResult = null;
    try {
        describeResult = firehoseClient.describeDeliveryStream(
                new DescribeDeliveryStreamRequest().withDeliveryStreamName(deliveryStreamName));
        String streamStatus = describeResult.getDeliveryStreamDescription().getDeliveryStreamStatus();
        if (!StreamStatus.ACTIVE.name().equals(streamStatus)
                && !StreamStatus.UPDATING.name().equals(streamStatus)) {
            initializationFailed = true;
            error("Delivery Stream " + deliveryStreamName
                    + " is not ready (in active/updating status) for appender: " + name);
        }
    } catch (ResourceNotFoundException rnfe) {
        initializationFailed = true;
        error("Delivery  Stream " + deliveryStreamName + " doesn't exist for appender: " + name, rnfe);
    }

    asyncCallHander = new AsyncPutCallStatsReporter(name);

    if (metric) {
        MetricRegistry registry = new MetricRegistry();
        registry.register("Gauge", new Gauge<Integer>() {

            @Override
            public Integer getValue() {
                return taskBuffer.size();
            }

        });

        ConsoleReporter.forRegistry(registry).build().start(3, TimeUnit.SECONDS);
    }
}