List of usage examples for com.amazonaws.regions.Region.getRegion
public static Region getRegion(Regions region)
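Before the longer project excerpts below, here is a minimal, self-contained sketch of the call itself. It assumes the AWS SDK for Java 1.x is on the classpath and that default credentials are available; the class name GetRegionExample and the choice of US_WEST_2 are only for illustration.

import com.amazonaws.regions.Region;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class GetRegionExample {
    public static void main(String[] args) {
        // Resolve a Region object from a Regions enum constant...
        Region usWest2 = Region.getRegion(Regions.US_WEST_2);

        // ...or from a region name string, as several examples below do.
        Region fromName = Region.getRegion(Regions.fromName("us-west-2"));

        // Apply the region to a 1.x client.
        AmazonS3 s3 = new AmazonS3Client();
        s3.setRegion(usWest2);

        System.out.println(usWest2.getName() + " / " + fromName.getName());
    }
}

Note that setRegion(Region) on the 1.x clients has since been superseded by the client builders' withRegion(...), but all of the excerpts below use the setRegion pattern shown here.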
From source file: br.com.unb.aws.client.S3ClientV1.java
License: Open Source License
public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials
     * (C:\Users\USER_NAME\.aws\credentials for Windows users)
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */
    AmazonS3 s3 = new AmazonS3Client();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3.listObjects(
                new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " "
                    + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file: ch.cyberduck.core.kms.KMSEncryptionFeature.java
License: Open Source License
/**
 * @return List of IDs of KMS managed keys
 */
@Override
public Set<Algorithm> getKeys(final Path container, final LoginCallback prompt) throws BackgroundException {
    final Set<Algorithm> keys = super.getKeys(container, prompt);
    try {
        keys.addAll(this.authenticated(new Authenticated<Set<Algorithm>>() {
            @Override
            public Set<Algorithm> call() throws BackgroundException {
                // Create new IAM credentials
                final AWSKMSClient client = new AWSKMSClient(new com.amazonaws.auth.AWSCredentials() {
                    @Override
                    public String getAWSAccessKeyId() {
                        return host.getCredentials().getUsername();
                    }

                    @Override
                    public String getAWSSecretKey() {
                        return host.getCredentials().getPassword();
                    }
                }, configuration);
                final Location feature = session.getFeature(Location.class);
                final Location.Name region = feature.getLocation(containerService.getContainer(container));
                client.setRegion(Region.getRegion(Regions.fromName(region.getIdentifier())));
                try {
                    final Map<String, String> aliases = new HashMap<String, String>();
                    for (AliasListEntry entry : client.listAliases().getAliases()) {
                        aliases.put(entry.getTargetKeyId(), entry.getAliasName());
                    }
                    final Set<Algorithm> keys = new HashSet<Algorithm>();
                    for (KeyListEntry entry : client.listKeys().getKeys()) {
                        keys.add(new AliasedAlgorithm(entry, aliases.get(entry.getKeyId()), region));
                    }
                    return keys;
                } catch (AmazonClientException e) {
                    throw new AmazonServiceExceptionMappingService().map("Cannot read AWS KMS configuration", e);
                } finally {
                    client.shutdown();
                }
            }
        }, prompt));
    } catch (AccessDeniedException e) {
        log.warn(String.format("Ignore failure reading keys from KMS. %s", e.getMessage()));
        keys.add(SSE_KMS_DEFAULT);
    }
    return keys;
}
From source file: Cloud.Tweets.SimpleQueueServiceSample.java
License: Open Source License
public AmazonSQS createSQSs() {
    System.out.println("helloooooooooooo");
    AWSCredentials credentials;
    try {
        credentials = new PropertiesCredentials(
                SimpleQueueServiceSample.class.getResourceAsStream("AwsCredentials.Properties"));
        System.out.println("hello");
        //credentials = new ProfileCredentialsProvider("~/.aws/AwsCredentials.Properties").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (/Users/daniel/.aws/credentials), and is in valid format.", e);
    }
    AmazonSQS sqs = new AmazonSQSClient(credentials);
    System.out.println(sqs.toString());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    sqs.setRegion(usWest2);

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon SQS");
    System.out.println("===========================================\n");

    // Create a queue
    System.out.println("Creating a new SQS queue called MyQueue.\n");
    CreateQueueRequest createQueueRequest = new CreateQueueRequest("MyQueue");
    myQueueUrl = sqs.createQueue(createQueueRequest).getQueueUrl();

    // List queues
    System.out.println("Listing all queues in your account.\n");
    for (String queueUrl : sqs.listQueues().getQueueUrls()) {
        System.out.println(" QueueUrl: " + queueUrl);
    }
    System.out.println();

    return sqs;
}
From source file: cloudworker.DynamoDBService.java
License: Apache License
DynamoDBService(AWSCredentials credentials) throws Exception {
    dynamoDB = new AmazonDynamoDBClient(credentials);
    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    dynamoDB.setRegion(usEast1);
    createTable();
}
From source file: cloudworker.RemoteWorker.java
License: Apache License
private static void init() throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    ec2 = new AmazonEC2Client(credentials);
    sqs = new AmazonSQSClient(credentials);

    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    ec2.setRegion(usEast1);
    sqs.setRegion(usEast1);

    dynamoDB = new DynamoDBService(credentials);
}
From source file: co.cask.cdap.template.etl.realtime.source.SqsSource.java
License: Apache License
@Override
public void initialize(RealtimeContext context) {
    try {
        SQSConnectionFactory.Builder sqsBuild = SQSConnectionFactory.builder()
                .withRegion(Region.getRegion(Regions.fromName(config.region)));
        connectionFactory = config.endpoint == null ? sqsBuild.build()
                : sqsBuild.withEndpoint(config.endpoint).build();
        connection = connectionFactory.createConnection(config.accessID, config.accessKey);
        session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        consumer = session.createConsumer(session.createQueue(config.queueName));
        connection.start();
    } catch (Exception e) {
        if (session != null) {
            try {
                session.close();
            } catch (Exception ex1) {
                LOG.warn("Exception when closing session", ex1);
            }
        }
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ex2) {
                LOG.warn("Exception when closing connection", ex2);
            }
        }
        if (consumer != null) {
            try {
                consumer.close();
            } catch (Exception ex3) {
                LOG.warn("Exception when closing consumer", ex3);
            }
        }
        LOG.error("Failed to connect to SQS");
        throw new IllegalStateException("Could not connect to SQS.");
    }
}
From source file: co.cask.hydrator.plugin.realtime.source.SqsSource.java
License: Apache License
@Override
public void initialize(RealtimeContext context) {
    try {
        super.initialize(context);
        SQSConnectionFactory.Builder sqsBuild = SQSConnectionFactory.builder()
                .withRegion(Region.getRegion(Regions.fromName(config.region)));
        connectionFactory = config.endpoint == null ? sqsBuild.build()
                : sqsBuild.withEndpoint(config.endpoint).build();
        connection = connectionFactory.createConnection(config.accessID, config.accessKey);
        session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        consumer = session.createConsumer(session.createQueue(config.queueName));
        connection.start();
    } catch (Exception e) {
        if (session != null) {
            try {
                session.close();
            } catch (Exception ex1) {
                LOG.warn("Exception when closing session", ex1);
            }
        }
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ex2) {
                LOG.warn("Exception when closing connection", ex2);
            }
        }
        if (consumer != null) {
            try {
                consumer.close();
            } catch (Exception ex3) {
                LOG.warn("Exception when closing consumer", ex3);
            }
        }
        LOG.error("Failed to connect to SQS");
        throw new IllegalStateException("Could not connect to SQS.");
    }
}
From source file: com.acer.batterycapacitydemo.CognitoSyncClientManager.java
License: Open Source License
public static void initClients() {
    if (syncClient != null) {
        return;
    }

    credentialsProvider = new CognitoCachingCredentialsProvider(context, IDENTITY_POOL_ID, myAWS_REGION);
    syncClient = new CognitoSyncManager(context, myAWS_REGION, credentialsProvider);

    ddb = new AmazonDynamoDBClient(credentialsProvider);
    ddb.setRegion(Region.getRegion(myAWS_REGION));
}
From source file: com.ad.mediasharing.awsclientmanager.AmazonClientManager.java
License: Open Source License
private void initClients() {
    AWSCredentials credentials = AmazonSharedPreferencesWrapper
            .getCredentialsFromSharedPreferences(this.sharedPreferences);
    Region region = Region.getRegion(Regions.US_WEST_2);
    s3Client = new AmazonS3Client(credentials);
    s3Client.setRegion(region);
}
From source file: com.aegeus.aws.ElasticMapReduceService.java
License: Apache License
public ElasticMapReduceService(EmrConfigObject config) {
    this.config = config;

    emr = new AmazonElasticMapReduceClient(
            new BasicAWSCredentials(config.getAccessKey(), config.getSecretKey()));
    emr.setRegion(Region.getRegion(Regions.fromName(config.getRegion())));
}