List of usage examples for com.amazonaws.regions Region getRegion
public static Region getRegion(Regions region)
From source file:org.openflamingo.fs.s3.S3Utils.java
License:Apache License
/** * Region? Amazon S3 Client ?./*from w w w. j a v a2 s . c o m*/ * * @param region Amazon S3 Region * @param accessKey Amazon S3 Access Key * @param secretKey Amazon S3 Secret Key * @return Amazon S3 Client */ public static AmazonS3Client getAmazonS3Client(String region, String accessKey, String secretKey) { Region awsRegion = Region.getRegion(Regions.valueOf(region)); AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey); AmazonS3Client awsClient = new AmazonS3Client(awsCredentials); awsClient.setRegion(awsRegion); return awsClient; }
From source file:org.openhab.persistence.dynamodb.internal.DynamoDBConfig.java
License:Open Source License
/**
 * Builds a {@link DynamoDBConfig} from the persistence service configuration map.
 *
 * Required keys: "region" plus either ("accessKey" and "secretKey") or
 * ("profilesConfigFile" and "profile"). Optional keys: "tablePrefix",
 * "createTable", "readCapacityUnits", "writeCapacityUnits" — each falls back to
 * a class-level default when blank.
 *
 * @param config persistence service configuration
 * @return DynamoDB configuration. Returns null in case of configuration errors
 */
public static DynamoDBConfig fromConfig(Map<String, Object> config) {
    if (config == null || config.isEmpty()) {
        logger.error("Configuration not provided! At least AWS region and credentials must be provided.");
        return null;
    }
    try {
        // Region is mandatory; both a blank value and an unknown name abort with help text.
        String regionName = (String) config.get("region");
        if (isBlank(regionName)) {
            invalidRegionLogHelp(regionName);
            return null;
        }
        final Region region;
        try {
            region = Region.getRegion(Regions.fromName(regionName));
        } catch (IllegalArgumentException e) {
            invalidRegionLogHelp(regionName);
            return null;
        }
        // Credentials: explicit key pair takes precedence; otherwise fall back to a
        // profiles file + profile name. Anything less is a configuration error.
        AWSCredentials credentials;
        String accessKey = (String) config.get("accessKey");
        String secretKey = (String) config.get("secretKey");
        if (!isBlank(accessKey) && !isBlank(secretKey)) {
            logger.debug("accessKey and secretKey specified. Using those.");
            credentials = new BasicAWSCredentials(accessKey, secretKey);
        } else {
            logger.debug("accessKey and/or secretKey blank. Checking profilesConfigFile and profile.");
            String profilesConfigFile = (String) config.get("profilesConfigFile");
            String profile = (String) config.get("profile");
            if (isBlank(profilesConfigFile) || isBlank(profile)) {
                logger.error("Specify either 1) accessKey and secretKey; or 2) profilesConfigFile and "
                        + "profile for providing AWS credentials");
                return null;
            }
            credentials = new ProfilesConfigFile(profilesConfigFile).getCredentials(profile);
        }
        // Optional settings — blank values fall back to the documented defaults.
        String table = (String) config.get("tablePrefix");
        if (isBlank(table)) {
            logger.debug("Using default table name {}", DEFAULT_TABLE_PREFIX);
            table = DEFAULT_TABLE_PREFIX;
        }
        final boolean createTable;
        String createTableParam = (String) config.get("createTable");
        if (isBlank(createTableParam)) {
            logger.debug("Creating table on demand: {}", DEFAULT_CREATE_TABLE_ON_DEMAND);
            createTable = DEFAULT_CREATE_TABLE_ON_DEMAND;
        } else {
            createTable = Boolean.parseBoolean(createTableParam);
        }
        final long readCapacityUnits;
        String readCapacityUnitsParam = (String) config.get("readCapacityUnits");
        if (isBlank(readCapacityUnitsParam)) {
            logger.debug("Read capacity units: {}", DEFAULT_READ_CAPACITY_UNITS);
            readCapacityUnits = DEFAULT_READ_CAPACITY_UNITS;
        } else {
            readCapacityUnits = Long.parseLong(readCapacityUnitsParam);
        }
        final long writeCapacityUnits;
        String writeCapacityUnitsParam = (String) config.get("writeCapacityUnits");
        if (isBlank(writeCapacityUnitsParam)) {
            logger.debug("Write capacity units: {}", DEFAULT_WRITE_CAPACITY_UNITS);
            writeCapacityUnits = DEFAULT_WRITE_CAPACITY_UNITS;
        } else {
            writeCapacityUnits = Long.parseLong(writeCapacityUnitsParam);
        }
        return new DynamoDBConfig(region, credentials, table, createTable, readCapacityUnits,
                writeCapacityUnits);
    } catch (Exception e) {
        // Catch-all boundary: any parse/IO failure is logged and reported as "no config".
        logger.error("Error with configuration", e);
        return null;
    }
}
From source file:org.ow2.petals.cloud.manager.ec2.commands.BaseCommand.java
License:Open Source License
protected AmazonEC2 getClient() throws CloudManagerException { Provider provider = checkNotNull( Iterables.tryFind(providerRegistryService.get(), new Predicate<Provider>() { public boolean apply(org.ow2.petals.cloud.manager.api.deployment.Provider input) { return input.getName().equals(account); }//www . ja v a 2 s . c o m }).orNull(), "Can not retrieve account %s from the registry", account); Credentials credentials = provider.getCredentials(); AWSCredentials awsCredentials = new BasicAWSCredentials(credentials.getPublicKey(), credentials.getPrivateKey()); AmazonEC2 ec2 = new AmazonEC2Client(awsCredentials); ec2.setEndpoint(provider.getEndpoint()); // TODO in properties //ec2.setRegion(provider.getRegion()); ec2.setRegion(Region.getRegion(Regions.DEFAULT_REGION)); return ec2; }
From source file:org.p365.S3Sample.java
License:Open Source License
/**
 * Demo entry point: creates a bucket, uploads a local file, downloads it back,
 * and lists objects by prefix, using credentials loaded from the classpath.
 *
 * @param args unused
 * @throws IOException if reading the downloaded object content fails
 */
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     *            AwsCredentials.properties file before you try to run this
     *            sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "mynewbuket";
    String key = "Myobj/sd.jpg";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        if (!s3.doesBucketExist(bucketName)) {
            s3.createBucket(bucketName);
        }

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        // NOTE(review): hard-coded Windows path — this sample only works on the
        // original author's machine; adjust before running.
        String pathname = "D:\\Program Files\\apache-tomcat-7.0.42\\webapps\\WorkerForP365\\src\\AAA_1465.jpg";
        File file = new File(pathname);
        s3.putObject(
                new PutObjectRequest(bucketName, key, file).withCannedAcl(CannedAccessControlList.PublicRead));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possibly since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        // Request reached S3 but was rejected — dump the full error context.
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        // Client-side failure (e.g. no network) — the request never reached S3.
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:org.restcomm.connect.commons.amazonS3.S3AccessTool.java
License:Open Source License
/**
 * Creates (and caches in {@code s3client}) an Amazon S3 client.
 *
 * In testing mode with a non-empty {@code testingUrl}, a plain client pointed at the
 * testing endpoint is used; otherwise the standard builder with static credentials
 * and the configured bucket region.
 *
 * @return the configured S3 client
 */
public AmazonS3 getS3client() {
    BasicAWSCredentials awsCreds = new BasicAWSCredentials(accessKey, securityKey);
    // BUG FIX: the original condition OR-ed two equivalent checks
    // (!testingUrl.isEmpty() || !testingUrl.equals("")) and threw NPE when
    // testingUrl was null. A null guard plus a single emptiness check expresses
    // the intent: only use the test endpoint when one is actually configured.
    if (testing && testingUrl != null && !testingUrl.isEmpty()) {
        s3client = new AmazonS3Client(awsCreds);
        s3client.setRegion(Region.getRegion(Regions.fromName(bucketRegion)));
        s3client.setEndpoint(testingUrl);
    } else {
        s3client = AmazonS3ClientBuilder.standard().withRegion(Regions.fromName(bucketRegion))
                .withCredentials(new AWSStaticCredentialsProvider(awsCreds)).build();
    }
    return s3client;
}
From source file:org.selman.tweetamo.PersistentStore.java
License:Apache License
public static PersistentStore getInstance() { synchronized (PersistentStore.class) { if (INSTANCE == null) { try { INSTANCE = new PersistentStore(Region.getRegion(Regions.US_EAST_1), 1L, 50L); } catch (Exception e) { LOG.error("Failed to create PersistentStore", e); }//from w ww. j a va 2 s.co m } return INSTANCE; } }
From source file:org.springframework.cloud.aws.jdbc.rds.AmazonRdsDataSourceUserTagsFactoryBean.java
License:Apache License
/**
 * Returns the explicitly configured region, falling back to the SDK's default
 * region when none was set.
 */
private Region getRegion() {
    return this.region != null ? this.region : Region.getRegion(Regions.DEFAULT_REGION);
}
From source file:org.swiftshire.nifi.processors.kinesis.consumer.AbstractKinesisConsumerProcessor.java
License:Apache License
/** * * @param context// w w w. j av a2 s .c om */ protected void intializeRegionAndEndpoint(ProcessContext context) { // If the processor supports REGION, get the configured region. if (getSupportedPropertyDescriptors().contains(REGION)) { final String regionName = context.getProperty(REGION).getValue(); if (regionName != null) { region = Region.getRegion(Regions.fromName(regionName)); client.setRegion(region); } else { region = null; } } // If the endpoint override has been configured, set the endpoint. // (per Amazon docs this should only be configured at client creation) final String endpoint = StringUtils.trimToEmpty(context.getProperty(ENDPOINT_OVERRIDE).getValue()); if (!endpoint.isEmpty()) { client.setEndpoint(endpoint); } }
From source file:org.traccar.database.DataManager.java
License:Apache License
/**
 * Initialize the database layer: JDBC driver loading, pooled data source,
 * named prepared statements, and (when AWS credentials are configured) the
 * SNS client and an SQS message-listener connection.
 *
 * @param properties configuration ("database.*" and "aws.*" keys)
 * @throws Exception on driver loading, pool setup, or AWS/JMS wiring failure
 */
private void initDatabase(Properties properties) throws Exception {

    // Load driver — optionally from an external jar via a dedicated class loader,
    // in which case it is registered through DriverDelegate so DriverManager can use it.
    String driver = properties.getProperty("database.driver");
    if (driver != null) {
        String driverFile = properties.getProperty("database.driverFile");
        if (driverFile != null) {
            URL url = new URL("jar:file:" + new File(driverFile).getAbsolutePath() + "!/");
            URLClassLoader cl = new URLClassLoader(new URL[] { url });
            Driver d = (Driver) Class.forName(driver, true, cl).newInstance();
            DriverManager.registerDriver(new DriverDelegate(d));
        } else {
            Class.forName(driver);
        }
    }

    // Initialize the c3p0 connection pool from the database.* properties.
    ComboPooledDataSource ds = new ComboPooledDataSource();
    ds.setDriverClass(properties.getProperty("database.driver"));
    ds.setJdbcUrl(properties.getProperty("database.url"));
    ds.setUser(properties.getProperty("database.user"));
    ds.setPassword(properties.getProperty("database.password"));
    ds.setIdleConnectionTestPeriod(600);
    ds.setTestConnectionOnCheckin(true);
    dataSource = ds;

    // Load optional SQL statements from configuration; each is prepared only if present.
    String query;

    query = properties.getProperty("database.selectDevice");
    if (query != null) {
        queryGetDevices = new NamedParameterStatement(query, dataSource);
    }

    // AWS integration is enabled only when both key parts are configured.
    awsAccessKeyId = properties.getProperty("aws.accessKey");
    awsSecretAccessKey = properties.getProperty("aws.accessSecret");
    String awsSQSQueueName = properties.getProperty("aws.queueName");
    if (awsAccessKeyId != null && awsSecretAccessKey != null) {
        // Static credentials provider backed by the two configured fields.
        AWSCredentialsProvider credentialsProvider = new AWSCredentialsProvider() {
            @Override
            public AWSCredentials getCredentials() {
                return new AWSCredentials() {
                    @Override
                    public String getAWSAccessKeyId() {
                        return awsAccessKeyId;
                    }

                    @Override
                    public String getAWSSecretKey() {
                        return awsSecretAccessKey;
                    }
                };
            }

            @Override
            public void refresh() {
                // Static credentials never change — nothing to refresh.
            }
        };

        // NOTE(review): region is hard-coded to AP_SOUTHEAST_1 here and below —
        // confirm whether it should come from configuration instead.
        snsClient = new AmazonSNSClient(credentialsProvider);
        snsClient.setRegion(Region.getRegion(Regions.AP_SOUTHEAST_1));

        GsonBuilder builder = new GsonBuilder();
        gson = builder.create();

        if (awsSQSQueueName != null) {
            // Create the connection factory using the credential provider above.
            SQSConnectionFactory connectionFactory = SQSConnectionFactory.builder()
                    .withRegion(Region.getRegion(Regions.AP_SOUTHEAST_1))
                    .withAWSCredentialsProvider(credentialsProvider).build();

            // Create the JMS connection and get the wrapped SQS client.
            SQSConnection connection = connectionFactory.createConnection();
            AmazonSQSMessagingClientWrapper client = connection.getWrappedAmazonSQSClient();

            // Create the queue if it does not already exist.
            if (!client.queueExists(awsSQSQueueName)) {
                client.createQueue(awsSQSQueueName);
            }

            // Non-transacted session with AUTO_ACKNOWLEDGE, one consumer with an
            // asynchronous message listener; start() begins message delivery.
            Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
            Queue queue = session.createQueue(awsSQSQueueName);
            MessageConsumer consumer = session.createConsumer(queue);
            consumer.setMessageListener(new AWSSqsMessageListener());
            connection.start();
        }
    }

    query = properties.getProperty("database.insertPosition");
    if (query != null) {
        queryAddPosition = new NamedParameterStatement(query, dataSource, Statement.RETURN_GENERATED_KEYS);
    }

    query = properties.getProperty("database.updatePosition");
    if (query != null) {
        queryUpdatePosition = new NamedParameterStatement(query, dataSource);
    }

    query = properties.getProperty("database.updateLatestPosition");
    if (query != null) {
        queryUpdateLatestPosition = new NamedParameterStatement(query, dataSource);
    }
}
From source file:org.zalando.stups.fullstop.controller.S3Controller.java
License:Apache License
@RequestMapping(method = RequestMethod.GET, value = "/download") public void downloadFiles(@RequestParam(value = "bucket") final String bucket, @RequestParam(value = "location") final String location, @RequestParam(value = "page") final int page) { try {//from w ww. j a v a 2 s . com log.info("Creating fullstop directory here: {}", fullstopLoggingDir); boolean mkdirs = new File(fullstopLoggingDir).mkdirs(); } catch (SecurityException e) { // do nothing } AmazonS3Client amazonS3Client = new AmazonS3Client(); amazonS3Client.setRegion(Region.getRegion(Regions .fromName((String) cloudTrailProcessingLibraryProperties.getAsProperties().get(S3_REGION_KEY)))); ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket) // .withPrefix(location) // .withMaxKeys(page); ObjectListing objectListing = amazonS3Client.listObjects(listObjectsRequest); final List<S3ObjectSummary> s3ObjectSummaries = objectListing.getObjectSummaries(); while (objectListing.isTruncated()) { objectListing = amazonS3Client.listNextBatchOfObjects(objectListing); s3ObjectSummaries.addAll(objectListing.getObjectSummaries()); } for (S3ObjectSummary s3ObjectSummary : s3ObjectSummaries) { String bucketName = s3ObjectSummary.getBucketName(); String key = s3ObjectSummary.getKey(); S3Object object = amazonS3Client.getObject(new GetObjectRequest(bucketName, key)); InputStream inputStream = object.getObjectContent(); File file = new File(fullstopLoggingDir, object.getBucketName() + object.getObjectMetadata().getETag() + JSON_GZ); copyInputStreamToFile(inputStream, file); log.info("File saved here: {}", file.getAbsolutePath()); } }