List of usage examples for com.amazonaws.regions.RegionUtils.getRegion
public static Region getRegion(String regionName)
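Before the examples, a minimal self-contained sketch of the call (assuming the AWS SDK for Java v1 is on the classpath; the region name "eu-west-1" is only an illustrative value). Note that getRegion returns null for an unknown region name instead of throwing, so callers should check the result — a pattern most of the examples below follow.

import com.amazonaws.regions.Region;
import com.amazonaws.regions.RegionUtils;

public class RegionLookupExample {
    public static void main(String[] args) {
        // Resolve a Region object from its canonical name; null means the name is unknown
        Region region = RegionUtils.getRegion("eu-west-1");
        if (region == null) {
            throw new IllegalArgumentException("Unknown region name: eu-west-1");
        }
        System.out.println(region.getName()); // prints "eu-west-1"
    }
}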
From source file:org.ow2.proactive.scheduler.examples.S3ConnectorUploader.java
License:Open Source License
/**
 * @see JavaExecutable#init(Map)
 */
@Override
public void init(Map<String, Serializable> args) throws Exception {
    if (args.containsKey(BUCKET_NAME_ARG) && !args.get(BUCKET_NAME_ARG).toString().isEmpty()) {
        bucketName = args.get(BUCKET_NAME_ARG).toString();
    } else {
        throw new IllegalArgumentException("You have to specify a valid bucket name. Empty value is not allowed.");
    }
    // RegionUtils.getRegion(...) returns null for unknown region names, so this also validates the value
    if (args.containsKey(REGION_ARG) && !args.get(REGION_ARG).toString().isEmpty()
            && RegionUtils.getRegion(args.get(REGION_ARG).toString()) != null) {
        region = args.get(REGION_ARG).toString();
    } else {
        throw new IllegalArgumentException(
                "You have to specify a valid region. The value \"" + args.get(REGION_ARG) + "\" is not allowed.");
    }
    if (args.containsKey(REMOTE_PREFIX_ARG) && !args.get(REMOTE_PREFIX_ARG).toString().isEmpty()) {
        s3RemoteRelativePath = args.get(REMOTE_PREFIX_ARG).toString();
    }
    if (args.containsKey(S3_LOCAL_RELATIVE_PATH) && !args.get(S3_LOCAL_RELATIVE_PATH).toString().isEmpty()) {
        s3LocalRelativePath = args.get(S3_LOCAL_RELATIVE_PATH).toString();
    } else {
        // Default to getLocalSpace() because it is always writable and, moreover,
        // can be used to transfer files to another data space (global, user)
        s3LocalRelativePath = getLocalSpace();
    }
    if (args.containsKey(ACCESS_KEY) && !args.get(ACCESS_KEY).toString().isEmpty()) {
        accessKey = args.get(ACCESS_KEY).toString();
    } else {
        throw new IllegalArgumentException("You have to specify your access key. Empty value is not allowed.");
    }
    // Retrieve the credential
    secretKey = getThirdPartyCredential(accessKey);
    if (secretKey == null) {
        throw new IllegalArgumentException(
                "You first need to add your Secret Key to 3rd-party credentials under the key: " + accessKey);
    }
}
From source file:org.pentaho.amazon.client.AmazonClientCredentials.java
License:Apache License
private String extractRegion(String region) {
    return RegionUtils.getRegion(AmazonRegion.extractRegionFromDescription(region)).getName();
}
From source file:org.springframework.cloud.aws.core.config.AmazonWebserviceClientFactoryBean.java
License:Apache License
public void setCustomRegion(String customRegionName) {
    this.customRegion = RegionUtils.getRegion(customRegionName);
}
From source file:org.springframework.cloud.aws.core.region.Ec2MetadataRegionProvider.java
License:Apache License
protected Region getCurrentRegion() {
    try {
        InstanceInfo instanceInfo = EC2MetadataUtils.getInstanceInfo();
        return instanceInfo != null && instanceInfo.getRegion() != null
                ? RegionUtils.getRegion(instanceInfo.getRegion())
                : null;
    } catch (AmazonClientException e) {
        return null;
    }
}
From source file:org.springframework.cloud.aws.core.region.StaticRegionProvider.java
License:Apache License
/**
 * Constructs and configures the static region for this RegionProvider implementation.
 *
 * @param configuredRegion the region that will be statically returned in {@link #getRegion()}
 */
@RuntimeUse
public StaticRegionProvider(String configuredRegion) {
    this.configuredRegion = RegionUtils.getRegion(configuredRegion);
    Assert.notNull(this.configuredRegion, "The region '" + configuredRegion + "' is not a valid region!");
}
From source file:org.talend.components.s3.runtime.S3Connection.java
License:Open Source License
public static AmazonS3 createClient(S3OutputProperties properties) {
    S3DatasetProperties data_set = properties.getDatasetProperties();
    S3DatastoreProperties data_store = properties.getDatasetProperties().getDatastoreProperties();
    com.amazonaws.auth.AWSCredentials credentials = new com.amazonaws.auth.BasicAWSCredentials(
            data_store.accessKey.getValue(), data_store.secretKey.getValue());
    Region region = RegionUtils.getRegion(data_set.region.getValue().getValue());
    Boolean clientSideEnc = data_set.encryptDataInMotion.getValue();
    AmazonS3 conn = null;
    if (clientSideEnc != null && clientSideEnc) {
        // Client-side encryption: encrypt with a KMS customer master key before upload
        String kms_cmk = data_set.kmsForDataInMotion.getValue();
        KMSEncryptionMaterialsProvider encryptionMaterialsProvider = new KMSEncryptionMaterialsProvider(kms_cmk);
        conn = new AmazonS3EncryptionClient(credentials, encryptionMaterialsProvider,
                new CryptoConfiguration().withAwsKmsRegion(region));
    } else {
        AWSCredentialsProvider basicCredentialsProvider = new StaticCredentialsProvider(credentials);
        conn = new AmazonS3Client(basicCredentialsProvider);
    }
    conn.setRegion(region);
    return conn;
}
From source file:org.voltdb.exportclient.KinesisFirehoseExportClient.java
License:Open Source License
@Override
public void configure(Properties config) throws Exception {
    String regionName = config.getProperty("region", "").trim();
    if (regionName.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide a region");
    }
    m_region = RegionUtils.getRegion(regionName);

    m_streamName = config.getProperty("stream.name", "").trim();
    if (m_streamName.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide a stream.name");
    }

    m_accessKey = config.getProperty("access.key", "").trim();
    if (m_accessKey.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide an access.key");
    }

    m_secretKey = config.getProperty("secret.key", "").trim();
    if (m_secretKey.isEmpty()) {
        throw new IllegalArgumentException("KinesisFirehoseExportClient: must provide a secret.key");
    }

    m_timeZone = TimeZone.getTimeZone(config.getProperty("timezone", VoltDB.REAL_DEFAULT_TIMEZONE.getID()));

    m_recordSeparator = config.getProperty(RECORD_SEPARATOR, "\n");
    config.setProperty(ROW_LENGTH_LIMIT,
            config.getProperty(ROW_LENGTH_LIMIT, Integer.toString(1024000 - m_recordSeparator.length())));

    m_backOffCap = Integer.parseInt(config.getProperty(BACKOFF_CAP, "1000"));
    // Minimal interval between each putRecordBatch API call:
    // for small records (row length < 1KB), records/s is the bottleneck;
    // for large records (row length > 1KB), data throughput is the bottleneck.
    // For the original limit (5000 records/s divided by 500 records per call = 10 calls),
    // the interval is 1000 ms / 10 = 100 ms.
    m_streamLimit = Integer.parseInt(config.getProperty(STREAM_LIMIT, "5000"));
    m_backOffBase = Math.max(2, 1000 / (m_streamLimit / BATCH_NUMBER_LIMIT));

    // Concurrent AWS clients = number of export tables to this stream * number of VoltDB partitions
    m_concurrentWriter = Integer.parseInt(config.getProperty(CONCURRENT_WRITER, "0"));
    m_backOffStrategy = config.getProperty(BACKOFF_TYPE, "equal");

    m_firehoseClient = new AmazonKinesisFirehoseClient(new BasicAWSCredentials(m_accessKey, m_secretKey));
    m_firehoseClient.setRegion(m_region);

    m_backOff = BackOffFactory.getBackOff(m_backOffStrategy, m_backOffBase, m_backOffCap);
    m_sink = new FirehoseSink(m_streamName, m_firehoseClient, m_concurrentWriter, m_backOff);

    m_batchMode = Boolean.parseBoolean(config.getProperty(BATCH_MODE, "true"));
    m_batchSize = Math.min(BATCH_NUMBER_LIMIT, Integer.parseInt(config.getProperty(BATCH_SIZE, "200")));
}
From source file:org.xmlsh.aws.gradle.s3.AmazonS3PluginExtension.java
License:BSD License
private AmazonS3 initClient() {
    AwsPluginExtension aws = project.getExtensions().getByType(AwsPluginExtension.class);
    ClientConfiguration clientConfiguration = new ClientConfiguration();
    if (maxErrorRetry > 0)
        clientConfiguration.setMaxErrorRetry(maxErrorRetry);
    AmazonS3Client client = aws.createClient(AmazonS3Client.class, profileName, clientConfiguration);
    if (region != null) {
        client.setRegion(RegionUtils.getRegion(region));
    }
    return client;
}
From source file:shapeways.api.robocreator.BaseRoboCreator.java
License:Apache License
/**
 * Initialize the servlet. Sets up the base directory properties for finding
 * stuff later on.
 */
public void init() throws ServletException {
    // DirectorServlet overrides all but this method. If you're making changes here
    // you might need to change that class as well.
    ServletConfig config = getServletConfig();
    ServletContext ctx = config.getServletContext();

    serviceQueue = getInitParameter("ServiceQueue");
    if (serviceQueue == null) {
        System.out.println("ServiceQueue is null, add entry to web.xml");
    }

    shapewaysHost = getInitParameter("ShapewaysHost");
    System.out.println("ShapewaysHost: " + shapewaysHost);
    if (shapewaysHost == null) {
        shapewaysHost = ShapewaysAPI.getShapewaysHost();
    }

    String instanceType = getInstanceMetadata("instance-type", "localhost");

    // 0 = number of processors. > 0 specific number. Default is 1
    String num_threads_st = getInitParameter("NumThreads");

    Gson gson = new Gson();
    Map<String, Number> threadsMap = new HashMap<String, Number>();
    try {
        threadsMap = gson.fromJson(num_threads_st, Map.class);
    } catch (Exception e) {
        System.out.println("Cannot parse threads: " + serviceQueue + ". Should be map of instanceType to numThreads");
        System.out.println("numThreads: " + num_threads_st);
        e.printStackTrace();
    }

    threads = threadsMap.get(instanceType).intValue();
    if (threads == 0) {
        threads = Runtime.getRuntime().availableProcessors();
    }

    threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(threads);

    consumerKey = getInitParameter("ShapewaysConsumerKey");
    consumerSecret = getInitParameter("ShapewaysConsumerSecret");
    accessToken = getInitParameter("ShapewaysAccessToken");
    accessSecret = getInitParameter("ShapewaysAccessSecret");

    String proxyType = getInitParameter("ProxyType");
    String proxyHost = getInitParameter("ProxyHost");
    String proxyPort = getInitParameter("ProxyPort");

    if (proxyHost != null) {
        // TODO: think about selectors as this is per JVM
        System.out.println("Configuring proxy: " + proxyHost + ":" + proxyPort);

        Properties systemSettings = System.getProperties();
        systemSettings.put("proxySet", "true");

        if (proxyType.equalsIgnoreCase("SOCKS")) {
            systemSettings.put("socksProxyHost", proxyHost);
            systemSettings.put("socksProxyPort", proxyPort);
        } else {
            systemSettings.put("http.proxyHost", proxyHost);
            systemSettings.put("http.proxyPort", proxyPort);
        }
    }

    String awsAccessKey = getInitParameter("AWSAccessKey");
    String awsAccessSecret = getInitParameter("AWSAccessSecret");
    String awsRegion = getInitParameter("AWSRegion");

    String st = getInitParameter("AWSSQSVisibilityTimeout");
    if (st != null) {
        visibilityTimeout = Integer.parseInt(st);
    }

    st = getInitParameter("AWSSQSMessagePollFrequency");
    if (st != null) {
        pollFrequency = Integer.parseInt(st);
    } else {
        pollFrequency = DEFAULT_POLL_FREQUENCY;
    }

    // TODO: Not certain we want to do these here as it delays deploy and might stop all deploys if AWS is down.
    // Switch to a threaded runnable or maybe just add the job to the threadPool we have.
    sqs = new AmazonSQSClient(new BasicAWSCredentials(awsAccessKey, awsAccessSecret));
    Region region = RegionUtils.getRegion(awsRegion);
    sqs.setRegion(region);

    threadPool.submit(new SQSCreateQueueTask(sqs, QUEUE_PREPEND + serviceQueue, visibilityTimeout, this));
}
From source file:shapeways.api.robocreator.RoboCreatorWeb.java
License:Apache License
/**
 * Initialize the servlet. Sets up the base directory properties for finding
 * stuff later on.
 */
public void init() throws ServletException {
    if (DEBUG) {
        System.out.println("Starting RoboCreatorWeb");
    }

    ServletConfig config = getServletConfig();
    ServletContext ctx = config.getServletContext();

    kernels = getInitParameter("Kernels");
    if (kernels == null) {
        System.out.println("Kernels is null, add entry to web.xml");
    }

    System.out.println("Handling Kernels: " + kernels);

    instanceType = getInstanceMetadata("instance-type", "localhost");

    // 0 = number of processors. > 0 specific number. Default is 1
    String num_threads_st = getInitParameter("NumThreads");

    Gson gson = new Gson();
    Map<String, Number> threadsMap = new HashMap<String, Number>();
    try {
        threadsMap = gson.fromJson(num_threads_st, Map.class);
    } catch (Exception e) {
        System.out.println("Cannot parse threads in RoboCreatorWeb. Should be map of instanceType to numThreads");
        System.out.println("numThreads: " + num_threads_st);
        e.printStackTrace();
    }

    int threads = threadsMap.get(instanceType).intValue();
    if (threads == 0) {
        threads = Runtime.getRuntime().availableProcessors();
    }

    threadPool = (ThreadPoolExecutor) Executors.newFixedThreadPool(threads);
    System.out.println("ThreadPool: " + threadPool);

    awsAccessKey = getInitParameter("AWSAccessKey");
    awsAccessSecret = getInitParameter("AWSAccessSecret");
    awsRegion = getInitParameter("AWSRegion");

    String st = getInitParameter("AWSSQSVisibilityTimeout");
    if (st != null) {
        visibilityTimeout = Integer.parseInt(st);
    }

    // TODO: Not certain we want to do these here as it delays deploy and might stop all deploys if AWS is down.
    // Switch to a threaded runnable or maybe just add the job to the threadPool we have.
    sqs = new AmazonSQSClient(new BasicAWSCredentials(awsAccessKey, awsAccessSecret));
    Region region = RegionUtils.getRegion(awsRegion);
    sqs.setRegion(region);

    queUrlMap = new HashMap<String, String>();

    System.out.println("Creating Queues");
    String[] queues = kernels.split(" ");
    for (int i = 0; i < queues.length; i++) {
        threadPool.submit(new SQSCreateQueueTask(sqs, QUEUE_PREPEND + queues[i], visibilityTimeout, this));
    }
}