List of usage examples for the com.amazonaws.auth.BasicAWSCredentials constructor:
public BasicAWSCredentials(String accessKey, String secretKey)
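BasicAWSCredentials is a plain value object that holds a fixed access key/secret key pair in memory. A minimal sketch of the constructor in isolation, using placeholder strings for both keys (real code should load keys from configuration, not string literals):

    import com.amazonaws.auth.AWSCredentials;
    import com.amazonaws.auth.BasicAWSCredentials;

    // Placeholder values; BasicAWSCredentials performs no validation or lookup.
    AWSCredentials credentials = new BasicAWSCredentials("<access-key-id>", "<secret-access-key>");

    String accessKey = credentials.getAWSAccessKeyId(); // "<access-key-id>"
    String secretKey = credentials.getAWSSecretKey();   // "<secret-access-key>"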
From source file:com.liferay.amazontools.BaseAMITool.java
License:Open Source License
protected AmazonAutoScalingClient getAmazonAutoScalingClient(String accessKey, String secretKey, String endpoint) {
    AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);

    AmazonAutoScalingClient amazonAutoScalingClient = new AmazonAutoScalingClient(awsCredentials);

    amazonAutoScalingClient.setEndpoint(endpoint);

    return amazonAutoScalingClient;
}
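Later AWS SDK for Java v1 releases deprecate the client constructors and setEndpoint in favor of the client builders. A minimal sketch of the same wiring via the builder, where "us-east-1" is only a placeholder signing region for the custom endpoint:

    import com.amazonaws.auth.AWSStaticCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.client.builder.AwsClientBuilder;
    import com.amazonaws.services.autoscaling.AmazonAutoScaling;
    import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder;

    AmazonAutoScaling client = AmazonAutoScalingClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
        // "us-east-1" is a placeholder; use the signing region that matches the endpoint.
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, "us-east-1"))
        .build();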
From source file:com.liferay.portal.store.s3.S3Store.java
License:Open Source License
protected AWSCredentialsProvider getAWSCredentialsProvider() {
    if (Validator.isNotNull(_s3StoreConfiguration.accessKey()) &&
        Validator.isNotNull(_s3StoreConfiguration.secretKey())) {

        AWSCredentials awsCredentials = new BasicAWSCredentials(
            _s3StoreConfiguration.accessKey(), _s3StoreConfiguration.secretKey());

        return new StaticCredentialsProvider(awsCredentials);
    }

    return new DefaultAWSCredentialsProviderChain();
}
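Note that com.amazonaws.internal.StaticCredentialsProvider, used above, was later deprecated in favor of the public com.amazonaws.auth.AWSStaticCredentialsProvider. A minimal sketch of the drop-in replacement, assuming the same awsCredentials variable:

    import com.amazonaws.auth.AWSStaticCredentialsProvider;

    // Public, non-internal equivalent of StaticCredentialsProvider.
    return new AWSStaticCredentialsProvider(awsCredentials);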
From source file:com.lithium.flow.filer.S3Filer.java
License:Apache License
public S3Filer(@Nonnull Config config, @Nonnull Access access) {
    checkNotNull(config);
    checkNotNull(access);

    uri = URI.create(config.getString("url"));
    bucket = uri.getHost();
    partSize = config.getInt("s3.partSize", 5 * 1024 * 1024);
    tempDir = new File(config.getString("s3.tempDir", System.getProperty("java.io.tmpdir")));
    service = Executors.newFixedThreadPool(config.getInt("s3.threads", 1));

    String key = config.getString("key");
    String secret = access.getPrompt().prompt(bucket, bucket + " secret: ", true, false);
    s3 = new AmazonS3Client(new BasicAWSCredentials(key, secret));
}
From source file:com.logpig.mweagle.rolling.S3Settings.java
License:Apache License
public BasicAWSCredentials getAWSCredentials() {
    return new BasicAWSCredentials(accessKey, secretKey);
}
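Callers can pass the returned credentials straight to a client constructor. A minimal usage sketch, assuming a hypothetical S3Settings instance named s3Settings:

    // s3Settings is a hypothetical instance configured elsewhere.
    AmazonS3Client s3Client = new AmazonS3Client(s3Settings.getAWSCredentials());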
From source file:com.maya.portAuthority.util.ImageUploader.java
public static void uploadImage(String imageURL, String imageName, String bucketName)
        throws MalformedURLException, IOException {

    // Credentials object identifying the user for authentication.
    // (Placeholders shown; never commit real keys to source control.)
    AWSCredentials credentials = new BasicAWSCredentials("<Your access key id>", "<Your secret access key>");

    // Create a client connection based on the credentials.
    AmazonS3 s3client = new AmazonS3Client(credentials);

    String folderName = "image"; // folder name
    // String bucketName = "ppas-image-upload"; // must be globally unique

    try {
        if (!(s3client.doesBucketExist(bucketName))) {
            s3client.setRegion(Region.getRegion(Regions.US_EAST_1));

            // Note that CreateBucketRequest does not specify a region, so the
            // bucket is created in the region configured on the client.
            s3client.createBucket(new CreateBucketRequest(bucketName));
        }

        // Enable CORS:
        //
        // <?xml version="1.0" encoding="UTF-8"?>
        // <CORSConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        //   <CORSRule>
        //     <AllowedOrigin>http://ask-ifr-download.s3.amazonaws.com</AllowedOrigin>
        //     <AllowedMethod>GET</AllowedMethod>
        //   </CORSRule>
        // </CORSConfiguration>
        BucketCrossOriginConfiguration configuration = new BucketCrossOriginConfiguration();

        CORSRule corsRule = new CORSRule()
            .withAllowedMethods(Arrays.asList(new CORSRule.AllowedMethods[] { CORSRule.AllowedMethods.GET }))
            .withAllowedOrigins(Arrays.asList(new String[] { "http://ask-ifr-download.s3.amazonaws.com" }));

        configuration.setRules(Arrays.asList(new CORSRule[] { corsRule }));

        s3client.setBucketCrossOriginConfiguration(bucketName, configuration);
    }
    catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    }
    catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
            + "an internal error while trying to communicate with S3, "
            + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    String fileName = folderName + SUFFIX + imageName + ".png";
    URL url = new URL(imageURL);

    ObjectMetadata omd = new ObjectMetadata();

    omd.setContentType("image/png");
    omd.setContentLength(url.openConnection().getContentLength());

    // Upload the file to the folder and make it publicly readable.
    s3client.putObject(new PutObjectRequest(bucketName, fileName, url.openStream(), omd)
        .withCannedAcl(CannedAccessControlList.PublicRead));
}
From source file:com.maya.portAuthority.util.ImageUploader.java
public static void uploadImage(String imageURL, String imageName)
        throws MalformedURLException, IOException {

    // Credentials object identifying the user for authentication.
    AWSCredentials credentials = new BasicAWSCredentials("<Your access key id>", "<Your secret access key>");

    // Create a client connection based on the credentials.
    AmazonS3 s3client = new AmazonS3Client(credentials);

    String folderName = "image";
    String bucketName = "ppas-image-upload";
    String fileName = folderName + SUFFIX + imageName + ".png";

    URL url = new URL(imageURL);

    ObjectMetadata omd = new ObjectMetadata();

    omd.setContentType("image/png");
    omd.setContentLength(url.openConnection().getContentLength());

    // Upload the file to the folder and make it publicly readable.
    s3client.putObject(new PutObjectRequest(bucketName, fileName, url.openStream(), omd)
        .withCannedAcl(CannedAccessControlList.PublicRead));
}
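Hardcoded key pairs are exactly what BasicAWSCredentials enables, but outside of quick examples the default provider chain avoids embedding secrets in source. A minimal sketch of the alternative client construction:

    import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3Client;

    // Resolves credentials from environment variables, system properties,
    // profile files, or instance metadata instead of hardcoded strings.
    AmazonS3 s3client = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());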
From source file:com.meltmedia.jgroups.aws.AWS_PING.java
License:Apache License
/**
 * Starts this protocol.
 */
public void start() throws Exception {
    super.start();

    // Start up a new EC2 client with the region-specific endpoint.
    if (access_key == null && secret_key == null) {
        AWSCredentialsProvider awsCredentialsProvider =
            loadCredentialsProvider(credentials_provider_class, getClass(), log);
        ec2 = new AmazonEC2Client(awsCredentialsProvider);
    } else {
        ec2 = new AmazonEC2Client(new BasicAWSCredentials(access_key, secret_key));
    }
    ec2.setEndpoint(endpoint);

    // Use reflection to add an unmarshaller to the AmazonEC2Client, just to
    // log the exceptions coming back from SOAP.
    if (log_aws_error_messages) {
        setupAWSExceptionLogging();
    }
}
From source file:com.mesosphere.dcos.cassandra.executor.backup.S3StorageDriver.java
License:Apache License
private AmazonS3Client getAmazonS3Client(BackupRestoreContext ctx) throws URISyntaxException {
    final String accessKey = ctx.getAccountId();
    final String secretKey = ctx.getSecretKey();
    String endpoint = getEndpoint(ctx);
    LOGGER.info("endpoint: {}", endpoint);

    final BasicAWSCredentials basicAWSCredentials = new BasicAWSCredentials(accessKey, secretKey);
    final AmazonS3Client amazonS3Client = new AmazonS3Client(basicAWSCredentials);
    amazonS3Client.setEndpoint(endpoint);

    if (ctx.usesEmc()) {
        final S3ClientOptions options = new S3ClientOptions();
        options.setPathStyleAccess(true);
        amazonS3Client.setS3ClientOptions(options);
    }

    return amazonS3Client;
}
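The same path-style toggle is exposed on the client builder, which is the non-deprecated route on later SDK v1 releases. A minimal sketch, where "us-east-1" is only a placeholder signing region for the custom endpoint:

    import com.amazonaws.auth.AWSStaticCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.client.builder.AwsClientBuilder;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;

    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
        .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, "us-east-1"))
        // Path-style addressing (endpoint/bucket/key) for S3-compatible stores such as EMC.
        .withPathStyleAccessEnabled(true)
        .build();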
From source file:com.mesosphere.dcos.cassandra.executor.backup.S3StorageDriver.java
License:Apache License
private TransferManager getS3TransferManager(BackupRestoreContext ctx) {
    final String accessKey = ctx.getAccountId();
    final String secretKey = ctx.getSecretKey();

    final BasicAWSCredentials basicAWSCredentials = new BasicAWSCredentials(accessKey, secretKey);
    TransferManager tx = new TransferManager(basicAWSCredentials);

    return tx;
}
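The TransferManager(AWSCredentials) constructor was deprecated in later SDK v1 releases in favor of TransferManagerBuilder. A minimal sketch of the equivalent wiring:

    import com.amazonaws.auth.AWSStaticCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import com.amazonaws.services.s3.transfer.TransferManager;
    import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

    TransferManager tx = TransferManagerBuilder.standard()
        .withS3Client(AmazonS3ClientBuilder.standard()
            .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)))
            .build())
        .build();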
From source file:com.metamx.druid.indexing.coordinator.http.IndexerCoordinatorNode.java
License:Open Source License
private void initializeResourceManagement(final JacksonConfigManager configManager) {
    if (resourceManagementSchedulerFactory == null) {
        if (!config.isAutoScalingEnabled()) {
            resourceManagementSchedulerFactory = new ResourceManagementSchedulerFactory() {
                @Override
                public ResourceManagementScheduler build(RemoteTaskRunner runner) {
                    return new NoopResourceManagementScheduler();
                }
            };
        } else {
            resourceManagementSchedulerFactory = new ResourceManagementSchedulerFactory() {
                @Override
                public ResourceManagementScheduler build(RemoteTaskRunner runner) {
                    final ScheduledExecutorService scalingScheduledExec = Executors.newScheduledThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("ScalingExec--%d").build());
                    final AtomicReference<WorkerSetupData> workerSetupData =
                        configManager.watch(WorkerSetupData.CONFIG_KEY, WorkerSetupData.class);

                    AutoScalingStrategy strategy;

                    if (config.getAutoScalingImpl().equalsIgnoreCase("ec2")) {
                        strategy = new EC2AutoScalingStrategy(getJsonMapper(),
                            new AmazonEC2Client(new BasicAWSCredentials(
                                PropUtils.getProperty(getProps(), "com.metamx.aws.accessKey"),
                                PropUtils.getProperty(getProps(), "com.metamx.aws.secretKey"))),
                            getConfigFactory().build(EC2AutoScalingStrategyConfig.class), workerSetupData);
                    } else if (config.getAutoScalingImpl().equalsIgnoreCase("noop")) {
                        strategy = new NoopAutoScalingStrategy();
                    } else {
                        throw new ISE("Invalid strategy implementation: %s", config.getAutoScalingImpl());
                    }

                    return new ResourceManagementScheduler(runner,
                        new SimpleResourceManagementStrategy(strategy,
                            getConfigFactory().build(SimpleResourceManagmentConfig.class), workerSetupData),
                        getConfigFactory().build(ResourceManagementSchedulerConfig.class),
                        scalingScheduledExec);
                }
            };
        }
    }
}