List of usage examples for com.amazonaws.auth.ClasspathPropertiesFileCredentialsProvider

public ClasspathPropertiesFileCredentialsProvider()

Loads the AwsCredentials.properties file from the classpath to read AWS security credentials.
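For reference, a minimal sketch of that properties file; the accessKey and secretKey property names are the ones this provider documents, the file must sit at the root of the classpath, and the values below are placeholders only:

# AwsCredentials.properties (classpath root) -- placeholder values only
accessKey=YOUR_AWS_ACCESS_KEY_ID
secretKey=YOUR_AWS_SECRET_ACCESS_KEY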
From source file: nyu.twitter.lg.FentchTwitter.java
License: Open Source License
/**
 * The only information needed to create a client is security credentials
 * consisting of the AWS Access Key ID and Secret Access Key. All other
 * configuration, such as the service endpoints, is performed
 * automatically. Client parameters, such as proxies, can be specified in an
 * optional ClientConfiguration object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.ProfilesConfigFile
 * @see com.amazonaws.ClientConfiguration
 */
private static void init() throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [New US East
     * (Virginia) Profile] credential profile by reading from the
     * credentials file located at ().
     */
    // AWSCredentials credentials = null;
    AWSCredentialsProvider credentialsProvider = null;
    try {
        credentialsProvider = new ClasspathPropertiesFileCredentialsProvider();
        // credentials = new ProfileCredentialsProvider(
        //         "New US East (Virginia) Profile").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. "
                        + "Please make sure that your credentials file is at the correct "
                        + "location (), and is in valid format.", e);
    }
    dynamoDB = new AmazonDynamoDBClient(credentialsProvider);
    Region usEast1 = Region.getRegion(Regions.US_EAST_1);
    dynamoDB.setRegion(usEast1);
}
From source file: org.apache.storm.kinesis.spout.CredentialsProviderChain.java
License: Apache License
public CredentialsProviderChain() {
    super(new EnvironmentVariableCredentialsProvider(),
            new SystemPropertiesCredentialsProvider(),
            new ClasspathPropertiesFileCredentialsProvider(),
            new InstanceProfileCredentialsProvider(),
            new ProfileCredentialsProvider());
}
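This class is a thin subclass of the SDK's AWSCredentialsProviderChain, so getCredentials() tries each provider in the order listed and returns the first credentials it can load, throwing AmazonClientException if every provider fails. A minimal usage sketch, not part of the original source; the wrapper class below is hypothetical:

import com.amazonaws.auth.AWSCredentials;
import org.apache.storm.kinesis.spout.CredentialsProviderChain;

public class ChainUsageSketch {
    public static void main(String[] args) {
        CredentialsProviderChain chain = new CredentialsProviderChain();
        // Walks the providers in order: environment variables, system properties,
        // classpath AwsCredentials.properties, instance profile, shared credentials file.
        AWSCredentials credentials = chain.getCredentials();
        System.out.println("Resolved access key id: " + credentials.getAWSAccessKeyId());
    }
}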
From source file: org.cto.VVS3Box.S3Sample.java
License: Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "lior.test-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3.listObjects(
                new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " "
                    + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file: org.nickelproject.applications.S3Module.java
License: Apache License
@Provides
@Singleton
AmazonS3 provideS3Client() {
    return new AmazonS3Client(new AWSCredentialsProviderChain(
            new DefaultAWSCredentialsProviderChain(),
            new ClasspathPropertiesFileCredentialsProvider()));
}
From source file: org.p365.S3Sample.java
License: Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "mynewbuket";
    String key = "Myobj/sd.jpg";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        if (!s3.doesBucketExist(bucketName)) {
            s3.createBucket(bucketName);
        }

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        String pathname = "D:\\Program Files\\apache-tomcat-7.0.42\\webapps\\WorkerForP365\\src\\AAA_1465.jpg";
        File file = new File(pathname);
        s3.putObject(new PutObjectRequest(bucketName, key, file)
                .withCannedAcl(CannedAccessControlList.PublicRead));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3.listObjects(
                new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " "
                    + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        //System.out.println("Deleting an object\n");
        //s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        //System.out.println("Deleting bucket " + bucketName + "\n");
        //s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file: org.selman.tweetamo.PersistentStore.java
License: Apache License
/**
 * The only information needed to create a client is security credentials
 * consisting of the AWS Access Key ID and Secret Access Key. All other
 * configuration, such as the service endpoints, is performed
 * automatically. Client parameters, such as proxies, can be specified in an
 * optional ClientConfiguration object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.PropertiesCredentials
 * @see com.amazonaws.ClientConfiguration
 */
private PersistentStore(Region region, long readCapacity, long writeCapacity) throws Exception {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     */
    dynamoDB = new AmazonDynamoDBClient(new ClasspathPropertiesFileCredentialsProvider());
    dynamoDB.setRegion(region);

    try {
        if (!tablesExist()) {
            createTables(readCapacity, writeCapacity);
        }
        waitForTableToBecomeAvailable(TABLE_NAME);
    } catch (Exception e) {
        handleException(e);
    }
}
From source file: org.selman.tweetamo.TweetamoClient.java
License: Apache License
public static void main(String[] args) throws Exception {
    if (args.length != 2) {
        System.out.println("Usage: [language] [search topic]");
    }

    kinesisClient = new AmazonKinesisClient(new ClasspathPropertiesFileCredentialsProvider());
    waitForStreamToBecomeAvailable(STREAM_NAME);
    LOG.info("Publishing tweets to stream : " + STREAM_NAME);

    StatusListener listener = new StatusListener() {
        public void onStatus(Status status) {
            try {
                PutRecordRequest putRecordRequest = new PutRecordRequest();
                putRecordRequest.setStreamName(STREAM_NAME);
                putRecordRequest.setData(TweetSerializer.toBytes(status));
                putRecordRequest.setPartitionKey(status.getUser().getScreenName());
                PutRecordResult putRecordResult = kinesisClient.putRecord(putRecordRequest);
                LOG.info("Successfully putrecord, partition key : " + putRecordRequest.getPartitionKey()
                        + ", ShardID : " + putRecordResult.getShardId());
            } catch (Exception e) {
                LOG.error("Failed to putrecord", e);
            }
        }

        public void onDeletionNotice(StatusDeletionNotice statusDeletionNotice) {
        }

        public void onTrackLimitationNotice(int numberOfLimitedStatuses) {
        }

        public void onException(Exception ex) {
            ex.printStackTrace();
        }

        @Override
        public void onScrubGeo(long arg0, long arg1) {
        }

        @Override
        public void onStallWarning(StallWarning arg0) {
        }
    };

    ClasspathTwitterCredentialsProvider provider = new ClasspathTwitterCredentialsProvider();
    TwitterCredentials credentials = provider.getTwitterCredentials();

    ConfigurationBuilder cb = new ConfigurationBuilder();
    cb.setDebugEnabled(true).setOAuthConsumerKey(credentials.getConsumerKey())
            .setOAuthConsumerSecret(credentials.getConsumerSecret())
            .setOAuthAccessToken(credentials.getAccessToken())
            .setOAuthAccessTokenSecret(credentials.getAccessTokenSecret());

    TwitterStream twitterStream = new TwitterStreamFactory(cb.build()).getInstance();
    twitterStream.addListener(listener);
    FilterQuery filterQuery = new FilterQuery();
    filterQuery.language(new String[] { args[0] });
    filterQuery.track(new String[] { args[1] });
    twitterStream.filter(filterQuery);
}
From source file: org.selman.tweetamo.TweetamoServer.java
License: Apache License
private static void configure(String propertiesFile) throws IOException {
    if (propertiesFile != null) {
        loadProperties(propertiesFile);
    }

    // ensure the JVM will refresh the cached IP values of AWS resources (e.g. service endpoints).
    java.security.Security.setProperty("networkaddress.cache.ttl", "60");

    String workerId = InetAddress.getLocalHost().getCanonicalHostName() + ":" + UUID.randomUUID();
    LOG.info("Using workerId: " + workerId);

    // Get credentials from IMDS. If unsuccessful, get them from the classpath.
    AWSCredentialsProvider credentialsProvider = null;
    try {
        credentialsProvider = new InstanceProfileCredentialsProvider();
        // Verify we can fetch credentials from the provider
        credentialsProvider.getCredentials();
        LOG.info("Obtained credentials from the IMDS.");
    } catch (AmazonClientException e) {
        LOG.info("Unable to obtain credentials from the IMDS, trying classpath properties", e);
        credentialsProvider = new ClasspathPropertiesFileCredentialsProvider();
        // Verify we can fetch credentials from the provider
        credentialsProvider.getCredentials();
        LOG.info("Obtained credentials from the properties file.");
    }

    LOG.info("Using credentials with access key id: "
            + credentialsProvider.getCredentials().getAWSAccessKeyId());

    kinesisClientLibConfiguration = new KinesisClientLibConfiguration(applicationName, streamName,
            credentialsProvider, workerId);
}
From source file: org.smap.notifications.interfaces.EmitNotifications.java
License: Open Source License
public void publish(int event, String msg, String subject) {
    // create a new SNS client
    AmazonSNS sns = AmazonSNSClient.builder().withRegion("ap-southeast-1")
            .withCredentials(new ClasspathPropertiesFileCredentialsProvider()).build();

    String topic = getTopic(event);
    if (topic != null) {
        PublishRequest publishRequest = new PublishRequest(topic, msg, subject);
        PublishResult publishResult = sns.publish(publishRequest);
        log.info("Publish: " + subject + " MessageId - " + publishResult.getMessageId());
    }
}
From source file: org.webapp.controllers.AwsConsoleApp.java
License: Open Source License
/**
 * The only information needed to create a client is security credentials
 * consisting of the AWS Access Key ID and Secret Access Key. All other
 * configuration, such as the service endpoints, is performed
 * automatically. Client parameters, such as proxies, can be specified in an
 * optional ClientConfiguration object when constructing a client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.PropertiesCredentials
 * @see com.amazonaws.ClientConfiguration
 */
private static void init() throws Exception {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     */
    AWSCredentialsProvider credentialsProvider = new ClasspathPropertiesFileCredentialsProvider();

    ec2 = new AmazonEC2Client(credentialsProvider);
    s3 = new AmazonS3Client(credentialsProvider);
    sdb = new AmazonSimpleDBClient(credentialsProvider);
}