List of usage examples for the com.amazonaws.auth.BasicAWSCredentials constructor
public BasicAWSCredentials(String accessKey, String secretKey)
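Before the project-specific examples below, here is a minimal sketch of how this constructor is typically used with the AWS SDK for Java v1: the access key / secret key pair is wrapped in an AWSStaticCredentialsProvider and handed to a client builder. The key strings, the region, and the listBuckets call are illustrative placeholders, not taken from any of the source files listed below.

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class BasicAWSCredentialsExample {
    public static void main(String[] args) {
        // Placeholder key pair; in real code load these from configuration or a
        // secrets store rather than hard-coding them.
        BasicAWSCredentials credentials = new BasicAWSCredentials("ACCESS_KEY_ID", "SECRET_ACCESS_KEY");

        // Wrap the static key pair in a credentials provider and pass it to a client builder.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withCredentials(new AWSStaticCredentialsProvider(credentials))
                .withRegion(Regions.US_EAST_1)
                .build();

        // Simple smoke test: list the buckets visible to these credentials.
        s3.listBuckets().forEach(bucket -> System.out.println(bucket.getName()));
    }
}
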
From source file: com.sludev.commons.vfs2.provider.s3.SS3FileProvider.java
License: Apache License

/**
 * Create FileSystem event hook
 *
 * @param rootName
 * @param fileSystemOptions
 * @return
 * @throws FileSystemException
 */
@Override
protected FileSystem doCreateFileSystem(FileName rootName, FileSystemOptions fileSystemOptions)
        throws FileSystemException {
    SS3FileSystem fileSystem = null;
    GenericFileName genRootName = (GenericFileName) rootName;

    AWSCredentials storageCreds;
    AmazonS3Client client;
    FileSystemOptions currFSO;
    UserAuthenticator ua;

    if (fileSystemOptions == null) {
        currFSO = getDefaultFileSystemOptions();
        ua = SS3FileSystemConfigBuilder.getInstance().getUserAuthenticator(currFSO);
    } else {
        currFSO = fileSystemOptions;
        ua = DefaultFileSystemConfigBuilder.getInstance().getUserAuthenticator(currFSO);
    }

    UserAuthenticationData authData = null;
    try {
        authData = ua.requestAuthentication(AUTHENTICATOR_TYPES);

        String currAcct = UserAuthenticatorUtils.toString(UserAuthenticatorUtils.getData(authData,
                UserAuthenticationData.USERNAME, UserAuthenticatorUtils.toChar(genRootName.getUserName())));
        String currKey = UserAuthenticatorUtils.toString(UserAuthenticatorUtils.getData(authData,
                UserAuthenticationData.PASSWORD, UserAuthenticatorUtils.toChar(genRootName.getPassword())));

        storageCreds = new BasicAWSCredentials(currAcct, currKey);
        client = new AmazonS3Client(storageCreds);

        if (StringUtils.isNoneBlank(endpoint)) {
            client.setEndpoint(endpoint);
        }
        if (region != null) {
            client.setRegion(region);
        }

        fileSystem = new SS3FileSystem(genRootName, client, fileSystemOptions);
    } finally {
        UserAuthenticatorUtils.cleanup(authData);
    }

    return fileSystem;
}

From source file: com.snapdeal.scm.core.AmazonS3Configuration.java
License: Open Source License

@Bean
public AmazonS3 getS3Client() {
    if (s3Client == null) {
        AWSCredentials s3Credentials = new BasicAWSCredentials(accessId, secretKey);
        s3Client = new AmazonS3Client(s3Credentials);
        s3Client.setRegion(Region.getRegion(Regions.fromName(region)));
    }
    return s3Client;
}

From source file: com.springboot.demo.framework.aws.s3.S3Sample.java
License: Open Source License

public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    AWSCredentials basicCredentials = new BasicAWSCredentials(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY);

    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (~/.aws/credentials), and is in valid format.", e);
    }

    /*
     * Create S3 Client
     */
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Returns a URL for the object stored in the specified bucket and key
         */
        URL url = s3.getUrl(bucketName, key);
        System.out.println("upload file url : " + url.toString());

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.springboot.jms.AWSJmsSend.java
License: Open Source License

public static void testAWSQueue() throws Exception {
    AmazonSQS sqs = new AmazonSQSClient(new BasicAWSCredentials("", ""));
    Region usWest2 = Region.getRegion(Regions.US_EAST_1);
    sqs.setRegion(usWest2);

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon SQS");
    System.out.println("===========================================\n");

    try {
        String myQueueUrl = "https://sqs.us-east-1.amazonaws.com/918558451804/PetQueue";

        // Send a message
        System.out.println("Sending a message to MyQueue.\n");
        for (int i = 1; i <= 11; i++) {
            sqs.sendMessage(new SendMessageRequest(myQueueUrl, "This is my message number " + i));
            Thread.sleep(1000);
        }
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SQS, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SQS, such as not "
                + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.springboot.jms.AWSJmsSendAndReceive.java
License: Open Source License

public static void testAWSQueue() throws Exception {
    /*
     * The ProfileCredentialsProvider will return your [default]
     * credential profile by reading from the credentials file located at
     * (~/.aws/credentials).
     */
    /*
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider().getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. "
                        + "Please make sure that your credentials file is at the correct "
                        + "location (~/.aws/credentials), and is in valid format.", e);
    }
    AmazonSQS sqs = new AmazonSQSClient(credentials);
    */
    AmazonSQS sqs = new AmazonSQSClient(new BasicAWSCredentials("", ""));
    Region usWest2 = Region.getRegion(Regions.US_EAST_1);
    sqs.setRegion(usWest2);

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon SQS");
    System.out.println("===========================================\n");

    try {
        /*
        // Create a queue
        System.out.println("Creating a new SQS queue called MyQueue.\n");
        CreateQueueRequest createQueueRequest = new CreateQueueRequest("PetQueue");
        String myQueueUrl = sqs.createQueue(createQueueRequest).getQueueUrl();
        */
        String myQueueUrl = "https://sqs.us-east-1.amazonaws.com/918558451804/PetQueue";

        // List queues
        System.out.println("Listing all queues in your account.\n");
        for (String queueUrl : sqs.listQueues().getQueueUrls()) {
            System.out.println("  QueueUrl: " + queueUrl);
        }
        System.out.println();

        // Send a message
        System.out.println("Sending a message to MyQueue.\n");
        sqs.sendMessage(new SendMessageRequest(myQueueUrl, "This is my message text."));

        // Receive messages
        System.out.println("Receiving messages from MyQueue.\n");
        ReceiveMessageRequest receiveMessageRequest = new ReceiveMessageRequest(myQueueUrl);
        List<Message> messages = sqs.receiveMessage(receiveMessageRequest).getMessages();
        for (Message message : messages) {
            System.out.println("  Message");
            System.out.println("    MessageId:     " + message.getMessageId());
            System.out.println("    ReceiptHandle: " + message.getReceiptHandle());
            System.out.println("    MD5OfBody:     " + message.getMD5OfBody());
            System.out.println("    Body:          " + message.getBody());
            for (Entry<String, String> entry : message.getAttributes().entrySet()) {
                System.out.println("  Attribute");
                System.out.println("    Name:  " + entry.getKey());
                System.out.println("    Value: " + entry.getValue());
            }
        }
        System.out.println();

        // Delete a message
        System.out.println("Deleting a message.\n");
        String messageReceiptHandle = messages.get(0).getReceiptHandle();
        sqs.deleteMessage(new DeleteMessageRequest(myQueueUrl, messageReceiptHandle));

        /*
        // Delete a queue
        System.out.println("Deleting the test queue.\n");
        sqs.deleteQueue(new DeleteQueueRequest(myQueueUrl));
        */
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon SQS, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with SQS, such as not "
                + "being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}

From source file: com.srotya.flume.kinesis.sink.KinesisSink.java
License: Apache License

@Override
public void configure(Context ctx) {
    ImmutableMap<String, String> props = ctx.getSubProperties(Constants.SETTINGS);
    if (!props.containsKey(Constants.ACCESS_KEY) || !props.containsKey(Constants.ACCESS_SECRET)) {
        Throwables.propagate(
                new InvalidArgumentException("Must provide AWS credentials i.e. accessKey and accessSecret"));
    }
    awsCredentials = new BasicAWSCredentials(props.get(Constants.ACCESS_KEY),
            props.get(Constants.ACCESS_SECRET));
    clientConfig = new ClientConfiguration();
    if (props.containsKey(Constants.PROXY_HOST)) {
        clientConfig.setProxyHost(props.get(Constants.PROXY_HOST));
        clientConfig.setProxyPort(Integer.parseInt(props.getOrDefault(Constants.PROXY_PORT, "80")));
        clientConfig.setProtocol(Protocol.valueOf(props.getOrDefault(Constants.PROTOCOL, "HTTPS")));
    }
    if (!props.containsKey(Constants.STREAM_NAME)) {
        Throwables.propagate(new InvalidArgumentException("Must provide Kinesis stream name"));
    }
    streamName = props.get(Constants.STREAM_NAME);
    putSize = Integer.parseInt(props.getOrDefault(Constants.PUT_SIZE, "100"));
    if (putSize > 500) {
        Throwables.propagate(
                new InvalidArgumentException("AWS Kinesis doesn't allow more than 500 put requests"));
    }
    endpoint = props.getOrDefault(Constants.ENDPOINT, Constants.DEFAULT_ENDPOINT);
    String serializerClass = props.getOrDefault(Constants.SERIALIZER, GsonSerializer.class.getName());
    try {
        serializer = (KinesisSerializer) Class.forName(serializerClass).newInstance();
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        Throwables.propagate(e);
    }
    serializer.configure(props);
}

From source file: com.srotya.flume.kinesis.source.KinesisSource.java
License: Apache License

@Override
protected void doConfigure(Context ctx) throws FlumeException {
    ImmutableMap<String, String> props = ctx.getSubProperties(Constants.SETTINGS);
    if (!props.containsKey(Constants.ACCESS_KEY) || !props.containsKey(Constants.ACCESS_SECRET)) {
        Throwables.propagate(
                new InvalidArgumentException("Must provide AWS credentials i.e. accessKey and accessSecret"));
    }
    awsCredentials = new BasicAWSCredentials(props.get(Constants.ACCESS_KEY),
            props.get(Constants.ACCESS_SECRET));
    clientConfig = new ClientConfiguration();
    if (props.containsKey(Constants.PROXY_HOST)) {
        clientConfig.setProxyHost(props.get(Constants.PROXY_HOST));
        clientConfig.setProxyPort(Integer.parseInt(props.getOrDefault(Constants.PROXY_PORT, "80")));
        clientConfig.setProtocol(Protocol.valueOf(props.getOrDefault(Constants.PROTOCOL, "HTTPS")));
    }
    if (!props.containsKey(Constants.STREAM_NAME)) {
        Throwables.propagate(new InvalidArgumentException("Must provide Kinesis stream name"));
    }
    streamName = props.get(Constants.STREAM_NAME);
    putSize = Integer.parseInt(props.getOrDefault(Constants.PUT_SIZE, "100"));
    if (putSize > 500) {
        Throwables.propagate(
                new InvalidArgumentException("AWS Kinesis doesn't allow more than 500 put requests"));
    }
    endpoint = props.getOrDefault(Constants.ENDPOINT, Constants.DEFAULT_ENDPOINT);
    String serializerClass = props.getOrDefault(Constants.SERIALIZER, GsonSerializer.class.getName());
    try {
        serializer = (KinesisSerializer) Class.forName(serializerClass).newInstance();
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        Throwables.propagate(e);
    }
    serializer.configure(props);
    shardId = Integer.parseInt(props.getOrDefault(SHARD_INDEX, "0"));
    shardIteratorType = props.getOrDefault(ITERATOR_TYPE, DEFAULT_ITERATOR_TYPE);
}

From source file: com.stehno.sanctuary.core.remote.S3RemoteStore.java
License: Apache License

public S3RemoteStore(String s3AccessKey, String s3SecretKey) {
    this.amazonS3 = new AmazonS3Client(new BasicAWSCredentials(s3AccessKey, s3SecretKey));
}

From source file: com.streamreduce.queue.CamelFacade.java
License: Apache License

private static SQSClientAndEndPointPair setupSqsEndpointAndClient(String queueName, String environment)
        throws UnsupportedEncodingException {
    Properties cloudProperties = PropertiesOverrideLoader.loadProperties("cloud.properties");
    String accessKeyId = cloudProperties.getProperty("nodeable.aws.accessKeyId");
    String secretKey = cloudProperties.getProperty("nodeable.aws.secretKey");
    long messageRetentionPeriod = TimeUnit.DAYS.toSeconds(14);
    String actualQueueName = SqsQueueNameFormatter.formatSqsQueueName(queueName, environment);

    AWSCredentials awsCredentials = new BasicAWSCredentials(accessKeyId, secretKey);
    AmazonSQSClient sqsClient = new AmazonSQSClient(awsCredentials);

    String endpoint = String.format(
            "aws-sqs://" + actualQueueName + "?amazonSQSClient=#amazonSQSClient&"
                    + "messageRetentionPeriod=%d",
            messageRetentionPeriod);

    return new SQSClientAndEndPointPair(sqsClient, endpoint);
}

From source file: com.streamsets.datacollector.bundles.SupportBundleManager.java
License: Apache License

/**
 * Instead of providing the support bundle directly to the user, upload it to StreamSets backend services.
 */
public void uploadNewBundleFromInstances(List<BundleContentGenerator> generators, BundleType bundleType)
        throws IOException {
    // Generate bundle
    SupportBundle bundle = generateNewBundleFromInstances(generators, bundleType);

    boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
    String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
    String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
    String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
    int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

    if (!enabled) {
        throw new IOException("Uploading support bundles was disabled by administrator.");
    }

    AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(
            new BasicAWSCredentials(accessKey, secretKey));
    AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

    // Object Metadata
    ObjectMetadata s3Metadata = new ObjectMetadata();
    for (Map.Entry<Object, Object> entry : getMetadata(bundleType).entrySet()) {
        s3Metadata.addUserMetadata((String) entry.getKey(), (String) entry.getValue());
    }

    List<PartETag> partETags;
    InitiateMultipartUploadResult initResponse = null;
    try {
        // Uploading part by part
        LOG.info("Initiating multi-part support bundle upload");
        partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket,
                bundle.getBundleKey());
        initRequest.setObjectMetadata(s3Metadata);
        initResponse = s3Client.initiateMultipartUpload(initRequest);
    } catch (AmazonClientException e) {
        LOG.error("Support bundle upload failed: ", e);
        throw new IOException("Support bundle upload failed", e);
    }

    try {
        byte[] buffer = new byte[bufferSize];
        int partId = 1;
        int size = -1;
        while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
            LOG.debug("Uploading part {} of size {}", partId, size);
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(bundle.getBundleKey()).withUploadId(initResponse.getUploadId())
                    .withPartNumber(partId++).withInputStream(new ByteArrayInputStream(buffer))
                    .withPartSize(size);
            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
        }

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucket,
                bundle.getBundleKey(), initResponse.getUploadId(), partETags);
        s3Client.completeMultipartUpload(compRequest);
        LOG.info("Support bundle upload finished");
    } catch (Exception e) {
        LOG.error("Support bundle upload failed", e);
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId()));
        throw new IOException("Can't upload support bundle", e);
    } finally {
        // Close the client
        s3Client.shutdown();
    }
}