List of usage examples for com.amazonaws.services.s3.AmazonS3Client: the AmazonS3Client constructor
@SdkInternalApi AmazonS3Client(AmazonS3ClientParams s3ClientParams)
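Note: the AmazonS3ClientParams constructor above is marked @SdkInternalApi and is invoked by the SDK itself, not by application code. The examples below all use the public credential-based constructors, which later 1.11.x releases of the SDK deprecate in favor of AmazonS3ClientBuilder. A minimal sketch of the builder form (the region and key values are placeholders, not taken from any example below):

import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class S3ClientFactory {
    // Builds an AmazonS3 client via the builder; accessKey/secretKey are placeholders.
    public static AmazonS3 build(String accessKey, String secretKey) {
        return AmazonS3ClientBuilder.standard()
                .withRegion(Regions.US_WEST_2) // pick the region your buckets live in
                .withCredentials(new AWSStaticCredentialsProvider(
                        new BasicAWSCredentials(accessKey, secretKey)))
                .build();
    }
}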
From source file:com.supprema.utils.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * The ProfileCredentialsProvider will return your [fabiano-user-s3]
     * credential profile by reading from the credentials file located at
     * (/Users/fabianorodriguesmatias/.aws/credentials).
     */
    AWSCredentials credentials = null;
    try {
        credentials = new ProfileCredentialsProvider("fabiano-user-s3").getCredentials();
    } catch (Exception e) {
        throw new AmazonClientException(
                "Cannot load the credentials from the credential profiles file. "
                        + "Please make sure that your credentials file is at the correct "
                        + "location (/Users/fabianorodriguesmatias/.aws/credentials), and is in valid format.",
                e);
    }

    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " (size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
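The listing comment in this sample points at AmazonS3.listNextBatchOfObjects(...) without demonstrating it. A minimal sketch of the pagination loop, reusing the sample's s3 client and bucketName:

// Page through every object in the bucket; listObjects returns at most
// 1000 summaries per batch, so keep fetching while the listing is truncated.
ObjectListing listing = s3.listObjects(new ListObjectsRequest().withBucketName(bucketName));
while (true) {
    for (S3ObjectSummary summary : listing.getObjectSummaries()) {
        System.out.println(" - " + summary.getKey() + " (size = " + summary.getSize() + ")");
    }
    if (!listing.isTruncated()) {
        break;
    }
    listing = s3.listNextBatchOfObjects(listing);
}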
From source file:com.swf.common.ConfigHelper.java
License:Open Source License
public AmazonS3 createS3Client(AWSCredentials credentials) { return new AmazonS3Client(credentials); }
From source file:com.topera.epoch.service.S3Util.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users)
     * and save the following lines after replacing the underlined values with your own.
     *
     * [default]
     * aws_access_key_id = YOUR_ACCESS_KEY_ID
     * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
     */
    AWSCredentials creds = new AWSCredentials() {
        public String getAWSSecretKey() {
            return "5VVtmI7vcecuVbw8JsG4uo2O1/9RwwLHrTT01Itz";
        }

        public String getAWSAccessKeyId() {
            return "AKIAJCMYALI46A2DIPRQ";
        }
    };
    AmazonS3 s3 = new AmazonS3Client(creds);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " (size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
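This example hard-codes an access key and secret in source, the very practice the comment at the top of the same method warns against. A minimal sketch of the credentials-file approach that comment describes, assuming the SDK 1.x ProfileCredentialsProvider:

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;

public class DefaultProfileClient {
    public static AmazonS3 create() {
        // Reads the [default] profile from ~/.aws/credentials
        // (C:\Users\USER_NAME\.aws\credentials on Windows), per the comment above.
        AWSCredentials creds = new ProfileCredentialsProvider().getCredentials();
        return new AmazonS3Client(creds);
    }
}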
From source file:com.tracermedia.maven.plugins.AbstractBeanstalkMojo.java
License:Open Source License
protected AmazonS3Client getS3Client() {
    if (_s3Client == null) {
        AWSCredentials cred = new BasicAWSCredentials(accessKey, secretKey);
        _s3Client = new AmazonS3Client(cred);
    }
    return _s3Client;
}
From source file:com.travoca.app.utils.amazon.UploadService.java
License:Apache License
@Override
public void onCreate() {
    super.onCreate();
    // android.os.Debug.waitForDebugger();
    s3Client = new AmazonS3Client(
            new BasicAWSCredentials(getString(R.string.s3_access_key), getString(R.string.s3_secret)));
    nm = (NotificationManager) getSystemService(NOTIFICATION_SERVICE);
    IntentFilter f = new IntentFilter();
    f.addAction(UPLOAD_CANCELLED_ACTION);
    registerReceiver(uploadCancelReceiver, f);
}
From source file:com.tremolosecurity.unison.proxy.filters.s3.AwsS3Proxy.java
License:Apache License
@Override
public void initFilter(HttpFilterConfig config) throws Exception {
    this.accessKey = this.getConfigAttr(config, "accessKey");
    logger.info("Access Key : ************");
    this.secretKey = this.getConfigAttr(config, "secretKey");
    logger.info("Secret Key : *************");
    this.topBucket = "/" + this.getConfigAttr(config, "topBucket") + "/";
    logger.info("Top Bucket : '" + this.topBucket + "'");
    this.s3Client = new AmazonS3Client(new BasicAWSCredentials(this.accessKey, this.secretKey));
}
From source file:com.trsvax.tapestry.aws.core.services.AWSCoreModule.java
License:Apache License
public static void bind(ServiceBinder binder) {
    //binder.bind(AWSMailTransport.class, AWSMailTransportImpl.class);
    binder.bind(AmazonS3.class, new ServiceBuilder<AmazonS3>() {
        public AmazonS3 buildService(ServiceResources serviceResources) {
            return new AmazonS3Client(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonDynamoDB.class, new ServiceBuilder<AmazonDynamoDB>() {
        public AmazonDynamoDB buildService(ServiceResources serviceResources) {
            return new AmazonDynamoDBClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonEC2.class, new ServiceBuilder<AmazonEC2>() {
        public AmazonEC2 buildService(ServiceResources serviceResources) {
            return new AmazonEC2Client(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSimpleDB.class, new ServiceBuilder<AmazonSimpleDB>() {
        public AmazonSimpleDB buildService(ServiceResources serviceResources) {
            return new AmazonSimpleDBClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSQS.class, new ServiceBuilder<AmazonSQS>() {
        public AmazonSQS buildService(ServiceResources serviceResources) {
            return new AmazonSQSClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSNS.class, new ServiceBuilder<AmazonSNS>() {
        public AmazonSNS buildService(ServiceResources serviceResources) {
            return new AmazonSNSClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonRDS.class, new ServiceBuilder<AmazonRDS>() {
        public AmazonRDS buildService(ServiceResources serviceResources) {
            return new AmazonRDSClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonElasticMapReduce.class, new ServiceBuilder<AmazonElasticMapReduce>() {
        public AmazonElasticMapReduce buildService(ServiceResources serviceResources) {
            return new AmazonElasticMapReduceClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSimpleEmailService.class, new ServiceBuilder<AmazonSimpleEmailService>() {
        public AmazonSimpleEmailService buildService(ServiceResources serviceResources) {
            return new AmazonSimpleEmailServiceClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonElasticLoadBalancing.class, new ServiceBuilder<AmazonElasticLoadBalancing>() {
        public AmazonElasticLoadBalancing buildService(ServiceResources serviceResources) {
            return new AmazonElasticLoadBalancingClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonCloudWatch.class, new ServiceBuilder<AmazonCloudWatch>() {
        public AmazonCloudWatch buildService(ServiceResources serviceResources) {
            return new AmazonCloudWatchClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonAutoScaling.class, new ServiceBuilder<AmazonAutoScaling>() {
        public AmazonAutoScaling buildService(ServiceResources serviceResources) {
            return new AmazonAutoScalingClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonIdentityManagement.class, new ServiceBuilder<AmazonIdentityManagement>() {
        public AmazonIdentityManagement buildService(ServiceResources serviceResources) {
            return new AmazonIdentityManagementClient(serviceResources.getService(AWSCredentials.class));
        }
    });
}
From source file:com.tvarit.plugin.AutoScalingMojo.java
License:Open Source License
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    getLog().debug("Starting " + this.getClass().getSimpleName() + " execution ");
    final BasicAWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3Client amazonS3Client = new AmazonS3Client(awsCredentials);
    final MavenProject project = (MavenProject) this.getPluginContext().getOrDefault("project", null);
    String lambdaCodeS3Bucket = this.bucketName;
    if (lambdaCodeS3Key == null) {
        lambdaCodeS3Key = new LambdaS3BucketKeyMaker().makeKey(project);
        lambdaCodeS3Bucket = "tvarit";
    }
    AmazonCloudFormationClient amazonCloudFormationClient = new AmazonCloudFormationClient(awsCredentials);
    AmazonEC2Client amazonEC2Client = new AmazonEC2Client(awsCredentials);
    List<com.amazonaws.services.cloudformation.model.Parameter> allParams = new AsgParameterMaker().make(
            amazonEC2Client, amazonCloudFormationClient, project, projectName, lambdaCodeS3Key,
            lambdaCodeS3Bucket);
    final String stackName = projectName + "-asg";
    if (templateUrl == null) {
        try {
            templateUrl = new TemplateUrlMaker().makeUrl(project, "autoscaling.template").toString();
        } catch (MalformedURLException e) {
            throw new MojoExecutionException(
                    "Could not create default url for templates. Please open an issue on github.", e);
        }
    }
    final CreateStackRequest createStackRequest = new CreateStackRequest()
            .withCapabilities(Capability.CAPABILITY_IAM).withStackName(stackName).withParameters(allParams)
            .withTemplateURL(templateUrl);
    final Stack stack = new StackMaker().makeStack(createStackRequest, amazonCloudFormationClient, getLog());
    new S3WarUploadEventToInvokeLambdaMaker().make(amazonS3Client, bucketName, stack);
    getLog().info("Finished completing stack");
}
From source file:com.tvarit.plugin.TvaritTomcatDeployerMojo.java
License:Open Source License
@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    final MavenProject project = (MavenProject) this.getPluginContext().getOrDefault("project", null);
    if (templateUrl == null) {
        try {
            templateUrl = new TemplateUrlMaker().makeUrl(project, "newinstance.template").toString();
        } catch (MalformedURLException e) {
            throw new MojoExecutionException(
                    "Could not create default url for templates. Please open an issue on github.", e);
        }
    }
    final BasicAWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3Client s3Client = new AmazonS3Client(awsCredentials);
    final File warFile = project.getArtifact().getFile();
    final String key = "deployables/" + project.getGroupId() + "/" + project.getArtifactId() + "/"
            + project.getVersion() + "/" + warFile.getName();
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, warFile);
    final ObjectMetadata metadata = new ObjectMetadata();
    final HashMap<String, String> userMetadata = new HashMap<>();
    userMetadata.put("project_name", projectName);
    userMetadata.put("stack_template_url", templateUrl);
    userMetadata.put("private_key_name", sshKeyName);
    metadata.setUserMetadata(userMetadata);
    putObjectRequest.withMetadata(metadata);
    final PutObjectResult putObjectResult = s3Client.putObject(putObjectRequest);
    /*
    AmazonCloudFormationClient amazonCloudFormationClient = new AmazonCloudFormationClient(awsCredentials);
    final com.amazonaws.services.cloudformation.model.Parameter projectNameParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("projectName").withParameterValue(this.projectName);
    final com.amazonaws.services.cloudformation.model.Parameter publicSubnetsParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("publicSubnets").withParameterValue(commaSeparatedSubnetIds);
    final com.amazonaws.services.cloudformation.model.Parameter tvaritRoleParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("tvaritRole").withParameterValue(tvaritRole);
    final com.amazonaws.services.cloudformation.model.Parameter tvaritInstanceProfileParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("tvaritInstanceProfile").withParameterValue(this.tvaritInstanceProfile);
    final com.amazonaws.services.cloudformation.model.Parameter tvaritBucketNameParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("bucketName").withParameterValue(this.bucketName);
    final com.amazonaws.services.cloudformation.model.Parameter instanceSecurityGroupIdParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("sgId").withParameterValue(this.instanceSecurityGroupId);
    final com.amazonaws.services.cloudformation.model.Parameter sshKeyNameParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("keyName").withParameterValue(this.sshKeyName);
    final String warFileUrl = s3Client.getUrl(bucketName, key).toString();
    final com.amazonaws.services.cloudformation.model.Parameter warFileUrlParameter =
            new com.amazonaws.services.cloudformation.model.Parameter()
                    .withParameterKey("warFileUrl").withParameterValue(warFileUrl);
    final CreateStackRequest createStackRequest = new CreateStackRequest();
    if (templateUrl == null) {
        try {
            templateUrl = new TemplateUrlMaker().makeUrl(project, "newinstance.template").toString();
        } catch (MalformedURLException e) {
            throw new MojoExecutionException(
                    "Could not create default url for templates. Please open an issue on github.", e);
        }
    }
    createStackRequest
            .withStackName(projectName + "-instance-" + project.getVersion().replace(".", "-"))
            .withParameters(
                    projectNameParameter,
                    publicSubnetsParameter,
                    tvaritInstanceProfileParameter,
                    tvaritRoleParameter,
                    tvaritBucketNameParameter,
                    instanceSecurityGroupIdParameter,
                    warFileUrlParameter,
                    sshKeyNameParameter)
            .withDisableRollback(true)
            .withTemplateURL(templateUrl);
    createStackRequest.withDisableRollback(true);
    final Stack stack = new StackMaker().makeStack(createStackRequest, amazonCloudFormationClient, getLog());
    AmazonAutoScalingClient amazonAutoScalingClient = new AmazonAutoScalingClient(awsCredentials);
    final AttachInstancesRequest attachInstancesRequest = new AttachInstancesRequest();
    attachInstancesRequest.withInstanceIds(stack.getOutputs().get(0).getOutputValue(),
            stack.getOutputs().get(1).getOutputValue()).withAutoScalingGroupName(autoScalingGroupName);
    amazonAutoScalingClient.attachInstances(attachInstancesRequest);
    */
}
From source file:com.twitter.heron.uploader.s3.S3Uploader.java
License:Open Source License
@Override
public void initialize(Config config) {
    bucket = S3Context.bucket(config);
    String accessKey = S3Context.accessKey(config);
    String accessSecret = S3Context.secretKey(config);

    if (bucket == null || bucket.isEmpty()) {
        throw new RuntimeException("Missing heron.uploader.s3.bucket config value");
    }
    if (accessKey == null || accessKey.isEmpty()) {
        throw new RuntimeException("Missing heron.uploader.s3.access_key config value");
    }
    if (accessSecret == null || accessSecret.isEmpty()) {
        throw new RuntimeException("Missing heron.uploader.s3.secret_key config value");
    }

    AWSCredentials credentials = new BasicAWSCredentials(accessKey, accessSecret);
    s3Client = new AmazonS3Client(credentials);

    final String topologyName = Context.topologyName(config);
    final String topologyPackageLocation = Context.topologyPackageFile(config);

    pathPrefix = S3Context.pathPrefix(config);
    packageFileHandler = new File(topologyPackageLocation);

    // The path the packaged topology will be uploaded to
    remoteFilePath = generateS3Path(pathPrefix, topologyName, packageFileHandler.getName());

    // Generate the location of the backup file in case we need to revert the deploy
    previousVersionFilePath = generateS3Path(pathPrefix, topologyName,
            "previous_" + packageFileHandler.getName());
}
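initialize(...) only computes previousVersionFilePath; the upload and revert steps live elsewhere in S3Uploader. A hypothetical sketch of how the backup path could be used with this s3Client, backing up the live package before overwriting it and restoring it on revert (method names here are illustrative, not Heron's actual ones):

// Hypothetical helpers; Heron's real S3Uploader differs in detail.
void backupCurrentPackage() {
    if (s3Client.doesObjectExist(bucket, remoteFilePath)) {
        // Keep the live package around so a failed deploy can be rolled back.
        s3Client.copyObject(bucket, remoteFilePath, bucket, previousVersionFilePath);
    }
}

void revertToPreviousPackage() {
    // Restore the backed-up package over the freshly uploaded one.
    s3Client.copyObject(bucket, previousVersionFilePath, bucket, remoteFilePath);
}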