Usage examples for `com.amazonaws.services.s3.AmazonS3Client#doesBucketExist(String)`
@Override public boolean doesBucketExist(String bucketName) throws SdkClientException, AmazonServiceException
From source file:backup.store.s3.S3BackupStoreUtil.java
License:Apache License
/**
 * Checks whether the given S3 bucket exists, authenticating through the
 * default AWS credentials provider chain.
 *
 * @param bucketName name of the bucket to check
 * @return true if the bucket exists and is accessible
 * @throws Exception if the S3 request fails
 */
public static boolean exists(String bucketName) throws Exception {
    AmazonS3Client client = new AmazonS3Client(new DefaultAWSCredentialsProviderChain());
    try {
        return client.doesBucketExist(bucketName);
    } finally {
        // FIX: release the client's underlying HTTP connection pool
        // (the original leaked a client per call).
        client.shutdown();
    }
}
From source file:ch.entwine.weblounge.maven.S3DeployMojo.java
License:Open Source License
/** * //from ww w . ja va2 s .c o m * {@inheritDoc} * * @see org.apache.maven.plugin.Mojo#execute() */ public void execute() throws MojoExecutionException, MojoFailureException { // Setup AWS S3 client AWSCredentials credentials = new BasicAWSCredentials(awsAccessKey, awsSecretKey); AmazonS3Client uploadClient = new AmazonS3Client(credentials); TransferManager transfers = new TransferManager(credentials); // Make sure key prefix does not start with a slash but has one at the // end if (keyPrefix.startsWith("/")) keyPrefix = keyPrefix.substring(1); if (!keyPrefix.endsWith("/")) keyPrefix = keyPrefix + "/"; // Keep track of how much data has been transferred long totalBytesTransferred = 0L; int items = 0; Queue<Upload> uploads = new LinkedBlockingQueue<Upload>(); try { // Check if S3 bucket exists getLog().debug("Checking whether bucket " + bucket + " exists"); if (!uploadClient.doesBucketExist(bucket)) { getLog().error("Desired bucket '" + bucket + "' does not exist!"); return; } getLog().debug("Collecting files to transfer from " + resources.getDirectory()); List<File> res = getResources(); for (File file : res) { // Make path of resource relative to resources directory String filename = file.getName(); String extension = FilenameUtils.getExtension(filename); String path = file.getPath().substring(resources.getDirectory().length()); String key = concat("/", keyPrefix, path).substring(1); // Delete old file version in bucket getLog().debug("Removing existing object at " + key); uploadClient.deleteObject(bucket, key); // Setup meta data ObjectMetadata meta = new ObjectMetadata(); meta.setCacheControl("public, max-age=" + String.valueOf(valid * 3600)); FileInputStream fis = null; GZIPOutputStream gzipos = null; final File fileToUpload; if (gzip && ("js".equals(extension) || "css".equals(extension))) { try { fis = new FileInputStream(file); File gzFile = File.createTempFile(file.getName(), null); gzipos = new GZIPOutputStream(new FileOutputStream(gzFile)); 
IOUtils.copy(fis, gzipos); fileToUpload = gzFile; meta.setContentEncoding("gzip"); if ("js".equals(extension)) meta.setContentType("text/javascript"); if ("css".equals(extension)) meta.setContentType("text/css"); } catch (FileNotFoundException e) { getLog().error(e); continue; } catch (IOException e) { getLog().error(e); continue; } finally { IOUtils.closeQuietly(fis); IOUtils.closeQuietly(gzipos); } } else { fileToUpload = file; } // Do a random check for existing errors before starting the next upload if (erroneousUpload != null) break; // Create put object request long bytesToTransfer = fileToUpload.length(); totalBytesTransferred += bytesToTransfer; PutObjectRequest request = new PutObjectRequest(bucket, key, fileToUpload); request.setProgressListener(new UploadListener(credentials, bucket, key, bytesToTransfer)); request.setMetadata(meta); // Schedule put object request getLog().info( "Uploading " + key + " (" + FileUtils.byteCountToDisplaySize((int) bytesToTransfer) + ")"); Upload upload = transfers.upload(request); uploads.add(upload); items++; } } catch (AmazonServiceException e) { getLog().error("Uploading resources failed: " + e.getMessage()); } catch (AmazonClientException e) { getLog().error("Uploading resources failed: " + e.getMessage()); } // Wait for uploads to be finished String currentUpload = null; try { Thread.sleep(1000); getLog().info("Waiting for " + uploads.size() + " uploads to finish..."); while (!uploads.isEmpty()) { Upload upload = uploads.poll(); currentUpload = upload.getDescription().substring("Uploading to ".length()); if (TransferState.InProgress.equals(upload.getState())) getLog().debug("Waiting for upload " + currentUpload + " to finish"); upload.waitForUploadResult(); } } catch (AmazonServiceException e) { throw new MojoExecutionException("Error while uploading " + currentUpload); } catch (AmazonClientException e) { throw new MojoExecutionException("Error while uploading " + currentUpload); } catch (InterruptedException e) { 
getLog().debug("Interrupted while waiting for upload to finish"); } // Check for errors that happened outside of the actual uploading if (erroneousUpload != null) { throw new MojoExecutionException("Error while uploading " + erroneousUpload); } getLog().info("Deployed " + items + " files (" + FileUtils.byteCountToDisplaySize((int) totalBytesTransferred) + ") to s3://" + bucket); }
From source file:com.amazon.services.awsrum.utils.S3Utils.java
License:Open Source License
/**
 * Determines whether the named bucket exists in Amazon S3.
 *
 * @param client the {@link AmazonS3Client} with read permissions
 * @param bucketName name of the bucket to look up
 * @return true if the S3 bucket exists, otherwise false
 */
private static boolean bucketExists(AmazonS3Client client, String bucketName) {
    final boolean present = client.doesBucketExist(bucketName);
    return present;
}
From source file:com.kinesisboard.amazonaws.utils.S3Utils.java
License:Open Source License
/**
 * Determines whether the named bucket exists in Amazon S3 and logs the
 * result to standard output.
 *
 * @param client the {@link AmazonS3Client} with read permissions
 * @param bucketName name of the bucket to look up
 * @return true if the Amazon S3 bucket exists, otherwise false
 */
private static boolean bucketExists(AmazonS3Client client, String bucketName) {
    // FIX: query S3 once and reuse the result; the original called
    // doesBucketExist twice, doubling the remote round-trips.
    boolean exists = client.doesBucketExist(bucketName);
    System.out.println("S3Utils : Bucket " + bucketName + " Exists : " + exists);
    return exists;
}
From source file:com.netflix.dynomitemanager.sidecore.backup.S3Backup.java
License:Apache License
/** * Uses the Amazon S3 API to upload the AOF/RDB to S3 * Filename: Backup location + DC + Rack + App + Token *//*from w w w . ja va 2s. c o m*/ @Override public boolean upload(File file, DateTime todayStart) { logger.info("Snapshot backup: sending " + file.length() + " bytes to S3"); /* Key name is comprised of the * backupDir + DC + Rack + token + Date */ String keyName = config.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/" + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + todayStart.getMillis(); // Get bucket location. logger.info("Key in Bucket: " + keyName); logger.info("S3 Bucket Name:" + config.getBucketName()); AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider()); try { // Checking if the S3 bucket exists, and if does not, then we create it if (!(s3Client.doesBucketExist(config.getBucketName()))) { logger.error("Bucket with name: " + config.getBucketName() + " does not exist"); return false; } else { logger.info("Uploading data to S3\n"); // Create a list of UploadPartResponse objects. You get one of these for // each part upload. List<PartETag> partETags = new ArrayList<PartETag>(); InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest( config.getBucketName(), keyName); InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest); long contentLength = file.length(); long filePosition = 0; long partSize = this.initPartSize; try { for (int i = 1; filePosition < contentLength; i++) { // Last part can be less than initPartSize (500MB). Adjust part size. partSize = Math.min(partSize, (contentLength - filePosition)); // Create request to upload a part. UploadPartRequest uploadRequest = new UploadPartRequest() .withBucketName(config.getBucketName()).withKey(keyName) .withUploadId(initResponse.getUploadId()).withPartNumber(i) .withFileOffset(filePosition).withFile(file).withPartSize(partSize); // Upload part and add response to our list. 
partETags.add(s3Client.uploadPart(uploadRequest).getPartETag()); filePosition += partSize; } CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest( config.getBucketName(), keyName, initResponse.getUploadId(), partETags); s3Client.completeMultipartUpload(compRequest); } catch (Exception e) { logger.error("Abosting multipart upload due to error"); s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(config.getBucketName(), keyName, initResponse.getUploadId())); } return true; } } catch (AmazonServiceException ase) { logger.error( "AmazonServiceException;" + " request made it to Amazon S3, but was rejected with an error "); logger.error("Error Message: " + ase.getMessage()); logger.error("HTTP Status Code: " + ase.getStatusCode()); logger.error("AWS Error Code: " + ase.getErrorCode()); logger.error("Error Type: " + ase.getErrorType()); logger.error("Request ID: " + ase.getRequestId()); return false; } catch (AmazonClientException ace) { logger.error("AmazonClientException;" + " the client encountered " + "an internal error while trying to " + "communicate with S3, "); logger.error("Error Message: " + ace.getMessage()); return false; } }
From source file:com.netflix.dynomitemanager.sidecore.backup.S3Restore.java
License:Apache License
/** * Uses the Amazon S3 API to restore from S3 *///from w w w.j a v a2 s. co m @Override public boolean restoreData(String dateString) { long time = restoreTime(dateString); if (time > -1) { logger.info("Restoring data from S3."); AmazonS3Client s3Client = new AmazonS3Client(cred.getAwsCredentialProvider()); try { /* construct the key for the backup data */ String keyName = config.getBackupLocation() + "/" + iid.getInstance().getDatacenter() + "/" + iid.getInstance().getRack() + "/" + iid.getInstance().getToken() + "/" + time; logger.info("S3 Bucket Name: " + config.getBucketName()); logger.info("Key in Bucket: " + keyName); // Checking if the S3 bucket exists, and if does not, then we create it if (!(s3Client.doesBucketExist(config.getBucketName()))) { logger.error("Bucket with name: " + config.getBucketName() + " does not exist"); } else { S3Object s3object = s3Client.getObject(new GetObjectRequest(config.getBucketName(), keyName)); logger.info("Content-Type: " + s3object.getObjectMetadata().getContentType()); String filepath = null; if (config.isAof()) { filepath = config.getPersistenceLocation() + "/appendonly.aof"; } else { filepath = config.getPersistenceLocation() + "/nfredis.rdb"; } IOUtils.copy(s3object.getObjectContent(), new FileOutputStream(new File(filepath))); } return true; } catch (AmazonServiceException ase) { logger.error("AmazonServiceException;" + " request made it to Amazon S3, but was rejected with an error "); logger.error("Error Message: " + ase.getMessage()); logger.error("HTTP Status Code: " + ase.getStatusCode()); logger.error("AWS Error Code: " + ase.getErrorCode()); logger.error("Error Type: " + ase.getErrorType()); logger.error("Request ID: " + ase.getRequestId()); } catch (AmazonClientException ace) { logger.error("AmazonClientException;" + " the client encountered " + "an internal error while trying to " + "communicate with S3, "); logger.error("Error Message: " + ace.getMessage()); } catch (IOException io) { logger.error("File 
storing error: " + io.getMessage()); } } else { logger.error("Date in FP: " + dateString); } return false; }
From source file:com.streamsets.pipeline.stage.origin.s3.S3ConfigBean.java
License:Apache License
private void validateBucket(Stage.Context context, List<Stage.ConfigIssue> issues, AmazonS3Client s3Client, String bucket, String groupName, String configName) { if (bucket == null || bucket.isEmpty()) { issues.add(context.createConfigIssue(groupName, configName, Errors.S3_SPOOLDIR_11)); } else if (!s3Client.doesBucketExist(bucket)) { issues.add(context.createConfigIssue(groupName, configName, Errors.S3_SPOOLDIR_12, bucket)); }//from w w w . j av a 2 s . c o m }
From source file:it.polimi.modaclouds.cpimlibrary.blobmng.AmazonBlobManager.java
License:Apache License
/**
 * Creates a blob manager backed by the given S3 client. The bucket name is
 * read from the optional {@code /endpoints.properties} classpath resource
 * (key {@code S3}, lower-cased); if absent, a default name is used. The
 * bucket is created if it does not exist yet.
 *
 * @param s3 the Amazon S3 client used for all blob operations
 */
public AmazonBlobManager(AmazonS3Client s3) {
    if (this.bucketName == null) {
        // FIX: open the classpath resource once and close it; the original
        // called getResourceAsStream twice and leaked both streams.
        java.io.InputStream in = this.getClass().getResourceAsStream("/endpoints.properties");
        if (in != null) {
            try {
                Properties endpoints = new Properties();
                endpoints.load(in);
                if (endpoints.getProperty("S3") != null)
                    this.bucketName = endpoints.getProperty("S3").toLowerCase(Locale.ENGLISH);
            } catch (IOException e) {
                // Best effort: fall through to the default bucket name below.
                e.printStackTrace();
            } finally {
                try {
                    in.close();
                } catch (IOException ignored) {
                    // nothing sensible to do on close failure
                }
            }
        }
        if (this.bucketName == null)
            this.bucketName = "mycloudapplicationbucket";
    }
    this.s3 = s3;
    if (!s3.doesBucketExist(bucketName)) {
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);
    }
}
From source file:org.akvo.flow.InstanceConfigurator.java
License:Open Source License
public static void main(String[] args) throws Exception { Options opts = getOptions();//from w w w . j a va2 s . co m CommandLineParser parser = new BasicParser(); CommandLine cli = null; try { cli = parser.parse(opts, args); } catch (Exception e) { System.err.println(e.getMessage()); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp(InstanceConfigurator.class.getName(), opts); System.exit(1); } String awsAccessKey = cli.getOptionValue("ak"); String awsSecret = cli.getOptionValue("as"); String bucketName = cli.getOptionValue("bn"); String gaeId = cli.getOptionValue("gae"); String outFolder = cli.getOptionValue("o"); String flowServices = cli.getOptionValue("fs"); String alias = cli.getOptionValue("a"); String emailFrom = cli.getOptionValue("ef"); String emailTo = cli.getOptionValue("et"); String orgName = cli.getOptionValue("on"); String signingKey = cli.getOptionValue("sk"); File out = new File(outFolder); if (!out.exists()) { out.mkdirs(); } Map<String, AccessKey> accessKeys = new HashMap<String, AccessKey>(); String apiKey = UUID.randomUUID().toString().replaceAll("-", ""); AWSCredentials creds = new BasicAWSCredentials(awsAccessKey, awsSecret); AmazonIdentityManagementClient iamClient = new AmazonIdentityManagementClient(creds); AmazonS3Client s3Client = new AmazonS3Client(creds); // Creating bucket System.out.println("Creating bucket: " + bucketName); try { if (s3Client.doesBucketExist(bucketName)) { System.out.println(bucketName + " already exists, skipping creation"); } else { s3Client.createBucket(bucketName, Region.EU_Ireland); } } catch (Exception e) { System.err.println("Error trying to create bucket " + bucketName + " : " + e.getMessage()); System.exit(1); } // Creating users and groups String gaeUser = bucketName + GAE_SUFFIX; String apkUser = bucketName + APK_SUFFIX; // GAE System.out.println("Creating user: " + gaeUser); GetUserRequest gaeUserRequest = new GetUserRequest(); gaeUserRequest.setUserName(gaeUser); try { 
iamClient.getUser(gaeUserRequest); System.out.println("User already exists, skipping creation"); } catch (NoSuchEntityException e) { iamClient.createUser(new CreateUserRequest(gaeUser)); } System.out.println("Requesting security credentials for " + gaeUser); CreateAccessKeyRequest gaeAccessRequest = new CreateAccessKeyRequest(); gaeAccessRequest.setUserName(gaeUser); CreateAccessKeyResult gaeAccessResult = iamClient.createAccessKey(gaeAccessRequest); accessKeys.put(gaeUser, gaeAccessResult.getAccessKey()); // APK System.out.println("Creating user: " + apkUser); GetUserRequest apkUserRequest = new GetUserRequest(); apkUserRequest.setUserName(apkUser); try { iamClient.getUser(apkUserRequest); System.out.println("User already exists, skipping creation"); } catch (NoSuchEntityException e) { iamClient.createUser(new CreateUserRequest(apkUser)); } System.out.println("Requesting security credentials for " + apkUser); CreateAccessKeyRequest apkAccessRequest = new CreateAccessKeyRequest(); apkAccessRequest.setUserName(apkUser); CreateAccessKeyResult apkAccessResult = iamClient.createAccessKey(apkAccessRequest); accessKeys.put(apkUser, apkAccessResult.getAccessKey()); System.out.println("Configuring security policies..."); Configuration cfg = new Configuration(); cfg.setClassForTemplateLoading(InstanceConfigurator.class, "/org/akvo/flow/templates"); cfg.setObjectWrapper(new DefaultObjectWrapper()); cfg.setDefaultEncoding("UTF-8"); Map<String, Object> data = new HashMap<String, Object>(); data.put("bucketName", bucketName); data.put("version", new SimpleDateFormat("yyyy-MM-dd").format(new Date())); data.put("accessKey", accessKeys); Template t1 = cfg.getTemplate("apk-s3-policy.ftl"); StringWriter apkPolicy = new StringWriter(); t1.process(data, apkPolicy); Template t2 = cfg.getTemplate("gae-s3-policy.ftl"); StringWriter gaePolicy = new StringWriter(); t2.process(data, gaePolicy); iamClient.putUserPolicy( new PutUserPolicyRequest(apkUser, apkUser, 
Policy.fromJson(apkPolicy.toString()).toJson())); iamClient.putUserPolicy( new PutUserPolicyRequest(gaeUser, gaeUser, Policy.fromJson(gaePolicy.toString()).toJson())); System.out.println("Creating configuration files..."); // survey.properties Map<String, Object> apkData = new HashMap<String, Object>(); apkData.put("awsBucket", bucketName); apkData.put("awsAccessKeyId", accessKeys.get(apkUser).getAccessKeyId()); apkData.put("awsSecretKey", accessKeys.get(apkUser).getSecretAccessKey()); apkData.put("serverBase", "https://" + gaeId + ".appspot.com"); apkData.put("restApiKey", apiKey); Template t3 = cfg.getTemplate("survey.properties.ftl"); FileWriter fw = new FileWriter(new File(out, "/survey.properties")); t3.process(apkData, fw); // appengine-web.xml Map<String, Object> webData = new HashMap<String, Object>(); webData.put("awsBucket", bucketName); webData.put("awsAccessKeyId", accessKeys.get(gaeUser).getAccessKeyId()); webData.put("awsSecretAccessKey", accessKeys.get(gaeUser).getSecretAccessKey()); webData.put("s3url", "https://" + bucketName + ".s3.amazonaws.com"); webData.put("instanceId", gaeId); webData.put("alias", alias); webData.put("flowServices", flowServices); webData.put("apiKey", apiKey); webData.put("emailFrom", emailFrom); webData.put("emailTo", emailTo); webData.put("organization", orgName); webData.put("signingKey", signingKey); Template t5 = cfg.getTemplate("appengine-web.xml.ftl"); FileWriter fw3 = new FileWriter(new File(out, "/appengine-web.xml")); t5.process(webData, fw3); System.out.println("Done"); }
From source file:org.entando.entando.plugins.jps3awsclient.aps.system.services.storage.AmazonS3StorageManager.java
License:Open Source License
public void checkForAndCreateBucket(String bucketName, AmazonS3Client client) { // Make sure it's lower case to comply with Amazon S3 recommendations bucketName = bucketName.toLowerCase(); if (this._bucketMap.get(bucketName) == null) { if (client.doesBucketExist(bucketName)) { this._bucketMap.put(bucketName, true); } else {/*www.j a va 2s . c o m*/ // Bucket hasn't been created yet so we create it CreateBucketRequest request = new CreateBucketRequest(bucketName); request.withCannedAcl(CannedAccessControlList.LogDeliveryWrite); client.createBucket(request); this._bucketMap.put(bucketName, true); } } }