List of usage examples for the com.amazonaws.services.s3.model.PutObjectRequest constructor
public PutObjectRequest(String bucketName, String key, File file)
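Before the source-file examples below, here is a minimal, self-contained sketch of the constructor in isolation. It is a sketch only: the class name, bucket name, object key, and local file path are placeholder values, and the client is built from the SDK's default credential/region provider chain rather than the explicit credential handling used in the examples.

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestExample {
    public static void main(String[] args) {
        // Client from the default credential/region provider chain (assumes credentials
        // are available in the environment); the examples below build clients explicitly.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Target bucket, object key, and local file to upload -- all placeholders.
        PutObjectRequest request = new PutObjectRequest("my-bucket", "docs/report.pdf",
                new File("/tmp/report.pdf"));

        // Execute the upload; metadata, ACLs, and storage class can be set on the
        // request before this call, as several of the examples below do.
        s3.putObject(request);
    }
}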
From source file: org.pentaho.amazon.emr.job.AmazonElasticMapReduceJobExecutor.java
License: Apache License
public Result execute(Result result, int arg1) throws KettleException {
    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$
    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.FailedToOpenLogFile", //$NON-NLS-1$
                logFileName, e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        // create/connect aws service
        AmazonElasticMapReduceClient emrClient = new AmazonElasticMapReduceClient(awsCredentials);

        // pull down jar from vfs
        FileObject jarFile = KettleVFS.getFileObject(buildFilename(jarUrl));
        File tmpFile = File.createTempFile("customEMR", "jar");
        tmpFile.deleteOnExit();
        FileOutputStream tmpFileOut = new FileOutputStream(tmpFile);
        IOUtils.copy(jarFile.getContent().getInputStream(), tmpFileOut);
        URL localJarUrl = tmpFile.toURI().toURL();

        // find main class in jar
        String mainClass = getMainClass(localJarUrl);

        // create staging bucket
        AmazonS3 s3Client = new AmazonS3Client(awsCredentials);

        FileSystemOptions opts = new FileSystemOptions();
        DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(opts, new StaticUserAuthenticator(
                null, awsCredentials.getAWSAccessKeyId(), awsCredentials.getAWSSecretKey()));
        FileObject stagingDirFileObject = KettleVFS.getFileObject(stagingDir, getVariables(), opts);

        String stagingBucketName = stagingDirFileObject.getName().getBaseName();
        if (!s3Client.doesBucketExist(stagingBucketName)) {
            s3Client.createBucket(stagingBucketName);
        }

        // delete old jar if needed
        try {
            s3Client.deleteObject(stagingBucketName, jarFile.getName().getBaseName());
        } catch (Exception ex) {
            logError(Const.getStackTracker(ex));
        }

        // put jar in s3 staging bucket
        s3Client.putObject(new PutObjectRequest(stagingBucketName, jarFile.getName().getBaseName(), tmpFile));

        // create non-vfs s3 url to jar
        String stagingS3JarUrl = "s3://" + stagingBucketName + "/" + jarFile.getName().getBaseName();
        String stagingS3BucketUrl = "s3://" + stagingBucketName;

        RunJobFlowRequest runJobFlowRequest = null;
        RunJobFlowResult runJobFlowResult = null;
        if (StringUtil.isEmpty(hadoopJobFlowId)) {
            // create EMR job flow
            runJobFlowRequest = createJobFlow(stagingS3BucketUrl, stagingS3JarUrl, mainClass);
            // start EMR job
            runJobFlowResult = emrClient.runJobFlow(runJobFlowRequest);
        } else {
            List<String> jarStepArgs = new ArrayList<String>();
            if (!StringUtil.isEmpty(cmdLineArgs)) {
                StringTokenizer st = new StringTokenizer(cmdLineArgs, " ");
                while (st.hasMoreTokens()) {
                    String token = st.nextToken();
                    logBasic("adding args: " + token);
                    jarStepArgs.add(token);
                }
            }

            HadoopJarStepConfig hadoopJarStep = new HadoopJarStepConfig();
            hadoopJarStep.setJar(stagingS3JarUrl);
            hadoopJarStep.setMainClass(mainClass);
            hadoopJarStep.setArgs(jarStepArgs);

            StepConfig stepConfig = new StepConfig();
            stepConfig.setName("custom jar: " + jarUrl);
            stepConfig.setHadoopJarStep(hadoopJarStep);

            List<StepConfig> steps = new ArrayList<StepConfig>();
            steps.add(stepConfig);

            AddJobFlowStepsRequest addJobFlowStepsRequest = new AddJobFlowStepsRequest();
            addJobFlowStepsRequest.setJobFlowId(hadoopJobFlowId);
            addJobFlowStepsRequest.setSteps(steps);

            emrClient.addJobFlowSteps(addJobFlowStepsRequest);
        }

        String loggingIntervalS = environmentSubstitute(loggingInterval);
        int logIntv = 60;
        try {
            logIntv = Integer.parseInt(loggingIntervalS);
        } catch (NumberFormatException ex) {
            logError("Unable to parse logging interval '" + loggingIntervalS + "' - using default of 60");
        }

        // monitor it / blocking / logging if desired
        if (blocking) {
            try {
                if (log.isBasic()) {

                    String executionState = "RUNNING";

                    List<String> jobFlowIds = new ArrayList<String>();
                    String id = hadoopJobFlowId;
                    if (StringUtil.isEmpty(hadoopJobFlowId)) {
                        id = runJobFlowResult.getJobFlowId();
                        jobFlowIds.add(id);
                    }

                    while (isRunning(executionState)) {
                        DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest();
                        describeJobFlowsRequest.setJobFlowIds(jobFlowIds);

                        DescribeJobFlowsResult describeJobFlowsResult = emrClient
                                .describeJobFlows(describeJobFlowsRequest);
                        boolean found = false;
                        for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
                            if (jobFlowDetail.getJobFlowId().equals(id)) {
                                executionState = jobFlowDetail.getExecutionStatusDetail().getState();
                                found = true;
                            }
                        }

                        if (!found) {
                            break;
                        }

                        // logBasic(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.RunningPercent", setupPercent,
                        // mapPercent, reducePercent));
                        logBasic(hadoopJobName + " execution status: " + executionState);
                        try {
                            if (isRunning(executionState)) {
                                Thread.sleep(logIntv * 1000);
                            }
                        } catch (InterruptedException ie) {
                            // Ignore
                        }
                    }

                    if ("FAILED".equalsIgnoreCase(executionState)) {
                        result.setStopped(true);
                        result.setNrErrors(1);
                        result.setResult(false);

                        S3Object outObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stdout");
                        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
                        IOUtils.copy(outObject.getObjectContent(), outStream);
                        logError(outStream.toString());

                        S3Object errorObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stderr");
                        ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
                        IOUtils.copy(errorObject.getObjectContent(), errorStream);
                        logError(errorStream.toString());
                    }
                }
            } catch (Exception e) {
                logError(e.getMessage(), e);
            }
        }

    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}
From source file: org.pentaho.amazon.hive.job.AmazonHiveJobExecutor.java
License: Apache License
/**
 * Executes a Hive job into the AWS Elastic MapReduce service.
 */
public Result execute(Result result, int arg1) throws KettleException {

    // Setup a log file.
    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$
    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.FailedToOpenLogFile", //$NON-NLS-1$
                logFileName, e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        // Create and connect an AWS service.
        AmazonElasticMapReduceClient emrClient = new AmazonElasticMapReduceClient(awsCredentials);
        AmazonS3 s3Client = new AmazonS3Client(awsCredentials);

        // Get bucket name and S3 URL.
        String stagingBucketName = GetBucketName(stagingDir);
        String stagingS3BucketUrl = "s3://" + stagingBucketName; //$NON-NLS-1$

        // Prepare staging S3 URL for Hive script file.
        String stagingS3qUrl = "";
        if (qUrl.startsWith(S3FileProvider.SCHEME + "://")) { //$NON-NLS-1$
            // If the .q file is in S3, its staging S3 URL is s3://{bucketname}/{path}
            if (qUrl.indexOf("@s3") > 0) { //$NON-NLS-1$
                stagingS3qUrl = S3FileProvider.SCHEME + "://" + qUrl.substring(qUrl.indexOf("@s3") + 4); //$NON-NLS-1$
            } else {
                stagingS3qUrl = qUrl;
            }
        } else {
            // A local filename is given for the Hive script file. It should be copied to the S3 Log Directory.
            // First, check for the correct protocol.
            if (!qUrl.startsWith("file:")) { //$NON-NLS-1$
                if (log.isBasic()) {
                    logBasic(BaseMessages.getString(PKG,
                            "AmazonElasticMapReduceJobExecutor.HiveScriptFilename.Error") + qUrl); //$NON-NLS-1$
                }
            }
            // pull down .q file from VSF
            FileObject qFile = KettleVFS.getFileObject(buildFilename(qUrl));
            File tmpFile = File.createTempFile("customEMR", "q"); //$NON-NLS-1$
            tmpFile.deleteOnExit();
            FileOutputStream tmpFileOut = new FileOutputStream(tmpFile);
            IOUtils.copy(qFile.getContent().getInputStream(), tmpFileOut);

            // Get key name for the script file S3 destination. Key is defined as path name after {bucket}/
            String key = GetKeyFromS3Url(stagingDir);
            if (key == null) {
                key = qFile.getName().getBaseName();
            } else {
                key += "/" + qFile.getName().getBaseName(); //$NON-NLS-1$
            }

            // delete the previous .q file in S3
            try {
                s3Client.deleteObject(stagingBucketName, key);
            } catch (Exception ex) {
                logError(Const.getStackTracker(ex));
            }

            // Put .q file in S3 Log Directory.
            s3Client.putObject(new PutObjectRequest(stagingBucketName, key, tmpFile));
            stagingS3qUrl = stagingS3BucketUrl + "/" + key; //$NON-NLS-1$
        }

        // AWS provides script-runner.jar (in its public bucket), which should be used as a MapReduce jar for Hive EMR job.
        jarUrl = "s3://elasticmapreduce/libs/script-runner/script-runner.jar"; //$NON-NLS-1$

        RunJobFlowRequest runJobFlowRequest = null;
        RunJobFlowResult runJobFlowResult = null;
        if (StringUtil.isEmpty(hadoopJobFlowId)) {
            // create an EMR job flow, start a step to setup Hive and get the job flow ID.
            runJobFlowRequest = createJobFlow();
            runJobFlowResult = emrClient.runJobFlow(runJobFlowRequest);
            hadoopJobFlowId = runJobFlowResult.getJobFlowId();
        }

        // Now EMR job flow is ready to accept a Run Hive Script step.
        // First, prepare a Job Flow ID list.
        List<String> jobFlowIds = new ArrayList<String>();
        jobFlowIds.add(hadoopJobFlowId);

        // Configure a HadoopJarStep.
        String args = "s3://elasticmapreduce/libs/hive/hive-script "
                + "--base-path s3://elasticmapreduce/libs/hive/ --hive-version 0.7 --run-hive-script --args -f "
                + environmentSubstitute(stagingS3qUrl) + " " + environmentSubstitute(cmdLineArgs); //$NON-NLS-1$
        List<StepConfig> steps = ConfigHadoopJarStep(hadoopJobName, jarUrl, args);

        // Add a Run Hive Script step to the existing job flow.
        AddJobFlowStepsRequest addJobFlowStepsRequest = new AddJobFlowStepsRequest();
        addJobFlowStepsRequest.setJobFlowId(hadoopJobFlowId);
        addJobFlowStepsRequest.setSteps(steps);
        emrClient.addJobFlowSteps(addJobFlowStepsRequest);

        // Set a logging interval.
        String loggingIntervalS = environmentSubstitute(loggingInterval);
        int logIntv = 10;
        try {
            logIntv = Integer.parseInt(loggingIntervalS);
        } catch (NumberFormatException ex) {
            logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.LoggingInterval.Error", //$NON-NLS-1$
                    loggingIntervalS));
        }

        // monitor and log if intended.
        if (blocking) {
            try {
                if (log.isBasic()) {

                    String executionState = "RUNNING"; //$NON-NLS-1$

                    while (isRunning(executionState)) {
                        DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest();
                        describeJobFlowsRequest.setJobFlowIds(jobFlowIds);

                        DescribeJobFlowsResult describeJobFlowsResult = emrClient
                                .describeJobFlows(describeJobFlowsRequest);
                        boolean found = false;
                        for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
                            if (jobFlowDetail.getJobFlowId().equals(hadoopJobFlowId)) {
                                executionState = jobFlowDetail.getExecutionStatusDetail().getState();
                                found = true;
                            }
                        }

                        if (!found) {
                            break;
                        }

                        logBasic(hadoopJobName + " " + BaseMessages.getString(PKG, //$NON-NLS-1$
                                "AmazonElasticMapReduceJobExecutor.JobFlowExecutionStatus", hadoopJobFlowId)
                                + executionState);

                        if (parentJob.isStopped()) {
                            if (!alive) {
                                TerminateJobFlowsRequest terminateJobFlowsRequest = new TerminateJobFlowsRequest();
                                terminateJobFlowsRequest.withJobFlowIds(hadoopJobFlowId);
                                emrClient.terminateJobFlows(terminateJobFlowsRequest);
                            }
                            break;
                        }

                        try {
                            if (isRunning(executionState)) {
                                Thread.sleep(logIntv * 1000);
                            }
                        } catch (InterruptedException ie) {
                            logError(Const.getStackTracker(ie));
                        }
                    }

                    if ("FAILED".equalsIgnoreCase(executionState)) { //$NON-NLS-1$
                        result.setStopped(true);
                        result.setNrErrors(1);
                        result.setResult(false);

                        S3Object outObject = s3Client.getObject(stagingBucketName,
                                hadoopJobFlowId + "/steps/1/stdout"); //$NON-NLS-1$
                        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
                        IOUtils.copy(outObject.getObjectContent(), outStream);
                        logError(outStream.toString());

                        S3Object errorObject = s3Client.getObject(stagingBucketName,
                                hadoopJobFlowId + "/steps/1/stderr"); //$NON-NLS-1$
                        ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
                        IOUtils.copy(errorObject.getObjectContent(), errorStream);
                        logError(errorStream.toString());
                    }
                }
            } catch (Exception e) {
                logError(e.getMessage(), e);
            }
        }

    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}
From source file: org.plos.repo.service.S3StoreService.java
License: Open Source License
@Override
public boolean saveUploadedObject(Bucket bucket, UploadInfo uploadInfo, RepoObject repoObject) {
    int retries = 5;
    int tryCount = 0;
    int waitSecond = 4;

    ObjectMapper m = new ObjectMapper();
    Map<String, java.lang.Object> propsObj = m.convertValue(repoObject, Map.class);

    Map<String, String> propsStr = new HashMap<>();
    for (Map.Entry<String, java.lang.Object> entry : propsObj.entrySet()) {
        try {
            if (entry.getValue() == null) {
                propsStr.put(entry.getKey(), "");
            } else {
                propsStr.put(entry.getKey(), entry.getValue().toString());
            }
        } catch (ClassCastException cce) {
            log.error("Problem converting object to metadata", cce);
        }
    }

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(uploadInfo.getSize());
    objectMetadata.setUserMetadata(propsStr);

    File tempFile = new File(uploadInfo.getTempLocation());

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket.getBucketName(), uploadInfo.getChecksum(),
            tempFile);
    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);
    putObjectRequest.setMetadata(objectMetadata);

    while (tryCount < retries) {
        try {
            s3Client.putObject(putObjectRequest); // TODO: check result and do something about it
            tempFile.delete();
            return true;
        } catch (Exception e) {
            tryCount++;
            log.error("Error during putObject", e);
            try {
                Thread.sleep(waitSecond * 1000);
            } catch (Exception e2) {
            }
        }
    }

    return false;
}
From source file: org.restcomm.connect.commons.amazonS3.S3AccessTool.java
License: Open Source License
public URI uploadFile(final String fileToUpload) {
    if (s3client == null) {
        s3client = getS3client();
    }
    if (logger.isInfoEnabled()) {
        logger.info("S3 Region: " + bucketRegion.toString());
    }
    try {
        if (testing && (!testingUrl.isEmpty() || !testingUrl.equals(""))) {
            // s3client.setEndpoint(testingUrl);
            // s3client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
            FileUtils.touch(new File(URI.create(fileToUpload)));
        }
        StringBuffer bucket = new StringBuffer();
        bucket.append(bucketName);
        if (folder != null && !folder.isEmpty())
            bucket.append("/").append(folder);
        URI fileUri = URI.create(fileToUpload);
        if (logger.isInfoEnabled()) {
            logger.info("File to upload to S3: " + fileUri.toString());
        }
        File file = new File(fileUri);

        while (!FileUtils.waitFor(file, 30)) {
        }

        if (file.exists()) {
            PutObjectRequest putRequest = new PutObjectRequest(bucket.toString(), file.getName(), file);
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentType(new MimetypesFileTypeMap().getContentType(file));
            putRequest.setMetadata(metadata);
            if (reducedRedundancy)
                putRequest.setStorageClass(StorageClass.ReducedRedundancy);
            s3client.putObject(putRequest);

            if (removeOriginalFile) {
                removeLocalFile(file);
            }
            URI recordingS3Uri = s3client.getUrl(bucket.toString(), file.getName()).toURI();
            return recordingS3Uri;
            // return downloadUrl.toURI();
        } else {
            logger.error("Timeout waiting for the recording file: " + file.getAbsolutePath());
            return null;
        }
    } catch (AmazonServiceException ase) {
        logger.error("Caught an AmazonServiceException");
        logger.error("Error Message: " + ase.getMessage());
        logger.error("HTTP Status Code: " + ase.getStatusCode());
        logger.error("AWS Error Code: " + ase.getErrorCode());
        logger.error("Error Type: " + ase.getErrorType());
        logger.error("Request ID: " + ase.getRequestId());
        return null;
    } catch (AmazonClientException ace) {
        logger.error("Caught an AmazonClientException ");
        logger.error("Error Message: " + ace.getMessage());
        return null;
    } catch (URISyntaxException e) {
        logger.error("URISyntaxException: " + e.getMessage());
        return null;
    } catch (IOException e) {
        logger.error("Problem while trying to touch recording file for testing", e);
        return null;
    }
}
From source file: org.springframework.aws.ivy.S3Repository.java
License: Apache License
@Override
protected void put(File source, String destination, boolean overwrite) throws IOException {
    String bucket = S3Utils.getBucket(destination);
    String key = S3Utils.getKey(destination);
    getService().putObject(new PutObjectRequest(bucket, key, source).withCannedAcl(acl));
}
From source file: org.springframework.integration.aws.s3.core.AmazonS3OperationsImpl.java
License: Apache License
public void putObject(String bucketName, String folder, String objectName, AmazonS3Object s3Object) {

    if (logger.isDebugEnabled()) {
        logger.debug("Putting object to bucket " + bucketName + " and folder " + folder);
        logger.debug("Object Name is " + objectName);
    }

    if (objectName == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Object Name is Mandatory");

    boolean isTempFile = false;
    File file = s3Object.getFileSource();
    InputStream in = s3Object.getInputStream();

    if (file != null && in != null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "File Object and Input Stream in the S3 Object are mutually exclusive");

    if (file == null && in == null)
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "At lease one of File object or Input Stream in the S3 Object are mandatory");

    String key;
    if (folder != null) {
        key = folder.endsWith(PATH_SEPARATOR) ? folder + objectName : folder + PATH_SEPARATOR + objectName;
    } else {
        key = objectName;
    }

    if (in != null) {
        file = getTempFile(in, bucketName, objectName);
        isTempFile = true;
    }

    PutObjectRequest request;

    if (file != null) {
        request = new PutObjectRequest(bucketName, key, file);
        //if the size of the file is greater than the threshold for multipart upload,
        //set the Content-MD5 header for this upload. This header will also come handy
        //later in inbound-channel-adapter where we cant find the MD5 sum of the
        //multipart upload file from its ETag
        String stringContentMD5 = null;
        try {
            stringContentMD5 = AmazonWSCommonUtils.encodeHex(AmazonWSCommonUtils.getContentsMD5AsBytes(file));
        } catch (UnsupportedEncodingException e) {
            logger.error("Exception while generating the content's MD5 of the file " + file.getAbsolutePath(), e);
        }

        if (stringContentMD5 != null) {
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentMD5(stringContentMD5);
            request.withMetadata(metadata);
        }
    } else
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Unable to get the File handle to upload the file to S3");

    Upload upload;
    try {
        upload = transferManager.upload(request);
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while invoking upload on multipart/single thread file, "
                        + "see nested exceptions for more details", e);
    }

    //Wait till the upload completes, the call to putObject is synchronous
    try {
        if (logger.isInfoEnabled())
            logger.info("Waiting for Upload to complete");
        upload.waitForCompletion();
        if (logger.isInfoEnabled())
            logger.info("Upload completed");
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                "Encountered Exception while uploading the multipart/single thread file, "
                        + "see nested exceptions for more details", e);
    }

    if (isTempFile) {
        //Delete the temp file
        if (logger.isDebugEnabled())
            logger.debug("Deleting temp file: " + file.getName());
        file.delete();
    }

    //Now since the object is present on S3, set the AccessControl list on it
    //Please note that it is not possible to set the object ACL with the
    //put object request, and hence both these operations cannot be atomic
    //it is possible the objects is uploaded and the ACl not set due to some
    //failure
    AmazonS3ObjectACL acl = s3Object.getObjectACL();
    AccessControlList objectACL = getAccessControlList(bucketName, key, acl);

    if (objectACL != null) {
        if (logger.isInfoEnabled())
            logger.info("Setting Access control list for key " + key);
        try {
            client.setObjectAcl(bucketName, key, objectACL);
        } catch (Exception e) {
            throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, objectName,
                    "Encountered Exception while setting the Object ACL for key , " + key
                            + "see nested exceptions for more details", e);
        }
        if (logger.isDebugEnabled())
            logger.debug("Successfully set the object ACL");
    } else {
        if (logger.isInfoEnabled())
            logger.info("No Object ACL found to be set");
    }
}
From source file: org.springframework.integration.aws.s3.core.DefaultAmazonS3Operations.java
License: Apache License
/**
 * The implementation puts the given {@link File} instance to the provided bucket against
 * the given key.
 *
 * @param bucketName The bucket on S3 where this object is to be put
 * @param key The key against which this Object is to be stored in S3
 * @param file resource to be uploaded to S3
 * @param objectACL the Object's Access controls for the object to be uploaded
 * @param userMetadata The user's metadata to be associated with the object uploaded
 * @param stringContentMD5 The MD5 sum of the contents of the file to be uploaded
 */
@Override
public void doPut(String bucketName, String key, File file, AmazonS3ObjectACL objectACL,
        Map<String, String> userMetadata, String stringContentMD5) {

    ObjectMetadata metadata = new ObjectMetadata();

    PutObjectRequest request = new PutObjectRequest(bucketName, key, file);
    request.withMetadata(metadata);

    if (stringContentMD5 != null) {
        metadata.setContentMD5(stringContentMD5);
    }

    if (userMetadata != null) {
        metadata.setUserMetadata(userMetadata);
    }

    Upload upload;
    try {
        upload = transferManager.upload(request);
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, key,
                "Encountered Exception while invoking upload on multipart/single thread file, "
                        + "see nested exceptions for more details", e);
    }

    //Wait till the upload completes, the call to putObject is synchronous
    try {
        if (logger.isInfoEnabled()) {
            logger.info("Waiting for Upload to complete");
        }
        upload.waitForCompletion();
        if (logger.isInfoEnabled()) {
            logger.info("Upload completed");
        }
    } catch (Exception e) {
        throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, key,
                "Encountered Exception while uploading the multipart/single thread file, "
                        + "see nested exceptions for more details", e);
    }

    //Now since the object is present on S3, set the AccessControl list on it
    //Please note that it is not possible to set the object ACL with the
    //put object request, and hence both these operations cannot be atomic
    //it is possible the objects is uploaded and the ACl not set due to some
    //failure
    if (objectACL != null) {
        if (logger.isInfoEnabled()) {
            logger.info("Setting Access control list for key " + key);
        }
        try {
            client.setObjectAcl(bucketName, key, getAccessControlList(bucketName, key, objectACL));
        } catch (Exception e) {
            throw new AmazonS3OperationException(credentials.getAccessKey(), bucketName, key,
                    "Encountered Exception while setting the Object ACL for key , " + key
                            + "see nested exceptions for more details", e);
        }
        if (logger.isDebugEnabled()) {
            logger.debug("Successfully set the object ACL");
        }
    }
}
From source file: org.xmlsh.aws.gradle.s3.AmazonS3FileUploadTask.java
License: BSD License
@TaskAction
public void upload() throws IOException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null)
        throw new GradleException("bucketName is not specified");
    if (key == null)
        throw new GradleException("key is not specified");
    if (file == null)
        throw new GradleException("file is not specified");
    if (file.isFile() == false)
        throw new GradleException("file must be regular file");

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    // metadata will be null iff the object does not exist
    ObjectMetadata metadata = existingObjectMetadata();

    if (metadata == null || (isOverwrite() && metadata.getETag().equals(md5()) == false)) {
        getLogger().info("uploading... " + bucketName + "/" + key);
        s3.putObject(new PutObjectRequest(bucketName, key, file).withMetadata(getObjectMetadata()));
        getLogger().info("upload completed: " + getResourceUrl());
    } else {
        getLogger().info("s3://{}/{} already exists with matching md5 sum -- skipped", bucketName, key);
    }
    setResourceUrl(((AmazonS3Client) s3).getResourceUrl(bucketName, key));
}
From source file: org.xmlsh.aws.gradle.s3.AmazonS3ProgressiveFileUploadTask.java
License: BSD License
@TaskAction
public void upload() throws InterruptedException {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String key = getKey();
    File file = getFile();

    if (bucketName == null)
        throw new GradleException("bucketName is not specified");
    if (key == null)
        throw new GradleException("key is not specified");
    if (file == null)
        throw new GradleException("file is not specified");
    if (file.isFile() == false)
        throw new GradleException("file must be regular file");

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    TransferManager s3mgr = new TransferManager(s3);
    getLogger().info("Uploading... s3://{}/{}", bucketName, key);

    Upload upload = s3mgr.upload(
            new PutObjectRequest(getBucketName(), getKey(), getFile()).withMetadata(getObjectMetadata()));
    upload.addProgressListener(new ProgressListener() {
        public void progressChanged(ProgressEvent event) {
            getLogger().info(" {}% uploaded", upload.getProgress().getPercentTransferred());
        }
    });
    upload.waitForCompletion();

    setResourceUrl(((AmazonS3Client) s3).getResourceUrl(bucketName, key));
    getLogger().info("Upload completed: {}", getResourceUrl());
}
From source file: org.xmlsh.aws.gradle.s3.BulkUploadTask.java
License: BSD License
@TaskAction
public void upload() {
    // to enable conventionMappings feature
    String bucketName = getBucketName();
    String prefix = getNormalizedPrefix();
    FileTree source = getSource();

    AmazonS3PluginExtension ext = getProject().getExtensions().getByType(AmazonS3PluginExtension.class);
    AmazonS3 s3 = ext.getClient();

    getLogger().info("uploading... {} to s3://{}/{}", source, bucketName, prefix);

    source.visit(new EmptyFileVisitor() {
        public void visitFile(FileVisitDetails element) {
            String key = prefix + element.getRelativePath();
            getLogger().info(" => s3://{}/{}", bucketName, key);
            Closure<ObjectMetadata> metadataProvider = getMetadataProvider();
            s3.putObject(new PutObjectRequest(bucketName, key, element.getFile())
                    .withMetadata(metadataProvider == null ? null
                            : metadataProvider.call(getBucketName(), key, element.getFile())));
        }
    });
}