List of usage examples for `PutObjectRequest#setCannedAcl` from `com.amazonaws.services.s3.model`
public void setCannedAcl(CannedAccessControlList cannedAcl)
From source file:org.alanwilliamson.amazon.s3.Write.java
License:Open Source License
private void writeData(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata, StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl, String aes256key, Map<String, String> customheaders) throws Exception { if (mimetype == null) { if (data.getDataType() == cfData.CFBINARYDATA) mimetype = "application/unknown"; else if (cfData.isSimpleValue(data)) mimetype = "text/plain"; else//from ww w. ja va 2s . c om mimetype = "application/json"; // Check to see if the mime type is in the metadata if (metadata != null && metadata.containsKey("Content-Type")) mimetype = metadata.get("Content-Type"); } InputStream ios = null; long size = 0; if (data.getDataType() == cfData.CFSTRINGDATA) { ios = new java.io.ByteArrayInputStream(data.getString().getBytes()); size = data.getString().length(); } else if (data.getDataType() == cfData.CFBINARYDATA) { ios = new java.io.ByteArrayInputStream(((cfBinaryData) data).getByteArray()); size = ((cfBinaryData) data).getLength(); } else { serializejson json = new serializejson(); StringBuilder out = new StringBuilder(); json.encodeJSON(out, data, false, CaseType.MAINTAIN, DateType.LONG); size = out.length(); mimetype = "application/json"; ios = new java.io.ByteArrayInputStream(out.toString().getBytes()); } // Setup the object data ObjectMetadata omd = new ObjectMetadata(); if (metadata != null) omd.setUserMetadata(metadata); omd.setContentType(mimetype); omd.setContentLength(size); AmazonS3 s3Client = getAmazonS3(amazonKey); // Let us run around the number of attempts int attempts = 0; while (attempts < retry) { try { PutObjectRequest por = new PutObjectRequest(bucket, key, ios, omd); por.setStorageClass(storage); if (aes256key != null && !aes256key.isEmpty()) por.setSSECustomerKey(new SSECustomerKey(aes256key)); if (acl != null && !acl.isEmpty()) por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl)); if (customheaders != null && !customheaders.isEmpty()) { Iterator<String> it = 
customheaders.keySet().iterator(); while (it.hasNext()) { String k = it.next(); por.putCustomRequestHeader(k, customheaders.get(k)); } } s3Client.putObject(por); break; } catch (Exception e) { cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt=" + (attempts + 1) + "; exception=" + e.getMessage() + ")"); attempts++; if (attempts == retry) throw e; else Thread.sleep(retryseconds * 1000); } } }
From source file:org.alanwilliamson.amazon.s3.Write.java
License:Open Source License
private void writeFile(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata, StorageClass storage, String localpath, int retry, int retryseconds, boolean deletefile, boolean background, String callback, String callbackdata, String appname, String acl, String aes256key, Map<String, String> customheaders) throws Exception { File localFile = new File(localpath); if (!localFile.isFile()) throw new Exception("The file specified does not exist: " + localpath); // Push this to the background loader to handle and return immediately if (background) { BackgroundUploader.acceptFile(amazonKey, bucket, key, metadata, storage, localpath, retry, retryseconds, deletefile, callback, callbackdata, appname, acl, aes256key, customheaders); return;//from w w w. j ava 2s.com } // Setup the object data ObjectMetadata omd = new ObjectMetadata(); if (metadata != null) omd.setUserMetadata(metadata); AmazonS3 s3Client = getAmazonS3(amazonKey); // Let us run around the number of attempts int attempts = 0; while (attempts < retry) { try { PutObjectRequest por = new PutObjectRequest(bucket, key, localFile); por.setMetadata(omd); por.setStorageClass(storage); if (acl != null && !acl.isEmpty()) por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl)); if (aes256key != null && !aes256key.isEmpty()) por.setSSECustomerKey(new SSECustomerKey(aes256key)); if (customheaders != null && !customheaders.isEmpty()) { Iterator<String> it = customheaders.keySet().iterator(); while (it.hasNext()) { String k = it.next(); por.putCustomRequestHeader(k, customheaders.get(k)); } } s3Client.putObject(por); break; } catch (Exception e) { cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "key=" + key + "; file=" + localFile + "; attempt=" + (attempts + 1) + "; exception=" + e.getMessage() + ")"); attempts++; if (attempts == retry) throw e; else Thread.sleep(retryseconds * 1000); } } // delete the file now that it is a success if (deletefile) localFile.delete(); }
From source file:org.apache.camel.component.aws.s3.S3Producer.java
License:Apache License
/**
 * Uploads the exchange body to S3 as a single object.
 *
 * <p>Object metadata (content length/type/encoding, cache control, MD5, user metadata,
 * etc.) is populated from the corresponding {@code S3Constants} headers when present.
 * The body is read as an {@link InputStream}; if the original body was a {@link File}
 * and {@code deleteAfterWrite} is enabled on the endpoint configuration, the file is
 * deleted after a successful upload.
 *
 * @param exchange the Camel exchange carrying the payload and S3 headers
 * @throws Exception if the body cannot be converted or the S3 put fails
 */
@Override
public void process(final Exchange exchange) throws Exception {
    // Copy any caller-supplied object metadata headers onto the request metadata.
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Long contentLength = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (contentLength != null) {
        objectMetadata.setContentLength(contentLength);
    }
    String contentType = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
    }
    String cacheControl = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControl != null) {
        objectMetadata.setCacheControl(cacheControl);
    }
    String contentDisposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (contentDisposition != null) {
        objectMetadata.setContentDisposition(contentDisposition);
    }
    String contentEncoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (contentEncoding != null) {
        objectMetadata.setContentEncoding(contentEncoding);
    }
    String contentMD5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (contentMD5 != null) {
        objectMetadata.setContentMD5(contentMD5);
    }
    Date lastModified = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModified != null) {
        objectMetadata.setLastModified(lastModified);
    }
    Map<String, String> userMetadata = exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class);
    if (userMetadata != null) {
        objectMetadata.setUserMetadata(userMetadata);
    }

    // Remember the original File body (if any) so it can be deleted after the upload.
    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    if (obj instanceof File) {
        filePayload = (File) obj;
    }

    // NOTE(review): the body is fetched twice — once raw (above) and once converted to
    // an InputStream here. For a File body the converter presumably opens a stream over
    // the file; that stream is closed in the deleteAfterWrite branch below.
    PutObjectRequest putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), exchange.getIn().getMandatoryBody(InputStream.class), objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        // Throws IllegalArgumentException if the header is not a valid enum name.
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }

    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    LOG.trace("Received result [{}]", putObjectResult);

    // Propagate the S3 response identifiers back onto the out message.
    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    // Clean up the source file once it has been uploaded, if configured to do so.
    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        IOHelper.close(putObjectRequest.getInputStream());
        FileUtil.deleteFile(filePayload);
    }
}
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/** * The src file is on the local disk. Add it to FS at * the given dst name./*from w ww. j a va 2s. co m*/ * * This version doesn't need to create a temporary file to calculate the md5. Sadly this doesn't seem to be * used by the shell cp :( * * delSrc indicates if the source should be removed * @param delSrc whether to delete the src * @param overwrite whether to overwrite an existing file * @param src path * @param dst path */ @Override public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException { String key = pathToKey(dst); if (!overwrite && exists(dst)) { throw new IOException(dst + " already exists"); } LOG.info("Copying local file from " + src + " to " + dst); // Since we have a local file, we don't need to stream into a temporary file LocalFileSystem local = getLocal(getConf()); File srcfile = local.pathToFile(src); TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration(); transferConfiguration.setMinimumUploadPartSize(partSize); transferConfiguration.setMultipartUploadThreshold(partSizeThreshold); TransferManager transfers = new TransferManager(s3); transfers.setConfiguration(transferConfiguration); final ObjectMetadata om = new ObjectMetadata(); if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) { om.setServerSideEncryption(serverSideEncryptionAlgorithm); } PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile); putObjectRequest.setCannedAcl(cannedACL); putObjectRequest.setMetadata(om); ProgressListener progressListener = new ProgressListener() { public void progressChanged(ProgressEvent progressEvent) { switch (progressEvent.getEventCode()) { case ProgressEvent.PART_COMPLETED_EVENT_CODE: statistics.incrementWriteOps(1); break; } } }; Upload up = transfers.upload(putObjectRequest); up.addProgressListener(progressListener); try { up.waitForUploadResult(); statistics.incrementWriteOps(1); } catch (InterruptedException e) { throw new IOException("Got 
interrupted, cancelling"); } finally { transfers.shutdownNow(false); } // This will delete unnecessary fake parent directories finishedWrite(key); if (delSrc) { local.delete(src, false); } }
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/**
 * Stores a zero-byte object at the given bucket/key (used for directory markers),
 * applying the filesystem's canned ACL and any configured server-side encryption.
 *
 * @param bucketName bucket to write into
 * @param objectName key of the empty object to create
 * @throws AmazonClientException  on client-side failure
 * @throws AmazonServiceException on service-side failure
 */
private void createEmptyObject(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    // A stream that is already at end-of-file: every read reports EOF.
    final InputStream emptyStream = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };

    final ObjectMetadata emptyObjectMetadata = new ObjectMetadata();
    emptyObjectMetadata.setContentLength(0L);
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        emptyObjectMetadata.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    final PutObjectRequest emptyObjectRequest =
            new PutObjectRequest(bucketName, objectName, emptyStream, emptyObjectMetadata);
    emptyObjectRequest.setCannedAcl(cannedACL);

    s3.putObject(emptyObjectRequest);
    statistics.incrementWriteOps(1);
}
From source file:org.apache.hadoop.fs.s3a.S3AOutputStream.java
License:Apache License
@Override public synchronized void close() throws IOException { if (closed) { return;/* w w w . j a v a 2 s.com*/ } backupStream.close(); LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload"); LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold); try { TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration(); transferConfiguration.setMinimumUploadPartSize(partSize); transferConfiguration.setMultipartUploadThreshold(partSizeThreshold); TransferManager transfers = new TransferManager(client); transfers.setConfiguration(transferConfiguration); final ObjectMetadata om = new ObjectMetadata(); if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) { om.setServerSideEncryption(serverSideEncryptionAlgorithm); } PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile); putObjectRequest.setCannedAcl(cannedACL); putObjectRequest.setMetadata(om); Upload upload = transfers.upload(putObjectRequest); ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics); upload.addProgressListener(listener); upload.waitForUploadResult(); long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred(); if (statistics != null && delta != 0) { if (LOG.isDebugEnabled()) { LOG.debug("S3A write delta changed after finished: " + delta + " bytes"); } statistics.incrementBytesWritten(delta); } // This will delete unnecessary fake parent directories fs.finishedWrite(key); } catch (InterruptedException e) { throw new IOException(e); } finally { if (!backupFile.delete()) { LOG.warn("Could not delete temporary s3a file: " + backupFile); } super.close(); closed = true; } LOG.info("OutputStream for key '" + key + "' upload complete"); }
From source file:org.apache.hadoop.fs.s3r.S3RFastOutputStream.java
License:Apache License
private void putObject() throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket, key); }/*from www. j av a 2 s .c om*/ final ObjectMetadata om = createDefaultMetadata(); om.setContentLength(buffer.size()); final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, new ByteArrayInputStream(buffer.toByteArray()), om); putObjectRequest.setCannedAcl(cannedACL); putObjectRequest.setGeneralProgressListener(progressListener); ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() { @Override public PutObjectResult call() throws Exception { return client.putObject(putObjectRequest); } }); //wait for completion try { putObjectResult.get(); } catch (InterruptedException ie) { LOG.warn("Interrupted object upload:" + ie, ie); Thread.currentThread().interrupt(); } catch (ExecutionException ee) { throw new IOException("Regular upload failed", ee.getCause()); } }
From source file:org.apache.hadoop.fs.s3r.S3RFileSystem.java
License:Apache License
/** * The src file is on the local disk. Add it to FS at * the given dst name.// ww w .ja v a 2 s.c om * * This version doesn't need to create a temporary file to calculate the md5. * Sadly this doesn't seem to be used by the shell cp :( * * delSrc indicates if the source should be removed * @param delSrc whether to delete the src * @param overwrite whether to overwrite an existing file * @param src path * @param dst path */ @Override public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException { String key = pathToKey(dst); if (!overwrite && exists(dst)) { throw new IOException(dst + " already exists"); } if (LOG.isDebugEnabled()) { LOG.debug("Copying local file from " + src + " to " + dst); } // Since we have a local file, we don't need to stream into a temporary file LocalFileSystem local = getLocal(getConf()); File srcfile = local.pathToFile(src); final ObjectMetadata om = new ObjectMetadata(); if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) { om.setServerSideEncryption(serverSideEncryptionAlgorithm); } PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile); putObjectRequest.setCannedAcl(cannedACL); putObjectRequest.setMetadata(om); ProgressListener progressListener = new ProgressListener() { public void progressChanged(ProgressEvent progressEvent) { switch (progressEvent.getEventCode()) { case ProgressEvent.PART_COMPLETED_EVENT_CODE: statistics.incrementWriteOps(1); break; default: break; } } }; Upload up = transfers.upload(putObjectRequest); up.addProgressListener(progressListener); try { up.waitForUploadResult(); statistics.incrementWriteOps(1); } catch (InterruptedException e) { throw new IOException("Got interrupted, cancelling"); } // This will delete unnecessary fake parent directories finishedWrite(key); if (delSrc) { local.delete(src, false); } }
From source file:org.apache.hadoop.fs.s3r.S3ROutputStream.java
License:Apache License
@Override public synchronized void close() throws IOException { if (closed) { return;/* w ww . j a v a2 s .c om*/ } backupStream.close(); if (LOG.isDebugEnabled()) { LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload"); LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold); } try { final ObjectMetadata om = new ObjectMetadata(); if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) { om.setServerSideEncryption(serverSideEncryptionAlgorithm); } PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile); putObjectRequest.setCannedAcl(cannedACL); putObjectRequest.setMetadata(om); Upload upload = transfers.upload(putObjectRequest); ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics); upload.addProgressListener(listener); upload.waitForUploadResult(); long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred(); if (statistics != null && delta != 0) { if (LOG.isDebugEnabled()) { LOG.debug("S3A write delta changed after finished: " + delta + " bytes"); } statistics.incrementBytesWritten(delta); } // This will delete unnecessary fake parent directories fs.finishedWrite(key); } catch (InterruptedException e) { throw new IOException(e); } finally { if (!backupFile.delete()) { LOG.warn("Could not delete temporary s3a file: {}", backupFile); } super.close(); closed = true; } if (LOG.isDebugEnabled()) { LOG.debug("OutputStream for key '" + key + "' upload complete"); } }
From source file:org.broadleafcommerce.vendor.amazon.s3.S3FileServiceProvider.java
License:Apache License
/**
 * Uploads each file from the work area to S3, skipping files whose remote copy already
 * has the same content length, and returns the work-area-relative resource paths.
 *
 * <p>Files matching the configured static-asset extension pattern are made publicly
 * readable via a canned ACL. Upload timings are traced when trace logging is enabled.
 *
 * @param s3config  S3 configuration (bucket name, asset-extension pattern)
 * @param s3        client used for metadata lookups and uploads
 * @param workArea  work area that must contain every file in {@code files}
 * @param files     files to upload
 * @param removeFilesFromWorkArea not used in this method — TODO confirm whether a caller
 *        relies on it elsewhere
 * @throws FileServiceException if a file is not located under the work area
 */
protected List<String> addOrUpdateResourcesInternal(S3Configuration s3config, AmazonS3Client s3,
        FileWorkArea workArea, List<File> files, boolean removeFilesFromWorkArea) {
    final List<String> resourcePaths = new ArrayList<String>();
    for (final File srcFile : files) {
        // Guard: every file must live under the work area root.
        if (!srcFile.getAbsolutePath().startsWith(workArea.getFilePathLocation())) {
            throw new FileServiceException("Attempt to update file " + srcFile.getAbsolutePath()
                    + " that is not in the passed in WorkArea " + workArea.getFilePathLocation());
        }

        final long ts1 = System.currentTimeMillis();
        // Path relative to the work area root, used as the base of the S3 key.
        final String fileName = srcFile.getAbsolutePath().substring(workArea.getFilePathLocation().length());
        final String resourceName = buildResourceName(s3config, fileName);

        // Probe for an existing remote object; a missing object surfaces as an
        // AmazonS3Exception, which we treat as "not present". NOTE(review): this also
        // swallows non-404 errors (e.g. auth failures) and falls through to re-upload.
        ObjectMetadata meta = null;
        try {
            final GetObjectMetadataRequest get = new GetObjectMetadataRequest(s3config.getDefaultBucketName(),
                    resourceName);
            meta = s3.getObjectMetadata(get);
        } catch (AmazonS3Exception ex) {
            meta = null;
        }
        final long ts2 = System.currentTimeMillis();

        // Upload only when the object is missing or its size differs from the local file.
        // NOTE(review): equal content length is used as a proxy for "unchanged content".
        if (meta == null || meta.getContentLength() != srcFile.length()) {
            final PutObjectRequest put = new PutObjectRequest(s3config.getDefaultBucketName(), resourceName,
                    srcFile);

            // Static assets (by extension) are made world-readable.
            if ((s3config.getStaticAssetFileExtensionPattern() != null) && s3config
                    .getStaticAssetFileExtensionPattern().matcher(getExtension(fileName)).matches()) {
                put.setCannedAcl(CannedAccessControlList.PublicRead);
            }

            s3.putObject(put);
            final long ts3 = System.currentTimeMillis();

            if (LOG.isTraceEnabled()) {
                final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
                final String msg = String.format(
                        "%s copied/updated to %s; queryTime = %dms; uploadTime = %dms; totalTime = %dms",
                        srcFile.getAbsolutePath(), s3Uri, ts2 - ts1, ts3 - ts2, ts3 - ts1);
                LOG.trace(msg);
            }
        } else {
            if (LOG.isTraceEnabled()) {
                final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
                final String msg = String.format(
                        "%s already at %s with same filesize = %dbytes; queryTime = %dms",
                        srcFile.getAbsolutePath(), s3Uri, srcFile.length(), ts2 - ts1);
                LOG.trace(msg);
            }
        }
        resourcePaths.add(fileName);
    }
    return resourcePaths;
}