List of usage examples for com.amazonaws.services.s3.model.PutObjectRequest.setMetadata
public void setMetadata(ObjectMetadata metadata)
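Before the examples, a minimal sketch of a typical call, assuming a hypothetical bucket, key, and local file (the names are illustrative, not taken from any example below):

import java.io.File;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectMetadataSketch {
    public static void main(String[] args) {
        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
        // Build the metadata first, then attach it to the request before the PUT.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); // request SSE-S3
        metadata.addUserMetadata("origin", "example"); // stored as x-amz-meta-origin
        // Hypothetical bucket/key/file names:
        PutObjectRequest request = new PutObjectRequest("my-bucket", "my/key.txt", new File("local.txt"));
        request.setMetadata(metadata);
        s3Client.putObject(request);
    }
}

Note that setMetadata replaces any metadata already attached to the request; several examples below (e.g. S3RequestDecorator) therefore reuse request.getMetadata() when it is non-null instead of overwriting it.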
From source file: org.apache.hadoop.fs.s3a.S3AOutputStream.java
License: Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }
    backupStream.close();
    LOG.info("OutputStream for key '" + key + "' closed. Now beginning upload");
    LOG.info("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
    try {
        TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
        transferConfiguration.setMinimumUploadPartSize(partSize);
        transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);
        TransferManager transfers = new TransferManager(client);
        transfers.setConfiguration(transferConfiguration);
        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);
        upload.waitForUploadResult();
        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }
        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: " + backupFile);
        }
        super.close();
        closed = true;
    }
    LOG.info("OutputStream for key '" + key + "' upload complete");
}
From source file: org.apache.hadoop.fs.s3r.S3RFileSystem.java
License: Apache License

/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);
    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Copying local file from " + src + " to " + dst);
    }
    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);
    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);
    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };
    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }
    // This will delete unnecessary fake parent directories
    finishedWrite(key);
    if (delSrc) {
        local.delete(src, false);
    }
}
From source file: org.apache.hadoop.fs.s3r.S3ROutputStream.java
License: Apache License

@Override
public synchronized void close() throws IOException {
    if (closed) {
        return;
    }
    backupStream.close();
    if (LOG.isDebugEnabled()) {
        LOG.debug("OutputStream for key '" + key + "' closed. Now beginning upload");
        LOG.debug("Minimum upload part size: " + partSize + " threshold " + partSizeThreshold);
    }
    try {
        final ObjectMetadata om = new ObjectMetadata();
        if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
            om.setServerSideEncryption(serverSideEncryptionAlgorithm);
        }
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, backupFile);
        putObjectRequest.setCannedAcl(cannedACL);
        putObjectRequest.setMetadata(om);
        Upload upload = transfers.upload(putObjectRequest);
        ProgressableProgressListener listener = new ProgressableProgressListener(upload, progress, statistics);
        upload.addProgressListener(listener);
        upload.waitForUploadResult();
        long delta = upload.getProgress().getBytesTransferred() - listener.getLastBytesTransferred();
        if (statistics != null && delta != 0) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("S3A write delta changed after finished: " + delta + " bytes");
            }
            statistics.incrementBytesWritten(delta);
        }
        // This will delete unnecessary fake parent directories
        fs.finishedWrite(key);
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (!backupFile.delete()) {
            LOG.warn("Could not delete temporary s3a file: {}", backupFile);
        }
        super.close();
        closed = true;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("OutputStream for key '" + key + "' upload complete");
    }
}
From source file: org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3RequestDecorator.java
License: Apache License

/**
 * Set encryption in {@link PutObjectRequest}
 */
public PutObjectRequest decorate(PutObjectRequest request) {
    switch (getDataEncryption()) {
    case SSE_S3:
        ObjectMetadata metadata = request.getMetadata() == null ? new ObjectMetadata() : request.getMetadata();
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setMetadata(metadata);
        break;
    case NONE:
        break;
    }
    return request;
}
From source file: org.apache.oodt.cas.filemgr.datatransfer.S3DataTransferer.java
License: Apache License

@Override
public void transferProduct(Product product) throws DataTransferException, IOException {
    for (Reference ref : product.getProductReferences()) {
        String origRef = stripProtocol(ref.getOrigReference(), false);
        String dataStoreRef = stripProtocol(ref.getDataStoreReference(), true);
        try {
            PutObjectRequest request = new PutObjectRequest(bucketName, dataStoreRef, new File(origRef));
            if (encrypt) {
                ObjectMetadata requestMetadata = new ObjectMetadata();
                requestMetadata.setServerSideEncryption(AES_256_SERVER_SIDE_ENCRYPTION);
                request.setMetadata(requestMetadata);
            }
            s3Client.putObject(request);
        } catch (AmazonClientException e) {
            throw new DataTransferException(
                    String.format("Failed to upload product reference %s to S3 at %s", origRef, dataStoreRef), e);
        }
    }
}
From source file: org.apache.zeppelin.notebook.repo.OldS3NotebookRepo.java
License: Apache License

@Override
public void save(Note note, AuthenticationInfo subject) throws IOException {
    String json = note.toJson();
    String key = user + "/" + "notebook" + "/" + note.getId() + "/" + "note.json";
    File file = File.createTempFile("note", "json");
    try {
        Writer writer = new OutputStreamWriter(new FileOutputStream(file));
        writer.write(json);
        writer.close();
        PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file);
        if (useServerSideEncryption) {
            // Request server-side encryption.
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putRequest.setMetadata(objectMetadata);
        }
        s3client.putObject(putRequest);
    } catch (AmazonClientException ace) {
        throw new IOException("Unable to store note in S3: " + ace, ace);
    } finally {
        FileUtils.deleteQuietly(file);
    }
}
From source file: org.apache.zeppelin.notebook.repo.S3NotebookRepo.java
License: Apache License

@Override
public void save(Note note, AuthenticationInfo subject) throws IOException {
    String json = note.toJson();
    String key = rootFolder + "/" + buildNoteFileName(note);
    File file = File.createTempFile("note", "zpln");
    try {
        Writer writer = new OutputStreamWriter(new FileOutputStream(file));
        writer.write(json);
        writer.close();
        PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file);
        if (useServerSideEncryption) {
            // Request server-side encryption.
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            putRequest.setMetadata(objectMetadata);
        }
        s3client.putObject(putRequest);
    } catch (AmazonClientException ace) {
        throw new IOException("Unable to store note in S3: " + ace, ace);
    } finally {
        FileUtils.deleteQuietly(file);
    }
}
From source file: org.cloudifysource.esc.driver.provisioning.privateEc2.AmazonS3Uploader.java
License: Open Source License

/**
 * Upload file.
 *
 * @param bucketFullPath
 *            The path of the bucket where to upload the file.
 * @param file
 *            The file to upload.
 * @return The uploaded S3 object.
 */
public S3Object uploadFile(final String bucketFullPath, final File file) {
    final BucketLifecycleConfiguration.Rule ruleArchiveAndExpire = new BucketLifecycleConfiguration.Rule()
            .withId("Delete cloudFolder archives")
            .withPrefix(this.extractPrefix(bucketFullPath) + ZIP_PREFIX)
            .withExpirationInDays(1)
            .withStatus(BucketLifecycleConfiguration.ENABLED.toString());
    final List<BucketLifecycleConfiguration.Rule> rules = new ArrayList<BucketLifecycleConfiguration.Rule>();
    rules.add(ruleArchiveAndExpire);
    final BucketLifecycleConfiguration configuration = new BucketLifecycleConfiguration().withRules(rules);
    this.s3client.setBucketLifecycleConfiguration(bucketFullPath, configuration);
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketFullPath, this.accessKey, file);
    putObjectRequest.setKey(file.getName());
    final ObjectMetadata metadata = new ObjectMetadata();
    putObjectRequest.setMetadata(metadata);
    this.s3client.putObject(putObjectRequest);
    final S3Object object = this.s3client.getObject(bucketFullPath, file.getName());
    return object;
}
From source file: org.entando.entando.plugins.jps3awsclient.aps.system.services.storage.AmazonS3StorageManager.java
License: Open Source License

public void store(IStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl)
        throws ApsSystemException {
    try {
        AmazonS3Client client = this.getS3Client();
        String bucketName = obj.getBucketName().toLowerCase();
        this.checkForAndCreateBucket(bucketName, client);
        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentType(obj.getContentType());
        omd.setContentLength(obj.getContentLength());
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, obj.getStoragePath(),
                obj.getInputStream(), omd);
        // Check if reduced redundancy is enabled
        if (reducedRedundancy) {
            putObjectRequest.setStorageClass(StorageClass.ReducedRedundancy);
        }
        if (null != obj.getUserMetadata()) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            putObjectRequest.setMetadata(objectMetadata);
            Iterator<String> iter = obj.getUserMetadata().keySet().iterator();
            while (iter.hasNext()) {
                String key = iter.next();
                objectMetadata.addUserMetadata(key, obj.getUserMetadata().get(key));
            }
        }
        client.putObject(putObjectRequest);
        // If we have an ACL, set access permissions for the data on S3
        if (acl != null) {
            client.setObjectAcl(bucketName, obj.getStoragePath(), acl);
        }
    } catch (Throwable t) {
        _logger.error("Error storing object", t);
        throw new ApsSystemException("Error storing object", t);
    }
}
From source file: org.finra.dm.dao.impl.S3DaoImpl.java
License: Apache License

@Override
public S3FileTransferResultsDto uploadFile(final S3FileTransferRequestParamsDto params) throws InterruptedException {
    LOGGER.info(String.format("Uploading %s local file to s3://%s/%s ...", params.getLocalPath(),
            params.getS3BucketName(), params.getS3KeyPrefix()));
    // Perform the transfer.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            // Get a handle to the local file.
            File localFile = new File(params.getLocalPath());
            // Create and prepare the metadata.
            ObjectMetadata metadata = new ObjectMetadata();
            prepareMetadata(params, metadata);
            // Create a put request and a transfer manager with the parameters and the metadata.
            PutObjectRequest putObjectRequest = new PutObjectRequest(params.getS3BucketName(),
                    params.getS3KeyPrefix(), localFile);
            putObjectRequest.setMetadata(metadata);
            return s3Operations.upload(putObjectRequest, transferManager);
        }
    });
    LOGGER.info("Local file \"" + params.getLocalPath() + "\" contains " + results.getTotalBytesTransferred()
            + " byte(s) which was successfully transferred to S3 key prefix \"" + params.getS3KeyPrefix()
            + "\" in bucket \"" + params.getS3BucketName() + "\" in "
            + DmDateUtils.formatDuration(results.getDurationMillis(), true));
    LOGGER.info(String.format("Overall transfer rate: %.2f kBytes/s (%.2f Mbits/s)",
            getTransferRateInKilobytesPerSecond(results.getTotalBytesTransferred(), results.getDurationMillis()),
            getTransferRateInMegabitsPerSecond(results.getTotalBytesTransferred(), results.getDurationMillis())));
    return results;
}