List of usage examples for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata
public ObjectMetadata()
From source file:org.apache.nifi.minifi.c2.cache.s3.S3OutputStream.java
License:Apache License
private MultipartUpload newMultipartUpload() throws IOException { InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, key, new ObjectMetadata()); try {//w w w. ja va2 s .co m return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId()); } catch (AmazonClientException e) { throw new IOException("Unable to initiate MultipartUpload: " + e, e); } }
From source file:org.apache.nifi.processors.aws.s3.AbstractS3IT.java
License:Apache License
/**
 * Uploads the given file to the test bucket under {@code key}, requesting
 * SSE-S3 (AES-256) server-side encryption for the stored object.
 *
 * @param key  object key to store the file under
 * @param file local file whose contents are uploaded
 * @throws FileNotFoundException if {@code file} does not exist
 */
protected void putTestFileEncrypted(String key, File file) throws AmazonS3Exception, FileNotFoundException {
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    final PutObjectRequest request =
            new PutObjectRequest(BUCKET_NAME, key, new FileInputStream(file), metadata);
    client.putObject(request);
}
From source file:org.apache.nifi.processors.aws.s3.PutS3Object.java
License:Apache License
/**
 * Uploads the incoming FlowFile's content to Amazon S3, choosing between a
 * single PutObject call and a resumable multipart upload based on the
 * configured multipart threshold.  On success the FlowFile is enriched with
 * S3 result attributes (etag, version id, expiration, storage class, user
 * metadata) and routed to REL_SUCCESS; failures are penalized and routed to
 * REL_FAILURE, except for a deliberate processor unschedule, which rolls the
 * session back so the FlowFile can be retried.
 */
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    // cacheKey names the local multipart-resume state for this processor/bucket/key.
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    // Dynamic processor properties become S3 user metadata.
                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }
                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(), currentState.getFilePosition(),
                                                currentState.getContentLength(), bucket, key,
                                                currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            // Resuming: skip the part of the stream that was already uploaded.
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            // Abort cleanly (via exception) if the processor was unscheduled mid-upload.
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);
        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });

        // Upload finished; resume state is no longer needed.
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        // An unschedule is not an error: roll back so the FlowFile is retried later.
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }
}
From source file:org.apache.oodt.cas.filemgr.datatransfer.S3DataTransferer.java
License:Apache License
/**
 * Uploads every reference of the given product to the configured S3 bucket,
 * optionally requesting server-side encryption for each object.
 *
 * @param product product whose references are transferred
 * @throws DataTransferException if any reference fails to upload
 */
@Override
public void transferProduct(Product product) throws DataTransferException, IOException {
    for (Reference ref : product.getProductReferences()) {
        String source = stripProtocol(ref.getOrigReference(), false);
        String destination = stripProtocol(ref.getDataStoreReference(), true);
        try {
            PutObjectRequest request = new PutObjectRequest(bucketName, destination, new File(source));
            if (encrypt) {
                ObjectMetadata requestMetadata = new ObjectMetadata();
                requestMetadata.setServerSideEncryption(AES_256_SERVER_SIDE_ENCRYPTION);
                request.setMetadata(requestMetadata);
            }
            s3Client.putObject(request);
        } catch (AmazonClientException e) {
            throw new DataTransferException(
                    String.format("Failed to upload product reference %s to S3 at %s", source, destination), e);
        }
    }
}
From source file:org.apache.storm.s3.output.S3MemBufferedOutputStream.java
License:Apache License
public void close(T key, String identifier, long rotation) throws IOException { String name = fileNameFormat.getName(key, identifier, rotation, System.currentTimeMillis()); LOG.info("uploading {}/{} to S3", bucketName, name); outputStream.close();//from w ww . j av a 2 s. c o m final byte[] buf = outputStream.toByteArray(); InputStream input = new ByteArrayInputStream(buf); ObjectMetadata meta = new ObjectMetadata(); meta.setContentType(contentType); meta.setContentLength(buf.length); uploader.upload(key, bucketName, name, input, meta); input.close(); }
From source file:org.apache.streams.s3.S3OutputStreamWrapper.java
License:Apache License
private void addFile() throws Exception { InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray()); int contentLength = outputStream.size(); TransferManager transferManager = new TransferManager(amazonS3Client); ObjectMetadata metadata = new ObjectMetadata(); metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate()); metadata.setContentLength(contentLength); metadata.addUserMetadata("writer", "org.apache.streams"); for (String s : metaData.keySet()) metadata.addUserMetadata(s, metaData.get(s)); String fileNameToWrite = path + fileName; Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata); try {/*from www . jav a2s . com*/ upload.waitForUploadResult(); is.close(); transferManager.shutdownNow(false); LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName); } catch (Exception e) { // No Op } }
From source file:org.apache.usergrid.apm.util.CrashLogUploadToS3Simulator.java
License:Apache License
/**
 * Uploads a simulated crash-log payload (the file name itself, UTF-8 encoded)
 * to the configured S3 crash-log location for the given application.
 *
 * @param appId    application identifier used in the S3 key prefix
 * @param fileName crash-log file name; also used as the uploaded content
 * @return true when the upload succeeded, false otherwise
 */
public static boolean uploadSimulatedCrashLogsToS3(String appId, String fileName) {
    DeploymentConfig config = DeploymentConfig.geDeploymentConfig();
    AWSCredentials credentials = new BasicAWSCredentials(config.getAccessKey(), config.getSecretKey());
    AmazonS3Client s3Client = new AmazonS3Client(credentials);
    PutObjectRequest putObjectRequest;
    String s3FullFileName = config.getEnvironment() + "/crashlog/" + appId + "/" + fileName;
    try {
        ObjectMetadata metaData = new ObjectMetadata();
        metaData.setHeader(Headers.S3_CANNED_ACL, CannedAccessControlList.AuthenticatedRead);
        // BUG FIX: the metadata carrying the canned-ACL header was built but the
        // request was previously created with null metadata, so the ACL never applied.
        putObjectRequest = new PutObjectRequest(config.getS3LogBucket(), s3FullFileName,
                new ByteArrayInputStream(fileName.getBytes("UTF-8")), metaData);
        s3Client.putObject(putObjectRequest);
        return true;
    } catch (UnsupportedEncodingException e) {
        e.printStackTrace();
    }
    return false;
}
From source file:org.apache.usergrid.apm.util.GenerateSimulatedMobileData.java
License:Apache License
public static boolean uploadSimulatedCrashLogsToS3(App app, String fileName) { DeploymentConfig config = DeploymentConfig.geDeploymentConfig(); AWSCredentials credentials = new BasicAWSCredentials(config.getAccessKey(), config.getSecretKey()); AmazonS3Client s3Client = new AmazonS3Client(credentials); PutObjectRequest putObjectRequest;//from w ww . j a v a 2 s. c o m String s3FullFileName = config.getEnvironment() + "/" + config.getS3CrashLogFolder() + "/" + app.getFullAppName() + "/" + fileName; String sampleCrashFileName = null; if (fileName.endsWith(".crash")) //it's an iOS crash file sampleCrashFileName = "ios-crash-log-example.txt"; else if (fileName.endsWith(".stacktrace")) sampleCrashFileName = "android-crash-log-example.txt"; try { ObjectMetadata metaData = new ObjectMetadata(); metaData.setHeader(Headers.S3_CANNED_ACL, CannedAccessControlList.AuthenticatedRead); InputStream is = Thread.currentThread().getContextClassLoader() .getResourceAsStream(sampleCrashFileName); putObjectRequest = new PutObjectRequest(config.getS3LogBucket(), s3FullFileName, is, null); //new ByteArrayInputStream( //fileName.getBytes("UTF-8")),null); PutObjectResult result = s3Client.putObject(putObjectRequest); return true; } catch (Exception e) { e.printStackTrace(); } return false; }
From source file:org.apache.usergrid.services.assets.data.AWSBinaryStore.java
License:Apache License
/**
 * Stores the entity's asset binary in S3.  Content up to 5 MB is uploaded
 * with a single PutObject call; larger content is streamed through 5 MB
 * temp-file chunks as a multipart upload.  Uploads that exceed the
 * configured maximum size are aborted and the entity is updated with an
 * error entry in its file metadata.
 *
 * NOTE(review): assumes the first 5 MB read into memory determines the
 * single/multipart path — confirm callers never pass an already-consumed stream.
 */
@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {
    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Pull at most the first 5 MB to decide single-part vs multipart.
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);
    byte[] data = baos.toByteArray();
    InputStream awsInputStream = new ByteArrayInputStream(data);
    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());
    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);
    Boolean overSizeLimit = false;
    EntityManager em = emf.getEntityManager(appId);
    if (written < FIVE_MB) { // total smaller than 5mb
        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);
        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();
        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);
        em.update(entity);
    } else { // bigger than 5mb... dump 5 mb tmp files and upload from them
        written = 0; //reset written to 0, we still haven't wrote anything in fact
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();
        //get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName,
                uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);
        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);
        // determine max size file allowed, default to 50mb
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }
        // always allow files up to 5mb
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }
        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");
            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));
                if (isFirstChunck == true) {
                    // First part comes from the already-buffered 5 MB, not the pushback stream.
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;
                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break; //set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }
            FileInputStream chunk = new FileInputStream(tempFile);
            // Peek one byte ahead to know whether this is the last part.
            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);
            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }
        //check for flag here then abort.
        if (overSizeLimit) {
            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());
            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);
            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);
            //update the entity with the error.
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }
            int timesIterated = 20;
            //loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
            }
            if (timesIterated == 0) {
                logger.error("Files parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}
From source file:org.apache.zeppelin.notebook.repo.OldS3NotebookRepo.java
License:Apache License
@Override public void save(Note note, AuthenticationInfo subject) throws IOException { String json = note.toJson();// w ww .j a v a2s .c o m String key = user + "/" + "notebook" + "/" + note.getId() + "/" + "note.json"; File file = File.createTempFile("note", "json"); try { Writer writer = new OutputStreamWriter(new FileOutputStream(file)); writer.write(json); writer.close(); PutObjectRequest putRequest = new PutObjectRequest(bucketName, key, file); if (useServerSideEncryption) { // Request server-side encryption. ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); putRequest.setMetadata(objectMetadata); } s3client.putObject(putRequest); } catch (AmazonClientException ace) { throw new IOException("Unable to store note in S3: " + ace, ace); } finally { FileUtils.deleteQuietly(file); } }