List of usage examples for com.amazonaws.services.s3.model.ObjectMetadata: the ObjectMetadata() constructor

public ObjectMetadata()
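Before the project examples, here is a minimal sketch of the typical pattern: construct an empty ObjectMetadata, populate it, and attach it to a PutObjectRequest. The credentials, bucket name, key, and file below are hypothetical placeholders, not values taken from any of the examples that follow.

import java.io.File;

import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ObjectMetadataSketch {
    public static void main(String[] args) {
        // hypothetical credentials, bucket, key, and local file
        AmazonS3 s3 = new AmazonS3Client(new BasicAWSCredentials("ACCESS_KEY", "SECRET_KEY"));
        File file = new File("report.pdf");

        // start from an empty ObjectMetadata and populate it before the upload
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("application/pdf");
        metadata.setContentLength(file.length());

        PutObjectRequest request = new PutObjectRequest("example-bucket", "docs/report.pdf", file);
        request.setMetadata(metadata);
        s3.putObject(request);
    }
}

Setting the content length up front matters most when uploading from an InputStream rather than a File; without it the SDK may buffer the entire stream in memory to compute the length.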
From source file: org.akvo.flow.deploy.Deploy.java
License: Open Source License
private static void uploadS3(String accessKey, String secretKey, String s3Path, File file)
        throws AmazonServiceException, AmazonClientException {
    BasicAWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3 = new AmazonS3Client(credentials);
    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, s3Path, file);

    ObjectMetadata metadata = new ObjectMetadata();
    // set content type as android package file
    metadata.setContentType("application/vnd.android.package-archive");
    // set content length to length of file
    metadata.setContentLength(file.length());
    putRequest.setMetadata(metadata);
    // set access to public
    putRequest.setCannedAcl(CannedAccessControlList.PublicRead);

    // try to put the apk in S3
    PutObjectResult result = s3.putObject(putRequest);
    System.out.println("Apk uploaded successfully, with result ETag " + result.getETag());
}
From source file: org.alanwilliamson.amazon.s3.BackgroundUploader.java
License: Open Source License
private void uploadFile(Map<String, Object> jobFile) {
    File localFile = new File((String) jobFile.get("localpath"));
    if (!localFile.isFile()) {
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));

    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);

        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"),
                (String) jobFile.get("key"), localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));

        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));

        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));

        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map) jobFile.get("customheaders");
            Iterator<String> it = customheaders.keySet().iterator();
            while (it.hasNext()) {
                String k = it.next();
                por.putCustomRequestHeader(k, customheaders.get(k));
            }
        }

        long startTime = System.currentTimeMillis();
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();
        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));

        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);

        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();
    } catch (Exception e) {
        log(jobFile, "Failed=" + e.getMessage());
        callbackCfc(jobFile, false, e.getMessage());

        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;
        if (retry == attempt) {
            removeJobFile(jobFile);
        } else {
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }

        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));
    } finally {
        if (tm != null)
            tm.shutdownNow(true);
    }
}
From source file: org.alanwilliamson.amazon.s3.Write.java
License: Open Source License
private void writeData(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata,
        StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl,
        String aes256key, Map<String, String> customheaders) throws Exception {
    if (mimetype == null) {
        if (data.getDataType() == cfData.CFBINARYDATA)
            mimetype = "application/unknown";
        else if (cfData.isSimpleValue(data))
            mimetype = "text/plain";
        else
            mimetype = "application/json";

        // Check to see if the mime type is in the metadata
        if (metadata != null && metadata.containsKey("Content-Type"))
            mimetype = metadata.get("Content-Type");
    }

    InputStream ios = null;
    long size = 0;
    if (data.getDataType() == cfData.CFSTRINGDATA) {
        ios = new java.io.ByteArrayInputStream(data.getString().getBytes());
        size = data.getString().length();
    } else if (data.getDataType() == cfData.CFBINARYDATA) {
        ios = new java.io.ByteArrayInputStream(((cfBinaryData) data).getByteArray());
        size = ((cfBinaryData) data).getLength();
    } else {
        serializejson json = new serializejson();
        StringBuilder out = new StringBuilder();
        json.encodeJSON(out, data, false, CaseType.MAINTAIN, DateType.LONG);
        size = out.length();
        mimetype = "application/json";
        ios = new java.io.ByteArrayInputStream(out.toString().getBytes());
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (metadata != null)
        omd.setUserMetadata(metadata);
    omd.setContentType(mimetype);
    omd.setContentLength(size);

    AmazonS3 s3Client = getAmazonS3(amazonKey);

    // Let us run around the number of attempts
    int attempts = 0;
    while (attempts < retry) {
        try {
            PutObjectRequest por = new PutObjectRequest(bucket, key, ios, omd);
            por.setStorageClass(storage);

            if (aes256key != null && !aes256key.isEmpty())
                por.setSSECustomerKey(new SSECustomerKey(aes256key));

            if (acl != null && !acl.isEmpty())
                por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl));

            if (customheaders != null && !customheaders.isEmpty()) {
                Iterator<String> it = customheaders.keySet().iterator();
                while (it.hasNext()) {
                    String k = it.next();
                    por.putCustomRequestHeader(k, customheaders.get(k));
                }
            }

            s3Client.putObject(por);
            break;
        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt="
                    + (attempts + 1) + "; exception=" + e.getMessage() + ")");
            attempts++;
            if (attempts == retry)
                throw e;
            else
                Thread.sleep(retryseconds * 1000);
        }
    }
}
From source file: org.alanwilliamson.amazon.s3.Write.java
License: Open Source License
private void writeFile(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata,
        StorageClass storage, String localpath, int retry, int retryseconds, boolean deletefile,
        boolean background, String callback, String callbackdata, String appname, String acl,
        String aes256key, Map<String, String> customheaders) throws Exception {
    File localFile = new File(localpath);
    if (!localFile.isFile())
        throw new Exception("The file specified does not exist: " + localpath);

    // Push this to the background loader to handle and return immediately
    if (background) {
        BackgroundUploader.acceptFile(amazonKey, bucket, key, metadata, storage, localpath, retry,
                retryseconds, deletefile, callback, callbackdata, appname, acl, aes256key, customheaders);
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (metadata != null)
        omd.setUserMetadata(metadata);

    AmazonS3 s3Client = getAmazonS3(amazonKey);

    // Let us run around the number of attempts
    int attempts = 0;
    while (attempts < retry) {
        try {
            PutObjectRequest por = new PutObjectRequest(bucket, key, localFile);
            por.setMetadata(omd);
            por.setStorageClass(storage);

            if (acl != null && !acl.isEmpty())
                por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl));

            if (aes256key != null && !aes256key.isEmpty())
                por.setSSECustomerKey(new SSECustomerKey(aes256key));

            if (customheaders != null && !customheaders.isEmpty()) {
                Iterator<String> it = customheaders.keySet().iterator();
                while (it.hasNext()) {
                    String k = it.next();
                    por.putCustomRequestHeader(k, customheaders.get(k));
                }
            }

            s3Client.putObject(por);
            break;
        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; file=" + localFile
                    + "; attempt=" + (attempts + 1) + "; exception=" + e.getMessage() + ")");
            attempts++;
            if (attempts == retry)
                throw e;
            else
                Thread.sleep(retryseconds * 1000);
        }
    }

    // delete the file now that it is a success
    if (deletefile)
        localFile.delete();
}
From source file: org.anhonesteffort.p25.wav.WaveFileS3Sender.java
License: Open Source License
private ObjectMetadata metadata(long length) {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(length);
    metadata.addUserMetadata(METADATA_CHANNEL_ID, proto.toString(channelId));
    metadata.addUserMetadata(METADATA_TERMINATED, wasTerminated().toString());
    metadata.addUserMetadata(METADATA_START_TIME, getStartTime().toString());
    metadata.addUserMetadata(METADATA_END_TIME, getEndTime().toString());
    metadata.addUserMetadata(METADATA_LATITUDE, getLatitude().toString());
    metadata.addUserMetadata(METADATA_LONGITUDE, getLongitude().toString());
    return metadata;
}
From source file: org.apache.apex.malhar.lib.fs.s3.S3BlockUploadOperator.java
License: Apache License
/**
 * Creates the empty object metadata for the initiate multipart upload request.
 * @return the ObjectMetadata
 */
public ObjectMetadata createObjectMetadata() {
    return new ObjectMetadata();
}
From source file: org.apache.apex.malhar.lib.fs.s3.S3Reconciler.java
License: Apache License
/**
 * Uploads the file to Amazon S3 using the putObject API of the S3 client.
 */
@Override
protected void processCommittedData(FSRecordCompactionOperator.OutputMetaData outputMetaData) {
    try {
        Path path = new Path(outputMetaData.getPath());
        if (!fs.exists(path)) {
            logger.debug("Ignoring non-existent path assuming replay : {}", path);
            return;
        }
        FSDataInputStream fsinput = fs.open(path);

        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentLength(outputMetaData.getSize());
        String keyName = directoryName + Path.SEPARATOR + outputMetaData.getFileName();
        PutObjectRequest request = new PutObjectRequest(bucketName, keyName, fsinput, omd);

        if (outputMetaData.getSize() < Integer.MAX_VALUE) {
            request.getRequestClientOptions().setReadLimit((int) outputMetaData.getSize());
        } else {
            throw new RuntimeException("PutRequestSize greater than Integer.MAX_VALUE");
        }

        if (fs.exists(path)) {
            PutObjectResult result = s3client.putObject(request);
            logger.debug("File {} Uploaded at {}", keyName, result.getETag());
        }
    } catch (FileNotFoundException e) {
        logger.debug("Ignoring non-existent path assuming replay : {}", outputMetaData.getPath());
    } catch (IOException e) {
        logger.error("Unable to create Stream: {}", e.getMessage());
    }
}
From source file: org.apache.beam.sdk.io.aws.s3.S3WritableByteChannel.java
License: Apache License
S3WritableByteChannel(AmazonS3 amazonS3, S3ResourceId path, String contentType, S3Options options)
        throws IOException {
    this.amazonS3 = checkNotNull(amazonS3, "amazonS3");
    this.options = checkNotNull(options);
    this.path = checkNotNull(path, "path");
    checkArgument(
            atMostOne(options.getSSECustomerKey() != null, options.getSSEAlgorithm() != null,
                    options.getSSEAwsKeyManagementParams() != null),
            "Either SSECustomerKey (SSE-C) or SSEAlgorithm (SSE-S3)"
                    + " or SSEAwsKeyManagementParams (SSE-KMS) must not be set at the same time.");
    // Amazon S3 API docs: Each part must be at least 5 MB in size, except the last part.
    checkArgument(
            options.getS3UploadBufferSizeBytes() >= S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES,
            "S3UploadBufferSizeBytes must be at least %s bytes",
            S3UploadBufferSizeBytesFactory.MINIMUM_UPLOAD_BUFFER_SIZE_BYTES);
    this.uploadBuffer = ByteBuffer.allocate(options.getS3UploadBufferSizeBytes());
    eTags = new ArrayList<>();

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentType(contentType);
    if (options.getSSEAlgorithm() != null) {
        objectMetadata.setSSEAlgorithm(options.getSSEAlgorithm());
    }

    InitiateMultipartUploadRequest request =
            new InitiateMultipartUploadRequest(path.getBucket(), path.getKey())
                    .withStorageClass(options.getS3StorageClass())
                    .withObjectMetadata(objectMetadata);
    request.setSSECustomerKey(options.getSSECustomerKey());
    request.setSSEAwsKeyManagementParams(options.getSSEAwsKeyManagementParams());

    InitiateMultipartUploadResult result;
    try {
        result = amazonS3.initiateMultipartUpload(request);
    } catch (AmazonClientException e) {
        throw new IOException(e);
    }
    uploadId = result.getUploadId();
}
From source file: org.apache.camel.component.aws.s3.S3Producer.java
License: Apache License
@Override
public void process(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = new ObjectMetadata();

    Long contentLength = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (contentLength != null) {
        objectMetadata.setContentLength(contentLength);
    }

    String contentType = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
    }

    String cacheControl = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControl != null) {
        objectMetadata.setCacheControl(cacheControl);
    }

    String contentDisposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (contentDisposition != null) {
        objectMetadata.setContentDisposition(contentDisposition);
    }

    String contentEncoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (contentEncoding != null) {
        objectMetadata.setContentEncoding(contentEncoding);
    }

    String contentMD5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (contentMD5 != null) {
        objectMetadata.setContentMD5(contentMD5);
    }

    Date lastModified = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModified != null) {
        objectMetadata.setLastModified(lastModified);
    }

    Map<String, String> userMetadata = exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class);
    if (userMetadata != null) {
        objectMetadata.setUserMetadata(userMetadata);
    }

    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    if (obj instanceof File) {
        filePayload = (File) obj;
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), exchange.getIn().getMandatoryBody(InputStream.class), objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }

    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    LOG.trace("Received result [{}]", putObjectResult);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        IOHelper.close(putObjectRequest.getInputStream());
        FileUtil.deleteFile(filePayload);
    }
}
From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java
License: Apache License
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}