Usage examples for the com.amazonaws.services.s3.model.ObjectMetadata constructor
public ObjectMetadata()
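All of the examples below follow the same basic pattern: construct an empty ObjectMetadata, populate the headers and user metadata you need, and pass it along with the request. A minimal self-contained sketch of that pattern, assuming AWS SDK for Java v1 (1.11+); the bucket and key names are placeholders:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class ObjectMetadataBasics {
    public static void main(String[] args) {
        // Placeholder client; region and credentials come from the default provider chain
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] body = "hello".getBytes(StandardCharsets.UTF_8);

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(body.length);        // lets the SDK stream without buffering
        metadata.setContentType("text/plain");
        metadata.addUserMetadata("origin", "example"); // sent as the x-amz-meta-origin header

        // "example-bucket" and "example-key" are placeholder names
        s3.putObject("example-bucket", "example-key", new ByteArrayInputStream(body), metadata);
    }
}

Setting the content length up front matters for streams: without it the SDK buffers the whole stream in memory, as the warning in the ViPRS3Client example further down shows.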
From source file: com.emc.vipr.s3.s3api.java
License: Open Source License
/********************************* Object Operations ******************************/
public static void CreateObjectWithMeta(String S3_ACCESS_KEY_ID, String S3_SECRET_KEY, String S3_ENDPOINT,
        String S3_ViPR_NAMESPACE, String S3_BUCKET, String key, InputStream content, String metaKey,
        String metaValue) throws Exception {
    System.out.println("Access ID:" + S3_ACCESS_KEY_ID);
    System.out.println("Access secret:" + S3_SECRET_KEY);
    System.out.println("Access URL:" + S3_ENDPOINT);
    System.out.println("Access namespace:" + S3_ViPR_NAMESPACE);
    System.out.println("Access bucket:" + S3_BUCKET);
    System.out.println("Access key:" + key);

    ViPRS3Client s3 = getS3Client(S3_ACCESS_KEY_ID, S3_SECRET_KEY, S3_ENDPOINT, S3_ViPR_NAMESPACE);

    // create the object in the demo bucket
    if (metaKey.equals("") && metaValue.equals("")) {
        s3.putObject(S3_BUCKET, key, content, null);
    } else {
        ObjectMetadata obj = new ObjectMetadata();
        obj.addUserMetadata(metaKey, metaValue);
        s3.putObject(S3_BUCKET, key, content, obj);
    }
}
From source file: com.emc.vipr.s3.s3api.java
License: Open Source License
public static void CreateLargeObject(String S3_ACCESS_KEY_ID, String S3_SECRET_KEY, String S3_ENDPOINT,
        String S3_ViPR_NAMESPACE, String S3_BUCKET, String key, File file, String metaKey, String metaValue)
        throws Exception {
    System.out.println("Access ID:" + S3_ACCESS_KEY_ID);
    System.out.println("Access secret:" + S3_SECRET_KEY);
    System.out.println("Access URL:" + S3_ENDPOINT);
    System.out.println("Access namespace:" + S3_ViPR_NAMESPACE);
    System.out.println("Access bucket:" + S3_BUCKET);
    System.out.println("Access key:" + key);

    ViPRS3Client s3 = getS3Client(S3_ACCESS_KEY_ID, S3_SECRET_KEY, S3_ENDPOINT, S3_ViPR_NAMESPACE);

    ObjectMetadata objmeta = new ObjectMetadata();
    if (!(metaKey.equals("") && metaValue.equals(""))) {
        objmeta.addUserMetadata(metaKey, metaValue);
    }

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(S3_BUCKET, key)
            .withObjectMetadata(objmeta);
    InitiateMultipartUploadResult initResponse = s3.initiateMultipartUpload(initRequest);

    // Set part size to 1 MB. Note: AWS S3 proper enforces a 5 MB minimum for every part
    // except the last; 1 MB parts work against ViPR but would fail against Amazon S3.
    long partSize = 1 * 1024 * 1024;

    // list of UploadPartResponse objects for each part that is uploaded
    List<PartETag> partETags = new ArrayList<PartETag>();

    long filePosition = 0;
    for (int i = 1; filePosition < file.length(); i++) {
        // get the size of the chunk; the last part can be smaller than the chunk size
        partSize = Math.min(partSize, (file.length() - filePosition));

        System.out.println(String.format("Sending chunk [%d] starting at position [%d]", i, filePosition));

        // Create request to upload a part.
        UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(S3_BUCKET).withKey(key)
                .withUploadId(initResponse.getUploadId()).withPartNumber(i).withFileOffset(filePosition)
                .withFile(file).withPartSize(partSize);

        // Upload part and add response to our list.
        PartETag eTagPart = s3.uploadPart(uploadRequest).getPartETag();
        partETags.add(eTagPart);

        // set file position to the next part in the file
        filePosition += partSize;
    }

    System.out.println("Waiting for completion of multi-part upload");
    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(S3_BUCKET, key,
            initResponse.getUploadId(), partETags);
    s3.completeMultipartUpload(compRequest);
}
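For comparison, the stock SDK's TransferManager automates the same part-splitting loop, including the part-size rules. A minimal sketch, not taken from the source above; the client configuration, bucket, key, and file names are assumptions:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import java.io.File;

public class TransferManagerUpload {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();   // placeholder client configuration
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            ObjectMetadata meta = new ObjectMetadata();
            meta.addUserMetadata("source", "example");         // hypothetical metadata entry
            // "example-bucket", "example-key", and the file name are placeholders
            PutObjectRequest req = new PutObjectRequest("example-bucket", "example-key",
                    new File("large-file.bin")).withMetadata(meta);
            tm.upload(req).waitForCompletion();                // multipart is used automatically for large files
        } finally {
            tm.shutdownNow(false);                             // keep the underlying client alive
        }
    }
}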
From source file: com.emc.vipr.s3.sample._05_CreateObjectWithMetadata.java
License: Open Source License
public static void main(String[] args) throws Exception {
    // create the ViPR S3 Client
    ViPRS3Client s3 = ViPRS3Factory.getS3Client();

    // retrieve the object key and value from the user
    System.out.println("Enter the object key:");
    String key = new BufferedReader(new InputStreamReader(System.in)).readLine();
    System.out.println("Enter the object content:");
    String content = new BufferedReader(new InputStreamReader(System.in)).readLine();

    // retrieve the object metadata key and value from the user
    System.out.println("Enter the metadata key:");
    String metaKey = new BufferedReader(new InputStreamReader(System.in)).readLine();
    System.out.println("Enter the metadata content:");
    String metaValue = new BufferedReader(new InputStreamReader(System.in)).readLine();

    // create the metadata
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata(metaKey, metaValue);

    // create the object with the metadata in the demo bucket
    s3.putObject(ViPRS3Factory.S3_BUCKET, key, new StringInputStream(content), metadata);

    // print out the object key/value and metadata key/value for validation
    System.out.println(String.format("created object [%s/%s] with metadata [%s=%s] and content: [%s]",
            ViPRS3Factory.S3_BUCKET, key, metaKey, metaValue, content));
}
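A short follow-up sketch (not part of the sample) showing how the metadata written above could be read back with the same client; note that S3 returns user-metadata keys in lower case:

// Hypothetical follow-up: fetch the object's metadata without downloading the content
ObjectMetadata fetched = s3.getObjectMetadata(ViPRS3Factory.S3_BUCKET, key);
// S3 normalizes user-metadata keys to lower case, so look it up accordingly
String storedValue = fetched.getUserMetadata().get(metaKey.toLowerCase());
System.out.println(String.format("read back metadata [%s=%s]", metaKey, storedValue));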
From source file: com.emc.vipr.services.s3.ViPRS3Client.java
License: Open Source License
/**
 * Executes a (subclass of) PutObjectRequest. In particular, we check for subclasses
 * of UpdateObjectRequest and inject the value of the Range header. This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 *
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream, since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of it.
         */
        log.warn("No content length specified for stream data. "
                + "Stream contents will be buffered in memory and could result in "
                + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        com.amazonaws.event.ProgressReportingInputStream progressReportingInputStream =
                new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        input = progressReportingInputStream; // wrap the stream so progress events fire as it is read
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }
        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available. Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);
    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/updates (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload. "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3. "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor,
            com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
From source file: com.erudika.para.storage.AWSFileStore.java
License: Apache License
@Override
public String store(String path, InputStream data) {
    if (StringUtils.startsWith(path, "/")) {
        path = path.substring(1);
    }
    if (StringUtils.isBlank(path) || data == null) {
        return null;
    }
    int maxFileSizeMBytes = Config.getConfigInt("para.s3.max_filesize_mb", 10);
    try {
        // note: InputStream#available() only reports bytes readable without blocking,
        // so this size check is best-effort rather than an exact file size
        if (data.available() > 0 && data.available() <= (maxFileSizeMBytes * 1024 * 1024)) {
            ObjectMetadata om = new ObjectMetadata();
            om.setCacheControl("max-age=15552000, must-revalidate"); // 180 days
            if (path.endsWith(".gz")) {
                om.setContentEncoding("gzip");
                path = path.substring(0, path.length() - 3);
            }
            path = System.currentTimeMillis() + "." + path;
            PutObjectRequest por = new PutObjectRequest(bucket, path, data, om);
            por.setCannedAcl(CannedAccessControlList.PublicRead);
            por.setStorageClass(StorageClass.ReducedRedundancy);
            s3.putObject(por);
            return Utils.formatMessage(baseUrl, Config.AWS_REGION, bucket, path);
        }
    } catch (IOException e) {
        logger.error(null, e);
    } finally {
        try {
            data.close();
        } catch (IOException ex) {
            logger.error(null, ex);
        }
    }
    return null;
}
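A hedged usage sketch of the method above; the constructor call and file names are assumptions, since the real class is configured through Para's Config:

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class FileStoreUsage {
    public static void main(String[] args) throws IOException {
        // Hypothetical construction; bucket name is a placeholder
        AWSFileStore store = new AWSFileStore("my-para-bucket");
        try (InputStream in = new FileInputStream("styles.css.gz")) {
            // ".gz" is stripped and Content-Encoding: gzip is set, so the object is stored
            // under "<millis>.assets/styles.css" and decompressed transparently by browsers
            String url = store.store("assets/styles.css.gz", in);
            System.out.println(url);
        }
    }
}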
From source file: com.eucalyptus.blockstorage.S3SnapshotTransfer.java
License: Open Source License
private PutObjectResult uploadSnapshotAsSingleObject(String compressedSnapFileName, Long actualSize,
        Long uncompressedSize, SnapshotProgressCallback callback) throws Exception {
    callback.setUploadSize(actualSize);
    FileInputStreamWithCallback snapInputStream = new FileInputStreamWithCallback(
            new File(compressedSnapFileName), callback);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Map<String, String> userMetadataMap = new HashMap<String, String>();
    userMetadataMap.put(UNCOMPRESSED_SIZE_KEY, String.valueOf(uncompressedSize)); // send the uncompressed length as metadata
    // setUserMetadata replaces the entire user-metadata map, unlike addUserMetadata
    // (used in other examples), which adds a single entry
    objectMetadata.setUserMetadata(userMetadataMap);
    objectMetadata.setContentLength(actualSize);

    return retryAfterRefresh(new Function<PutObjectRequest, PutObjectResult>() {

        @Override
        @Nullable
        public PutObjectResult apply(@Nullable PutObjectRequest arg0) {
            eucaS3Client.refreshEndpoint();
            return eucaS3Client.putObject(arg0);
        }
    }, new PutObjectRequest(bucketName, keyName, snapInputStream, objectMetadata), REFRESH_TOKEN_RETRIES);
}
From source file: com.eucalyptus.blockstorage.S3SnapshotTransfer.java
License: Open Source License
private String initiateMultipartUpload(Long uncompressedSize) throws SnapshotInitializeMpuException {
    InitiateMultipartUploadResult initResponse = null;
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    Map<String, String> userMetadataMap = new HashMap<String, String>();
    userMetadataMap.put(UNCOMPRESSED_SIZE_KEY, String.valueOf(uncompressedSize)); // send the uncompressed length as metadata
    objectMetadata.setUserMetadata(userMetadataMap);
    initRequest.setObjectMetadata(objectMetadata);

    try {
        LOG.info("Initiating multipart upload: snapshotId=" + snapshotId + ", bucketName=" + bucketName
                + ", keyName=" + keyName);
        initResponse = retryAfterRefresh(
                new Function<InitiateMultipartUploadRequest, InitiateMultipartUploadResult>() {

                    @Override
                    @Nullable
                    public InitiateMultipartUploadResult apply(@Nullable InitiateMultipartUploadRequest arg0) {
                        eucaS3Client.refreshEndpoint();
                        return eucaS3Client.initiateMultipartUpload(arg0);
                    }
                }, initRequest, REFRESH_TOKEN_RETRIES);
    } catch (Exception ex) {
        throw new SnapshotInitializeMpuException("Failed to initialize multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName, ex);
    }

    if (StringUtils.isBlank(initResponse.getUploadId())) {
        throw new SnapshotInitializeMpuException("Invalid upload ID for multipart upload part for snapshotId="
                + snapshotId + ", bucketName=" + bucketName + ", keyName=" + keyName);
    }
    return initResponse.getUploadId();
}
From source file: com.eucalyptus.blockstorage.SnapshotObjectOps.java
License: Open Source License
public void uploadSnapshot(File snapshotFile, SnapshotProgressCallback callback, String snapshotKey,
        String snapshotId) throws EucalyptusCloudException {
    try {
        s3Client.getS3Client().listObjects(StorageProperties.SNAPSHOT_BUCKET);
    } catch (Exception ex) {
        // the listing failed, so try to create the bucket
        try {
            //if (!s3Client.getS3Client().doesBucketExist(StorageProperties.SNAPSHOT_BUCKET)) {
            s3Client.getS3Client().createBucket(StorageProperties.SNAPSHOT_BUCKET);
            //}
        } catch (Exception e) {
            LOG.error("Snapshot upload failed. Unable to create bucket: snapshots", e);
            throw new EucalyptusCloudException(e);
        }
    }
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(snapshotFile.length());
        //FIXME: need to set MD5
        s3Client.getS3Client().putObject(StorageProperties.SNAPSHOT_BUCKET, snapshotKey,
                new FileInputStreamWithCallback(snapshotFile, callback), metadata);
    } catch (Exception ex) {
        LOG.error("Snapshot " + snapshotId + " upload failed to: " + snapshotKey, ex);
        throw new EucalyptusCloudException(ex);
    }
}
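One way the FIXME above might be addressed, reusing the SDK utilities seen in the ViPRS3Client listing earlier; this is a sketch of the metadata setup inside the second try block, not the project's actual fix, and it assumes com.amazonaws.util.Md5Utils and com.amazonaws.util.BinaryUtils are on the classpath:

// Sketch: compute Content-MD5 up front so S3 can verify the payload server-side
ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(snapshotFile.length());
metadata.setContentMD5(BinaryUtils.toBase64(Md5Utils.computeMD5Hash(snapshotFile)));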
From source file: com.eucalyptus.loadbalancing.workflow.LoadBalancingActivitiesImpl.java
License: Open Source License
@Override
public AccessLogPolicyActivityResult modifyLoadBalancerAttributesCreateAccessLogPolicy(
        final String accountNumber, final String lbName, final Boolean accessLogEnabled,
        final String s3BucketName, final String s3BucketPrefix, final Integer emitInterval)
        throws LoadBalancingActivityException {
    final String ACCESSLOG_ROLE_POLICY_DOCUMENT = "{\"Statement\":" + "[ {" + "\"Action\": [\"s3:PutObject\"],"
            + "\"Effect\": \"Allow\","
            + "\"Resource\": [\"arn:aws:s3:::BUCKETNAME_PLACEHOLDER/BUCKETPREFIX_PLACEHOLDER\"]" + "}]}";

    AccessLogPolicyActivityResult result = new AccessLogPolicyActivityResult();
    result.setShouldRollback(false);

    if (!accessLogEnabled)
        return result;

    final String bucketName = s3BucketName;
    final String bucketPrefix = com.google.common.base.Objects.firstNonNull(s3BucketPrefix, "");
    final String roleName = getRoleName(accountNumber, lbName);
    final String policyName = ACCESSLOG_ROLE_POLICY_NAME;
    try {
        final List<String> policies = EucalyptusActivityTasks.getInstance().listRolePolicies(roleName);
        if (policies.contains(policyName)) {
            EucalyptusActivityTasks.getInstance().deleteRolePolicy(roleName, policyName);
        }
    } catch (final Exception ex) {
        ;
    }

    String policyDocument = ACCESSLOG_ROLE_POLICY_DOCUMENT.replace("BUCKETNAME_PLACEHOLDER", bucketName);
    if (bucketPrefix.length() > 0) {
        policyDocument = policyDocument.replace("BUCKETPREFIX_PLACEHOLDER", bucketPrefix + "/*");
    } else {
        policyDocument = policyDocument.replace("BUCKETPREFIX_PLACEHOLDER", "*");
    }

    try {
        EucalyptusActivityTasks.getInstance().putRolePolicy(roleName, policyName, policyDocument);
        result.setRoleName(roleName);
        result.setPolicyName(policyName);
        result.setShouldRollback(true);
    } catch (final Exception ex) {
        throw new LoadBalancingActivityException(
                "failed to put role policy for loadbalancer vm's access to S3 buckets");
    }

    try {
        final EucaS3Client s3c = getS3Client(roleName);
        final String key = s3BucketPrefix != null && !s3BucketPrefix.isEmpty()
                ? String.format("%s/AWSLogs/%s/ELBAccessLogTestFile", s3BucketPrefix, accountNumber)
                : String.format("AWSLogs/%s/ELBAccessLogTestFile", accountNumber);
        final DateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS");
        final String content = String.format("Enable AccessLog for ELB: %s at %s", lbName,
                df.format(new Date()));
        final PutObjectRequest req = new PutObjectRequest(bucketName, key,
                new ByteArrayInputStream(content.getBytes(StandardCharsets.UTF_8)), new ObjectMetadata())
                        .withCannedAcl(CannedAccessControlList.BucketOwnerFullControl);
        s3c.putObject(req);
    } catch (final Exception ex) {
        LOG.warn("Failed to put test key to the access log bucket");
    }
    return result;
}
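Because the test file's content is already in memory, the otherwise-empty ObjectMetadata could carry a content length and avoid the SDK's stream-buffering warning shown in the ViPRS3Client listing; a sketch of the adjusted request (a suggestion using the method's own variables, not the Eucalyptus code):

byte[] bytes = content.getBytes(StandardCharsets.UTF_8);
ObjectMetadata om = new ObjectMetadata();
om.setContentLength(bytes.length); // known up front, so the SDK need not buffer the stream
final PutObjectRequest req = new PutObjectRequest(bucketName, key, new ByteArrayInputStream(bytes), om)
        .withCannedAcl(CannedAccessControlList.BucketOwnerFullControl);
s3c.putObject(req);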
From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java
License: Open Source License
protected ObjectMetadata getS3ObjectMetadata(PutObjectType request) {
    ObjectMetadata meta = new ObjectMetadata();
    if (request.getMetaData() != null) {
        for (MetaDataEntry m : request.getMetaData()) {
            meta.addUserMetadata(m.getName(), m.getValue());
        }
    }

    if (!Strings.isNullOrEmpty(request.getContentLength())) {
        meta.setContentLength(Long.parseLong(request.getContentLength()));
    }

    if (!Strings.isNullOrEmpty(request.getContentMD5())) {
        meta.setContentMD5(request.getContentMD5());
    }

    if (!Strings.isNullOrEmpty(request.getContentType())) {
        meta.setContentType(request.getContentType());
    }

    return meta;
}