List of usage examples for com.amazonaws.services.s3.model ObjectMetadata setContentLength
public void setContentLength(long contentLength)
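Before the examples from real projects, here is a minimal sketch of a typical call. It assumes the usual com.amazonaws.services.s3 imports; the client, bucket name, and object key are hypothetical placeholders, not values taken from the source files below. Declaring the content length before a streaming upload lets the SDK send the size to S3 instead of buffering the whole stream to compute it.

// Minimal sketch (hypothetical client, bucket, and key): upload a small byte
// array and declare its exact size via setContentLength so the SDK does not
// have to buffer the stream to discover the length.
AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(payload.length);

s3Client.putObject(new PutObjectRequest(
        "example-bucket",                       // hypothetical bucket name
        "example/key.txt",                      // hypothetical object key
        new ByteArrayInputStream(payload),
        metadata));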
From source file:com.adeptj.modules.aws.s3.UploadResource.java
License:Apache License
@POST
@Path(PATH_UPLOAD)
@Consumes(MULTIPART_FORM_DATA)
@RequiresJwt
public Response uploadFile(@MultipartForm S3UploadForm form) {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength((long) form.getData().length);
    this.storageService.uploadFile(S3Request.builder()
            .bucketName(form.getBucketName())
            .key(form.getKey())
            .data(new BufferedInputStream(new ByteArrayInputStream(form.getData())))
            .metadata(metadata)
            .cannedACL(CannedAccessControlList.valueOf(form.getAccess()))
            .build());
    return Response.ok("File uploaded successfully!!").build();
}
From source file:com.aegeus.aws.SimpleStorageService.java
License:Apache License
public void createFolder(String bucket, String key) {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    InputStream nullObject = new ByteArrayInputStream(new byte[0]);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key + "/", nullObject, metadata);
    s3.putObject(putObjectRequest);
}
From source file:com.ALC.SC2BOAserver.aws.S3StorageManager.java
License:Open Source License
/**
 * Stores a given item on S3
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(SC2BOAStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3Client.putObject(request);

    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3Client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }
}
From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java
License:Apache License
public boolean init() {
    if (!s3.doesBucketExist(bucketName)) {
        return false;
    }

    String rawRootPath = rootPath.equals(rootSuffix) ? ""
            : rootSuffix.isEmpty() ? rootPath
            : rootPath.substring(0, rootPath.lastIndexOf(rootSuffix) - 1);

    if (!rawRootPath.isEmpty()) {
        try {
            ObjectMetadata rawRootMeta = s3.getObjectMetadata(bucketName, rawRootPath + "/");
        } catch (AmazonClientException ex) {
            return false;
        }
    }

    if (!rawRootPath.equals(rootPath)) {
        try {
            ObjectMetadata rootMeta = s3.getObjectMetadata(bucketName, rootPath + "/");
        } catch (AmazonClientException ex) {
            ObjectMetadata meta = new ObjectMetadata();
            meta.setContentLength(0);
            s3.putObject(bucketName, rootPath + "/", new ByteArrayInputStream(new byte[0]), meta);
        }
    }
    return true;
}
From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java
License:Apache License
@Override
public boolean createDirectory(String path) {
    path = trimPath(path);
    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        s3.putObject(bucketName, toAbsoluteDirPath(path), new ByteArrayInputStream(new byte[0]), meta);
    } catch (AmazonClientException ex) {
        return false;
    }
    return true;
}
From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java
License:Apache License
@Override
public boolean writeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    if (file.isLargeFile()) {
        return writeLargeFile(fileStream, file);
    }

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        s3.putObject(bucketName, toAbsoluteFilePath(file.getRelativePath()), fileStream, meta);
    } catch (AmazonClientException ex) {
        return false;
    }
    return true;
}
From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java
License:Apache License
public boolean writeLargeFile(InputStream fileStream, FileSnapshot file) {
    if (fileStream == null)
        return false;

    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getFileSize());
        meta.getUserMetadata().put("lmd", file.getModifiedTimestamp().toDate().getTime() + "");
        meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        List<PartETag> partTags = new ArrayList<>();
        String fileKey = toAbsoluteFilePath(file.getRelativePath());
        InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, fileKey, meta);
        InitiateMultipartUploadResult result = s3.initiateMultipartUpload(request);

        long contentLength = file.getFileSize();
        long partSize = 256 * 1024 * 1024;

        try {
            // Uploading the file, part by part.
            long filePosition = 0;
            for (int i = 1; filePosition < contentLength; i++) {
                partSize = Math.min(partSize, (contentLength - filePosition));

                // Creating the request for a part upload
                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                        .withKey(fileKey).withUploadId(result.getUploadId()).withPartNumber(i)
                        .withInputStream(fileStream).withPartSize(partSize);

                // Upload part and add response to the result list.
                partTags.add(s3.uploadPart(uploadRequest).getPartETag());
                filePosition += partSize;

                System.out.println("Uploaded " + Utils.readableFileSize(filePosition) + " out of "
                        + Utils.readableFileSize(contentLength));
            }
        } catch (Exception e) {
            System.out.println("UploadPartRequest failed: " + e.getMessage());
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, fileKey, result.getUploadId()));
            return false;
        }

        s3.completeMultipartUpload(
                new CompleteMultipartUploadRequest(bucketName, fileKey, result.getUploadId(), partTags));
    } catch (AmazonClientException ex) {
        System.out.println("Upload failed: " + ex.getMessage());
        return false;
    }
    return true;
}
From source file:com.amazon.aws.samplecode.travellog.aws.S3StorageManager.java
License:Open Source License
/**
 * Stores a given item on S3
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(TravelLogStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3client.putObject(request);

    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }
}
From source file:com.amazon.photosharing.utils.content.UploadThread.java
License:Open Source License
@Override
public void run() {
    ObjectMetadata meta_data = new ObjectMetadata();
    if (p_content_type != null)
        meta_data.setContentType(p_content_type);
    meta_data.setContentLength(p_size);

    PutObjectRequest putObjectRequest = new PutObjectRequest(p_bucket_name, p_s3_key, p_file_stream, meta_data);
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);

    PutObjectResult res = s3Client.putObject(putObjectRequest);
}
From source file:com.amazon.sqs.javamessaging.AmazonSQSExtendedClient.java
License:Open Source License
private void storeTextInS3(String s3Key, String messageContentStr, Long messageContentSize) {
    InputStream messageContentStream = new ByteArrayInputStream(
            messageContentStr.getBytes(StandardCharsets.UTF_8));

    ObjectMetadata messageContentStreamMetadata = new ObjectMetadata();
    messageContentStreamMetadata.setContentLength(messageContentSize);

    PutObjectRequest putObjectRequest = new PutObjectRequest(clientConfiguration.getS3BucketName(), s3Key,
            messageContentStream, messageContentStreamMetadata);
    try {
        clientConfiguration.getAmazonS3Client().putObject(putObjectRequest);
    } catch (AmazonServiceException e) {
        String errorMessage = "Failed to store the message content in an S3 object. SQS message was not sent.";
        LOG.error(errorMessage, e);
        throw new AmazonServiceException(errorMessage, e);
    } catch (AmazonClientException e) {
        String errorMessage = "Failed to store the message content in an S3 object. SQS message was not sent.";
        LOG.error(errorMessage, e);
        throw new AmazonClientException(errorMessage, e);
    }
}