List of usage examples for the com.amazonaws.services.s3.model.ObjectMetadata constructor
public ObjectMetadata()
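Before the project examples below, here is a minimal, hypothetical sketch of how the no-argument constructor is typically used: create an empty ObjectMetadata, set the fields you need, and attach it to a PutObjectRequest. The bucket name, key, and file path are placeholder assumptions and are not taken from any of the source files listed on this page.

import java.io.File;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ObjectMetadataExample {
    public static void main(String[] args) {
        // Placeholder bucket, key, and file values -- replace with your own.
        String bucket = "my-bucket";
        String key = "docs/report.pdf";
        File file = new File("/tmp/report.pdf");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Create empty metadata with the no-argument constructor, then set the fields you need.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType("application/pdf");
        metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);

        // Attach the metadata to the upload request and send it.
        PutObjectRequest request = new PutObjectRequest(bucket, key, file).withMetadata(metadata);
        s3.putObject(request);
    }
}

When uploading a File the SDK computes the content length itself; when uploading from an InputStream, set it explicitly with metadata.setContentLength(...), as several of the examples below do.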
From source file:com.universal.storage.UniversalS3Storage.java
License:Open Source License
/**
 * This method uploads a file with a length greater than PART_SIZE (5 MB).
 *
 * @param file to be stored within the storage.
 * @param path is the path for this new file within the root.
 * @throws UniversalIOException when a specific IO error occurs.
 */
private void uploadFile(File file, String path) throws UniversalIOException {
    // Create a list of PartETag objects. You get one of these for each uploaded part.
    List<PartETag> partETags = new ArrayList<PartETag>();

    // Step 1: Initialize.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(this.settings.getRoot(),
            file.getName());
    InitiateMultipartUploadResult initResponse = this.s3client.initiateMultipartUpload(initRequest);

    long contentLength = file.length();
    long partSize = PART_SIZE; // Set part size to 5 MB.

    ObjectMetadata objectMetadata = new ObjectMetadata();
    if (this.settings.getEncryption()) {
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }

    List<Tag> tags = new ArrayList<Tag>();
    for (String key : this.settings.getTags().keySet()) {
        tags.add(new Tag(key, this.settings.getTags().get(key)));
    }

    try {
        this.triggerOnStoreFileListeners();

        // Step 2: Upload parts.
        long filePosition = 0;
        for (int i = 1; filePosition < contentLength; i++) {
            // The last part can be less than 5 MB. Adjust the part size.
            partSize = Math.min(partSize, (contentLength - filePosition));

            // Create the request to upload a part.
            UploadPartRequest uploadRequest = new UploadPartRequest()
                    .withBucketName(this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path)))
                    .withKey(file.getName()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withFileOffset(filePosition).withFile(file).withObjectMetadata(objectMetadata)
                    .withPartSize(partSize);

            // Upload the part and add the response to our list.
            partETags.add(this.s3client.uploadPart(uploadRequest).getPartETag());

            filePosition += partSize;
        }

        // Step 3: Complete.
        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path)), file.getName(),
                initResponse.getUploadId(), partETags);

        CompleteMultipartUploadResult result = this.s3client.completeMultipartUpload(compRequest);

        StorageClass storageClass = getStorageClass();
        if (storageClass != StorageClass.Standard) {
            CopyObjectRequest copyObjectRequest = new CopyObjectRequest(this.settings.getRoot(), file.getName(),
                    this.settings.getRoot(), file.getName()).withStorageClass(storageClass);
            this.s3client.copyObject(copyObjectRequest);
        }

        if (!tags.isEmpty()) {
            this.s3client.setObjectTagging(new SetObjectTaggingRequest(this.settings.getRoot(), file.getName(),
                    new ObjectTagging(tags)));
        }

        this.triggerOnFileStoredListeners(new UniversalStorageData(file.getName(),
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))) + "/"
                        + file.getName(),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        this.s3client.abortMultipartUpload(new AbortMultipartUploadRequest(this.settings.getRoot(),
                file.getName(), initResponse.getUploadId()));

        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}
From source file:com.universal.storage.UniversalS3Storage.java
License:Open Source License
/**
 * This method uploads a file with a length less than PART_SIZE (5 MB).
 *
 * @param file to be stored within the storage.
 * @param path is the path for this new file within the root.
 * @throws UniversalIOException when a specific IO error occurs.
 */
private void uploadTinyFile(File file, String path) throws UniversalIOException {
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        if (this.settings.getEncryption()) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }

        List<Tag> tags = new ArrayList<Tag>();
        for (String key : this.settings.getTags().keySet()) {
            tags.add(new Tag(key, this.settings.getTags().get(key)));
        }

        PutObjectRequest request = new PutObjectRequest(
                this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path)), file.getName(), file);
        request.setMetadata(objectMetadata);
        request.setTagging(new ObjectTagging(tags));
        request.setStorageClass(getStorageClass());

        this.triggerOnStoreFileListeners();

        PutObjectResult result = this.s3client.putObject(request);

        this.triggerOnFileStoredListeners(new UniversalStorageData(file.getName(),
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))) + "/"
                        + file.getName(),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}
From source file:com.universal.storage.UniversalS3Storage.java
License:Open Source License
/**
 * This method creates a new folder within the storage using the passed path. If the new folder name already
 * exists within the storage, this process will skip the creation step.
 *
 * Root = /s3storage/
 * path = /myFolder
 * Target = /s3storage/myFolder
 *
 * Root = /s3storage/
 * path = /folders/myFolder
 * Target = /s3storage/folders/myFolder
 *
 * @param path is the folder's path. A path must end with a forward slash '/'; the back slash '\' is not
 *             considered a folder indicator.
 * @throws UniversalIOException when a specific IO error occurs.
 * @throws IllegalArgumentException if path has an invalid value.
 */
void createFolder(String path) throws UniversalIOException {
    PathValidator.validatePath(path);

    if ("".equals(path.trim())) {
        UniversalIOException error = new UniversalIOException("Invalid path. The path shouldn't be empty.");
        this.triggerOnErrorListeners(error);
        throw error;
    }

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    try {
        PutObjectRequest putObjectRequest = new PutObjectRequest(this.settings.getRoot(),
                path.endsWith("/") ? path : (path + "/"), emptyContent, metadata);

        this.triggerOnCreateFolderListeners();

        PutObjectResult result = s3client.putObject(putObjectRequest);

        this.triggerOnFolderCreatedListeners(new UniversalStorageData(path,
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}
From source file:com.upplication.s3fs.S3FileSystemProvider.java
License:Open Source License
@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> options,
        FileAttribute<?>... attrs) throws IOException {
    Preconditions.checkArgument(path instanceof S3Path, "path must be an instance of %s",
            S3Path.class.getName());
    final S3Path s3Path = (S3Path) path;

    // we resolve to a file inside the temp folder with the s3path name
    final Path tempFile = createTempDir().resolve(path.getFileName().toString());

    try {
        InputStream is = s3Path.getFileSystem().getClient().getObject(s3Path.getBucket(), s3Path.getKey())
                .getObjectContent();

        if (is == null)
            throw new IOException(String.format("The specified path is a directory: %s", path));

        Files.write(tempFile, IOUtils.toByteArray(is));
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() != 404)
            throw new IOException(String.format("Cannot access file: %s", path), e);
    }

    // and we can use the File SeekableByteChannel implementation
    final SeekableByteChannel seekable = Files.newByteChannel(tempFile, options);

    return new SeekableByteChannel() {
        @Override
        public boolean isOpen() {
            return seekable.isOpen();
        }

        @Override
        public void close() throws IOException {
            if (!seekable.isOpen()) {
                return;
            }
            seekable.close();
            // upload the content when the seekable channel is closed
            if (Files.exists(tempFile)) {
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentLength(Files.size(tempFile));
                // FIXME: #20 ServiceLoader can't load com.upplication.s3fs.util.FileTypeDetector
                // when this library is used inside an ear
                metadata.setContentType(Files.probeContentType(tempFile));

                try (InputStream stream = Files.newInputStream(tempFile)) {
                    /*
                     * FIXME: if the stream is {@link InputStream#markSupported()} we could reuse the same
                     * stream and avoid the close and open calls of probeContentType, this way:
                     * metadata.setContentType(new Tika().detect(stream, tempFile.getFileName().toString()));
                     */
                    s3Path.getFileSystem().getClient().putObject(s3Path.getBucket(), s3Path.getKey(), stream,
                            metadata);
                }
            } else {
                // delete: check option delete_on_close
                s3Path.getFileSystem().getClient().deleteObject(s3Path.getBucket(), s3Path.getKey());
            }
            // and delete the temp dir
            Files.deleteIfExists(tempFile);
            Files.deleteIfExists(tempFile.getParent());
        }

        @Override
        public int write(ByteBuffer src) throws IOException {
            return seekable.write(src);
        }

        @Override
        public SeekableByteChannel truncate(long size) throws IOException {
            return seekable.truncate(size);
        }

        @Override
        public long size() throws IOException {
            return seekable.size();
        }

        @Override
        public int read(ByteBuffer dst) throws IOException {
            return seekable.read(dst);
        }

        @Override
        public SeekableByteChannel position(long newPosition) throws IOException {
            return seekable.position(newPosition);
        }

        @Override
        public long position() throws IOException {
            return seekable.position();
        }
    };
}
From source file:com.upplication.s3fs.S3FileSystemProvider.java
License:Open Source License
/**
 * Deviations from spec: does not perform an atomic check-and-create. Since a directory is just an S3 object,
 * either all directories in the hierarchy are created or the directory already existed.
 */
@Override
public void createDirectory(Path dir, FileAttribute<?>... attrs) throws IOException {
    // FIXME: throw an exception if the same key already exists in Amazon S3
    S3Path s3Path = (S3Path) dir;

    Preconditions.checkArgument(attrs.length == 0, "attrs not yet supported: %s",
            ImmutableList.copyOf(attrs)); // TODO

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    String keyName = s3Path.getKey() + (s3Path.getKey().endsWith("/") ? "" : "/");

    s3Path.getFileSystem().getClient().putObject(s3Path.getBucket(), keyName,
            new ByteArrayInputStream(new byte[0]), metadata);
}
From source file:com.upplication.s3fs.S3FileSystemProvider.java
License:Open Source License
@Override
public void copy(Path source, Path target, CopyOption... options) throws IOException {
    Preconditions.checkArgument(source instanceof S3Path, "source must be an instance of %s",
            S3Path.class.getName());
    Preconditions.checkArgument(target instanceof S3Path, "target must be an instance of %s",
            S3Path.class.getName());

    if (isSameFile(source, target)) {
        return;
    }

    S3Path s3Source = (S3Path) source;
    S3Path s3Target = (S3Path) target;
    /*
     * Preconditions.checkArgument(!s3Source.isDirectory(),
     * "copying directories is not yet supported: %s", source); // TODO
     * Preconditions.checkArgument(!s3Target.isDirectory(),
     * "copying directories is not yet supported: %s", target); // TODO
     */
    ImmutableSet<CopyOption> actualOptions = ImmutableSet.copyOf(options);
    verifySupportedOptions(EnumSet.of(StandardCopyOption.REPLACE_EXISTING), actualOptions);

    if (!actualOptions.contains(StandardCopyOption.REPLACE_EXISTING)) {
        if (exists(s3Target)) {
            throw new FileAlreadyExistsException(format("target already exists: %s", target));
        }
    }

    AmazonS3Client client = s3Source.getFileSystem().getClient();

    final ObjectMetadata sourceObjMetadata = s3Source.getFileSystem().getClient()
            .getObjectMetadata(s3Source.getBucket(), s3Source.getKey());
    final S3MultipartOptions opts = props != null ? new S3MultipartOptions<>(props) : new S3MultipartOptions();
    final int chunkSize = opts.getChunkSize();
    final long length = sourceObjMetadata.getContentLength();

    if (length <= chunkSize) {
        CopyObjectRequest copyObjRequest = new CopyObjectRequest(s3Source.getBucket(), s3Source.getKey(),
                s3Target.getBucket(), s3Target.getKey());
        if (sourceObjMetadata.getSSEAlgorithm() != null) {
            ObjectMetadata targetObjectMetadata = new ObjectMetadata();
            targetObjectMetadata.setSSEAlgorithm(sourceObjMetadata.getSSEAlgorithm());
            copyObjRequest.setNewObjectMetadata(targetObjectMetadata);
        }
        client.copyObject(copyObjRequest);
    } else {
        client.multipartCopyObject(s3Source, s3Target, length, opts);
    }
}
From source file:com.upplication.s3fs.S3OutputStream.java
License:Open Source License
/**
 * Creates a new {@code S3OutputStream} that writes data directly into the S3 object with the given
 * {@code objectId}. No special object metadata or storage class will be attached to the object.
 *
 * @param s3 Amazon S3 API implementation to use
 * @param request An instance of {@link S3UploadRequest}
 *
 * @throws NullPointerException if at least one parameter is {@code null}
 */
public S3OutputStream(final AmazonS3 s3, S3UploadRequest request) {
    this.s3 = requireNonNull(s3);
    this.objectId = requireNonNull(request.getObjectId());
    this.metadata = request.getMetadata() != null ? request.getMetadata() : new ObjectMetadata();
    this.storageClass = request.getStorageClass();
    this.request = request;
    this.chunkSize = request.getChunkSize();
}
From source file:com.upplication.s3fs.util.AmazonS3ClientMock.java
License:Open Source License
private S3Element parse(InputStream stream, String bucket, String key) {
    S3Object object = new S3Object();
    object.setBucketName(bucket);
    object.setKey(key);

    byte[] content;
    try {
        content = IOUtils.toByteArray(stream);
    } catch (IOException e) {
        throw new IllegalStateException("the stream is closed", e);
    }

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setLastModified(new Date());
    metadata.setContentLength(content.length);

    object.setObjectContent(new ByteArrayInputStream(content));
    object.setObjectMetadata(metadata);
    // TODO: create converter between path permission and s3 permission
    AccessControlList permission = createAllPermission();

    return new S3Element(object, permission, false);
}
From source file:com.upplication.s3fs.util.AmazonS3ClientMock.java
License:Open Source License
private S3Element parse(Path elem, Path bucket) throws IOException {
    boolean dir = false;
    if (Files.isDirectory(elem)) {
        dir = true;
    }

    S3Object object = new S3Object();
    object.setBucketName(bucket.getFileName().toString());

    String key = bucket.relativize(elem).toString();
    if (dir) {
        key += "/";
    }
    object.setKey(key);

    ObjectMetadata metadata = new ObjectMetadata();
    BasicFileAttributes attr = Files.readAttributes(elem, BasicFileAttributes.class);
    metadata.setLastModified(new Date(attr.lastAccessTime().toMillis()));
    if (dir) {
        metadata.setContentLength(0);
        object.setObjectContent(null);
    } else {
        metadata.setContentLength(attr.size());
        object.setObjectContent(new ByteArrayInputStream(Files.readAllBytes(elem)));
    }

    object.setObjectMetadata(metadata);
    // TODO: create converter between path permission and s3 permission
    AccessControlList permission = createAllPermission();

    return new S3Element(object, permission, dir);
}
From source file:com.upplication.s3fs.util.S3UploadRequest.java
License:Open Source License
public S3UploadRequest setStorageEncryption(String storageEncryption) {
    if (storageEncryption == null) {
        return this;
    } else if (!"AES256".equals(storageEncryption)) {
        log.warn("Not a valid S3 server-side encryption type: `{}` -- Currently only AES256 is supported",
                storageEncryption);
    } else {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        this.setMetadata(objectMetadata);
    }
    return this;
}