Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

This page lists usage examples for com.amazonaws.services.s3.model.ObjectMetadata.setContentLength.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header, indicating the size of the associated object in bytes.
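
Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the sources below; the bucket name, key, and payload are placeholders) of why this call matters: when uploading from an InputStream, the SDK cannot determine the object size on its own, and without an explicit content length it buffers the entire stream in memory to compute one.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class SetContentLengthSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] data = "example payload".getBytes(StandardCharsets.UTF_8);

        ObjectMetadata metadata = new ObjectMetadata();
        // Declare the size up front so the SDK can stream the upload
        // instead of buffering the whole stream in memory to size it.
        metadata.setContentLength(data.length);

        // "example-bucket" and "example-key" are placeholder names.
        s3.putObject("example-bucket", "example-key", new ByteArrayInputStream(data), metadata);
    }
}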

Usage

From source file:com.shareplaylearn.models.UserItemManager.java

License:Open Source License

private ObjectMetadata makeBasicMetadata(int bufferLength, boolean isPublic, String itemName) {
    ObjectMetadata fileMetadata = new ObjectMetadata();
    fileMetadata.setContentEncoding(MediaType.APPLICATION_OCTET_STREAM);
    if (isPublic) {
        fileMetadata.addUserMetadata(UploadMetadataFields.PUBLIC, UploadMetadataFields.TRUE_VALUE);
    } else {
        fileMetadata.addUserMetadata(UploadMetadataFields.PUBLIC, UploadMetadataFields.FALSE_VALUE);
    }
    fileMetadata.addUserMetadata(UploadMetadataFields.DISPLAY_NAME, itemName);
    fileMetadata.setContentLength(bufferLength);
    return fileMetadata;
}

From source file:com.streamsets.pipeline.stage.destination.s3.WholeFileHelper.java

License:Apache License

@Override
public List<UploadMetadata> handle(Iterator<Record> recordIterator, String bucket, String keyPrefix)
        throws IOException, StageException {
    List<UploadMetadata> uploads = new ArrayList<>();
    //Only one record per batch if whole file
    if (recordIterator.hasNext()) {
        Record record = recordIterator.next();
        try {

            try {
                FileRefUtil.validateWholeFileRecord(record);
            } catch (IllegalArgumentException e) {
                LOGGER.error("Validation Failed For Record {}", e);
                throw new OnRecordErrorException(record, Errors.S3_52, e);
            }

            String fileName = getFileNameFromFileNameEL(keyPrefix, record);

            checkForWholeFileExistence(bucket, fileName);

            FileRef fileRef = record.get(FileRefUtil.FILE_REF_FIELD_PATH).getValueAsFileRef();

            ObjectMetadata metadata = getObjectMetadata();
            metadata = (metadata == null) ? new ObjectMetadata() : metadata;

            //Mandatory field path specifying size.
            metadata.setContentLength(
                    record.get(FileRefUtil.FILE_INFO_FIELD_PATH + "/" + SIZE).getValueAsLong());

            EventRecord eventRecord = createEventRecordForFileTransfer(record, bucket, fileName);

            //Fyi this gets closed automatically after upload completes.
            InputStream is = FileRefUtil.getReadableStream(context, fileRef, InputStream.class,
                    generatorService.wholeFileIncludeChecksumInTheEvents(),
                    ChecksumAlgorithm.forApi(generatorService.wholeFileChecksumAlgorithm()),
                    new FileRefStreamCloseEventHandler(eventRecord));
            //We are bypassing the generator because S3 has a convenient notion of taking input stream as a parameter.
            Upload upload = doUpload(bucket, fileName, is, metadata);
            uploads.add(new UploadMetadata(upload, bucket, ImmutableList.of(record),
                    ImmutableList.of(eventRecord)));

            //Add event to event lane.
        } catch (OnRecordErrorException e) {
            LOGGER.error("Error on record: {}", e);
            errorRecordHandler.onError(new OnRecordErrorException(record, e.getErrorCode(), e.getParams()));
        }
    }
    return uploads;
}

From source file:com.tfnsnproject.util.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3.
 *
 * @param obj               the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl               a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(S3StorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3client.putObject(request);

    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }

}

From source file:com.tracermedia.maven.plugins.CreateVersionMojo.java

License:Open Source License

protected void copyFileToS3(final String s3Bucket, final String s3Key, final File file) throws IOException {

    final ObjectMetadata meta = new ObjectMetadata();

    // Neither the length nor the MIME type requires reading the file,
    // so there is no need to open (and immediately close) a stream here.
    meta.setContentLength(file.length());
    meta.setContentType(Mimetypes.getInstance().getMimetype(file));

    InputStream in = new ProgressReportingInputStream(new RepeatableFileInputStream(file), new ProgressListener() {
        int lastPercent = -1;
        long bytes = 0; // long, so files larger than 2 GB do not overflow

        public void progressChanged(ProgressEvent progressEvent) {
            bytes += progressEvent.getBytesTransfered();
            double percent = 100.0 * bytes / meta.getContentLength();
            if ((int) percent != lastPercent) {
                System.out.print(
                        String.format("\rCopying file [%s] to S3, bucket: %s, key: %s, progress: %.0f%%   ",
                                file.getName(), s3Bucket, s3Key, percent));
                lastPercent = (int) percent;
            }
        }
    });

    try {
        final PutObjectRequest request = new PutObjectRequest(s3Bucket, s3Key, in, meta);
        getS3Client().putObject(request);
        System.out.println(String.format("\rCopying file [%s] to S3, bucket: %s, key: %s, progress: %.0f%%   ",
                file.getName(), s3Bucket, s3Key, 100.0));
    } finally {
        in.close();
    }
}

From source file:com.universal.storage.UniversalS3Storage.java

License:Open Source License

/**
 * This method creates a new folder within the storage using the passed path. If the new folder name already
 * exists within the storage, this process will skip the creation step.
 *
 * Root = /s3storage/
 * path = /myFolder
 * Target = /s3storage/myFolder
 *
 * Root = /s3storage/
 * path = /folders/myFolder
 * Target = /s3storage/folders/myFolder
 *
 * @param path the folder's path. A path must end with a forward slash '/'; the back slash '\' is not
 *        considered a folder indicator.
 *
 * @throws UniversalIOException when a specific IO error occurs.
 * @throws IllegalArgumentException if path has an invalid value.
 */
void createFolder(String path) throws UniversalIOException {
    PathValidator.validatePath(path);

    if ("".equals(path.trim())) {
        UniversalIOException error = new UniversalIOException("Invalid path.  The path shouldn't be empty.");
        this.triggerOnErrorListeners(error);
        throw error;
    }

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    try {
        PutObjectRequest putObjectRequest = new PutObjectRequest(this.settings.getRoot(),
                path.endsWith("/") ? path : (path + "/"), emptyContent, metadata);

        this.triggerOnCreateFolderListeners();

        PutObjectResult result = s3client.putObject(putObjectRequest);

        this.triggerOnFolderCreatedListeners(new UniversalStorageData(path,
                PREFIX_S3_URL + (this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))),
                result.getVersionId(), this.settings.getRoot() + ("".equals(path) ? "" : ("/" + path))));
    } catch (Exception e) {
        UniversalIOException error = new UniversalIOException(e.getMessage());
        this.triggerOnErrorListeners(error);
        throw error;
    }
}

From source file:com.upplication.s3fs.S3FileSystemProvider.java

License:Open Source License

@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> options,
        FileAttribute<?>... attrs) throws IOException {
    Preconditions.checkArgument(path instanceof S3Path, "path must be an instance of %s",
            S3Path.class.getName());
    final S3Path s3Path = (S3Path) path;
    // we resolve to a file inside the temp folder with the s3path name
    final Path tempFile = createTempDir().resolve(path.getFileName().toString());

    try {
        InputStream is = s3Path.getFileSystem().getClient().getObject(s3Path.getBucket(), s3Path.getKey())
                .getObjectContent();

        if (is == null)
            throw new IOException(String.format("The specified path is a directory: %s", path));

        Files.write(tempFile, IOUtils.toByteArray(is));
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() != 404)
            throw new IOException(String.format("Cannot access file: %s", path), e);
    }

    // and we can use the File SeekableByteChannel implementation
    final SeekableByteChannel seekable = Files.newByteChannel(tempFile, options);

    return new SeekableByteChannel() {
        @Override
        public boolean isOpen() {
            return seekable.isOpen();
        }

        @Override
        public void close() throws IOException {

            if (!seekable.isOpen()) {
                return;
            }
            seekable.close();
            // upload the content when the seekable channel is closed
            if (Files.exists(tempFile)) {
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentLength(Files.size(tempFile));
                // FIXME: #20 ServiceLoader can't load com.upplication.s3fs.util.FileTypeDetector when this library is used inside an ear
                metadata.setContentType(Files.probeContentType(tempFile));

                try (InputStream stream = Files.newInputStream(tempFile)) {
                    /*
                     FIXME: if the stream supports {@link InputStream#markSupported()}, the same stream
                     could be reused, avoiding the extra open/close performed by probeContentType, e.g.:
                     metadata.setContentType(new Tika().detect(stream, tempFile.getFileName().toString()));
                    */
                    s3Path.getFileSystem().getClient().putObject(s3Path.getBucket(), s3Path.getKey(), stream,
                            metadata);
                }
            } else {
                // delete: check option delete_on_close
                s3Path.getFileSystem().getClient().deleteObject(s3Path.getBucket(), s3Path.getKey());
            }
            // and delete the temp dir
            Files.deleteIfExists(tempFile);
            Files.deleteIfExists(tempFile.getParent());
        }

        @Override
        public int write(ByteBuffer src) throws IOException {
            return seekable.write(src);
        }

        @Override
        public SeekableByteChannel truncate(long size) throws IOException {
            return seekable.truncate(size);
        }

        @Override
        public long size() throws IOException {
            return seekable.size();
        }

        @Override
        public int read(ByteBuffer dst) throws IOException {
            return seekable.read(dst);
        }

        @Override
        public SeekableByteChannel position(long newPosition) throws IOException {
            return seekable.position(newPosition);
        }

        @Override
        public long position() throws IOException {
            return seekable.position();
        }
    };
}

From source file:com.upplication.s3fs.S3FileSystemProvider.java

License:Open Source License

/**
 * Deviations from spec: does not perform an atomic check-and-create. Since a
 * directory is just an S3 object, each directory in the hierarchy is created
 * unless it already exists.
 */
@Override
public void createDirectory(Path dir, FileAttribute<?>... attrs) throws IOException {

    // FIXME: throw exception if the same key already exists at amazon s3

    S3Path s3Path = (S3Path) dir;

    Preconditions.checkArgument(attrs.length == 0, "attrs not yet supported: %s", ImmutableList.copyOf(attrs)); // TODO

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    String keyName = s3Path.getKey() + (s3Path.getKey().endsWith("/") ? "" : "/");

    s3Path.getFileSystem().getClient().putObject(s3Path.getBucket(), keyName,
            new ByteArrayInputStream(new byte[0]), metadata);
}

From source file:com.upplication.s3fs.S3OutputStream.java

License:Open Source License

/**
 * Stores the given content using a single-part upload process.
 *
 * @param content the data to upload
 * @param contentLength the number of bytes to read from the content stream
 * @param checksum the MD5 digest of the content, sent as the Content-MD5 header
 * @throws IOException if the upload fails
 */
private void putObject(final InputStream content, final long contentLength, byte[] checksum)
        throws IOException {

    final ObjectMetadata meta = metadata.clone();
    meta.setContentLength(contentLength);
    meta.setContentMD5(Base64.encodeAsString(checksum));

    final PutObjectRequest request = new PutObjectRequest(objectId.getBucket(), objectId.getKey(), content,
            meta);

    if (storageClass != null) {
        request.setStorageClass(storageClass);
    }

    try {
        s3.putObject(request);
    } catch (final AmazonClientException e) {
        throw new IOException("Failed to put data into Amazon S3 object", e);
    }
}

From source file:com.upplication.s3fs.util.AmazonS3ClientMock.java

License:Open Source License

private S3Element parse(InputStream stream, String bucket, String key) {

    S3Object object = new S3Object();

    object.setBucketName(bucket);
    object.setKey(key);

    byte[] content;
    try {
        content = IOUtils.toByteArray(stream);
    } catch (IOException e) {
        throw new IllegalStateException("the stream is closed", e);
    }

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setLastModified(new Date());
    metadata.setContentLength(content.length);

    object.setObjectContent(new ByteArrayInputStream(content));
    object.setObjectMetadata(metadata);
    // TODO: create converter between path permission and s3 permission
    AccessControlList permission = createAllPermission();
    return new S3Element(object, permission, false);
}

From source file:com.upplication.s3fs.util.AmazonS3ClientMock.java

License:Open Source License

private S3Element parse(Path elem, Path bucket) throws IOException {
    boolean dir = false;
    if (Files.isDirectory(elem)) {
        dir = true;
    }

    S3Object object = new S3Object();

    object.setBucketName(bucket.getFileName().toString());

    String key = bucket.relativize(elem).toString();
    if (dir) {
        key += "/";
    }
    object.setKey(key);

    ObjectMetadata metadata = new ObjectMetadata();
    BasicFileAttributes attr = Files.readAttributes(elem, BasicFileAttributes.class);
    metadata.setLastModified(new Date(attr.lastModifiedTime().toMillis()));
    if (dir) {
        metadata.setContentLength(0);
        object.setObjectContent(null);
    } else {
        metadata.setContentLength(attr.size());
        object.setObjectContent(new ByteArrayInputStream(Files.readAllBytes(elem)));
    }

    object.setObjectMetadata(metadata);
    // TODO: create converter between path permission and s3 permission
    AccessControlList permission = createAllPermission();

    return new S3Element(object, permission, dir);
}