Example usage for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

Introduction

This page collects example usages of the no-argument constructor of com.amazonaws.services.s3.model.ObjectMetadata.

Prototype

public ObjectMetadata() 
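
Before the per-project examples below, here is a minimal sketch of the common pattern (the s3Client, bucket name, key, and data array are placeholders, not from any of the sources below): construct an empty ObjectMetadata, set the fields you need, and attach it to the put request.

ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(data.length); // lets the SDK stream without buffering
metadata.setContentType("application/octet-stream");
metadata.addUserMetadata("origin", "example"); // stored under the x-amz-meta- prefix
s3Client.putObject(new PutObjectRequest("example-bucket", "example-key",
        new ByteArrayInputStream(data), metadata));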

Usage

From source file:com.ibm.stocator.fs.cos.COSOutputStream.java

License:Apache License

@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);

        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
    } catch (AmazonClientException e) {
        throw new IOException(String.format("saving output %s", mKey), e);
    } finally {
        if (!mBackupFile.delete()) {
            LOG.warn("Could not delete temporary cos file: {}", mBackupOutputStream);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}

From source file:com.igeekinc.indelible.indeliblefs.uniblock.casstore.s3.S3CASStore.java

License:Open Source License

@Override
public void storeSegment(CASIDDataDescriptor segmentDescriptor) throws IOException {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(segmentDescriptor.getLength());
    PutObjectResult result = s3Client.putObject(storeID.toString(),
            segmentDescriptor.getCASIdentifier().toString(), segmentDescriptor.getInputStream(), metadata);
    // If we get this far we're golden
}

From source file:com.indeed.imhotep.iql.cache.S3QueryCache.java

License:Apache License

@Override
public OutputStream getOutputStream(final String cachedFileName) throws IOException {
    if (!enabled) {
        throw new IllegalStateException("Can't send data to S3 cache as it is disabled");
    }

    final ByteArrayOutputStream os = new ByteArrayOutputStream();
    // Wrap the returned OutputStream so that we can write to buffer and do actual write on close()
    return new OutputStream() {
        private boolean closed = false;

        @Override
        public void write(byte[] b) throws IOException {
            os.write(b);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            os.write(b, off, len);
        }

        @Override
        public void flush() throws IOException {
            os.flush();
        }

        @Override
        public void write(int b) throws IOException {
            os.write(b);
        }

        @Override
        public void close() throws IOException {
            if (closed) {
                return;
            }
            closed = true;
            os.close();

            // do actual write
            byte[] csvData = os.toByteArray();
            ByteArrayInputStream is = new ByteArrayInputStream(csvData);
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(csvData.length);
            client.putObject(bucket, cachedFileName, is, metadata);
        }
    };
}
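
Note the design choice here: the wrapper buffers the whole payload in a ByteArrayOutputStream and performs one putObject on close(), which allows an exact content length to be set on the ObjectMetadata. The trade-off is that each cache entry is held entirely in memory, which is only reasonable while cached results stay small.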

From source file:com.intuit.s3encrypt.S3Encrypt.java

License:Open Source License

private static void putS3Object(CommandLine cmd, AmazonS3EncryptionClient s3, String bucket, String filename,
        String keyname, String key) {
    String[] searchArgs = cmd.getOptionValues("put");
    System.out.println("Uploading a new object to S3 BUCKET = " + bucket + " FILENAME = " + filename);
    File file = new File(filename);
    PutObjectRequest request = new PutObjectRequest(bucket, filename, file);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.addUserMetadata(keyname, key);
    request.setMetadata(metadata);
    s3.putObject(request);
    System.out.println();
}

From source file:com.intuit.tank.vmManager.environment.amazon.AmazonS3.java

License:Open Source License

/**
 * Stores the given stream in the given bucket at the given path, attaching
 * any supplied entries as user metadata.
 *
 * @param bucketName the destination bucket
 * @param path the key to store the object under
 * @param metaMap user metadata to attach (may be null)
 * @param in the data to store
 */
public void storeFile(String bucketName, String path, Map<String, String> metaMap, InputStream in) {
    ObjectMetadata metaData = new ObjectMetadata();
    if (metaMap != null) {
        for (Entry<String, String> entry : metaMap.entrySet()) {
            metaData.addUserMetadata(entry.getKey(), entry.getValue());
        }
    }
    s3Client.putObject(bucketName, path, in, metaData);
}
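
A hypothetical call site for the method above (s3Helper stands in for an instance of this class; the bucket, key, file, and metadata values are placeholders):

Map<String, String> meta = new HashMap<>();
meta.put("job-id", "1234");
try (InputStream in = new FileInputStream(scriptFile)) {
    s3Helper.storeFile("example-bucket", "scripts/example.xml", meta, in);
}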

From source file:com.jeet.s3.AmazonS3ClientWrapper.java

License:Open Source License

public boolean uploadFile(String path, InputStream is, String hash, Long fileLength) {
    boolean isFileUploaded = false;
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        Map<String, String> userMetadata = new HashMap<>();
        userMetadata.put(Constants.FILE_LENGTH_KEY, fileLength.toString());
        userMetadata.put(Constants.HASH_KEY, hash);
        userMetadata.put(Constants.LAST_MODIFIED_KEY, String.valueOf(new Date().getTime()));
        objectMetadata.setUserMetadata(userMetadata);
        PutObjectResult objectResult = s3Client
                .putObject(new PutObjectRequest(Constants.BUCKET_NAME, path, is, objectMetadata));
        if (!StringUtils.isEmpty(objectResult.getContentMd5())) {
            isFileUploaded = true;
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return isFileUploaded;
}

From source file:com.johnstok.blobs.s3.S3ByteStore.java

License:Open Source License

/** {@inheritDoc}  */
@Override
public void update(final UUID id, final InputStream in) throws ByteStoreException {
    Objects.requireNonNull(in);
    Objects.requireNonNull(id);
    try {
        /*
        FIXME: In general, when your object size reaches 100 MB, you should consider
        using multipart uploads instead of uploading the object in a single operation.
                
        http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
         */
        _s3Client.putObject(new PutObjectRequest(_bucket, id.toString(), in, new ObjectMetadata()));
    } catch (final AmazonClientException e) {
        throw new ByteStoreException(e);
    }
}
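
The FIXME above names the usual remedy: once objects approach 100 MB, hand the upload to the SDK's TransferManager, which performs multipart uploads (with parallel part uploads) automatically. A minimal sketch, assuming an existing AmazonS3 client; the bucket and key are placeholders:

public void uploadLargeFile(AmazonS3 s3Client, File largeFile) throws InterruptedException {
    TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3Client).build();
    try {
        // Above its configured multipart threshold, TransferManager splits the
        // object into parts and uploads them concurrently.
        Upload upload = tm.upload("example-bucket", "example-key", largeFile);
        upload.waitForCompletion();
    } finally {
        tm.shutdownNow(false); // false: leave the wrapped client usable
    }
}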

From source file:com.kittypad.music.game.util.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3.
 * @param obj the item describing the object to be stored
 * @param data the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(MusicItem obj, byte[] data, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    //checkForAndCreateBucket(this.bucketName);
    String key = obj.getUUID() + obj.getMusicName() + "." + obj.getType();
    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(mimeType);
    omd.setContentLength(obj.getSize());
    ByteArrayInputStream is = new ByteArrayInputStream(data);
    PutObjectRequest request = new PutObjectRequest(bucketName, key, is, omd);
    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }
    s3client.putObject(request);
    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3client.setObjectAcl(bucketName, key, acl);
    }
}

From source file:com.kodemore.aws.s3.KmS3Uploader.java

License:Open Source License

/**
 * Upload the data from the input stream to the remote s3 repository.
 * The toPath (at s3) should NOT begin with a slash (/).
 */
public void upload(String bucketName, String toPath, InputStream is) {
    ObjectMetadata meta = new ObjectMetadata();

    AmazonS3 s3;
    s3 = createClient();
    s3.putObject(bucketName, toPath, is, meta);
}
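
Because the ObjectMetadata here is left empty, no content length accompanies the stream, and the v1 SDK will buffer the stream contents in memory to compute it (logging a warning along the way). When the size is known in advance, calling meta.setContentLength(size) before putObject avoids that buffering.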

From source file:com.lithium.flow.filer.S3Filer.java

License:Apache License

@Override
@Nonnull
public OutputStream writeFile(@Nonnull String path) throws IOException {
    String key = path.substring(1);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    List<Future<PartETag>> futureTags = new ArrayList<>();
    Lazy<String> uploadId = new Lazy<>(
            () -> s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key)).getUploadId());

    return new OutputStream() {
        @Override
        public void write(int b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            baos.write(b, off, len);
            flip(partSize);
        }

        @Override
        public void close() throws IOException {
            if (futureTags.isEmpty()) {
                InputStream in = new ByteArrayInputStream(baos.toByteArray());
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentLength(baos.size());
                s3.putObject(bucket, key, in, metadata);
            } else {
                flip(1);

                List<PartETag> tags = Lists.newArrayList();
                for (Future<PartETag> futureTag : futureTags) {
                    try {
                        tags.add(futureTag.get());
                    } catch (Exception e) {
                        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId.get()));
                        throw new IOException("failed to upload: " + path, e);
                    }
                }

                s3.completeMultipartUpload(
                        new CompleteMultipartUploadRequest(bucket, key, uploadId.get(), tags));
            }
        }

        private void flip(long minSize) throws IOException {
            if (baos.size() < minSize) {
                return;
            }

            File file = new File(tempDir, UUID.randomUUID().toString());
            file.deleteOnExit();

            try (OutputStream out = new FileOutputStream(file)) {
                out.write(baos.toByteArray());
            }

            baos.reset();

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(uploadId.get())
                    .withBucketName(bucket).withKey(key).withPartNumber(futureTags.size() + 1)
                    .withPartSize(file.length()).withFile(file);

            futureTags.add(service.submit(() -> {
                try {
                    return s3.uploadPart(uploadRequest).getPartETag();
                } finally {
                    file.delete();
                }
            }));
        }
    };
}
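
This example hand-rolls a multipart upload: writes accumulate in a ByteArrayOutputStream, and whenever the buffer reaches partSize, flip() spills it to a temp file and submits that part for asynchronous upload, collecting the PartETag futures. If the stream is closed before the first part fills, it falls back to a single putObject with an exact content length, skipping the multipart overhead for small files.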