Example usage for com.amazonaws.services.s3.model PutObjectRequest PutObjectRequest

Introduction

On this page you can find example usage of the com.amazonaws.services.s3.model.PutObjectRequest constructor.

Prototype

public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata) 

Document

Constructs a new PutObjectRequest object to upload a stream of data to the specified bucket and key.
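
Before the per-project examples below, here is a minimal, self-contained sketch of the constructor in use. The bucket name, object key, and client setup are illustrative assumptions only and are not taken from any of the projects listed further down; setting the content length on the ObjectMetadata up front lets the SDK stream the upload without buffering the input.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestSketch {
    public static void main(String[] args) {
        // Hypothetical bucket and key names, used for illustration only.
        String bucketName = "example-bucket";
        String key = "example/path/data.txt";

        // Build a client from the default credential/region provider chain.
        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();

        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);

        // Declare the content length so the SDK does not have to buffer the stream.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(data.length);

        // Upload the stream to the given bucket and key.
        PutObjectRequest request = new PutObjectRequest(bucketName, key,
                new ByteArrayInputStream(data), metadata);
        s3Client.putObject(request);
    }
}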

Usage

From source file:org.elasticsearch.repositories.s3.DefaultS3OutputStream.java

License:Apache License

protected void doUpload(S3BlobStore blobStore, String bucketName, String blobName, InputStream is, int length,
        boolean serverSideEncryption) throws AmazonS3Exception {
    ObjectMetadata md = new ObjectMetadata();
    if (serverSideEncryption) {
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    md.setContentLength(length);

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, blobName, is, md)
            .withStorageClass(blobStore.getStorageClass()).withCannedAcl(blobStore.getCannedACL());
    blobStore.client().putObject(putRequest);

}

From source file:org.entando.entando.plugins.jps3awsclient.aps.system.services.storage.AmazonS3StorageManager.java

License:Open Source License

public void store(IStorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl)
        throws ApsSystemException {
    try {
        AmazonS3Client client = this.getS3Client();
        String bucketName = obj.getBucketName().toLowerCase();
        this.checkForAndCreateBucket(bucketName, client);
        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentType(obj.getContentType());
        omd.setContentLength(obj.getContentLength());
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, obj.getStoragePath(),
                obj.getInputStream(), omd);
        // Check if reduced redundancy is enabled
        if (reducedRedundancy) {
            putObjectRequest.setStorageClass(StorageClass.ReducedRedundancy);
        }
        if (null != obj.getUserMetadata()) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            putObjectRequest.setMetadata(objectMetadata);
            Iterator<String> iter = obj.getUserMetadata().keySet().iterator();
            while (iter.hasNext()) {
                String key = iter.next();
                objectMetadata.addUserMetadata(key, obj.getUserMetadata().get(key));
            }
        }
        client.putObject(putObjectRequest);
        // If we have an ACL, set access permissions for the data on S3
        if (acl != null) {
            client.setObjectAcl(bucketName, obj.getStoragePath(), acl);
        }
    } catch (Throwable t) {
        _logger.error("Error storing object", t);
        throw new ApsSystemException("Error storing object", t);
    }
}

From source file:org.exem.flamingo.web.filesystem.s3.S3BrowserServiceImpl.java

License:Apache License

@Override
public void upload(String bucketName, String key, MultipartFile file) throws IOException {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(file.getSize());
    PutObjectRequest request = new PutObjectRequest(bucketName, key, file.getInputStream(), metadata);
    this.s3.putObject(request);

}

From source file:org.finra.dm.dao.impl.S3DaoImpl.java

License:Apache License

@Override
public void createDirectory(final S3FileTransferRequestParamsDto params) {
    // Create metadata for the directory marker and set content-length to 0 bytes.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    prepareMetadata(params, metadata);

    // Create empty content.
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // Create a PutObjectRequest passing the folder name suffixed by '/'.
    String directoryName = params.getS3KeyPrefix() + (params.getS3KeyPrefix().endsWith("/") ? "" : "/");
    PutObjectRequest putObjectRequest = new PutObjectRequest(params.getS3BucketName(), directoryName,
            emptyContent, metadata);

    AmazonS3Client s3Client = null;

    try {
        s3Client = getAmazonS3(params);
        s3Operations.putObject(putObjectRequest, s3Client);
    } catch (AmazonServiceException e) {
        throw new IllegalStateException(
                String.format("Failed to create 0 byte S3 object with \"%s\" key in bucket \"%s\". Reason: %s",
                        directoryName, params.getS3BucketName(), e.getMessage()),
                e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }
}

From source file:org.finra.herd.dao.impl.S3DaoImpl.java

License:Apache License

@Override
public void createDirectory(final S3FileTransferRequestParamsDto params) {
    // Create metadata for the directory marker and set content-length to 0 bytes.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    prepareMetadata(params, metadata);

    // Create empty content.
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // Create a PutObjectRequest passing the folder name suffixed by '/'.
    String directoryName = StringUtils.appendIfMissing(params.getS3KeyPrefix(), "/");
    PutObjectRequest putObjectRequest = new PutObjectRequest(params.getS3BucketName(), directoryName,
            emptyContent, metadata);
    // KMS key ID is being set through prepareMetadata()

    AmazonS3Client s3Client = getAmazonS3(params);

    try {
        s3Operations.putObject(putObjectRequest, s3Client);
    } catch (AmazonServiceException e) {
        throw new IllegalStateException(
                String.format("Failed to create 0 byte S3 object with \"%s\" key in bucket \"%s\". Reason: %s",
                        directoryName, params.getS3BucketName(), e.getMessage()),
                e);
    } finally {
        // Shutdown the AmazonS3Client instance to release resources.
        s3Client.shutdown();
    }
}

From source file:org.finra.herd.service.BusinessObjectDataServiceTestHelper.java

License:Apache License

/**
 * Creates an object in S3 with the prefix constructed from the given parameters. The object's full path will be {prefix}/{UUID}
 *
 * @param businessObjectFormatEntity business object format
 * @param request request with partition values and storage
 * @param businessObjectDataVersion business object data version to put
 */
public void createS3Object(BusinessObjectFormatEntity businessObjectFormatEntity,
        BusinessObjectDataInvalidateUnregisteredRequest request, int businessObjectDataVersion) {
    StorageEntity storageEntity = storageDao.getStorageByName(request.getStorageName());
    String s3BucketName = storageHelper.getS3BucketAccessParams(storageEntity).getS3BucketName();

    BusinessObjectDataKey businessObjectDataKey = getBusinessObjectDataKey(request);
    businessObjectDataKey.setBusinessObjectDataVersion(businessObjectDataVersion);

    String s3KeyPrefix = s3KeyPrefixHelper.buildS3KeyPrefix(AbstractServiceTest.S3_KEY_PREFIX_VELOCITY_TEMPLATE,
            businessObjectFormatEntity, businessObjectDataKey, storageEntity.getName());
    String s3ObjectKey = s3KeyPrefix + "/test";
    PutObjectRequest putObjectRequest = new PutObjectRequest(s3BucketName, s3ObjectKey,
            new ByteArrayInputStream(new byte[1]), new ObjectMetadata());
    s3Operations.putObject(putObjectRequest, null);
}

From source file:org.geowebcache.s3.S3BlobStore.java

License:Open Source License

@Override
public void put(TileObject obj) throws StorageException {
    final Resource blob = obj.getBlob();
    checkNotNull(blob);
    checkNotNull(obj.getBlobFormat());

    final String key = keyBuilder.forTile(obj);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(blob.getSize());

    String blobFormat = obj.getBlobFormat();
    String mimeType;
    try {
        mimeType = MimeType.createFromFormat(blobFormat).getMimeType();
    } catch (MimeException me) {
        throw Throwables.propagate(me);
    }
    objectMetadata.setContentType(mimeType);

    // don't bother for the extra call if there are no listeners
    final boolean existed;
    ObjectMetadata oldObj;
    if (listeners.isEmpty()) {
        existed = false;
        oldObj = null;
    } else {
        oldObj = s3Ops.getObjectMetadata(key);
        existed = oldObj != null;
    }

    final ByteArrayInputStream input = toByteArray(blob);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, input, objectMetadata)
            .withCannedAcl(CannedAccessControlList.PublicRead);

    log.trace(log.isTraceEnabled() ? ("Storing " + key) : "");
    s3Ops.putObject(putObjectRequest);

    putParametersMetadata(obj.getLayerName(), obj.getParametersId(), obj.getParameters());

    /*
     * This is important because listeners may be tracking tile existence
     */
    if (!listeners.isEmpty()) {
        if (existed) {
            long oldSize = oldObj.getContentLength();
            listeners.sendTileUpdated(obj, oldSize);
        } else {
            listeners.sendTileStored(obj);
        }
    }
}

From source file:org.geowebcache.s3.S3Ops.java

License:Open Source License

public void putProperties(String resourceKey, Properties properties) throws StorageException {

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try {
        properties.store(out, "");
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }

    byte[] bytes = out.toByteArray();
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(bytes.length);
    objectMetadata.setContentType("text/plain");

    InputStream in = new ByteArrayInputStream(bytes);
    PutObjectRequest putReq = new PutObjectRequest(bucketName, resourceKey, in, objectMetadata);
    putObject(putReq);
}

From source file:org.gradle.internal.resource.transport.aws.s3.S3Client.java

License:Apache License

public void put(InputStream inputStream, Long contentLength, URI destination) {
    try {
        S3RegionalResource s3RegionalResource = new S3RegionalResource(destination);
        String bucketName = s3RegionalResource.getBucketName();
        String s3BucketKey = s3RegionalResource.getKey();
        configureClient(s3RegionalResource);

        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(contentLength);

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, s3BucketKey, inputStream,
                objectMetadata);
        LOGGER.debug("Attempting to put resource:[{}] into s3 bucket [{}]", s3BucketKey, bucketName);

        amazonS3Client.putObject(putObjectRequest);
    } catch (AmazonClientException e) {
        throw ResourceExceptions.putFailed(destination, e);
    }
}

From source file:org.gytheio.content.handler.s3.S3ContentReferenceHandlerImpl.java

License:Open Source License

@Override
public long putInputStream(InputStream sourceInputStream, ContentReference targetContentReference)
        throws ContentIOException {
    if (!isContentReferenceSupported(targetContentReference)) {
        throw new ContentIOException("ContentReference not supported");
    }

    String remotePath = getRelativePath(targetContentReference.getUri());

    try {
        s3.putObject(new PutObjectRequest(s3BucketName, remotePath, sourceInputStream, new ObjectMetadata()));
        ObjectMetadata metadata = s3.getObjectMetadata(new GetObjectMetadataRequest(s3BucketName, remotePath));
        return metadata.getContentLength();
    } catch (AmazonClientException e) {
        throw new ContentIOException("Failed to write content", e);
    }
}