List of usage examples for the com.amazonaws.services.s3.model.PutObjectRequest constructor
public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata)
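Before the project-specific examples below, here is a minimal, self-contained sketch of this constructor in use; the class name, bucket, key, and payload are hypothetical placeholders, not taken from any example on this page. When uploading from a raw InputStream, set the content length on the ObjectMetadata up front, otherwise the SDK buffers the whole stream in memory to compute it.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class PutObjectRequestSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length); // known length avoids in-memory buffering by the SDK

        // "example-bucket" and "example/key.txt" are placeholders
        s3.putObject(new PutObjectRequest("example-bucket", "example/key.txt",
                new ByteArrayInputStream(payload), metadata));
    }
}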
From source file:com.metamug.mtg.s3.uploader.S3Uploader.java
public static String upload(InputStream inputStream, long fileSize, String URI) {
    String publicURL;
    //ClientConfiguration max retry
    ObjectMetadata objectMetaData = new ObjectMetadata();
    objectMetaData.setContentLength(fileSize);
    // objectMetaData.setContentType(IMAGE_CONTENT_TYPE);
    objectMetaData.setCacheControl("public");

    Calendar c = Calendar.getInstance();
    c.add(Calendar.MONTH, 6);
    // HTTP-date format, e.g. "Thu, 21 Mar 2042 08:16:32 GMT"
    String sdf = new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss zzz").format(c.getTime());
    objectMetaData.setHeader("Expires", sdf);

    PutObjectResult por = s3Client
            .putObject(new PutObjectRequest(AWS_S3_BUCKET, URI, inputStream, objectMetaData)
                    .withCannedAcl(CannedAccessControlList.PublicRead));
    publicURL = "http://metamug.net/" + URI;
    return publicURL;
}
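One caveat with the example above: SimpleDateFormat without an explicit Locale uses the platform default, so the Expires header can come out in a non-English locale, and the time zone follows the JVM rather than GMT. A hedged variant of the date formatting (not part of the original source) that pins both:

// HTTP dates are defined in English with a GMT time zone, so fix Locale.US and GMT explicitly
SimpleDateFormat httpDate = new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
httpDate.setTimeZone(TimeZone.getTimeZone("GMT"));
objectMetaData.setHeader("Expires", httpDate.format(c.getTime()));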
From source file:com.moxtra.S3StorageManager.java
License:Open Source License
/**
 * Stores a given item on S3
 * @param bucketname
 * @param key
 * @param data
 * @param acl a canned access control list indicating what permissions to store this object with
 *            (can be null to leave it set to default)
 */
public void store(String bucketname, String key, byte[] data, CannedAccessControlList acl, String type) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(bucketname);

    ObjectMetadata omd = new ObjectMetadata();
    //omd.setContentType("text/html");
    omd.setContentType(type);
    omd.setContentLength(data.length);

    ByteArrayInputStream is = new ByteArrayInputStream(data);
    PutObjectRequest request = new PutObjectRequest(bucketname, key, is, omd);

    // Check if reduced redundancy is enabled
    // if (reducedRedundancy) {
    //     request.setStorageClass(StorageClass.ReducedRedundancy);
    // }

    s3Client.putObject(request);

    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3Client.setObjectAcl(bucketname, key, acl);
    }

    try {
        is.close();
    } catch (Exception e) {
        logger.log(Level.SEVERE, "Cannot close stream after storing to S3");
    }
}
From source file:com.mrbjoern.blog.api.service.s3.S3Wrapper.java
License:Open Source License
private PutObjectResult upload(InputStream inputStream, final String uploadKey) {
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, uploadKey, inputStream,
            new ObjectMetadata());
    putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    PutObjectResult putObjectResult = amazonS3Client.putObject(putObjectRequest);
    return putObjectResult;
}
From source file:com.mweagle.tereus.aws.S3Resource.java
License:Open Source License
public Optional<String> upload() {
    try {
        DefaultAWSCredentialsProviderChain credentialProviderChain = new DefaultAWSCredentialsProviderChain();
        final TransferManager transferManager = new TransferManager(credentialProviderChain.getCredentials());

        final ObjectMetadata metadata = new ObjectMetadata();
        if (this.inputStreamLength.isPresent()) {
            metadata.setContentLength(this.inputStreamLength.get());
        }

        final PutObjectRequest uploadRequest = new PutObjectRequest(bucketName, keyName, this.inputStream,
                metadata);
        final Upload templateUpload = transferManager.upload(uploadRequest);
        templateUpload.waitForUploadResult();
        this.resourceURL = Optional.of(getS3Path());
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
    return this.resourceURL;
}
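A note on the example above: in the AWS SDK for Java v1, TransferManager owns an internal thread pool, so a long-lived application would normally reuse a single instance or shut it down once the transfer completes. A minimal sketch of the cleanup, assuming the same transferManager and templateUpload variables as in the example:

try {
    templateUpload.waitForUploadResult();
} finally {
    transferManager.shutdownNow(false); // false leaves the wrapped AmazonS3 client open for reuse
}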
From source file:com.netflix.exhibitor.core.config.s3.S3PseudoLock.java
License:Apache License
@Override
protected void createFile(String key, byte[] contents) throws Exception {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(contents.length);
    PutObjectRequest request = new PutObjectRequest(bucket, key, new ByteArrayInputStream(contents), metadata);
    client.putObject(request);
}
From source file:com.netflix.exhibitor.core.s3.S3Utils.java
License:Apache License
public static ObjectMetadata simpleUploadFile(S3Client client, byte[] bytes, String bucket, String key)
        throws Exception {
    byte[] md5 = md5(bytes, bytes.length);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(bytes.length);
    metadata.setLastModified(new Date());
    metadata.setContentMD5(S3Utils.toBase64(md5));

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, new ByteArrayInputStream(bytes),
            metadata);
    PutObjectResult putObjectResult = client.putObject(putObjectRequest);

    // For a single-part, non-KMS-encrypted upload, the returned ETag is the hex MD5 of the content
    if (!putObjectResult.getETag().equals(S3Utils.toHex(md5))) {
        throw new Exception("Unable to match MD5 for config");
    }
    return metadata;
}
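The md5, toBase64, and toHex calls above are Exhibitor's own helpers and are not shown on this page. A rough JDK-only sketch of what they compute might look like the following; the method names are illustrative, not the actual S3Utils API:

import java.security.MessageDigest;
import java.util.Base64;

static byte[] md5(byte[] data) throws Exception {
    return MessageDigest.getInstance("MD5").digest(data);
}

static String toBase64(byte[] digest) {
    return Base64.getEncoder().encodeToString(digest); // value for the Content-MD5 header
}

static String toHex(byte[] digest) {
    StringBuilder sb = new StringBuilder();
    for (byte b : digest) {
        sb.append(String.format("%02x", b)); // lowercase hex, matching S3 ETag formatting
    }
    return sb.toString();
}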
From source file:com.rathravane.clerk.impl.s3.S3IamDb.java
License:Apache License
void storeObject(String key, JSONObject o) throws IamSvcException {
    try {
        fCache.put(key, JsonUtil.clone(o));

        final String data = o.toString();
        final byte[] bytes = data.getBytes("UTF-8");
        final InputStream is = new ByteArrayInputStream(bytes);

        final ObjectMetadata om = new ObjectMetadata();
        // Content length must be the encoded byte count; data.length() counts chars,
        // which differs from the UTF-8 byte count for non-ASCII content
        om.setContentLength(bytes.length);
        om.setContentType("application/json");

        fDb.putObject(new PutObjectRequest(fBucketId, key, is, om));
    } catch (AmazonS3Exception x) {
        throw new IamSvcException(x);
    } catch (UnsupportedEncodingException e) {
        throw new IamSvcException(e);
    }
}
From source file:com.shelfmap.simplequery.domain.impl.DefaultBlobReference.java
License:Apache License
@Override
public Upload uploadFrom(InputStream uploadSource, ObjectMetadata metadata) throws BlobOutputException {
    String bucket = resourceInfo.getBucketName();
    String key = resourceInfo.getKey();
    try {
        PutObjectRequest request = new PutObjectRequest(bucket, key, uploadSource, metadata);
        AmazonS3 s3 = getContext().getS3();
        TransferManager transfer = new TransferManager(s3);
        this.lastUpload = transfer.upload(request);
        return this.lastUpload;
    } catch (AmazonServiceException ex) {
        throw new BlobOutputException("a problem occurred in Amazon S3.", ex);
    } catch (AmazonClientException ex) {
        throw new BlobOutputException("the client had a problem when uploading data.", ex);
    }
}
From source file:com.streamsets.pipeline.lib.aws.s3.S3Accessor.java
License:Apache License
public Uploader getUploader() {
    Utils.checkState(hasTransferManager(), "transferManager not available");
    return (bucket, key, is) -> {
        Utils.checkNotNull(bucket, "bucket");
        Utils.checkNotNull(key, "key");
        Utils.checkNotNull(is, "is");
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, is,
                getEncryptionMetadataBuilder().build());
        Upload upload = getTransferManager().upload(putObjectRequest);
        upload.addProgressListener(new UploaderProgressListener(bucket + key));
        return upload;
    };
}
From source file:com.streamsets.pipeline.stage.destination.s3.AmazonS3Target.java
License:Apache License
@Override
public void write(Batch batch) throws StageException {
    Multimap<String, Record> partitions = ELUtils.partitionBatchByExpression(partitionEval, partitionVars,
            partitionTemplate, batch);

    for (String partition : partitions.keySet()) {
        // commonPrefix always ends with a delimiter, so no need to append one to the end
        String keyPrefix = s3TargetConfigBean.s3Config.commonPrefix;
        // partition is optional
        if (!partition.isEmpty()) {
            keyPrefix += partition;
            if (!partition.endsWith(s3TargetConfigBean.s3Config.delimiter)) {
                keyPrefix += s3TargetConfigBean.s3Config.delimiter;
            }
        }
        keyPrefix += s3TargetConfigBean.fileNamePrefix + "-" + System.currentTimeMillis() + "-";

        Iterator<Record> records = partitions.get(partition).iterator();
        int writtenRecordCount = 0;
        DataGenerator generator;
        Record currentRecord;

        try {
            ByRefByteArrayOutputStream bOut = new ByRefByteArrayOutputStream();
            OutputStream out = bOut;

            // wrap with gzip compression output stream if required
            if (s3TargetConfigBean.compress) {
                out = new GZIPOutputStream(bOut);
            }

            generator = s3TargetConfigBean.getGeneratorFactory().getGenerator(out);
            while (records.hasNext()) {
                currentRecord = records.next();
                try {
                    generator.write(currentRecord);
                    writtenRecordCount++;
                } catch (StageException e) {
                    errorRecordHandler.onError(
                            new OnRecordErrorException(currentRecord, e.getErrorCode(), e.getParams()));
                } catch (IOException e) {
                    errorRecordHandler.onError(new OnRecordErrorException(currentRecord, Errors.S3_32,
                            currentRecord.getHeader().getSourceId(), e.toString(), e));
                }
            }
            generator.close();

            // upload file on Amazon S3 only if at least one record was successfully written to the stream
            if (writtenRecordCount > 0) {
                fileCount++;
                StringBuilder fileName = new StringBuilder();
                fileName = fileName.append(keyPrefix).append(fileCount);
                if (s3TargetConfigBean.compress) {
                    fileName = fileName.append(GZIP_EXTENSION);
                }

                // Avoid making a copy of the internal buffer maintained by the ByteArrayOutputStream
                // by using ByRefByteArrayOutputStream
                ByteArrayInputStream byteArrayInputStream = new ByteArrayInputStream(bOut.getInternalBuffer(),
                        0, bOut.size());

                PutObjectRequest putObjectRequest = new PutObjectRequest(s3TargetConfigBean.s3Config.bucket,
                        fileName.toString(), byteArrayInputStream, null);

                LOG.debug("Uploading object {} into Amazon S3", s3TargetConfigBean.s3Config.bucket
                        + s3TargetConfigBean.s3Config.delimiter + fileName);
                s3TargetConfigBean.s3Config.getS3Client().putObject(putObjectRequest);
                LOG.debug("Successfully uploaded object {} into Amazon S3", s3TargetConfigBean.s3Config.bucket
                        + s3TargetConfigBean.s3Config.delimiter + fileName);
            }
        } catch (AmazonClientException | IOException e) {
            LOG.error(Errors.S3_21.getMessage(), e.toString(), e);
            throw new StageException(Errors.S3_21, e.toString(), e);
        }
    }
}
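In the example above, the PutObjectRequest is built with null metadata even though the byte count is known from bOut.size(). Passing an ObjectMetadata with the content length set avoids the SDK's no-content-length warning. A sketch of that variant, reusing the variables from the example:

ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(bOut.size()); // length is known, so declare it instead of passing null
PutObjectRequest putObjectRequest = new PutObjectRequest(s3TargetConfigBean.s3Config.bucket,
        fileName.toString(), byteArrayInputStream, meta);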