List of usage examples for the com.amazonaws.services.s3.model.PutObjectRequest constructor
public PutObjectRequest(String bucketName, String key, InputStream input, ObjectMetadata metadata)
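Before the project examples below, here is a minimal, self-contained sketch of the constructor in use. The bucket name, key, and content are placeholders, and it assumes the AWS SDK for Java v1 with credentials available to the default client. The content length is set explicitly because the SDK cannot determine it from a raw InputStream and would otherwise have to buffer the whole stream:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        byte[] content = "hello, s3".getBytes(StandardCharsets.UTF_8);

        // Always set the content length when uploading from an InputStream;
        // otherwise the SDK must buffer the stream to compute it.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(content.length);
        metadata.setContentType("text/plain");

        // "my-bucket" and "my/key.txt" are hypothetical names
        PutObjectRequest request = new PutObjectRequest("my-bucket", "my/key.txt",
                new ByteArrayInputStream(content), metadata);
        s3.putObject(request);
    }
}

The real-world examples that follow show the same pattern extended with retries, multipart uploads, server-side encryption, and ACLs.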
From source file:org.adroitlogic.build.aws.maven.SimpleStorageServiceWagon.java
License:Apache License
private PutObjectRequest createDirectoryPutObjectRequest(String key) {
    ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(0);
    return new PutObjectRequest(this.bucketName, key, inputStream, objectMetadata)
            .withCannedAcl(CannedAccessControlList.PublicRead);
}
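S3 has no real directories; a zero-byte object uploaded this way serves as a directory marker, which is why the stream is empty and the content length is set to 0.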
From source file:org.alanwilliamson.amazon.s3.Write.java
License:Open Source License
private void writeData(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata,
        StorageClass storage, String mimetype, cfData data, int retry, int retryseconds, String acl,
        String aes256key, Map<String, String> customheaders) throws Exception {
    if (mimetype == null) {
        if (data.getDataType() == cfData.CFBINARYDATA)
            mimetype = "application/unknown";
        else if (cfData.isSimpleValue(data))
            mimetype = "text/plain";
        else
            mimetype = "application/json";

        // Check to see if the mime type is in the metadata
        if (metadata != null && metadata.containsKey("Content-Type"))
            mimetype = metadata.get("Content-Type");
    }

    InputStream ios = null;
    long size = 0;
    if (data.getDataType() == cfData.CFSTRINGDATA) {
        // Use the byte length, not the character count, so multi-byte characters are sized correctly
        byte[] bytes = data.getString().getBytes();
        ios = new java.io.ByteArrayInputStream(bytes);
        size = bytes.length;
    } else if (data.getDataType() == cfData.CFBINARYDATA) {
        ios = new java.io.ByteArrayInputStream(((cfBinaryData) data).getByteArray());
        size = ((cfBinaryData) data).getLength();
    } else {
        serializejson json = new serializejson();
        StringBuilder out = new StringBuilder();
        json.encodeJSON(out, data, false, CaseType.MAINTAIN, DateType.LONG);
        byte[] bytes = out.toString().getBytes();
        size = bytes.length;
        mimetype = "application/json";
        ios = new java.io.ByteArrayInputStream(bytes);
    }

    // Set up the object metadata
    ObjectMetadata omd = new ObjectMetadata();
    if (metadata != null)
        omd.setUserMetadata(metadata);
    omd.setContentType(mimetype);
    omd.setContentLength(size);

    AmazonS3 s3Client = getAmazonS3(amazonKey);

    // Attempt the upload up to 'retry' times
    int attempts = 0;
    while (attempts < retry) {
        try {
            PutObjectRequest por = new PutObjectRequest(bucket, key, ios, omd);
            por.setStorageClass(storage);

            if (aes256key != null && !aes256key.isEmpty())
                por.setSSECustomerKey(new SSECustomerKey(aes256key));

            if (acl != null && !acl.isEmpty())
                por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl));

            if (customheaders != null && !customheaders.isEmpty()) {
                Iterator<String> it = customheaders.keySet().iterator();
                while (it.hasNext()) {
                    String k = it.next();
                    por.putCustomRequestHeader(k, customheaders.get(k));
                }
            }

            s3Client.putObject(por);
            break;
        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; attempt="
                    + (attempts + 1) + "; exception=" + e.getMessage() + ")");
            attempts++;
            if (attempts == retry)
                throw e;
            else
                Thread.sleep(retryseconds * 1000);
        }
    }
}
From source file:org.anhonesteffort.p25.wav.WaveFileS3Sender.java
License:Open Source License
public void writeAndSend() throws IOException {
    Optional<ByteArrayOutputStream> outStream = waveWriter.write(chunks);

    if (!outStream.isPresent()) {
        chunks.forEach(CheckpointingAudioChunk::checkpoint);
    } else {
        byte[] outBytes = outStream.get().toByteArray();
        ByteArrayInputStream inStream = new ByteArrayInputStream(outBytes);
        ObjectMetadata metadata = metadata(outBytes.length);

        ImbeefMetrics.getInstance().wavSize(outBytes.length);
        ImbeefMetrics.getInstance().wavQueued();

        log.info(proto.toString(channelId) + " wave file queued for s3 upload");
        transferManager.upload(new PutObjectRequest(config.getS3Bucket(), key(), inStream, metadata), this);
    }
}
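Unlike the direct putObject calls in the other examples, this one hands the request to a TransferManager, which performs the upload asynchronously; the second argument registers the sender as a progress listener so it is notified as the transfer proceeds and completes.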
From source file:org.apache.apex.malhar.lib.fs.s3.S3BlockUploadOperator.java
License:Apache License
/**
 * Upload the block into the S3 bucket.
 * @param tuple block data
 */
protected void uploadBlockIntoS3(AbstractBlockReader.ReaderRecord<Slice> tuple) {
    if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
        return;
    }
    // Check whether the block metadata is present for this block
    if (blockIdToFilePath.get(tuple.getBlockId()) == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    String uniqueBlockId = getUniqueBlockIdFromFile(tuple.getBlockId(),
            blockIdToFilePath.get(tuple.getBlockId()));
    S3BlockMetaData metaData = blockInfo.get(uniqueBlockId);
    // Check whether the file metadata has been received
    if (metaData == null) {
        if (!waitingTuples.contains(tuple)) {
            waitingTuples.add(tuple);
        }
        return;
    }
    long partSize = tuple.getRecord().length;
    PartETag partETag = null;
    ByteArrayInputStream bis = new ByteArrayInputStream(tuple.getRecord().buffer);
    // If the file consists of a single block, upload it in one putObject call
    if (metaData.isLastBlock && metaData.partNo == 1) {
        ObjectMetadata omd = createObjectMetadata();
        omd.setContentLength(partSize);
        PutObjectResult result = s3Client
                .putObject(new PutObjectRequest(bucketName, metaData.getKeyName(), bis, omd));
        partETag = new PartETag(1, result.getETag());
    } else {
        // Otherwise upload it as one part of a multipart upload
        try {
            // Create the request to upload a part
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucketName)
                    .withKey(metaData.getKeyName()).withUploadId(metaData.getUploadId())
                    .withPartNumber(metaData.getPartNo()).withInputStream(bis).withPartSize(partSize);
            partETag = s3Client.uploadPart(uploadRequest).getPartETag();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
    UploadBlockMetadata uploadmetadata = new UploadBlockMetadata(partETag, metaData.getKeyName());
    output.emit(uploadmetadata);
    currentWindowRecoveryState.put(uniqueBlockId, uploadmetadata);
    try {
        bis.close();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
From source file:org.apache.apex.malhar.lib.fs.s3.S3Reconciler.java
License:Apache License
/**
 * Uploads the file to Amazon S3 using the S3 client's putObject API.
 */
@Override
protected void processCommittedData(FSRecordCompactionOperator.OutputMetaData outputMetaData) {
    try {
        Path path = new Path(outputMetaData.getPath());
        if (!fs.exists(path)) {
            logger.debug("Ignoring non-existent path assuming replay : {}", path);
            return;
        }
        FSDataInputStream fsinput = fs.open(path);
        ObjectMetadata omd = new ObjectMetadata();
        omd.setContentLength(outputMetaData.getSize());
        String keyName = directoryName + Path.SEPARATOR + outputMetaData.getFileName();
        PutObjectRequest request = new PutObjectRequest(bucketName, keyName, fsinput, omd);
        if (outputMetaData.getSize() < Integer.MAX_VALUE) {
            request.getRequestClientOptions().setReadLimit((int) outputMetaData.getSize());
        } else {
            throw new RuntimeException("PutRequestSize greater than Integer.MAX_VALUE");
        }
        if (fs.exists(path)) {
            PutObjectResult result = s3client.putObject(request);
            logger.debug("File {} Uploaded at {}", keyName, result.getETag());
        }
    } catch (FileNotFoundException e) {
        logger.debug("Ignoring non-existent path assuming replay : {}", outputMetaData.getPath());
    } catch (IOException e) {
        logger.error("Unable to create Stream: {}", e.getMessage());
    }
}
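The setReadLimit call is what lets the SDK retry this request: the client marks the input stream before transmitting and needs a read limit at least as large as the data in order to reset the stream on a retry. Since the limit is an int, sizes above Integer.MAX_VALUE cannot be expressed, which is why they are rejected here.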
From source file:org.apache.camel.component.aws.s3.S3Producer.java
License:Apache License
@Override
public void process(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = new ObjectMetadata();

    Long contentLength = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (contentLength != null) {
        objectMetadata.setContentLength(contentLength);
    }
    String contentType = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
    }
    String cacheControl = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControl != null) {
        objectMetadata.setCacheControl(cacheControl);
    }
    String contentDisposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (contentDisposition != null) {
        objectMetadata.setContentDisposition(contentDisposition);
    }
    String contentEncoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (contentEncoding != null) {
        objectMetadata.setContentEncoding(contentEncoding);
    }
    String contentMD5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (contentMD5 != null) {
        objectMetadata.setContentMD5(contentMD5);
    }
    Date lastModified = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModified != null) {
        objectMetadata.setLastModified(lastModified);
    }
    Map<String, String> userMetadata = exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class);
    if (userMetadata != null) {
        objectMetadata.setUserMetadata(userMetadata);
    }

    File filePayload = null;
    Object obj = exchange.getIn().getMandatoryBody();
    if (obj instanceof File) {
        filePayload = (File) obj;
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), exchange.getIn().getMandatoryBody(InputStream.class), objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if the canned ACL and the ACL are both specified, the last one set will be used;
        // refer to PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }

    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    LOG.trace("Received result [{}]", putObjectResult);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        IOHelper.close(putObjectRequest.getInputStream());
        FileUtil.deleteFile(filePayload);
    }
}
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
private void createEmptyObject(final String bucketName, final String objectName)
        throws AmazonClientException, AmazonServiceException {
    final InputStream im = new InputStream() {
        @Override
        public int read() throws IOException {
            return -1;
        }
    };
    final ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(0L);
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, objectName, im, om);
    putObjectRequest.setCannedAcl(cannedACL);
    s3.putObject(putObjectRequest);
    statistics.incrementWriteOps(1);
}
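The anonymous InputStream whose read() always returns -1 is an empty stream; combined with a content length of zero it produces a zero-byte object, which Hadoop's S3A filesystem uses as a directory marker.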
From source file:org.apache.hadoop.fs.s3r.S3RFastOutputStream.java
License:Apache License
private void putObject() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Executing regular upload for bucket '{}' key '{}'", bucket, key);
    }
    final ObjectMetadata om = createDefaultMetadata();
    om.setContentLength(buffer.size());
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
            new ByteArrayInputStream(buffer.toByteArray()), om);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setGeneralProgressListener(progressListener);
    ListenableFuture<PutObjectResult> putObjectResult =
            executorService.submit(new Callable<PutObjectResult>() {
                @Override
                public PutObjectResult call() throws Exception {
                    return client.putObject(putObjectRequest);
                }
            });
    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload:" + ie, ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw new IOException("Regular upload failed", ee.getCause());
    }
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
public void addMetadataRecord(final InputStream input, final String name) throws DataStoreException {
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        Upload upload = tmx.upload(s3ReqDecorator
                .decorate(new PutObjectRequest(bucket, addMetaKeyPrefix(name), input, new ObjectMetadata())));
        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        LOG.error("Error in uploading", e);
        throw new DataStoreException("Error in uploading", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
From source file:org.apache.nifi.processors.aws.s3.AbstractS3IT.java
License:Apache License
protected void putTestFileEncrypted(String key, File file) throws AmazonS3Exception, FileNotFoundException {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, key, new FileInputStream(file),
            objectMetadata);

    client.putObject(putRequest);
}