List of usage examples for the com.amazonaws.services.s3.model.PutObjectRequest constructor
public PutObjectRequest(String bucketName, String key, String redirectLocation)
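The examples on this page all use the File-based overload, PutObjectRequest(String bucketName, String key, File file), while the prototype above is the redirect-location overload. A minimal sketch of both, assuming a hypothetical bucket, keys, and local file, and the default credential provider chain:

import java.io.File;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class PutObjectRequestSketch {
    public static void main(String[] args) {
        // Client built from the default credential provider chain and region settings.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // File-based overload: upload the contents of a local file under the given key.
        s3.putObject(new PutObjectRequest("example-bucket", "reports/report.pdf", new File("report.pdf")));

        // Redirect-location overload (the prototype above): the object carries no file
        // content, only a website redirect to another key or URL.
        s3.putObject(new PutObjectRequest("example-bucket", "reports/old-report.pdf", "/reports/report.pdf"));
    }
}

Bucket and key names here are placeholders; the redirect-location overload is only meaningful for buckets configured for static website hosting.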
From source file:mx.iteso.desi.cloud.hw3.AWSFaceCompare.java
License:Apache License
private void upload(String filename) {
    PutObjectRequest request = new PutObjectRequest(Config.srcBucket, "Compare/" + filename, new File(filename));
    s3.putObject(request);
}
From source file:mx.iteso.desi.cloud.hw3.FaceAddFrame.java
License:Apache License
private void upload(String filename) {
    PutObjectRequest request = new PutObjectRequest(Config.srcBucket, "Faces/" + filename, new File(filename));
    s3.putObject(request);
}
From source file:net.oletalk.hellospringboot.dao.S3Dao.java
public void uploadFile(String bucketName, String key, File file) throws S3Exception {
    AmazonS3 s3client = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    try {
        LOG.info("Uploading file to S3");
        s3client.putObject(new PutObjectRequest(bucketName, key, file));
    } catch (AmazonServiceException ase) {
        LOG.error("Problem uploading file to S3: " + ase.getMessage() + " (status code " + ase.getStatusCode() + ")");
        throw new S3Exception("Problem uploading file to S3");
    } catch (AmazonClientException ace) {
        LOG.error("Internal error uploading file to S3: " + ace.getMessage());
        throw new S3Exception("Problem uploading file to S3");
    }
    LOG.info("Upload complete");
}
From source file:ohnosequences.ivy.S3Repository.java
License:Apache License
@Override
protected void put(File source, String destination, boolean overwrite) {
    String bucket = S3Utils.getBucket(destination);
    String key = S3Utils.getKey(destination);

    PutObjectRequest request = new PutObjectRequest(bucket, key, source);
    request = request.withCannedAcl(acl);

    if (serverSideEncryption) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setMetadata(objectMetadata);
    }

    if (!getS3Client().doesBucketExist(bucket)) {
        if (!createBucket(bucket, region)) {
            throw new Error("couldn't create bucket");
        }
    }

    if (!this.overwrite && !getS3Client().listObjects(bucket, key).getObjectSummaries().isEmpty()) {
        throw new Error(destination + " exists but overwriting is disabled");
    }

    getS3Client().putObject(request);
}
From source file:org.akvo.flow.deploy.Deploy.java
License:Open Source License
private static void uploadS3(String accessKey, String secretKey, String s3Path, File file)
        throws AmazonServiceException, AmazonClientException {
    BasicAWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3 = new AmazonS3Client(credentials);

    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, s3Path, file);

    ObjectMetadata metadata = new ObjectMetadata();
    // set content type as android package file
    metadata.setContentType("application/vnd.android.package-archive");
    // set content length to length of file
    metadata.setContentLength(file.length());
    putRequest.setMetadata(metadata);
    // set access to public
    putRequest.setCannedAcl(CannedAccessControlList.PublicRead);

    // try to put the apk in S3
    PutObjectResult result = s3.putObject(putRequest);
    System.out.println("Apk uploaded successfully, with result ETag " + result.getETag());
}
From source file:org.alanwilliamson.amazon.s3.BackgroundUploader.java
License:Open Source License
private void uploadFile(Map<String, Object> jobFile) {
    File localFile = new File((String) jobFile.get("localpath"));
    if (!localFile.isFile()) {
        removeJobFile(jobFile);
        callbackCfc(jobFile, false, "local file no longer exists");
        cfEngine.log("AmazonS3Write.BackgroundUploader: file no longer exists=" + localFile.getName());
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (jobFile.containsKey("metadata"))
        omd.setUserMetadata((Map<String, String>) jobFile.get("metadata"));

    TransferManager tm = null;
    AmazonS3 s3Client = null;
    try {
        AmazonKey amazonKey = (AmazonKey) jobFile.get("amazonkey");
        s3Client = new AmazonBase().getAmazonS3(amazonKey);

        PutObjectRequest por = new PutObjectRequest((String) jobFile.get("bucket"), (String) jobFile.get("key"), localFile);
        por.setMetadata(omd);
        por.setStorageClass((StorageClass) jobFile.get("storage"));

        if (jobFile.containsKey("acl"))
            por.setCannedAcl(amazonKey.getAmazonCannedAcl((String) jobFile.get("acl")));

        if (jobFile.containsKey("aes256key"))
            por.setSSECustomerKey(new SSECustomerKey((String) jobFile.get("aes256key")));

        if (jobFile.containsKey("customheaders")) {
            Map<String, String> customheaders = (Map) jobFile.get("customheaders");
            Iterator<String> it = customheaders.keySet().iterator();
            while (it.hasNext()) {
                String k = it.next();
                por.putCustomRequestHeader(k, customheaders.get(k));
            }
        }

        long startTime = System.currentTimeMillis();
        tm = new TransferManager(s3Client);
        Upload upload = tm.upload(por);
        upload.waitForCompletion();
        log(jobFile, "Uploaded; timems=" + (System.currentTimeMillis() - startTime));

        removeJobFile(jobFile);
        callbackCfc(jobFile, true, null);

        if ((Boolean) jobFile.get("deletefile"))
            localFile.delete();
    } catch (Exception e) {
        log(jobFile, "Failed=" + e.getMessage());
        callbackCfc(jobFile, false, e.getMessage());

        int retry = (Integer) jobFile.get("retry");
        int attempt = (Integer) jobFile.get("attempt") + 1;
        if (retry == attempt) {
            removeJobFile(jobFile);
        } else {
            jobFile.put("attempt", attempt);
            jobFile.put("attemptdate", System.currentTimeMillis() + (Long) jobFile.get("retryms"));
            acceptFile(jobFile);
        }

        if (s3Client != null)
            cleanupMultiPartUploads(s3Client, (String) jobFile.get("bucket"));
    } finally {
        if (tm != null)
            tm.shutdownNow(true);
    }
}
From source file:org.alanwilliamson.amazon.s3.Write.java
License:Open Source License
private void writeFile(AmazonKey amazonKey, String bucket, String key, Map<String, String> metadata,
        StorageClass storage, String localpath, int retry, int retryseconds, boolean deletefile,
        boolean background, String callback, String callbackdata, String appname, String acl,
        String aes256key, Map<String, String> customheaders) throws Exception {
    File localFile = new File(localpath);
    if (!localFile.isFile())
        throw new Exception("The file specified does not exist: " + localpath);

    // Push this to the background loader to handle and return immediately
    if (background) {
        BackgroundUploader.acceptFile(amazonKey, bucket, key, metadata, storage, localpath, retry,
                retryseconds, deletefile, callback, callbackdata, appname, acl, aes256key, customheaders);
        return;
    }

    // Setup the object data
    ObjectMetadata omd = new ObjectMetadata();
    if (metadata != null)
        omd.setUserMetadata(metadata);

    AmazonS3 s3Client = getAmazonS3(amazonKey);

    // Retry up to the configured number of attempts
    int attempts = 0;
    while (attempts < retry) {
        try {
            PutObjectRequest por = new PutObjectRequest(bucket, key, localFile);
            por.setMetadata(omd);
            por.setStorageClass(storage);

            if (acl != null && !acl.isEmpty())
                por.setCannedAcl(amazonKey.getAmazonCannedAcl(acl));

            if (aes256key != null && !aes256key.isEmpty())
                por.setSSECustomerKey(new SSECustomerKey(aes256key));

            if (customheaders != null && !customheaders.isEmpty()) {
                Iterator<String> it = customheaders.keySet().iterator();
                while (it.hasNext()) {
                    String k = it.next();
                    por.putCustomRequestHeader(k, customheaders.get(k));
                }
            }

            s3Client.putObject(por);
            break;
        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Write(bucket=" + bucket + "; key=" + key + "; file=" + localFile
                    + "; attempt=" + (attempts + 1) + "; exception=" + e.getMessage() + ")");
            attempts++;

            if (attempts == retry)
                throw e;
            else
                Thread.sleep(retryseconds * 1000);
        }
    }

    // delete the file now that it is a success
    if (deletefile)
        localFile.delete();
}
From source file:org.alfresco.provision.AWSService.java
License:Open Source License
public void put(String key, String filename, boolean overwrite) throws IOException {
    ensureBucket();

    File f = new File(filename);
    if (!s3.doesObjectExist(bucketName, key) && overwrite) {
        s3.putObject(new PutObjectRequest(bucketName, key, f));
    }
}
From source file:org.apache.druid.storage.s3.S3Utils.java
License:Apache License
/**
 * Uploads a file to S3 if possible, first trying to set the ACL to give the bucket owner
 * full control of the file before uploading.
 *
 * @param service    S3 client
 * @param disableAcl true if the ACL shouldn't be set for the file
 * @param bucket     The bucket in which to store the new object.
 * @param key        The key under which to store the new object.
 * @param file       The path of the file to upload to Amazon S3.
 */
public static void uploadFileIfPossible(ServerSideEncryptingAmazonS3 service, boolean disableAcl,
        String bucket, String key, File file) {
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, file);

    if (!disableAcl) {
        putObjectRequest.setAccessControlList(S3Utils.grantFullControlToBucketOwner(service, bucket));
    }
    log.info("Pushing [%s] to bucket[%s] and key[%s].", file, bucket, key);
    service.putObject(putObjectRequest);
}
From source file:org.apache.hadoop.fs.s3a.S3AFileSystem.java
License:Apache License
/**
 * The src file is on the local disk. Add it to FS at the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc    whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src       path
 * @param dst       path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, Path dst) throws IOException {
    String key = pathToKey(dst);

    if (!overwrite && exists(dst)) {
        throw new IOException(dst + " already exists");
    }

    LOG.info("Copying local file from " + src + " to " + dst);

    // Since we have a local file, we don't need to stream into a temporary file
    LocalFileSystem local = getLocal(getConf());
    File srcfile = local.pathToFile(src);

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMinimumUploadPartSize(partSize);
    transferConfiguration.setMultipartUploadThreshold(partSizeThreshold);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    final ObjectMetadata om = new ObjectMetadata();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        om.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
    putObjectRequest.setCannedAcl(cannedACL);
    putObjectRequest.setMetadata(om);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Upload up = transfers.upload(putObjectRequest);
    up.addProgressListener(progressListener);
    try {
        up.waitForUploadResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }

    // This will delete unnecessary fake parent directories
    finishedWrite(key);

    if (delSrc) {
        local.delete(src, false);
    }
}