List of usage examples for com.amazonaws.services.s3 AmazonS3 putObject
public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws SdkClientException, AmazonServiceException;
Uploads a new object to the specified Amazon S3 bucket and returns a PutObjectResult containing, among other things, the ETag of the stored object.
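A minimal sketch of the two common call shapes, assuming default-chain credentials; my-bucket, docs/report.txt, docs/hello.txt, and report.txt are placeholder names, not values from any example below:

import java.io.ByteArrayInputStream;
import java.io.File;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;

public class PutObjectExample {
    public static void main(String[] args) {
        // Default credential chain: environment variables, profile file, or instance role.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Variant 1: upload from a File; the SDK derives the content length itself.
        PutObjectResult result = s3.putObject(
                new PutObjectRequest("my-bucket", "docs/report.txt", new File("report.txt")));
        System.out.println("ETag: " + result.getETag());

        // Variant 2: upload from an InputStream; supply the content length yourself
        // so the SDK does not have to buffer the whole stream to compute it.
        byte[] bytes = "hello".getBytes(StandardCharsets.UTF_8);
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(bytes.length);
        s3.putObject(new PutObjectRequest("my-bucket", "docs/hello.txt",
                new ByteArrayInputStream(bytes), meta));
    }
}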
From source file:dataMappers.PictureDataMapper.java
public static void addPictureToReport(DBConnector dbconnector, HttpServletRequest request)
        throws FileUploadException, IOException, SQLException {
    if (!ServletFileUpload.isMultipartContent(request)) {
        System.out.println("Invalid upload request");
        return;
    }

    // Define limits for disk items
    DiskFileItemFactory factory = new DiskFileItemFactory();
    factory.setSizeThreshold(THRESHOLD_SIZE);

    // Define limits for the servlet upload
    ServletFileUpload upload = new ServletFileUpload(factory);
    upload.setFileSizeMax(MAX_FILE_SIZE);
    upload.setSizeMax(MAX_REQUEST_SIZE);

    FileItem itemFile = null;
    int reportID = 0;

    // Get the list of items in the request (parameters, files etc.)
    List<FileItem> formItems = upload.parseRequest(request);
    Iterator<FileItem> iter = formItems.iterator();

    // Loop over the items
    while (iter.hasNext()) {
        FileItem item = iter.next();
        if (!item.isFormField()) {
            // Not a form field, so it must be the uploaded file
            itemFile = item;
        } else if (item.getFieldName().equalsIgnoreCase("reportID")) {
            try {
                System.out.println(item.getString());
                reportID = Integer.parseInt(item.getString());
            } catch (NumberFormatException e) {
                reportID = 0;
            }
        }
    }

    // itemFile is null if no field was declared as image/upload,
    // and reportID must be > 0.
    if (itemFile != null && reportID > 0) {
        try {
            // Create credentials from final vars
            BasicAWSCredentials awsCredentials = new BasicAWSCredentials(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY);

            // Create a client with the credentials and set the region
            AmazonS3 s3client = new AmazonS3Client(awsCredentials);
            s3client.setRegion(Region.getRegion(Regions.EU_WEST_1));

            // Set the content length (size) of the file
            ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(itemFile.getSize());

            // Generate a random filename, keeping the original extension
            String ext = FilenameUtils.getExtension(itemFile.getName());
            String keyName = UUID.randomUUID().toString() + '.' + ext;

            // This is the actual upload command
            s3client.putObject(new PutObjectRequest(S3_BUCKET_NAME, keyName, itemFile.getInputStream(), om));

            // The picture was uploaded to S3 if we made it this far.
            // Now insert the row into the database for the report.
            PreparedStatement stmt = dbconnector.getCon()
                    .prepareStatement("INSERT INTO reports_pictures (REPORTID, PICTURE) VALUES (?,?)");
            stmt.setInt(1, reportID);
            stmt.setString(2, keyName);
            stmt.executeUpdate();
            stmt.close();
        } catch (AmazonServiceException ase) {
            System.out.println("Caught an AmazonServiceException, which means your request made it "
                    + "to Amazon S3, but was rejected with an error response for some reason.");
            System.out.println("Error Message: " + ase.getMessage());
            System.out.println("HTTP Status Code: " + ase.getStatusCode());
            System.out.println("AWS Error Code: " + ase.getErrorCode());
            System.out.println("Error Type: " + ase.getErrorType());
            System.out.println("Request ID: " + ase.getRequestId());
        } catch (AmazonClientException ace) {
            System.out.println("Caught an AmazonClientException, which means the client encountered "
                    + "an internal error while trying to communicate with S3, "
                    + "such as not being able to access the network.");
            System.out.println("Error Message: " + ace.getMessage());
        }
    }
}
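Note the call to om.setContentLength(itemFile.getSize()): when putObject is given an InputStream, the SDK needs the content length up front; if it is missing, the client has to buffer the entire stream in memory to compute it before sending.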
From source file:ecplugins.s3.S3Util.java
License:Apache License
public static void UploadObject(String bucketName, String key)
        throws AmazonClientException, AmazonServiceException, Exception {
    Properties props = TestUtils.getProperties();
    File file = new File(createFile());
    BasicAWSCredentials credentials = new BasicAWSCredentials(props.getProperty(StringConstants.ACCESS_ID),
            props.getProperty(StringConstants.SECRET_ACCESS_ID));

    // Create a TransferManager and get the underlying S3 client
    TransferManager tx = new TransferManager(credentials);
    AmazonS3 s3 = tx.getAmazonS3Client();

    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, file));
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
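Incidentally, this example uses TransferManager only to obtain the low-level client. A sketch of what TransferManager itself is for, managed (and, above a size threshold, multipart) uploads; it reuses the credentials, bucketName, key, and file from the example above and is an illustration, not part of the original source:

public static void uploadManaged(BasicAWSCredentials credentials, String bucketName, String key, File file)
        throws InterruptedException {
    TransferManager tx = new TransferManager(credentials);
    try {
        // upload() returns immediately; the bytes move on background threads
        Upload upload = tx.upload(bucketName, key, file);
        upload.waitForCompletion(); // blocks until the transfer succeeds or throws
    } finally {
        tx.shutdownNow(); // stops the worker threads (and, by default, the underlying client)
    }
}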
From source file:example.uploads3.UploadS3.java
License:Apache License
public static void main(String[] args) throws Exception {
    String uploadFileName = args[0];
    String bucketName = "haos3";
    String keyName = "test/byspark.txt";

    // Create a Java Spark Context.
    SparkConf conf = new SparkConf().setAppName("UploadS3");
    JavaSparkContext sc = new JavaSparkContext(conf);

    AmazonS3 s3client = new AmazonS3Client(new ProfileCredentialsProvider());
    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        File file = new File(uploadFileName);
        PutObjectRequest putRequest = new PutObjectRequest(bucketName, keyName, file);

        // Request server-side encryption.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setServerSideEncryption("AES256");
        putRequest.setMetadata(objectMetadata);

        s3client.putObject(putRequest);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
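Setting "AES256" on the metadata sends the x-amz-server-side-encryption header, asking S3 to encrypt the object at rest (SSE-S3); the SDK also provides the constant ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION to avoid the bare string.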
From source file:exemplos.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " (size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:fsi_admin.JAwsS3Conn.java
License:Open Source License
@SuppressWarnings("rawtypes")
private boolean subirArchivo(StringBuffer msj, AmazonS3 s3, String S3BUKT, String nombre, Vector archivos) {
    if (!archivos.isEmpty()) {
        FileItem actual = null;
        try {
            for (int i = 0; i < archivos.size(); i++) {
                InputStream inputStream = null;
                try {
                    actual = (FileItem) archivos.elementAt(i);

                    // Obtain the content length of the input stream for the S3 header
                    InputStream is = actual.getInputStream();
                    byte[] contentBytes = IOUtils.toByteArray(is);
                    Long contentLength = Long.valueOf(contentBytes.length);
                    ObjectMetadata metadata = new ObjectMetadata();
                    metadata.setContentLength(contentLength);

                    // Re-obtain the temporarily uploaded file as an input stream
                    inputStream = actual.getInputStream();

                    // Put the object in S3
                    s3.putObject(new PutObjectRequest(S3BUKT, nombre, inputStream, metadata));
                } finally {
                    if (inputStream != null) {
                        try {
                            inputStream.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }
                }
            }
            return true;
        } catch (AmazonServiceException ase) {
            ase.printStackTrace();
            msj.append("Error de AmazonServiceException al subir archivo a S3.<br>");
            msj.append("Mensaje: " + ase.getMessage() + "<br>");
            msj.append("Código de Estatus HTTP: " + ase.getStatusCode() + "<br>");
            msj.append("Código de Error AWS: " + ase.getErrorCode() + "<br>");
            msj.append("Tipo de Error: " + ase.getErrorType() + "<br>");
            msj.append("Request ID: " + ase.getRequestId());
            return false;
        } catch (AmazonClientException ace) {
            ace.printStackTrace();
            msj.append("Error de AmazonClientException al subir archivo a S3.<br>");
            msj.append("Mensaje: " + ace.getMessage());
            return false;
        } catch (IOException e) {
            e.printStackTrace();
            msj.append("Error de Entrada/Salida al subir archivo a S3: " + e.getMessage());
            return false;
        }
    } else {
        msj.append("Error al subir archivo a la nube: No se envió ningún archivo");
        return false;
    }
}
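Reading the stream into a byte[] just to learn its length holds the entire upload in memory; since commons-fileupload's FileItem already knows its size, actual.getSize() would supply the content length without the extra copy.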
From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java
License:Apache License
public void upload(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util upload");
    int retryAttempt = 0;
    int i;
    boolean done = false;

    java.nio.file.Path inputfile = new File(runFileTransferEntity.getLocalPath()).toPath();
    String keyName = inputfile.getFileName().toString();
    log.info("keyName is: " + keyName);
    log.info("bucket name is: " + runFileTransferEntity.getBucketName());
    log.info("Folder Name is " + runFileTransferEntity.getFolder_name_in_bucket());

    String amazonFileUploadLocationOriginal = null;
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError()
            && !(filecheck.isFile() || filecheck.isDirectory())
            && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
        Log.error("Invalid local path. Please provide a valid path");
        throw new AWSUtilException("Invalid local path");
    }

    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }

            String s3folderName = runFileTransferEntity.getFolder_name_in_bucket();
            log.debug("file path name " + s3folderName);
            if (s3folderName != null && !s3folderName.trim().equals("")) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }

            File f = new File(runFileTransferEntity.getLocalPath());
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                log.debug("Provided HDFS local path");
                String inputPath = runFileTransferEntity.getLocalPath();
                String s1 = inputPath.substring(7, inputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File file = new File("/tmp");
                if (!file.exists())
                    file.mkdir();
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                Path local = new Path("/tmp");
                String s = inputPath.substring(7, inputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path hdfs = new Path(hdfspath);
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (hdfsFileSystem.isDirectory(new Path(hdfspath))) {
                    // Copy every file in the HDFS directory to a temp folder, then upload each one
                    InputStream is = null;
                    OutputStream os = null;
                    FileStatus[] fileStatus = hdfsFileSystem
                            .listStatus(new Path(runFileTransferEntity.getLocalPath()));
                    Path[] paths = FileUtil.stat2Paths(fileStatus);
                    File dirs = null;
                    try {
                        String folderName = hdfspath.substring(hdfspath.lastIndexOf("/") + 1);
                        Random ran = new Random();
                        String tempFolder = "ftp_sftp_" + System.nanoTime() + "_" + ran.nextInt(1000);
                        dirs = new File("/tmp/" + tempFolder);
                        dirs.mkdirs();
                        for (Path files : paths) {
                            is = hdfsFileSystem.open(files);
                            os = new BufferedOutputStream(new FileOutputStream(dirs + "/" + files.getName()));
                            org.apache.hadoop.io.IOUtils.copyBytes(is, os, conf);
                        }
                        for (File files : dirs.listFiles()) {
                            if (files.isFile()) {
                                s3Client.putObject(new PutObjectRequest(
                                        amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                        files));
                            }
                        }
                    } catch (IOException e) {
                        Log.error("IOException occurred while transferring the file", e);
                    } finally {
                        org.apache.hadoop.io.IOUtils.closeStream(is);
                        org.apache.hadoop.io.IOUtils.closeStream(os);
                        if (dirs != null) {
                            FileUtils.deleteDirectory(dirs);
                        }
                    }
                } else {
                    // Single HDFS file: copy it to /tmp and upload from there
                    hdfsFileSystem.copyToLocalFile(false, hdfs, local);
                    File s3File = new File("/tmp/" + f.getName());
                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, s3File);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            } else {
                ObjectMetadata objectMetadata = new ObjectMetadata();
                if (runFileTransferEntity.getEncoding() != null)
                    objectMetadata.setContentEncoding(runFileTransferEntity.getEncoding());

                if (Files.isDirectory(inputfile)) {
                    // Local directory: upload each regular file under a folder prefix
                    File fileloc = new File(inputfile.toAbsolutePath().toString());
                    String folderName = new File(runFileTransferEntity.getLocalPath()).getName();
                    for (File files : fileloc.listFiles()) {
                        if (files.isFile()) {
                            PutObjectRequest putObjectRequest = new PutObjectRequest(
                                    amazonFileUploadLocationOriginal + "/" + folderName, files.getName(),
                                    files);
                            PutObjectResult result = s3Client.putObject(putObjectRequest);
                        }
                    }
                } else {
                    // Single local file
                    File file = new File(runFileTransferEntity.getLocalPath());
                    PutObjectRequest putObjectRequest = new PutObjectRequest(amazonFileUploadLocationOriginal,
                            keyName, file);
                    PutObjectResult result = s3Client.putObject(putObjectRequest);
                }
            }
        } catch (AmazonServiceException e) {
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                // Permanent error: bad credentials or missing bucket, so don't retry
                if (runFileTransferEntity.getFailOnError())
                    Log.error("Incorrect details provided. Please provide valid details", e);
                throw new AWSUtilException("Incorrect details provided");
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error err) {
            Log.error("Error occurred while uploading the file", err);
            throw new AWSUtilException(err);
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError())
            throw new AWSUtilException("File transfer failed");
    }
    log.debug("Finished AWSS3Util upload");
}
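The retry loop distinguishes permanent failures from transient ones: a 403 or 404 in the AmazonServiceException means bad credentials or a missing bucket and aborts immediately, while any other failure sleeps for getRetryAfterDuration() and retries, up to getRetryAttempt() times.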
From source file:ics.uci.edu.amazons3.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This sample passes credentials to the client directly. The key pair
     * below is a placeholder: never hardcode real credentials in source,
     * use a credentials provider instead.
     * http://aws.amazon.com/security-credentials
     */
    final AmazonS3 s3 = new AmazonS3Client(
            new BasicAWSCredentials("YOUR_ACCESS_KEY_ID", "YOUR_SECRET_ACCESS_KEY"));

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " (size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
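Apart from the credentials line (placeholders shown above), this walkthrough is the same as exemplos.S3Sample earlier on this page; prefer a credentials provider (profile file, environment variables, or an instance role) over constructing BasicAWSCredentials from literals, which is how keys end up leaked in public repositories.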
From source file:iit.edu.supadyay.s3.S3upload.java
public static boolean upload(String bucketName, String uploadFileName, String keyName)
        throws IOException, InterruptedException {
    // Credentials come from the EC2 instance profile, so no keys appear in code.
    AmazonS3 s3client = new AmazonS3Client(new InstanceProfileCredentialsProvider());
    try {
        System.out.println("Uploading a new object to S3 from a file\n");
        File file = new File(uploadFileName);
        s3client.createBucket(bucketName);
        s3client.putObject(new PutObjectRequest(bucketName, keyName, file));
        s3client.setObjectAcl(bucketName, keyName, CannedAccessControlList.PublicReadWrite);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
        return false;
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
        return false;
    }
    return true;
}
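InstanceProfileCredentialsProvider fetches temporary credentials from the EC2 instance metadata service, so this code only works on an instance with an IAM role attached. Note also that CannedAccessControlList.PublicReadWrite lets anyone overwrite the object; PublicRead is almost always what a publicly served upload actually needs.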
From source file:io.dockstore.common.FileProvisioning.java
License:Apache License
public void provisionOutputFile(FileInfo file, String cwlOutputPath) {
    File sourceFile = new File(cwlOutputPath);
    long inputSize = sourceFile.length();
    if (file.getUrl().startsWith("s3://")) {
        AmazonS3 s3Client = FileProvisioning.getAmazonS3Client(config);
        String trimmedPath = file.getUrl().replace("s3://", "");
        List<String> splitPathList = Lists.newArrayList(trimmedPath.split("/"));
        String bucketName = splitPathList.remove(0);

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
                Joiner.on("/").join(splitPathList), sourceFile);
        putObjectRequest.setGeneralProgressListener(new ProgressListener() {
            ProgressPrinter printer = new ProgressPrinter();
            long runningTotal = 0;

            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                if (progressEvent.getEventType() == ProgressEventType.REQUEST_BYTE_TRANSFER_EVENT) {
                    runningTotal += progressEvent.getBytesTransferred();
                }
                printer.handleProgress(runningTotal, inputSize);
            }
        });
        try {
            s3Client.putObject(putObjectRequest);
        } finally {
            System.out.println();
        }
    } else {
        try {
            // Trigger a copy from the URL to a local file path; a UUID-based name avoids collisions
            FileSystemManager fsManager = VFS.getManager();
            FileObject dest = fsManager.resolveFile(file.getUrl());
            FileObject src = fsManager.resolveFile(sourceFile.getAbsolutePath());
            copyFromInputStreamToOutputStream(src.getContent().getInputStream(), inputSize,
                    dest.getContent().getOutputStream());
        } catch (IOException e) {
            throw new RuntimeException("Could not provision output files", e);
        }
    }
}
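The anonymous ProgressListener accumulates REQUEST_BYTE_TRANSFER_EVENT byte counts as the request body streams out, letting the printer render progress against the file size measured before the upload started.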
From source file:io.druid.storage.s3.S3DataSegmentPusher.java
License:Apache License
private void uploadFileIfPossible(AmazonS3 s3Client, String bucket, String key, File file,
        boolean replaceExisting) {
    if (!replaceExisting && S3Utils.isObjectInBucketIgnoringPermission(s3Client, bucket, key)) {
        log.info("Skipping push because key [%s] exists && replaceExisting == false", key);
    } else {
        final PutObjectRequest indexFilePutRequest = new PutObjectRequest(bucket, key, file);
        if (!config.getDisableAcl()) {
            indexFilePutRequest.setAccessControlList(S3Utils.grantFullControlToBucketOwner(s3Client, bucket));
        }
        log.info("Pushing [%s] to bucket[%s] and key[%s].", file, bucket, key);
        s3Client.putObject(indexFilePutRequest);
    }
}
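Note that the exists-then-put sequence is check-then-act rather than atomic: a concurrent writer can still slip in between the isObjectInBucketIgnoringPermission check and the putObject call.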