List of usage examples for com.amazonaws.services.s3.model.S3Object.getObjectMetadata()
public ObjectMetadata getObjectMetadata()
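getObjectMetadata() returns the ObjectMetadata received with the downloaded object, exposing response headers such as Content-Type, Content-Length, ETag, and Last-Modified, plus any user metadata. A minimal sketch of typical usage, assuming a default client and placeholder bucket/key names (S3Object implements Closeable in recent 1.x SDKs, so try-with-resources releases the connection):

    // Minimal sketch; "my-bucket" and "my-key" are placeholders.
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
    try (S3Object object = s3.getObject("my-bucket", "my-key")) {
        ObjectMetadata metadata = object.getObjectMetadata();
        System.out.println("Content-Type: " + metadata.getContentType());
        System.out.println("Content-Length: " + metadata.getContentLength());
    } catch (IOException e) {
        e.printStackTrace(); // close() may throw IOException
    }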
From source file:eu.openg.aws.s3.internal.FakeS3Object.java
License:Apache License
FakeS3Object(S3Object object, Clock clock) {
    this.clock = clock;
    this.object = object;
    setContent(object.getObjectContent());
    updateMetadata(object.getObjectMetadata());
}
From source file:exemplos.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3.listObjects(
                new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " "
                    + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:fsi_admin.JAwsS3Conn.java
License:Open Source License
private boolean descargarArchivo(HttpServletResponse response, StringBuffer msj, AmazonS3 s3,
        String S3BUKT, String nombre, String destino) {
    //System.out.println("AwsConn DescargarArchivo:" + nombre + ":nombre");
    try {
        System.out.println("DESCARGA BUCKET: " + S3BUKT + " OBJETO: " + nombre);
        S3Object object = s3.getObject(new GetObjectRequest(S3BUKT, nombre));
        //out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        //System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        byte[] byteArray = IOUtils.toByteArray(object.getObjectContent());
        ByteArrayInputStream bais = new ByteArrayInputStream(byteArray);
        JBajarArchivo fd = new JBajarArchivo();
        fd.doDownload(response, getServletConfig().getServletContext(), bais,
                object.getObjectMetadata().getContentType(), byteArray.length, destino);
        System.out.println("Content-Length: " + object.getObjectMetadata().getContentLength()
                + " BA: " + byteArray.length);
        return true;
    } catch (AmazonServiceException ase) {
        ase.printStackTrace();
        msj.append("Error de AmazonServiceException al descargar archivo de S3.<br>");
        msj.append("Mensaje: " + ase.getMessage() + "<br>");
        msj.append("Código de Estatus HTTP: " + ase.getStatusCode() + "<br>");
        msj.append("Código de Error AWS: " + ase.getErrorCode() + "<br>");
        msj.append("Tipo de Error: " + ase.getErrorType() + "<br>");
        msj.append("Request ID: " + ase.getRequestId());
        return false;
    } catch (AmazonClientException ace) {
        ace.printStackTrace();
        msj.append("Error de AmazonClientException al descargar archivo de S3.<br>");
        msj.append("Mensaje: " + ace.getMessage());
        return false;
    } catch (IOException ace) {
        ace.printStackTrace();
        msj.append("Error de IOException al descargar archivo de S3.<br>");
        msj.append("Mensaje: " + ace.getMessage());
        return false;
    }
}
From source file:hydrograph.engine.spark.datasource.utils.AWSS3Util.java
License:Apache License
public void download(RunFileTransferEntity runFileTransferEntity) {
    log.debug("Start AWSS3Util download");
    File filecheck = new File(runFileTransferEntity.getLocalPath());
    if (runFileTransferEntity.getFailOnError())
        if (!(filecheck.exists() && filecheck.isDirectory())
                && !(runFileTransferEntity.getLocalPath().contains("hdfs://"))) {
            throw new AWSUtilException("Invalid local path");
        }
    boolean fail_if_exist = false;
    boolean done = false; // presumably a field in the original class; declared here so the excerpt compiles
    int retryAttempt = 0;
    int i;
    String amazonFileUploadLocationOriginal = null;
    String keyName = null;
    if (runFileTransferEntity.getRetryAttempt() == 0)
        retryAttempt = 1;
    else
        retryAttempt = runFileTransferEntity.getRetryAttempt();

    for (i = 0; i < retryAttempt; i++) {
        log.info("connection attempt: " + (i + 1));
        try {
            AmazonS3 s3Client = null;
            ClientConfiguration clientConf = new ClientConfiguration();
            clientConf.setProtocol(Protocol.HTTPS);
            if (runFileTransferEntity.getCrediationalPropertiesFile() == null) {
                BasicAWSCredentials creds = new BasicAWSCredentials(
                        runFileTransferEntity.getAccessKeyID(),
                        runFileTransferEntity.getSecretAccessKey());
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            } else {
                File securityFile = new File(runFileTransferEntity.getCrediationalPropertiesFile());
                PropertiesCredentials creds = new PropertiesCredentials(securityFile);
                s3Client = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConf)
                        .withRegion(runFileTransferEntity.getRegion())
                        .withCredentials(new AWSStaticCredentialsProvider(creds)).build();
            }
            String s3folderName = null;
            String filepath = runFileTransferEntity.getFolder_name_in_bucket();
            if (filepath.lastIndexOf("/") != -1) {
                s3folderName = filepath.substring(0, filepath.lastIndexOf("/"));
                keyName = filepath.substring(filepath.lastIndexOf("/") + 1);
            } else {
                keyName = filepath;
            }
            log.debug("keyName is: " + keyName);
            log.debug("bucket name is: " + runFileTransferEntity.getBucketName());
            log.debug("Folder Name is: " + runFileTransferEntity.getFolder_name_in_bucket());
            if (s3folderName != null) {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName() + "/" + s3folderName;
            } else {
                amazonFileUploadLocationOriginal = runFileTransferEntity.getBucketName();
            }
            if (runFileTransferEntity.getLocalPath().contains("hdfs://")) {
                String outputPath = runFileTransferEntity.getLocalPath();
                String s1 = outputPath.substring(7, outputPath.length());
                String s2 = s1.substring(0, s1.indexOf("/"));
                File f = new File("/tmp");
                if (!f.exists())
                    f.mkdir();
                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream("/tmp/" + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", "hdfs://" + s2);
                FileSystem hdfsFileSystem = FileSystem.get(conf);
                String s = outputPath.substring(7, outputPath.length());
                String hdfspath = s.substring(s.indexOf("/"), s.length());
                Path local = new Path("/tmp/" + keyName);
                Path hdfs = new Path(hdfspath);
                hdfsFileSystem.copyFromLocalFile(local, hdfs);
            } else {
                GetObjectRequest request = new GetObjectRequest(amazonFileUploadLocationOriginal, keyName);
                S3Object object = s3Client.getObject(request);
                if (runFileTransferEntity.getEncoding() != null)
                    object.getObjectMetadata().setContentEncoding(runFileTransferEntity.getEncoding());
                File fexist = new File(runFileTransferEntity.getLocalPath() + File.separatorChar + keyName);
                if (runFileTransferEntity.getOverwrite().trim().equalsIgnoreCase("Overwrite If Exists")) {
                    S3ObjectInputStream objectContent = object.getObjectContent();
                    IOUtils.copyLarge(objectContent, new FileOutputStream(
                            runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                } else {
                    if (!(fexist.exists() && !fexist.isDirectory())) {
                        S3ObjectInputStream objectContent = object.getObjectContent();
                        IOUtils.copyLarge(objectContent, new FileOutputStream(
                                runFileTransferEntity.getLocalPath() + File.separatorChar + keyName));
                    } else {
                        fail_if_exist = true;
                        Log.error("File already exists");
                        throw new AWSUtilException("File already exists");
                    }
                }
            }
        } catch (AmazonServiceException e) {
            log.error("Amazon Service Exception", e);
            if (e.getStatusCode() == 403 || e.getStatusCode() == 404) {
                if (runFileTransferEntity.getFailOnError()) {
                    Log.error("Incorrect details provided. Please provide correct details", e);
                    throw new AWSUtilException("Incorrect details provided");
                } else {
                    Log.error("Unknown Amazon exception occurred", e);
                }
            }
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
                Log.error("Exception occurred while sleeping the thread");
            }
            continue;
        } catch (Error e) {
            Log.error("Error occurred while sleeping the thread");
            throw new AWSUtilException(e);
        } catch (Exception e) {
            log.error("error while transferring file", e);
            try {
                Thread.sleep(runFileTransferEntity.getRetryAfterDuration());
            } catch (Exception e1) {
            } catch (Error err) {
                Log.error("Error occurred while downloading");
                throw new AWSUtilException(err);
            }
            continue;
        }
        done = true;
        break;
    }

    if (runFileTransferEntity.getFailOnError() && !done) {
        log.error("File transfer failed");
        throw new AWSUtilException("File transfer failed");
    } else if (!done) {
        log.error("File transfer failed but mentioned fail on error as false");
    }
    if (i == runFileTransferEntity.getRetryAttempt()) {
        if (runFileTransferEntity.getFailOnError()) {
            throw new AWSUtilException("File transfer failed");
        }
    }
    log.debug("Finished AWSS3Util download");
}
From source file:ics.uci.edu.amazons3.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    final AmazonS3 s3 = new AmazonS3Client(
            new BasicAWSCredentials("AKIAJTW5BOY6EXOGV2YQ", "PDcnFYIf9Hdo9GsKTEjLXretZ3yEg4mRCDQKjxu6"));

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3.listObjects(
                new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(" - " + objectSummary.getKey() + " "
                    + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:io.dockstore.common.FileProvisioning.java
License:Apache License
private void downloadFromS3(String path, String targetFilePath) {
    AmazonS3 s3Client = getAmazonS3Client(config);
    String trimmedPath = path.replace("s3://", "");
    List<String> splitPathList = Lists.newArrayList(trimmedPath.split("/"));
    String bucketName = splitPathList.remove(0);

    S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, Joiner.on("/").join(splitPathList)));
    try {
        FileOutputStream outputStream = new FileOutputStream(new File(targetFilePath));
        S3ObjectInputStream inputStream = object.getObjectContent();
        long inputSize = object.getObjectMetadata().getContentLength();
        copyFromInputStreamToOutputStream(inputStream, inputSize, outputStream);
    } catch (IOException e) {
        LOG.error(e.getMessage());
        throw new RuntimeException("Could not provision input files from S3", e);
    }
}
From source file:io.konig.camel.aws.s3.DeleteObjectEndpoint.java
License:Apache License
public Exchange createExchange(ExchangePattern pattern, final S3Object s3Object) {
    LOG.trace("Getting object with key [{}] from bucket [{}]...", s3Object.getKey(), s3Object.getBucketName());

    ObjectMetadata objectMetadata = s3Object.getObjectMetadata();

    LOG.trace("Got object [{}]", s3Object);

    Exchange exchange = super.createExchange(pattern);
    Message message = exchange.getIn();

    if (configuration.isIncludeBody()) {
        message.setBody(s3Object.getObjectContent());
    } else {
        message.setBody(null);
    }

    message.setHeader(S3Constants.KEY, s3Object.getKey());
    message.setHeader(S3Constants.BUCKET_NAME, s3Object.getBucketName());
    message.setHeader(S3Constants.E_TAG, objectMetadata.getETag());
    message.setHeader(S3Constants.LAST_MODIFIED, objectMetadata.getLastModified());
    message.setHeader(S3Constants.VERSION_ID, objectMetadata.getVersionId());
    message.setHeader(S3Constants.CONTENT_TYPE, objectMetadata.getContentType());
    message.setHeader(S3Constants.CONTENT_MD5, objectMetadata.getContentMD5());
    message.setHeader(S3Constants.CONTENT_LENGTH, objectMetadata.getContentLength());
    message.setHeader(S3Constants.CONTENT_ENCODING, objectMetadata.getContentEncoding());
    message.setHeader(S3Constants.CONTENT_DISPOSITION, objectMetadata.getContentDisposition());
    message.setHeader(S3Constants.CACHE_CONTROL, objectMetadata.getCacheControl());
    message.setHeader(S3Constants.S3_HEADERS, objectMetadata.getRawMetadata());
    message.setHeader(S3Constants.SERVER_SIDE_ENCRYPTION, objectMetadata.getSSEAlgorithm());
    message.setHeader(S3Constants.USER_METADATA, objectMetadata.getUserMetadata());
    message.setHeader(S3Constants.EXPIRATION_TIME, objectMetadata.getExpirationTime());
    message.setHeader(S3Constants.REPLICATION_STATUS, objectMetadata.getReplicationStatus());
    message.setHeader(S3Constants.STORAGE_CLASS, objectMetadata.getStorageClass());

    /*
     * If includeBody != true, it is safe to close the object here. If
     * includeBody == true, the caller is responsible for closing the stream
     * and object once the body has been fully consumed. As of 2.17, the
     * consumer does not close the stream or object on commit.
     */
    if (!configuration.isIncludeBody()) {
        IOHelper.close(s3Object);
    } else {
        if (configuration.isAutocloseBody()) {
            exchange.addOnCompletion(new SynchronizationAdapter() {
                @Override
                public void onDone(Exchange exchange) {
                    IOHelper.close(s3Object);
                }
            });
        }
    }
    return exchange;
}
From source file:net.henryhu.roxlab2.NotePadProvider.java
License:Apache License
private ContentValues s3query(String id) {
    if (bucket == null)
        bucket = s3.createBucket(bucketName);
    Log.w("s3query()", "query id: " + id);

    ObjectListing ol = s3.listObjects(bucketName, id + "_");
    for (Object o : ol.getObjectSummaries()) {
        S3ObjectSummary sum = (S3ObjectSummary) o;
        S3Object obj = s3.getObject(bucketName, sum.getKey());
        ObjectMetadata om = obj.getObjectMetadata();
        byte[] buf = new byte[(int) om.getContentLength()];
        try {
            InputStream contents = obj.getObjectContent();
            // read() is not guaranteed to fill the buffer in one call,
            // so loop until the full content length has been consumed
            int off = 0;
            while (off < buf.length) {
                int n = contents.read(buf, off, buf.length - off);
                if (n < 0)
                    break;
                off += n;
            }
            contents.close();
            String sbuf = new String(buf, "UTF-8");
            Map<String, String> entries = ParseInfo.getEntries(sbuf);
            ContentValues vals = new ContentValues();
            for (String key : entries.keySet()) {
                vals.put(key, entries.get(key));
            }
            Log.w("s3query()", "query succ");
            return vals;
        } catch (Exception e) {
        }
    }
    Log.w("s3query()", "query fail");
    return null;
}
From source file:nl.nn.adapterframework.filesystem.AmazonS3FileSystem.java
License:Apache License
@Override
public long getFileSize(S3Object f) throws FileSystemException {
    return f.getObjectMetadata().getContentLength();
}
From source file:nl.nn.adapterframework.filesystem.AmazonS3FileSystem.java
License:Apache License
@Override
public Date getModificationTime(S3Object f) throws FileSystemException {
    if (f.getKey().isEmpty()) {
        return null;
    }
    S3Object file = s3Client.getObject(bucketName, f.getKey());
    return file.getObjectMetadata().getLastModified();
}
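One caveat worth noting: the method above calls getObject(), which opens a content stream just to read a single header. When only metadata is needed, AmazonS3.getObjectMetadata(bucketName, key) issues a HEAD request and avoids transferring the body entirely. A minimal sketch of that alternative, assuming the same s3Client and bucketName fields as the example above:

    @Override
    public Date getModificationTime(S3Object f) throws FileSystemException {
        if (f.getKey().isEmpty()) {
            return null;
        }
        // HEAD request: returns the metadata without opening a content stream
        ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, f.getKey());
        return metadata.getLastModified();
    }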