List of usage examples for com.amazonaws.services.s3.model.S3Object#getObjectMetadata()
public ObjectMetadata getObjectMetadata()
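Before the source-file examples below, a minimal sketch of the call itself: getObjectMetadata() returns the ObjectMetadata that arrived with the GetObject response, so headers such as Content-Type, Content-Length, ETag, and Last-Modified can be read without a separate headObject request. The bucket name, key, and class name in this sketch are placeholders (not taken from any of the examples), and it assumes an SDK version recent enough to provide AmazonS3ClientBuilder and a Closeable S3Object.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.S3Object;

public class GetObjectMetadataSketch {
    public static void main(String[] args) throws Exception {
        // Uses the default credential/region provider chains
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // "my-bucket" and "my-key" are placeholder names
        try (S3Object object = s3.getObject("my-bucket", "my-key")) {
            // Metadata is already attached to the S3Object; no extra request needed
            ObjectMetadata metadata = object.getObjectMetadata();
            System.out.println("Content-Type:   " + metadata.getContentType());
            System.out.println("Content-Length: " + metadata.getContentLength());
            System.out.println("ETag:           " + metadata.getETag());
            System.out.println("Last-Modified:  " + metadata.getLastModified());
        }
    }
}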
From source file:com.atlassian.localstack.sample.S3Sample.java
License:Open Source License
public static void runTest(AWSCredentials credentials) throws IOException {
    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);
    s3.setEndpoint(LocalstackTestRunner.getEndpointS3());

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    /*
     * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
     * so once a bucket name has been taken by any user, you can't create
     * another bucket with that same name.
     *
     * You can optionally specify a location for your bucket if you want to
     * keep your data closer to your applications or users.
     */
    System.out.println("Creating bucket " + bucketName + "\n");
    s3.createBucket(bucketName);

    /*
     * List the buckets in your account
     */
    System.out.println("Listing buckets");
    for (Bucket bucket : s3.listBuckets()) {
        System.out.println(" - " + bucket.getName());
    }
    System.out.println();

    /*
     * Upload an object to your bucket - You can easily upload a file to
     * S3, or upload an InputStream directly if you know the length of
     * the data in the stream. You can also specify your own metadata
     * when uploading to S3, which allows you to set a variety of options
     * like content-type and content-encoding, plus additional metadata
     * specific to your applications.
     */
    System.out.println("Uploading a new object to S3 from a file\n");
    s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

    /*
     * Download an object - When you download an object, you get all of
     * the object's metadata and a stream from which to read the contents.
     * It's important to read the contents of the stream as quickly as
     * possible since the data is streamed directly from Amazon S3 and your
     * network connection will remain open until you read all the data or
     * close the input stream.
     *
     * GetObjectRequest also supports several other options, including
     * conditional downloading of objects based on modification times,
     * ETags, and selectively downloading a range of an object.
     */
    System.out.println("Downloading an object");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
    System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
    displayTextInputStream(object.getObjectContent());

    /*
     * List objects in your bucket by prefix - There are many options for
     * listing the objects in your bucket. Keep in mind that buckets with
     * many objects might truncate their results when listing their objects,
     * so be sure to check if the returned object listing is truncated, and
     * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
     * additional results.
     */
    System.out.println("Listing objects");
    ObjectListing objectListing = s3
            .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
        System.out.println(" - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
    }
    System.out.println();

    /*
     * Delete an object - Unless versioning has been turned on for your bucket,
     * there is no way to undelete an object, so use caution when deleting objects.
     */
    System.out.println("Deleting an object\n");
    s3.deleteObject(bucketName, key);

    /*
     * Delete a bucket - A bucket must be completely empty before it can be
     * deleted, so remember to delete any objects from your buckets before
     * you try to delete them.
     */
    System.out.println("Deleting bucket " + bucketName + "\n");
    s3.deleteBucket(bucketName);
}
From source file:com.awscrud.aws.S3StorageManager.java
License:Open Source License
public InputStream loadInputStream(AwscrudStorageObject s3Store) throws IOException {
    S3Object s3 = getS3Object(s3Store);
    this.lastUpdate = s3.getObjectMetadata().getLastModified();
    return s3.getObjectContent();
}
From source file:com.clicktravel.infrastructure.persistence.aws.s3.S3FileStore.java
License:Apache License
/**
 * Returns a Map of the user meta-data associated with the given S3 Object.
 *
 * This is a work-around for a bug in the AWS Java SDK which treats HTTP headers in a case-insensitive manner.
 *
 * @see <a href="https://github.com/aws/aws-sdk-java/pull/326">https://github.com/aws/aws-sdk-java/pull/326</a>
 *
 * @param s3Object The S3Object for which the user meta-data is to be obtained.
 * @return key-value map of user meta-data
 */
private Map<String, String> getUserMetaData(final S3Object s3Object) {
    final ObjectMetadata objectMetaData = s3Object.getObjectMetadata();
    final Map<String, String> userMetaData = objectMetaData.getUserMetadata();
    if (userMetaData.isEmpty()) {
        for (final Entry<String, Object> entry : objectMetaData.getRawMetadata().entrySet()) {
            final String normalisedKey = entry.getKey().toLowerCase();
            if (normalisedKey.startsWith(USER_METADATA_HEADER_PREFIX)) {
                final String value = String.valueOf(entry.getValue());
                userMetaData.put(normalisedKey.substring(USER_METADATA_HEADER_PREFIX.length()), value);
            }
        }
    }
    return userMetaData;
}
From source file:com.crickdata.upload.s3.UploadLiveData.java
License:Open Source License
public Map<String, Date> uploadToS3(String fileName, boolean type) throws IOException {
    Statistics statistics = new Statistics();
    Map<String, Date> perfMap = new HashMap<String, Date>();
    AWSCredentials credentials = null;
    try {
        credentials = new BasicAWSCredentials("AKIAI6QKTRAQE7MXQOIQ",
                "wIG6u1yI5ZaseeJbvYSUmD98qelIJNSCVBzt5k2q");
    } catch (Exception e) {
        throw new AmazonClientException("Cannot load the credentials from the credential profiles file. "
                + "Please make sure that your credentials file is at the correct "
                + "location (C:\\Users\\bssan_000\\.aws\\credentials), and is in valid format.", e);
    }
    AmazonS3 s3 = new AmazonS3Client(credentials);
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName;
    if (!type)
        bucketName = "cricmatchinfo";
    else
        bucketName = "cricmatchinfoseries";
    String key = fileName.replace(".json", "").trim();

    try {
        perfMap.put("S3INSERTREQ", new Date());
        statistics.setS3Req(new Date());
        File f = readMatchFile(fileName);
        double bytes = f.length();
        double kilobytes = (bytes / 1024);
        System.out.println("Details :" + kilobytes);
        s3.putObject(new PutObjectRequest(bucketName, key, f));
        statistics.setSize(String.valueOf(kilobytes));

        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        perfMap.put("S3SAVERES", object.getObjectMetadata().getLastModified());
        statistics.setKey(key);
        statistics.setS3Res(object.getObjectMetadata().getLastModified());
        MyUI.stats.add(statistics);
        displayTextInputStream(object.getObjectContent());

        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
    return perfMap;
}
From source file:com.davidsoergel.s3napback.StreamingServiceUtils.java
License:Apache License
/**
 * Downloads an S3Object, as returned from
 * {@link com.amazonaws.services.s3.AmazonS3Client#getObject(com.amazonaws.services.s3.model.GetObjectRequest)},
 * to the specified output stream.
 *
 * @param s3Object             The S3Object containing a reference to an InputStream containing the object's data.
 * @param eventualOutputStream The output stream to write the object's data to.
 */
public static void downloadObjectToStream(S3Object s3Object, BufferedOutputStream eventualOutputStream) {
    ByteArrayOutputStream byteOS = new ByteArrayOutputStream(
            (int) s3Object.getObjectMetadata().getContentLength());
    OutputStream outputStream = null;
    try {
        // extra copying, left over from the FileOutputStream version
        outputStream = new BufferedOutputStream(byteOS);
        byte[] buffer = new byte[1024 * 10];
        int bytesRead;
        while ((bytesRead = s3Object.getObjectContent().read(buffer)) > -1) {
            outputStream.write(buffer, 0, bytesRead);
        }
    } catch (IOException e) {
        try {
            s3Object.getObjectContent().abort();
        } catch (IOException abortException) {
            log.warn("Couldn't abort stream", e);
        }
        throw new AmazonClientException("Unable to store object contents to disk: " + e.getMessage(), e);
    } finally {
        try {
            outputStream.close();
        } catch (Exception e) {
        }
        try {
            s3Object.getObjectContent().close();
        } catch (Exception e) {
        }
    }

    try {
        // Multipart uploads don't have an MD5 calculated on the service side
        if (ServiceUtils.isMultipartUploadETag(s3Object.getObjectMetadata().getETag()) == false) {
            byte[] clientSideHash = Md5Utils.computeMD5Hash(byteOS.toByteArray());
            byte[] serverSideHash = BinaryUtils.fromHex(s3Object.getObjectMetadata().getETag());
            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                throw new AmazonClientException("Unable to verify integrity of data download. "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3. "
                        + "The data may be corrupt; please try again.");
            }
        }
    } catch (Exception e) {
        log.warn("Unable to calculate MD5 hash to validate download: " + e.getMessage(), e);
    }

    try {
        eventualOutputStream.write(byteOS.toByteArray());
    } catch (Exception e) {
        log.warn("Unable to write to output stream: " + e.getMessage(), e);
    }
}
From source file:com.davidsoergel.s3napback.StreamingTransferManager.java
License:Apache License
private Download download(final GetObjectRequest getObjectRequest, final BufferedOutputStream os,
        final TransferStateChangeListener stateListener) {
    appendUserAgent(getObjectRequest, USER_AGENT);

    String description = "Downloading from " + getObjectRequest.getBucketName() + "/"
            + getObjectRequest.getKey();

    // Add our own transfer progress listener
    TransferProgressImpl transferProgress = new TransferProgressImpl();
    ProgressListenerChain listenerChain = new ProgressListenerChain(
            new TransferProgressUpdatingListener(transferProgress), getObjectRequest.getProgressListener());
    getObjectRequest.setProgressListener(listenerChain);

    final S3Object s3Object = s3.getObject(getObjectRequest);
    final DownloadImpl download = new DownloadImpl(description, transferProgress, listenerChain, s3Object,
            stateListener);

    // null is returned when constraints aren't met
    if (s3Object == null) {
        download.setState(Transfer.TransferState.Canceled);
        download.setMonitor(new DownloadMonitor(download, null));
        return download;
    }

    long contentLength = s3Object.getObjectMetadata().getContentLength();
    if (getObjectRequest.getRange() != null && getObjectRequest.getRange().length == 2) {
        long startingByte = getObjectRequest.getRange()[0];
        long lastByte = getObjectRequest.getRange()[1];
        contentLength = lastByte - startingByte;
    }
    transferProgress.setTotalBytesToTransfer(contentLength);

    Future<?> future = threadPool.submit(new Callable<Object>() {
        //@Override
        public Object call() throws Exception {
            try {
                download.setState(Transfer.TransferState.InProgress);
                StreamingServiceUtils.downloadObjectToStream(s3Object, os);
                download.setState(Transfer.TransferState.Completed);
                return true;
            } catch (Exception e) {
                // Downloads aren't allowed to move from canceled to failed
                if (download.getState() != Transfer.TransferState.Canceled) {
                    download.setState(Transfer.TransferState.Failed);
                }
                throw e;
            }
        }
    });
    download.setMonitor(new DownloadMonitor(download, future));
    return download;
}
From source file:com.digitaslbi.helios.mock.utils.ConnectionHelper.java
public static InputStream getObject(String key) {
    try {
        log.debug("Downloading an object");
        S3Object s3object = s3Client.getObject(
                new GetObjectRequest(prop.getProperty(MocksConstants.AWS_BUCKET_NAME.getValue()), key));
        log.debug("Content-Type: " + s3object.getObjectMetadata().getContentType());
        //displayTextInputStream(s3object.getObjectContent());
        return s3object.getObjectContent();
    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        log.error("Error Message: " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code: " + ase.getErrorCode());
        log.error("Error Type: " + ase.getErrorType());
        log.error("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
    return null;
}
From source file:com.digitaslbi.helios.utils.S3Helper.java
public static File getObject(String key) {
    connect();
    try {
        log.info("[S3Helper][getObject] Downloading an object");
        S3Object s3object = s3Client
                .getObject(new GetObjectRequest(S3Properties.getInstance().getBucketName(), key));
        byte[] contentBytes = IOUtils.toByteArray(s3object.getObjectContent());
        log.info("Content-Type: " + s3object.getObjectMetadata().getContentType());

        File aux = new File();
        aux.setPath(s3object.getKey());
        aux.setIsFile(true);
        aux.setContent(Base64.encodeBase64String(contentBytes));
        return aux;
    } catch (AmazonServiceException ase) {
        log.error("[S3Helper][getObject] Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        log.error("Error Message: " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code: " + ase.getErrorCode());
        log.error("Error Type: " + ase.getErrorType());
        log.error("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][getObject] Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    } catch (IOException e) {
        log.error("[S3Helper][getObject] Error: " + e);
    }
    return null;
}
From source file:com.eBilling.util.S3Example.java
void downloadfile(AWSCredentials credentials2) throws IOException {
    AmazonS3 s3client = new AmazonS3Client(credentials2);
    try {
        System.out.println("Downloading an object");
        S3Object s3object = s3client.getObject(new GetObjectRequest(bucketName, keyName));
        System.out.println("Content-Type: " + s3object.getObjectMetadata().getContentType());
        InputStream input = s3object.getObjectContent();
        BufferedReader reader = new BufferedReader(new InputStreamReader(input));
        while (true) {
            String line = reader.readLine();
            if (line == null)
                break;
            System.out.println("    " + line);
        }
        System.out.println();
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:com.emc.vipr.s3.sample._06_ReadObjectWithMetadata.java
License:Open Source License
public static void main(String[] args) throws Exception {
    // create the ViPR S3 client
    ViPRS3Client s3 = ViPRS3Factory.getS3Client();

    // retrieve the object key from the user
    System.out.println("Enter the object key:");
    String key = new BufferedReader(new InputStreamReader(System.in)).readLine();

    // read the specified object from the demo bucket
    S3Object object = s3.getObject(ViPRS3Factory.S3_BUCKET, key);

    // get the metadata for the object
    ObjectMetadata metadata = object.getObjectMetadata();

    // print out the object key/value and metadata for validation
    System.out.println(String.format("Metadata for [%s/%s]", ViPRS3Factory.S3_BUCKET, key));
    Map<String, String> metadataList = metadata.getUserMetadata();
    for (Map.Entry<String, String> entry : metadataList.entrySet()) {
        System.out.println(String.format("    %s = %s", entry.getKey(), entry.getValue()));
    }
}