List of usage examples for com.amazonaws.services.s3.model.S3Object.getObjectContent()
public S3ObjectInputStream getObjectContent()
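getObjectContent() returns an S3ObjectInputStream backed by an open HTTP connection, so it should be read promptly and always closed. Before the per-project examples below, here is a minimal self-contained sketch of the common read-and-close pattern; it assumes the v1 SDK's client builder with the default credentials chain, and "my-bucket" / "my-key" are placeholder names:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.S3Object;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class GetObjectContentExample {
    public static void main(String[] args) throws IOException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // try-with-resources: closing the S3Object closes the content
        // stream and releases the underlying HTTP connection.
        try (S3Object object = s3.getObject("my-bucket", "my-key");
                BufferedReader reader = new BufferedReader(
                        new InputStreamReader(object.getObjectContent()))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}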
From source file:com.exedosoft.plat.storage.s3.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "my-first-s3-bucket-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account.
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload an InputStream directly if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible, since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        // System.out.println("Deleting an object\n");
        // s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        // System.out.println("Deleting bucket " + bucketName + "\n");
        // s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message:    " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code:   " + ase.getErrorCode());
        System.out.println("Error Type:       " + ase.getErrorType());
        System.out.println("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
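The listing comment in the sample above points at AmazonS3.listNextBatchOfObjects(...) for truncated results. A minimal sketch of that pagination loop, assuming s3 and bucketName are set up as in the sample:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class ListAllObjects {
    // Walks every page of a bucket listing; s3 and bucketName play the
    // same roles as in the sample above.
    public static void listAll(AmazonS3 s3, String bucketName) {
        ObjectListing listing = s3.listObjects(bucketName);
        while (true) {
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                System.out.println(" - " + summary.getKey() + " (size = " + summary.getSize() + ")");
            }
            if (!listing.isTruncated()) {
                break;
            }
            // Fetch the next page using the marker carried by the listing.
            listing = s3.listNextBatchOfObjects(listing);
        }
    }
}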
From source file:com.facebook.presto.kinesis.s3config.S3TableConfigClient.java
License:Apache License
/**
 * Connect to the S3 directory to look for new or updated table definitions,
 * then update the map.
 */
protected void updateTablesFromS3() {
    long now = System.currentTimeMillis();

    List<S3ObjectSummary> objectList = this.getObjectSummaries();
    AmazonS3Client s3client = this.clientManager.getS3Client();
    AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl);

    // Build map of "deltas" which in the end contains new definitions and deleted tables
    HashMap<String, KinesisStreamDescription> deltasMap = new HashMap<String, KinesisStreamDescription>();
    internalMapLock.readLock().lock();
    try {
        Iterator<String> keysIter = this.internalMap.keySet().iterator();
        while (keysIter.hasNext()) {
            deltasMap.put(keysIter.next(), dummyStreamDesc);
        }
    } finally {
        internalMapLock.readLock().unlock();
    }

    for (S3ObjectSummary objInfo : objectList) {
        if (!deltasMap.containsKey(objInfo.getKey())
                || objInfo.getLastModified().getTime() >= this.lastCheck) {
            // New or updated file, so we must read from AWS
            try {
                if (objInfo.getKey().endsWith("/")) {
                    continue;
                }

                log.info("Getting : %s - %s", objInfo.getBucketName(), objInfo.getKey());
                S3Object object = s3client
                        .getObject(new GetObjectRequest(objInfo.getBucketName(), objInfo.getKey()));

                StringBuilder resultStr = new StringBuilder("");
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(object.getObjectContent()))) {
                    boolean hasMore = true;
                    while (hasMore) {
                        String line = reader.readLine();
                        if (line != null) {
                            resultStr.append(line);
                        } else {
                            hasMore = false;
                        }
                    }

                    KinesisStreamDescription table = streamDescriptionCodec.fromJson(resultStr.toString());
                    deltasMap.put(objInfo.getKey(), table);
                    log.info("Put table description into the map from %s : %s.%s", objInfo.getKey(),
                            table.getSchemaName(), table.getTableName());
                } catch (IOException iox) {
                    log.error("Problem reading input stream from object.", iox);
                } catch (IllegalArgumentException iax) {
                    // Note: this gets thrown by the airlift json library when the input is malformed.
                    log.error("Invalid JSON table description.", iax);
                }
            } catch (AmazonServiceException ase) {
                StringBuilder sb = new StringBuilder();
                sb.append("Caught an AmazonServiceException, which means your request made it ");
                sb.append("to Amazon S3, but was rejected with an error response for some reason.\n");
                sb.append("Error Message: " + ase.getMessage());
                sb.append("HTTP Status Code: " + ase.getStatusCode());
                sb.append("AWS Error Code: " + ase.getErrorCode());
                sb.append("Error Type: " + ase.getErrorType());
                sb.append("Request ID: " + ase.getRequestId());
                log.error(sb.toString(), ase);
            } catch (AmazonClientException ace) {
                StringBuilder sb = new StringBuilder();
                sb.append("Caught an AmazonClientException, which means the client encountered "
                        + "an internal error while trying to communicate with S3, "
                        + "such as not being able to access the network.");
                sb.append("Error Message: " + ace.getMessage());
                log.error(sb.toString(), ace);
            }
        } else if (deltasMap.containsKey(objInfo.getKey())) {
            deltasMap.remove(objInfo.getKey());
        }
    } // end loop through object descriptions

    // Deltas: key pointing to dummy means delete, key pointing to another object means update.
    // This approach lets us delete and update while shortening the locked critical section.
    Iterator<Map.Entry<String, KinesisStreamDescription>> deltasIter = deltasMap.entrySet().iterator();
    internalMapLock.writeLock().lock();
    try {
        while (deltasIter.hasNext()) {
            Map.Entry<String, KinesisStreamDescription> entry = deltasIter.next();
            if (entry.getValue().getTableName().equals("__DUMMY__")) {
                this.internalMap.remove(entry.getKey());
            } else {
                this.internalMap.put(entry.getKey(), entry.getValue());
            }
        }
    } finally {
        internalMapLock.writeLock().unlock();
    }

    log.info("Completed updating table definitions from S3.");
    this.lastCheck = now;
    return;
}
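The line-by-line read above can be condensed with the SDK's own IOUtils helper. A sketch under the assumption of an already-configured AmazonS3 client; the bucket and key arguments are placeholders supplied by the caller:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.util.IOUtils;
import java.io.IOException;

public class ReadObjectAsString {
    // Reads the whole object body into a String; the caller supplies the
    // client, bucket, and key.
    public static String read(AmazonS3 s3, String bucket, String key) throws IOException {
        // try-with-resources closes the S3Object, which in turn closes
        // the underlying S3ObjectInputStream and its HTTP connection.
        try (S3Object object = s3.getObject(bucket, key)) {
            return IOUtils.toString(object.getObjectContent());
        }
    }
}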
From source file:com.flipzu.PostProcThread.java
License:Apache License
private void consolidateS3(Broadcast bcast) {
    debug.logPostProc("PostProcThread, consolidate S3 for " + bcast);

    File file = new File(bcast.getFilename());
    if (!file.exists()) {
        debug.logPostProc("consolidateS3, empty broadcast, doing nothing");
        return;
    }

    AmazonS3 s3 = null;
    try {
        InputStream is = new FileInputStream("aws.properties");
        s3 = new AmazonS3Client(new PropertiesCredentials(is));
    } catch (Exception e) {
        debug.logError("consolidateS3 Error ", e);
        return;
    }

    String bucketName = Config.getInstance().getS3Bucket();
    String dirName = Config.getInstance().getS3dir();
    String objName = dirName + "/" + bcast.getId() + Config.getInstance().getFileWriterExtension();

    S3Object obj = null;
    try {
        obj = s3.getObject(bucketName, objName);
    } catch (AmazonServiceException ase) {
        debug.logPostProc("consolidateS3 for " + bcast + ". File not found, doing nothing...");
        return;
    } catch (AmazonClientException ace) {
        debug.logPostProc("consolidateS3 for " + bcast + ". File not found, doing nothing...");
        return;
    }

    if (obj == null) {
        debug.logPostProc("consolidateS3 for " + bcast + ". File not found, doing nothing.");
        return;
    }

    debug.logPostProc("consolidateS3 for " + bcast + ". File found, consolidating.");

    String auxFile = Config.getInstance().getFileWriterDestDir() + "/" + bcast.getId() + "-aux"
            + Config.getInstance().getFileWriterExtension();

    BufferedOutputStream bosAux = null;
    try {
        FileOutputStream fos = new FileOutputStream(auxFile);
        bosAux = new BufferedOutputStream(fos);
    } catch (FileNotFoundException e) {
        debug.logError("consolidateS3, error creating output stream", e);
        return;
    }

    BufferedInputStream is = new BufferedInputStream(obj.getObjectContent());

    /* fetch file from S3 */
    int r = 0;
    do {
        byte[] b = new byte[1024];
        try {
            r = is.read(b);
            if (r > 0)
                bosAux.write(b, 0, r);
        } catch (IOException e) {
            debug.logError("consolidateS3 error", e);
            /* cleanup */
            File aFile = new File(auxFile);
            aFile.delete();
            return;
        }
    } while (r > 0);

    try {
        is.close();
    } catch (IOException e) {
        debug.logError("consolidateS3 error", e);
    }

    /* append our file to the aux file */
    BufferedInputStream bis;
    try {
        FileInputStream fis = new FileInputStream(bcast.getFilename());
        bis = new BufferedInputStream(fis);
    } catch (FileNotFoundException e) {
        debug.logPostProc("consolidateS3 error, FileNotFoundException");
        return;
    }

    r = 0;
    do {
        byte[] b = new byte[1024];
        try {
            r = bis.read(b);
            // Write only the bytes actually read from this iteration;
            // writing the whole buffer would pad the output with stale bytes.
            if (r > 0)
                bosAux.write(b, 0, r);
        } catch (IOException e) {
            debug.logError("consolidateS3 error", e);
            return;
        }
    } while (r > 0);

    try {
        bis.close();
        bosAux.close();
    } catch (IOException e) {
        debug.logError("consolidateS3 error", e);
    }

    /* delete the old file */
    file.delete();
    bcast.setFilename(auxFile);

    debug.logPostProc("consolidateS3 for " + bcast + ". File consolidated in " + bcast.getFilename());
    return;
}
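Buffered copy loops like the two above are easy to get wrong (writing the whole buffer instead of only the bytes read pads the output with stale data). A minimal reusable helper, using nothing beyond java.io:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public final class StreamCopy {
    private StreamCopy() {
    }

    // Copies all bytes from in to out and returns the count;
    // the caller owns (and closes) both streams.
    public static long copy(InputStream in, OutputStream out) throws IOException {
        byte[] buffer = new byte[8192];
        long total = 0;
        int read;
        // read() returns -1 at end of stream; write only the bytes read.
        while ((read = in.read(buffer)) != -1) {
            out.write(buffer, 0, read);
            total += read;
        }
        return total;
    }
}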
From source file:com.formkiq.core.service.AssetServiceS3Default.java
License:Apache License
@SuppressWarnings("resource") @Override/* w w w . java 2 s.co m*/ public byte[] findAsset(final String folder, final String asset) throws IOException { byte[] bytes = null; String filename = getFilename(folder, asset); try { S3Object object = getS3Connection().getObject(new GetObjectRequest(this.s3BucketName, filename)); InputStream objectData = object.getObjectContent(); try { bytes = IOUtils.toByteArray(objectData); } finally { IOUtils.closeQuietly(objectData); } } catch (SdkClientException e) { LOG.log(Level.WARNING, e.getMessage()); } return bytes; }
From source file:com.ge.predix.sample.blobstore.repository.BlobstoreService.java
License:Apache License
/**
 * Adds a new Blob to the bound bucket in the Object Store.
 *
 * @param obj S3Object to be added
 * @throws Exception
 */
public void put(S3Object obj) throws Exception {
    if (obj == null) {
        log.error("put(): Empty file provided");
        throw new Exception("File is null");
    }

    InputStream is = obj.getObjectContent();
    List<PartETag> partETags = new ArrayList<>();

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

    try {
        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) // make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        log.info("currentPartSize: " + currentPartSize);

        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            s3Client.putObject(putObjectRequest);
            return;
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            log.info("currentPartSize: " + currentPartSize);
            log.info("byteArray: " + b);

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(s3Client.uploadPart(uploadPartRequest).getPartETag());
        }
    } catch (Exception e) {
        log.error("put(): Exception occurred in put(): " + e.getMessage());
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw e;
    }

    CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
            .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
            .withKey(obj.getKey());
    s3Client.completeMultipartUpload(completeMultipartUploadRequest);
}
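For comparison, the SDK's TransferManager implements the same chunking, part-upload, and abort-on-failure bookkeeping. A minimal sketch, assuming an existing AmazonS3 client and a caller that knows the content length; bucket and key are supplied by the caller:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import java.io.InputStream;

public class TransferManagerUpload {
    public static void upload(AmazonS3 s3, String bucket, String key,
            InputStream in, long contentLength) throws InterruptedException {
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            ObjectMetadata metadata = new ObjectMetadata();
            // Supplying the length lets TransferManager stream the parts
            // instead of buffering the whole input in memory.
            metadata.setContentLength(contentLength);
            Upload upload = tm.upload(bucket, key, in, metadata);
            upload.waitForCompletion(); // blocks until all parts finish
        } finally {
            // false: keep the wrapped AmazonS3 client alive for reuse.
            tm.shutdownNow(false);
        }
    }
}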
From source file:com.ge.predix.sample.blobstore.repository.BlobstoreService.java
License:Apache License
/**
 * Get the Blob from the bound bucket.
 *
 * @param fileName String
 * @throws Exception
 */
public InputStream get(String fileName, String range) throws Exception {
    if (range != null && !range.isEmpty()) {
        String[] r = range.split(":");
        if (r.length != 2) {
            throw new Exception("Invalid range format");
        }
        try {
            long start = Long.parseLong(r[0]);
            long end = Long.parseLong(r[1]);

            GetObjectRequest rangeObjectRequest = new GetObjectRequest(bucket, fileName);
            rangeObjectRequest.setRange(start, end);

            S3Object objectPortion = s3Client.getObject(rangeObjectRequest);
            InputStream objectData = objectPortion.getObjectContent();
            return objectData;
        } catch (NumberFormatException e) {
            throw new Exception("Invalid range specified ", e);
        }
    } else {
        try {
            S3Object object = s3Client.getObject(new GetObjectRequest(bucket, fileName));
            InputStream objectData = object.getObjectContent();
            return objectData;
        } catch (Exception e) {
            log.error("Exception occurred in get(): " + e.getMessage());
            throw e;
        }
    }
}
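A caller-side sketch of the ranged path, assuming a BlobstoreService instance named blobstore; the "start:end" string uses inclusive byte offsets, matching the split(":") parsing above, and "video.mp4" is a placeholder key:

import java.io.InputStream;

public class RangedGetExample {
    // Fetch the first kilobyte of an object through the service above.
    public static void readHeader(BlobstoreService blobstore) throws Exception {
        try (InputStream first1k = blobstore.get("video.mp4", "0:1023")) {
            byte[] header = new byte[1024];
            int read = first1k.read(header);
            System.out.println("Read " + read + " header bytes");
        }
    }
}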
From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java
License:Apache License
/**
 * Adds a new Blob to the bound bucket in the Object Store.
 *
 * @param obj S3Object to be added
 */
@Override
public String saveBlob(S3Object obj) {
    if (obj == null) {
        this.log.error("put(): Empty file provided"); //$NON-NLS-1$
        throw new RuntimeException("File is null"); //$NON-NLS-1$
    }

    List<PartETag> partETags = new ArrayList<>();
    String bucket = this.blobstoreConfig.getBucketName();

    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, obj.getKey());
    InitiateMultipartUploadResult initResponse = this.s3Client.initiateMultipartUpload(initRequest);

    try (InputStream is = obj.getObjectContent();) {
        int i = 1;
        int currentPartSize = 0;
        ByteArrayOutputStream tempBuffer = new ByteArrayOutputStream();
        int byteValue;
        while ((byteValue = is.read()) != -1) {
            tempBuffer.write(byteValue);
            currentPartSize = tempBuffer.size();
            if (currentPartSize == (50 * 1024 * 1024)) // make this a const
            {
                byte[] b = tempBuffer.toByteArray();
                ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

                UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                        .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i++)
                        .withInputStream(byteStream).withPartSize(currentPartSize);
                partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());

                tempBuffer.reset();
            }
        }
        this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$

        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(currentPartSize);
        if (this.enableSSE) {
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        obj.setObjectMetadata(objectMetadata);

        if (i == 1 && currentPartSize < (5 * 1024 * 1024)) // make this a const
        {
            this.s3Client.abortMultipartUpload(
                    new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));

            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);
            objectMetadata.setContentType(getContentType(b));
            if (this.enableSSE) {
                objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            }
            obj.setObjectMetadata(objectMetadata);

            PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, obj.getKey(), byteStream,
                    obj.getObjectMetadata());
            this.s3Client.putObject(putObjectRequest);

            ObjectMetadata meta = this.s3Client.getObjectMetadata(bucket, obj.getKey());
            Map<String, Object> headers = meta.getRawMetadata();
            for (Map.Entry<String, Object> entry : headers.entrySet()) {
                this.log.info("Object Metadata -- " + entry.getKey() + ": " + entry.getValue().toString()); //$NON-NLS-1$ //$NON-NLS-2$
            }
            return initResponse.getUploadId();
        }

        if (currentPartSize > 0 && currentPartSize <= (50 * 1024 * 1024)) // make this a const
        {
            byte[] b = tempBuffer.toByteArray();
            ByteArrayInputStream byteStream = new ByteArrayInputStream(b);

            this.log.info("currentPartSize: " + currentPartSize); //$NON-NLS-1$
            this.log.info("byteArray: " + b); //$NON-NLS-1$

            UploadPartRequest uploadPartRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(obj.getKey()).withUploadId(initResponse.getUploadId()).withPartNumber(i)
                    .withInputStream(byteStream).withPartSize(currentPartSize);
            partETags.add(this.s3Client.uploadPart(uploadPartRequest).getPartETag());
        }

        CompleteMultipartUploadRequest completeMultipartUploadRequest = new CompleteMultipartUploadRequest()
                .withBucketName(bucket).withPartETags(partETags).withUploadId(initResponse.getUploadId())
                .withKey(obj.getKey());
        this.s3Client.completeMultipartUpload(completeMultipartUploadRequest);
        return initResponse.getUploadId();
    } catch (Exception e) {
        this.log.error("put(): Exception occurred in put(): " + e.getMessage()); //$NON-NLS-1$
        this.s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, obj.getKey(), initResponse.getUploadId()));
        throw new RuntimeException("put(): Exception occurred in put(): ", e); //$NON-NLS-1$
    }
}
From source file:com.ge.predix.solsvc.blobstore.bootstrap.BlobstoreClientImpl.java
License:Apache License
/**
 * Gets the list of available Blobs for the bound bucket from the
 * BlobStore.
 *
 * @return List of DataFile Blobs
 */
@Override
public List<DataFile> getBlob() {
    S3Object obj = null;
    try {
        List<DataFile> objs = new ArrayList<DataFile>();

        // Get the list from the BlobStore
        ObjectListing objectList = this.s3Client.listObjects(this.blobstoreConfig.getBucketName());
        for (S3ObjectSummary objectSummary : objectList.getObjectSummaries()) {
            obj = this.s3Client.getObject(
                    new GetObjectRequest(this.blobstoreConfig.getBucketName(), objectSummary.getKey()));
            DataFile data = new DataFile();
            data.setFile(IOUtils.toByteArray(obj.getObjectContent()));
            objs.add(data);
            // Close each object before fetching the next one so earlier
            // connections are not left open; the finally block below only
            // covers the object in flight when an exception is thrown.
            obj.close();
            obj = null;
        }
        return objs;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } finally {
        if (obj != null) {
            try {
                obj.close();
            } catch (IOException e) {
                throw new RuntimeException(
                        "unable to close object object=" + obj + " throwing original exception", //$NON-NLS-1$ //$NON-NLS-2$
                        e);
            }
        }
    }
}
From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java
License:Open Source License
@Override
public InputStream getObject(final String bucketName, final String key)
        throws AmazonClientException, AmazonServiceException {
    LOGGER.info("getObject invoked, bucketName: {}, key: {}", bucketName, key);
    final GetObjectRequest getObjRequest = new GetObjectRequest(bucketName, key);
    final S3Object s3Object = getObject(getObjRequest);
    return s3Object.getObjectContent();
}
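Because this method hands the raw content stream to the caller, closing it is the caller's responsibility. A usage sketch, assuming the interface implemented above is named AwsS3IamService; the bucket, key, and output-file names are placeholders:

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class GetObjectUsage {
    public static void download(AwsS3IamService awsS3IamService) throws IOException {
        try (InputStream in = awsS3IamService.getObject("my-bucket", "reports/2020.csv")) {
            // Drain the stream promptly so the HTTP connection is released.
            Files.copy(in, Paths.get("2020.csv"), StandardCopyOption.REPLACE_EXISTING);
        }
    }
}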
From source file:com.github.lbroudoux.elasticsearch.river.s3.connector.S3Connector.java
License:Apache License
/**
 * Download an Amazon S3 file as a byte array.
 * @param summary The summary of the S3 Object to download
 * @return This file's bytes, or null if something goes wrong.
 */
public byte[] getContent(S3ObjectSummary summary) {
    String key = getDecodedKey(summary);

    // Retrieve the object corresponding to the key in the bucket.
    if (logger.isDebugEnabled()) {
        logger.debug("Downloading file content from {}", key);
    }
    S3Object object = s3Client.getObject(bucketName, key);

    InputStream is = null;
    ByteArrayOutputStream bos = null;

    try {
        // Get an input stream on the S3 Object.
        is = object.getObjectContent();
        bos = new ByteArrayOutputStream();

        byte[] buffer = new byte[4096];
        int len = is.read(buffer);
        while (len > 0) {
            bos.write(buffer, 0, len);
            len = is.read(buffer);
        }

        // Flush and return the result.
        bos.flush();
        return bos.toByteArray();
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    } finally {
        if (bos != null) {
            try {
                bos.close();
            } catch (IOException e) {
            }
        }
        if (is != null) {
            try {
                is.close();
            } catch (IOException e) {
            }
        }
    }
}
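The manual buffer loop can be collapsed with the SDK's IOUtils and try-with-resources. A sketch that mirrors getContent() above but takes the client and bucket as constructor arguments; the class and method names here are illustrative:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.util.IOUtils;
import java.io.IOException;

public class GetContentCompact {
    private final AmazonS3 s3Client;
    private final String bucketName;

    public GetContentCompact(AmazonS3 s3Client, String bucketName) {
        this.s3Client = s3Client;
        this.bucketName = bucketName;
    }

    // Returns the object's bytes, or null on I/O failure, mirroring
    // getContent() above but letting try-with-resources do the cleanup.
    public byte[] getContent(String key) {
        try (S3Object object = s3Client.getObject(bucketName, key)) {
            return IOUtils.toByteArray(object.getObjectContent());
        } catch (IOException e) {
            return null;
        }
    }
}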