List of usage examples for com.amazonaws.services.s3.model S3Object getObjectMetadata
public ObjectMetadata getObjectMetadata()
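Returns the metadata Amazon S3 sent back alongside the object's content, such as Content-Type, Content-Length, ETag, last-modified date, and any user metadata. A minimal sketch of typical usage (bucket and key are placeholders; assumes an SDK version where AmazonS3ClientBuilder is available):

AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();
try (S3Object object = s3.getObject(new GetObjectRequest("example-bucket", "example-key"))) {
    ObjectMetadata metadata = object.getObjectMetadata();
    System.out.println("Content-Type:   " + metadata.getContentType());
    System.out.println("Content-Length: " + metadata.getContentLength());
    System.out.println("ETag:           " + metadata.getETag());
} catch (IOException e) {
    // closing the S3Object can throw; handle or propagate as appropriate
}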
From source file:org.alanwilliamson.amazon.s3.Read.java
License:Open Source License
private cfData readToMemory(AmazonS3 s3Client, String bucket, String key, String aes256key,
        int retry, int retryseconds) throws Exception {
    // Loop up to the configured number of attempts
    int attempts = 0;
    while (attempts < retry) {
        try {
            GetObjectRequest gor = new GetObjectRequest(bucket, key);
            if (aes256key != null && !aes256key.isEmpty())
                gor.setSSECustomerKey(new SSECustomerKey(aes256key));

            S3Object s3object = s3Client.getObject(gor);
            String contentType = s3object.getObjectMetadata().getContentType();

            ByteArrayOutputStream baos = new ByteArrayOutputStream(32000);
            StreamUtil.copyTo(s3object.getObjectContent(), baos, false);

            // Content-Type may be absent on the object; guard against null
            if (contentType != null
                    && (contentType.indexOf("text") != -1 || contentType.indexOf("javascript") != -1)) {
                return new cfStringData(baos.toString());
            } else {
                return new cfBinaryData(baos.toByteArray());
            }
        } catch (Exception e) {
            cfEngine.log("Failed: AmazonS3Read(bucket=" + bucket + "; key=" + key + "; attempt="
                    + (attempts + 1) + "; exception=" + e.getMessage() + ")");
            attempts++;
            if (attempts == retry)
                throw e;
            else
                Thread.sleep(retryseconds * 1000L);
        }
    }
    return null; // should never be reached
}
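Note that this snippet never closes the S3Object, so the underlying HTTP connection stays allocated until the stream is garbage-collected. S3Object implements Closeable, so a variant with the same names that releases the connection deterministically would look like this sketch:

try (S3Object s3object = s3Client.getObject(gor)) {
    String contentType = s3object.getObjectMetadata().getContentType();
    ByteArrayOutputStream baos = new ByteArrayOutputStream(32000);
    StreamUtil.copyTo(s3object.getObjectContent(), baos, false);
    // ... same text/binary dispatch as above ...
}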
From source file:org.apache.camel.component.aws.s3.S3Endpoint.java
License:Apache License
public Exchange createExchange(ExchangePattern pattern, S3Object s3Object) {
    LOG.trace("Getting object with key [{}] from bucket [{}]...", s3Object.getKey(), s3Object.getBucketName());

    ObjectMetadata objectMetadata = s3Object.getObjectMetadata();
    LOG.trace("Got object [{}]", s3Object);

    Exchange exchange = new DefaultExchange(this, pattern);
    Message message = exchange.getIn();
    message.setBody(s3Object.getObjectContent());
    message.setHeader(S3Constants.KEY, s3Object.getKey());
    message.setHeader(S3Constants.BUCKET_NAME, s3Object.getBucketName());
    message.setHeader(S3Constants.E_TAG, objectMetadata.getETag());
    message.setHeader(S3Constants.LAST_MODIFIED, objectMetadata.getLastModified());
    message.setHeader(S3Constants.VERSION_ID, objectMetadata.getVersionId());
    message.setHeader(S3Constants.CONTENT_TYPE, objectMetadata.getContentType());
    message.setHeader(S3Constants.CONTENT_MD5, objectMetadata.getContentMD5());
    message.setHeader(S3Constants.CONTENT_LENGTH, objectMetadata.getContentLength());
    message.setHeader(S3Constants.CONTENT_ENCODING, objectMetadata.getContentEncoding());
    message.setHeader(S3Constants.CONTENT_DISPOSITION, objectMetadata.getContentDisposition());
    message.setHeader(S3Constants.CACHE_CONTROL, objectMetadata.getCacheControl());

    return exchange;
}
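The headers above cover S3's system metadata; user-defined metadata (the x-amz-meta-* headers) is exposed separately through ObjectMetadata.getUserMetadata(). A sketch of copying it onto the message as well (the header prefix here is a hypothetical naming choice, not a Camel constant):

for (Map.Entry<String, String> entry : objectMetadata.getUserMetadata().entrySet()) {
    // keys are returned without the "x-amz-meta-" prefix
    message.setHeader("S3UserMetadata." + entry.getKey(), entry.getValue());
}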
From source file:org.apache.manifoldcf.crawler.connectors.amazons3.AmazonS3Connector.java
License:Apache License
@Override
public void processDocuments(String[] documentIdentifiers, IExistingVersions statuses, Specification spec,
        IProcessActivity activities, int jobMode, boolean usesDefaultAuthority)
        throws ManifoldCFException, ServiceInterruption {
    AmazonS3 amazons3Client = getClient();
    if (amazons3Client == null)
        throw new ManifoldCFException("Amazon client can not connect at the moment");
    String[] acls = null;

    // Loop over the documents and process each one
    for (String documentIdentifier : documentIdentifiers) {
        try {
            if (documentIdentifier != null && StringUtils.isNotEmpty(documentIdentifier)) {
                String versionString;
                String[] aclsToUse;

                // Skip identifiers that cannot be split into bucket and key
                if (documentIdentifier.split(STD_SEPARATOR_BUCKET_AND_KEY).length < 2) {
                    continue;
                }

                S3Artifact s3Artifact = getS3Artifact(documentIdentifier);
                S3Object s3Obj = amazons3Client
                        .getObject(new GetObjectRequest(s3Artifact.getBucketName(), s3Artifact.getKey()));

                if (s3Obj == null) {
                    // No such document in the bucket now; delete it from the index
                    activities.deleteDocument(documentIdentifier);
                    continue;
                }

                Logging.connectors.info("Content-Type: " + s3Obj.getObjectMetadata().getContentType());

                ObjectMetadata objectMetadata = s3Obj.getObjectMetadata();
                Date lastModified = objectMetadata.getLastModified();
                StringBuilder sb = new StringBuilder();
                if (lastModified == null) {
                    // Remove the content
                    activities.deleteDocument(documentIdentifier);
                    continue;
                }

                aclsToUse = new String[0];
                AccessControlList objectAcl = amazons3Client.getObjectAcl(s3Artifact.getBucketName(),
                        s3Artifact.getKey());
                Set<Grant> grants = objectAcl.getGrants();
                String[] users = getUsers(grants);

                // Sort the ACLs before packing them into the version string
                aclsToUse = users;
                Arrays.sort(aclsToUse);
                packList(sb, aclsToUse, '+');
                if (aclsToUse.length > 0) {
                    sb.append('+');
                    pack(sb, AmazonS3Config.defaultAuthorityDenyToken, '+');
                } else
                    sb.append('-');

                // sb.append(lastModified.toString());
                versionString = sb.toString();
                Logging.connectors.debug("version string : " + versionString);

                if (versionString.length() > 0
                        && !activities.checkDocumentNeedsReindexing(documentIdentifier, versionString)) {
                    Logging.connectors.info("Document does not need to be reindexed: " + documentIdentifier);
                    continue;
                }

                Logging.connectors.debug("JIRA: Processing document identifier '" + documentIdentifier + "'");
                long startTime = System.currentTimeMillis();
                String errorCode = null;
                String errorDesc = null;
                Long fileSize = null;

                try {
                    String mimeType = "text/plain"; // default

                    // Tika extraction starts here
                    InputStream in = null;
                    String document = null;
                    try {
                        in = s3Obj.getObjectContent();
                        parser.parse(in, handler, metadata, context);
                        mimeType = tika.detect(in);
                        document = handler.toString();
                        if (document == null)
                            continue;
                        metadata.set(Metadata.CONTENT_TYPE, mimeType);
                    } catch (Exception e) {
                        Logging.connectors.error("Error while parsing tika contents", e);
                    } finally {
                        if (in != null)
                            IOUtils.closeQuietly(in);
                    }

                    String documentURI = getDocumentURI(s3Artifact);
                    Logging.connectors.debug("document : " + documentURI);

                    // Check whether the URI, MIME type, and date are indexable
                    if (!activities.checkURLIndexable(documentURI)) {
                        errorCode = activities.EXCLUDED_URL;
                        errorDesc = "Excluded because of URL ('" + documentURI + "')";
                        activities.noDocument(documentIdentifier, versionString);
                        continue;
                    }

                    if (!activities.checkMimeTypeIndexable(mimeType)) {
                        errorCode = activities.EXCLUDED_MIMETYPE;
                        errorDesc = "Excluded because of mime type ('" + mimeType + "')";
                        activities.noDocument(documentIdentifier, versionString);
                        continue;
                    }

                    if (!activities.checkDateIndexable(lastModified)) {
                        errorCode = activities.EXCLUDED_DATE;
                        errorDesc = "Excluded because of date (" + lastModified + ")";
                        activities.noDocument(documentIdentifier, versionString);
                        continue;
                    }

                    // Otherwise, process the document
                    RepositoryDocument rd = new RepositoryDocument();

                    // Turn into ACLs and add into the description
                    String[] denyAclsToUse;
                    if (aclsToUse.length > 0)
                        denyAclsToUse = new String[] { AmazonS3Config.defaultAuthorityDenyToken };
                    else
                        denyAclsToUse = new String[0];
                    rd.setSecurity(RepositoryDocument.SECURITY_TYPE_DOCUMENT, aclsToUse, denyAclsToUse);

                    rd.setMimeType(mimeType);
                    if (lastModified != null)
                        rd.setModifiedDate(lastModified);

                    // Set all meta-data fields
                    addAllMetaData(rd, metadata);

                    // Ingest the document
                    try {
                        byte[] documentBytes = document.getBytes(StandardCharsets.UTF_8);
                        long fileLength = documentBytes.length;

                        if (!activities.checkLengthIndexable(fileLength)) {
                            errorCode = activities.EXCLUDED_LENGTH;
                            errorDesc = "Excluded because of document length (" + fileLength + ")";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }

                        InputStream is = new ByteArrayInputStream(documentBytes);
                        try {
                            rd.setBinary(is, fileLength);
                            activities.ingestDocumentWithException(documentIdentifier, versionString,
                                    documentURI, rd);
                            errorCode = "OK";
                            fileSize = Long.valueOf(fileLength);
                        } finally {
                            IOUtils.closeQuietly(is);
                        }
                    } catch (Exception e) {
                        Logging.connectors.error(e);
                    }
                } catch (Exception e) {
                    Logging.connectors.error(e);
                }
            }
        } catch (AmazonServiceException e) {
            Logging.connectors.error(e);
        } catch (AmazonClientException e) {
            Logging.connectors.error(e);
        }
    }
}
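Note that this connector downloads every object before deciding whether it needs reindexing. When only the version string is needed first, a HEAD request returns the same ObjectMetadata without opening a content stream; a sketch under that assumption, reusing the names above:

// Fetch metadata only (HTTP HEAD); no content stream is opened or leaked
ObjectMetadata head = amazons3Client.getObjectMetadata(s3Artifact.getBucketName(), s3Artifact.getKey());
Date headLastModified = head.getLastModified();
// build the version string from headLastModified and the ACLs, then call
// activities.checkDocumentNeedsReindexing(...) before issuing the full GET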
From source file:org.boriken.s3fileuploader.S3SampleRefactored.java
License:Open Source License
public static void downloadFile(AmazonS3 s3, String bucketName, String key) throws IOException {
    /*
     * Download an object - When you download an object, you get all of
     * the object's metadata and a stream from which to read the contents.
     * It's important to read the contents of the stream as quickly as
     * possible since the data is streamed directly from Amazon S3 and your
     * network connection will remain open until you read all the data or
     * close the input stream.
     *
     * GetObjectRequest also supports several other options, including
     * conditional downloading of objects based on modification times,
     * ETags, and selectively downloading a range of an object.
     */
    System.out.println("Downloading an object");
    S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
    System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
    displayTextInputStream(object.getObjectContent());
}
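The options mentioned in the closing comment can be combined on one request. A short sketch (the ETag value is a placeholder): when a constraint is not met, getObject returns null rather than throwing.

GetObjectRequest conditional = new GetObjectRequest(bucketName, key)
        .withNonmatchingETagConstraint("\"etag-from-a-previous-download\"")
        .withRange(0, 1023); // first 1 KiB only
S3Object maybeObject = s3.getObject(conditional);
if (maybeObject == null) {
    System.out.println("Object unchanged since last download; skipping");
} else {
    displayTextInputStream(maybeObject.getObjectContent());
}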
From source file:org.chodavarapu.jgitaws.jgit.S3ObjectReadableChannel.java
License:Eclipse Distribution License
@Override
public int read(ByteBuffer dst) throws IOException {
    S3Object object = configuration.getS3Client()
            .getObject(new GetObjectRequest(configuration.getPacksBucketName(), objectName)
                    .withRange(position, position + dst.remaining() + readAhead));
    try (InputStream inputStream = object.getObjectContent()) {
        size = object.getObjectMetadata().getInstanceLength();
        int readLength = dst.remaining() + readAhead;
        byte[] buffer = new byte[readLength];
        // A single read() call is not guaranteed to fill the buffer, so loop
        // until the requested range is exhausted or the stream ends
        int totalRead = 0;
        int n;
        while (totalRead < readLength
                && (n = inputStream.read(buffer, totalRead, readLength - totalRead)) != -1) {
            totalRead += n;
        }
        // Transfer no more than dst can hold; any read-ahead bytes beyond
        // dst.remaining() are discarded here
        int transferred = Math.min(totalRead, dst.remaining());
        dst.put(buffer, 0, transferred);
        return transferred;
    }
}
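getInstanceLength() is used here deliberately: for a ranged GET, getContentLength() reflects only the bytes in the response, while getInstanceLength() reports the full size of the object as parsed from the Content-Range header. A minimal illustration:

ObjectMetadata meta = object.getObjectMetadata();
long bytesInThisResponse = meta.getContentLength();  // length of the returned range
long wholeObjectSize = meta.getInstanceLength();     // total size of the stored object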
From source file:org.crypto.sse.IEX2LevAMAZON.java
License:Open Source License
/**
 * @param args
 * @throws Exception
 */
@SuppressWarnings("null")
public static void main(String[] args) throws Exception {

    // First Job
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "IEX-2Lev");
    job.setJarByClass(IEX2LevAMAZON.class);
    job.setMapperClass(MLK1.class);
    job.setReducerClass(RLK1.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);
    job.setOutputKeyClass(Text.class);
    job.setNumReduceTasks(1);
    job.setOutputValueClass(ArrayListWritable.class);
    job.setInputFormatClass(FileNameKeyInputFormat.class);
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));

    // Second Job
    Configuration conf2 = new Configuration();
    Job job2 = Job.getInstance(conf2, "IEX-2Lev");
    job2.setJarByClass(IEX2LevAMAZON.class);
    job2.setMapperClass(MLK2.class);
    job2.setReducerClass(RLK2.class);
    job2.setNumReduceTasks(1);
    job2.setMapOutputKeyClass(Text.class);
    job2.setMapOutputValueClass(Text.class);
    job2.setOutputKeyClass(Text.class);
    job2.setOutputValueClass(ArrayListWritable.class);
    job2.setInputFormatClass(FileNameKeyInputFormat.class);
    FileInputFormat.addInputPath(job2, new Path(args[0]));
    FileOutputFormat.setOutputPath(job2, new Path(args[2]));

    job.waitForCompletion(true);
    job2.waitForCompletion(true);

    // Here add your Amazon credentials
    AWSCredentials credentials = new BasicAWSCredentials("XXXXXXXXXXXXXXXX", "XXXXXXXXXXXXXXXX");
    // Create a client connection based on the credentials
    AmazonS3 s3client = new AmazonS3Client(credentials);

    // Create bucket - name must be unique for all S3 users
    String bucketName = "iexmaptest";

    S3Object s3object = s3client.getObject(new GetObjectRequest(bucketName, args[4]));
    System.out.println(s3object.getObjectMetadata().getContentType());
    System.out.println(s3object.getObjectMetadata().getContentLength());

    List<String> lines = new ArrayList<String>();
    String folderName = "2";

    BufferedReader reader = new BufferedReader(new InputStreamReader(s3object.getObjectContent()));
    String line;
    int counter = 0;
    while ((line = reader.readLine()) != null) {
        // Could also copy the content locally using a buffered writer
        lines.add(line);
        System.out.println(line);

        // Upload each line as a file to the folder
        String fileName = folderName + "/" + Integer.toString(counter);
        ByteArrayInputStream input = new ByteArrayInputStream(line.getBytes());
        s3client.putObject(bucketName, fileName, input, new ObjectMetadata());
        counter++;
    }

    Multimap<String, String> lookup = ArrayListMultimap.create();
    for (int i = 0; i < lines.size(); i++) {
        String[] tokens = lines.get(i).split("\\s+");
        for (int j = 1; j < tokens.length; j++) {
            lookup.put(tokens[0], tokens[j]);
        }
    }

    // Loading inverted index that associates file identifiers to keywords
    lines = new ArrayList<String>();
    s3object = s3client.getObject(new GetObjectRequest(bucketName, args[5]));
    System.out.println(s3object.getObjectMetadata().getContentType());
    System.out.println(s3object.getObjectMetadata().getContentLength());

    // Loading inverted index that associates keywords to identifiers
    reader = new BufferedReader(new InputStreamReader(s3object.getObjectContent()));
    while ((line = reader.readLine()) != null) {
        lines.add(line);
    }

    Multimap<String, String> lookup2 = ArrayListMultimap.create();
    for (int i = 0; i < lines.size(); i++) {
        String[] tokens = lines.get(i).split("\\s+");
        for (int j = 1; j < tokens.length; j++) {
            lookup2.put(tokens[0], tokens[j]);
        }
    }

    // Delete the file
    try {
        s3client.deleteObject(new DeleteObjectRequest(bucketName, args[4]));
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException.");
        System.out.println("Error Message: " + ace.getMessage());
    }

    /*
     * Start of IEX-2Lev construction
     */

    // Generation of keys for IEX-2Lev
    BufferedReader keyRead = new BufferedReader(new InputStreamReader(System.in));
    System.out.println("Enter your password :");
    String pass = keyRead.readLine();

    // You can change the size of the key; here we set it to 128
    List<byte[]> listSK = IEX2Lev.keyGen(128, pass, "salt/salt", 100);

    // Generation of local multi-maps with a mapper job only, no reducer
    Configuration conf3 = new Configuration();

    String testSerialization1 = new String(Base64.encodeBase64(Serializer.serialize(lookup)));
    String testSerialization2 = new String(Base64.encodeBase64(Serializer.serialize(lookup2)));
    String testSerialization3 = new String(Base64.encodeBase64(Serializer.serialize(listSK)));

    //String testSerialization2 = gson.toJson(lookup2);
    conf3.set("lookup", testSerialization1);
    conf3.set("lookup2", testSerialization2);
    conf3.set("setKeys", testSerialization3);

    Job job3 = Job.getInstance(conf3, "Local MM");
    job3.setJarByClass(IEX2LevAMAZON.class);
    job3.setMapperClass(LocalMM.class);
    job3.setNumReduceTasks(0);
    FileInputFormat.addInputPath(job3, new Path(args[2]));
    FileOutputFormat.setOutputPath(job3, new Path(args[3]));

    job3.waitForCompletion(true);
}
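One caveat in the upload loop above: putObject is called with an empty ObjectMetadata, so the SDK cannot know the stream's length and may buffer the stream in memory to compute one. A sketch of the same upload with the length set explicitly (same placeholder names):

byte[] bytes = line.getBytes(StandardCharsets.UTF_8);
ObjectMetadata uploadMetadata = new ObjectMetadata();
uploadMetadata.setContentLength(bytes.length); // lets the SDK stream without buffering
s3client.putObject(bucketName, fileName, new ByteArrayInputStream(bytes), uploadMetadata);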
From source file:org.cto.VVS3Box.S3Sample.java
License:Open Source License
public static void main(String[] args) throws IOException {
    /*
     * This credentials provider implementation loads your AWS credentials
     * from a properties file at the root of your classpath.
     *
     * Important: Be sure to fill in your AWS access credentials in the
     * AwsCredentials.properties file before you try to run this sample.
     * http://aws.amazon.com/security-credentials
     */
    AmazonS3 s3 = new AmazonS3Client(new ClasspathPropertiesFileCredentialsProvider());
    Region usWest2 = Region.getRegion(Regions.US_WEST_2);
    s3.setRegion(usWest2);

    String bucketName = "lior.test-" + UUID.randomUUID();
    String key = "MyObjectKey";

    System.out.println("===========================================");
    System.out.println("Getting Started with Amazon S3");
    System.out.println("===========================================\n");

    try {
        /*
         * Create a new S3 bucket - Amazon S3 bucket names are globally unique,
         * so once a bucket name has been taken by any user, you can't create
         * another bucket with that same name.
         *
         * You can optionally specify a location for your bucket if you want to
         * keep your data closer to your applications or users.
         */
        System.out.println("Creating bucket " + bucketName + "\n");
        s3.createBucket(bucketName);

        /*
         * List the buckets in your account
         */
        System.out.println("Listing buckets");
        for (Bucket bucket : s3.listBuckets()) {
            System.out.println(" - " + bucket.getName());
        }
        System.out.println();

        /*
         * Upload an object to your bucket - You can easily upload a file to
         * S3, or upload directly an InputStream if you know the length of
         * the data in the stream. You can also specify your own metadata
         * when uploading to S3, which allows you to set a variety of options
         * like content-type and content-encoding, plus additional metadata
         * specific to your applications.
         */
        System.out.println("Uploading a new object to S3 from a file\n");
        s3.putObject(new PutObjectRequest(bucketName, key, createSampleFile()));

        /*
         * Download an object - When you download an object, you get all of
         * the object's metadata and a stream from which to read the contents.
         * It's important to read the contents of the stream as quickly as
         * possible since the data is streamed directly from Amazon S3 and your
         * network connection will remain open until you read all the data or
         * close the input stream.
         *
         * GetObjectRequest also supports several other options, including
         * conditional downloading of objects based on modification times,
         * ETags, and selectively downloading a range of an object.
         */
        System.out.println("Downloading an object");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        System.out.println("Content-Type: " + object.getObjectMetadata().getContentType());
        displayTextInputStream(object.getObjectContent());

        /*
         * List objects in your bucket by prefix - There are many options for
         * listing the objects in your bucket. Keep in mind that buckets with
         * many objects might truncate their results when listing their objects,
         * so be sure to check if the returned object listing is truncated, and
         * use the AmazonS3.listNextBatchOfObjects(...) operation to retrieve
         * additional results.
         */
        System.out.println("Listing objects");
        ObjectListing objectListing = s3
                .listObjects(new ListObjectsRequest().withBucketName(bucketName).withPrefix("My"));
        for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
            System.out.println(
                    " - " + objectSummary.getKey() + " " + "(size = " + objectSummary.getSize() + ")");
        }
        System.out.println();

        /*
         * Delete an object - Unless versioning has been turned on for your bucket,
         * there is no way to undelete an object, so use caution when deleting objects.
         */
        System.out.println("Deleting an object\n");
        s3.deleteObject(bucketName, key);

        /*
         * Delete a bucket - A bucket must be completely empty before it can be
         * deleted, so remember to delete any objects from your buckets before
         * you try to delete them.
         */
        System.out.println("Deleting bucket " + bucketName + "\n");
        s3.deleteBucket(bucketName);
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "a serious internal problem while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
    }
}
From source file:org.duracloud.s3storage.S3StorageProvider.java
License:Apache License
/**
 * {@inheritDoc}
 */
public RetrievedContent getContent(String spaceId, String contentId, String range) {
    log.debug("getContent(" + spaceId + ", " + contentId + ", " + range + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    try {
        GetObjectRequest getRequest = new GetObjectRequest(bucketName, contentId);
        if (StringUtils.isNotEmpty(range)) {
            ContentByteRange byteRange = new ContentByteRange(range);
            if (null == byteRange.getRangeStart()) {
                // While this should be a valid setting, it is not currently
                // supported due to a limitation of the AWS S3 client
                // see: https://github.com/aws/aws-sdk-java/issues/1551
                throw new IllegalArgumentException(byteRange.getUsage(range));
            } else if (null == byteRange.getRangeEnd()) {
                getRequest.setRange(byteRange.getRangeStart());
            } else {
                getRequest.setRange(byteRange.getRangeStart(), byteRange.getRangeEnd());
            }
        }
        S3Object contentItem = s3Client.getObject(getRequest);

        RetrievedContent retrievedContent = new RetrievedContent();
        retrievedContent.setContentStream(contentItem.getObjectContent());
        retrievedContent.setContentProperties(prepContentProperties(contentItem.getObjectMetadata()));

        return retrievedContent;
    } catch (AmazonClientException e) {
        throwIfContentNotExist(bucketName, contentId);
        String err = "Could not retrieve content " + contentId + " in S3 bucket " + bucketName
                + " due to error: " + e.getMessage();
        throw new StorageException(err, e, RETRY);
    }
}
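prepContentProperties is project-internal; a hypothetical sketch of the kind of mapping such a helper performs, combining system headers from ObjectMetadata with the user-metadata map:

Map<String, String> props = new HashMap<>();
ObjectMetadata md = contentItem.getObjectMetadata();
props.put("content-mimetype", md.getContentType());
props.put("content-size", String.valueOf(md.getContentLength()));
props.put("content-modified", String.valueOf(md.getLastModified()));
props.putAll(md.getUserMetadata()); // custom x-amz-meta-* properties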
From source file:org.eclipse.hawkbit.artifact.repository.S3Repository.java
License:Open Source License
@Override
public DbArtifact getArtifactBySha1(final String sha1) {
    LOG.info("Retrieving S3 object from bucket {} and key {}", s3Properties.getBucketName(), sha1);
    final S3Object s3Object = amazonS3.getObject(s3Properties.getBucketName(), sha1);
    if (s3Object == null) {
        return null;
    }

    final ObjectMetadata s3ObjectMetadata = s3Object.getObjectMetadata();

    final S3Artifact s3Artifact = new S3Artifact(amazonS3, s3Properties, sha1);
    s3Artifact.setArtifactId(sha1);
    s3Artifact.setSize(s3ObjectMetadata.getContentLength());
    // The MD5 content hash is stored in the ETag
    s3Artifact.setHashes(new DbArtifactHash(sha1, BaseEncoding.base16().lowerCase()
            .encode(BaseEncoding.base64().decode(s3ObjectMetadata.getETag()))));
    s3Artifact.setContentType(s3ObjectMetadata.getContentType());
    return s3Artifact;
}
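A caution on this pattern: S3's own ETag equals a content digest only for single-part uploads without SSE-KMS; multipart uploads yield an ETag of the form "<hex>-<partCount>". A defensive sketch, assuming the ETag is meant to carry an MD5 (decodeAndStoreMd5 is a hypothetical helper):

String eTag = s3ObjectMetadata.getETag();
if (eTag != null && !eTag.contains("-")) {
    // plausible single-part digest; safe to decode
    decodeAndStoreMd5(eTag);
} else {
    LOG.warn("ETag {} is not a plain content digest; skipping MD5 extraction", eTag);
}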
From source file:org.entando.entando.plugins.jps3awsclient.aps.system.services.storage.AmazonS3StorageManager.java
License:Open Source License
@Override
public BasicFileAttributeView getAttributes(String subPath, boolean isProtectedResource)
        throws ApsSystemException {
    BasicFileAttributeView bfav = null;
    if (!this.isActive()) {
        return bfav;
    }
    String key = this.getKey(subPath, isProtectedResource);
    S3Object object = this.getS3Object(this.getBucketName(), key, true);
    if (null != object) {
        bfav = new BasicFileAttributeView();
        bfav.setName(object.getKey());
        bfav.setSize(object.getObjectMetadata().getContentLength());
        bfav.setDirectory(false);
        bfav.setLastModifiedTime(object.getObjectMetadata().getLastModified());
    }
    return bfav;
}
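Since only attributes are needed here, the same information is available without downloading the object at all: AmazonS3.getObjectMetadata(bucket, key) issues a HEAD request. A sketch under the assumption that getS3Object wraps a full GET (s3Client stands in for whatever client instance the class holds):

ObjectMetadata metadata = s3Client.getObjectMetadata(this.getBucketName(), key); // HTTP HEAD
BasicFileAttributeView attrs = new BasicFileAttributeView();
attrs.setName(key);
attrs.setSize(metadata.getContentLength());
attrs.setDirectory(false);
attrs.setLastModifiedTime(metadata.getLastModified());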