Example usage for com.amazonaws.services.s3.model ObjectMetadata getUserMetadata

Introduction

This page shows example usage of com.amazonaws.services.s3.model.ObjectMetadata#getUserMetadata, collected from open source projects.

Prototype

public Map<String, String> getUserMetadata() 

Document

Gets the custom user-metadata for the associated object.
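
Before the project examples below, here is a minimal sketch of the call in isolation. The bucket and key names are placeholders, and the default client assumes credentials and region are configured in the environment; note that the SDK returns user-metadata keys without their "x-amz-meta-" prefix.

import java.util.Map;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class GetUserMetadataExample {
    public static void main(String[] args) {
        // Placeholder client; real code would configure region and credentials.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // HEAD the object and read its x-amz-meta-* headers as a String map.
        ObjectMetadata metadata = s3.getObjectMetadata("example-bucket", "example-key");
        Map<String, String> userMetadata = metadata.getUserMetadata();
        userMetadata.forEach((key, value) -> System.out.println(key + " = " + value));
    }
}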

Usage

From source file: eu.openg.aws.s3.internal.FakeS3Object.java

License: Apache License

private void updateMetadata(ObjectMetadata metadata) {
    metadata.setHeader("Accept-Ranges", "bytes");
    metadata.setUserMetadata(serializeUserMetadata(metadata.getUserMetadata()));
}

From source file: io.dockstore.webservice.core.tooltester.ToolTesterS3Client.java

License: Apache License

private List<ToolTesterLog> convertObjectListingToTooltesterLogs(ObjectListing firstListing) {
    ObjectListing listing = firstListing;
    List<S3ObjectSummary> summaries = listing.getObjectSummaries();
    while (listing.isTruncated()) {
        listing = s3.listNextBatchOfObjects(listing);
        summaries.addAll(listing.getObjectSummaries());
    }
    return summaries.stream().map(summary -> {
        ObjectMetadata objectMetadata = s3.getObjectMetadata(bucketName, summary.getKey());
        Map<String, String> userMetadata = objectMetadata.getUserMetadata();
        String filename = getFilenameFromSummary(summary);
        return convertUserMetadataToToolTesterLog(userMetadata, filename);
    }).collect(Collectors.toList());
}
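
User metadata is not returned by object listings, so the code above issues a separate getObjectMetadata (HEAD) request for every object in the bucket; for large listings these per-object round trips dominate the runtime.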

From source file: io.konig.camel.aws.s3.DeleteObjectEndpoint.java

License: Apache License

public Exchange createExchange(ExchangePattern pattern, final S3Object s3Object) {
    LOG.trace("Getting object with key [{}] from bucket [{}]...", s3Object.getKey(), s3Object.getBucketName());

    ObjectMetadata objectMetadata = s3Object.getObjectMetadata();

    LOG.trace("Got object [{}]", s3Object);

    Exchange exchange = super.createExchange(pattern);
    Message message = exchange.getIn();

    if (configuration.isIncludeBody()) {
        message.setBody(s3Object.getObjectContent());
    } else {
        message.setBody(null);
    }

    message.setHeader(S3Constants.KEY, s3Object.getKey());
    message.setHeader(S3Constants.BUCKET_NAME, s3Object.getBucketName());
    message.setHeader(S3Constants.E_TAG, objectMetadata.getETag());
    message.setHeader(S3Constants.LAST_MODIFIED, objectMetadata.getLastModified());
    message.setHeader(S3Constants.VERSION_ID, objectMetadata.getVersionId());
    message.setHeader(S3Constants.CONTENT_TYPE, objectMetadata.getContentType());
    message.setHeader(S3Constants.CONTENT_MD5, objectMetadata.getContentMD5());
    message.setHeader(S3Constants.CONTENT_LENGTH, objectMetadata.getContentLength());
    message.setHeader(S3Constants.CONTENT_ENCODING, objectMetadata.getContentEncoding());
    message.setHeader(S3Constants.CONTENT_DISPOSITION, objectMetadata.getContentDisposition());
    message.setHeader(S3Constants.CACHE_CONTROL, objectMetadata.getCacheControl());
    message.setHeader(S3Constants.S3_HEADERS, objectMetadata.getRawMetadata());
    message.setHeader(S3Constants.SERVER_SIDE_ENCRYPTION, objectMetadata.getSSEAlgorithm());
    message.setHeader(S3Constants.USER_METADATA, objectMetadata.getUserMetadata());
    message.setHeader(S3Constants.EXPIRATION_TIME, objectMetadata.getExpirationTime());
    message.setHeader(S3Constants.REPLICATION_STATUS, objectMetadata.getReplicationStatus());
    message.setHeader(S3Constants.STORAGE_CLASS, objectMetadata.getStorageClass());

    /*
     * If includeBody != true, it is safe to close the object here. If
     * includeBody == true, the caller is responsible for closing the stream
     * and object once the body has been fully consumed. As of 2.17, the
     * consumer does not close the stream or object on commit.
     */
    if (!configuration.isIncludeBody()) {
        IOHelper.close(s3Object);
    } else {
        if (configuration.isAutocloseBody()) {
            exchange.addOnCompletion(new SynchronizationAdapter() {
                @Override
                public void onDone(Exchange exchange) {
                    IOHelper.close(s3Object);
                }
            });
        }
    }

    return exchange;
}

From source file: org.apache.nifi.processors.aws.s3.FetchS3Object.java

License: Apache License

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String versionId = context.getProperty(VERSION_ID).evaluateAttributeExpressions(flowFile).getValue();

    final AmazonS3 client = getClient();
    final GetObjectRequest request;
    if (versionId == null) {
        request = new GetObjectRequest(bucket, key);
    } else {
        request = new GetObjectRequest(bucket, key, versionId);
    }

    final Map<String, String> attributes = new HashMap<>();
    try (final S3Object s3Object = client.getObject(request)) {
        flowFile = session.importFrom(s3Object.getObjectContent(), flowFile);
        attributes.put("s3.bucket", s3Object.getBucketName());

        final ObjectMetadata metadata = s3Object.getObjectMetadata();
        if (metadata.getContentDisposition() != null) {
            final String fullyQualified = metadata.getContentDisposition();
            final int lastSlash = fullyQualified.lastIndexOf("/");
            if (lastSlash > -1 && lastSlash < fullyQualified.length() - 1) {
                attributes.put(CoreAttributes.PATH.key(), fullyQualified.substring(0, lastSlash));
                attributes.put(CoreAttributes.ABSOLUTE_PATH.key(), fullyQualified);
                attributes.put(CoreAttributes.FILENAME.key(), fullyQualified.substring(lastSlash + 1));
            } else {
                attributes.put(CoreAttributes.FILENAME.key(), metadata.getContentDisposition());
            }
        }
        if (metadata.getContentMD5() != null) {
            attributes.put("hash.value", metadata.getContentMD5());
            attributes.put("hash.algorithm", "MD5");
        }
        if (metadata.getContentType() != null) {
            attributes.put(CoreAttributes.MIME_TYPE.key(), metadata.getContentType());
        }
        if (metadata.getETag() != null) {
            attributes.put("s3.etag", metadata.getETag());
        }
        if (metadata.getExpirationTime() != null) {
            attributes.put("s3.expirationTime", String.valueOf(metadata.getExpirationTime().getTime()));
        }
        if (metadata.getExpirationTimeRuleId() != null) {
            attributes.put("s3.expirationTimeRuleId", metadata.getExpirationTimeRuleId());
        }
        if (metadata.getUserMetadata() != null) {
            attributes.putAll(metadata.getUserMetadata());
        }
        if (metadata.getSSEAlgorithm() != null) {
            attributes.put("s3.sseAlgorithm", metadata.getSSEAlgorithm());
        }
        if (metadata.getVersionId() != null) {
            attributes.put("s3.version", metadata.getVersionId());
        }
    } catch (final IOException | AmazonClientException ioe) {
        getLogger().error("Failed to retrieve S3 Object for {}; routing to failure",
                new Object[] { flowFile, ioe });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }

    if (!attributes.isEmpty()) {
        flowFile = session.putAllAttributes(flowFile, attributes);
    }

    session.transfer(flowFile, REL_SUCCESS);
    final long transferMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
    getLogger().info("Successfully retrieved S3 Object for {} in {} millis; routing to success",
            new Object[] { flowFile, transferMillis });
    session.getProvenanceReporter().fetch(flowFile, "http://" + bucket + ".amazonaws.com/" + key,
            transferMillis);
}

From source file: org.duracloud.s3storage.S3StorageProvider.java

License: Apache License

private Map<String, String> prepContentProperties(ObjectMetadata objMetadata) {
    Map<String, String> contentProperties = new HashMap<>();

    // Set the user properties
    Map<String, String> userProperties = objMetadata.getUserMetadata();
    for (String metaName : userProperties.keySet()) {
        String metaValue = userProperties.get(metaName);
        contentProperties.put(getWithSpace(decodeHeaderKey(metaName)), decodeHeaderValue(metaValue));
    }

    // Set the response metadata
    Map<String, Object> responseMeta = objMetadata.getRawMetadata();
    for (String metaName : responseMeta.keySet()) {
        Object metaValue = responseMeta.get(metaName);
        if (metaValue instanceof String) {
            contentProperties.put(metaName, (String) metaValue);
        }
    }

    // Set MIMETYPE
    String contentType = objMetadata.getContentType();
    if (contentType != null) {
        contentProperties.put(PROPERTIES_CONTENT_MIMETYPE, contentType);
        contentProperties.put(Headers.CONTENT_TYPE, contentType);
    }

    // Set CONTENT_ENCODING
    String encoding = objMetadata.getContentEncoding();
    if (encoding != null) {
        contentProperties.put(Headers.CONTENT_ENCODING, encoding);
    }

    // Set SIZE
    long contentLength = objMetadata.getContentLength();
    if (contentLength >= 0) {
        String size = String.valueOf(contentLength);
        contentProperties.put(PROPERTIES_CONTENT_SIZE, size);
        contentProperties.put(Headers.CONTENT_LENGTH, size);
    }

    // Set CHECKSUM
    String checksum = objMetadata.getETag();
    if (checksum != null) {
        String eTagValue = getETagValue(checksum);
        contentProperties.put(PROPERTIES_CONTENT_CHECKSUM, eTagValue);
        contentProperties.put(PROPERTIES_CONTENT_MD5, eTagValue);
        contentProperties.put(Headers.ETAG, eTagValue);
    }

    // Set MODIFIED
    Date modified = objMetadata.getLastModified();
    if (modified != null) {
        String modDate = formattedDate(modified);
        contentProperties.put(PROPERTIES_CONTENT_MODIFIED, modDate);
        contentProperties.put(Headers.LAST_MODIFIED, modDate);
    }

    return contentProperties;
}

From source file: org.fcrepo.modeshape.binary.S3BinaryStore.java

License: Apache License

private void setS3ObjectUserProperty(BinaryKey binaryKey, String metadataKey, String metadataValue)
        throws BinaryStoreException {
    try {
        String key = binaryKey.toString();
        ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key);
        Map<String, String> userMetadata = metadata.getUserMetadata();

        if (null != metadataValue && metadataValue.equals(userMetadata.get(metadataKey))) {
            return; // The key/value pair already exists in user metadata, skip update
        }

        userMetadata.put(metadataKey, metadataValue);
        metadata.setUserMetadata(userMetadata);

        // Update the object in place
        CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, key, bucketName, key);
        copyRequest.setNewObjectMetadata(metadata);
        s3Client.copyObject(copyRequest);
    } catch (AmazonClientException e) {
        throw new BinaryStoreException(e);
    }
}
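
A note on the pattern above: S3 object metadata cannot be modified in place, so the standard idiom is to copy the object onto itself via a CopyObjectRequest carrying the replacement metadata. The map returned by getUserMetadata() is mutated and handed back through setUserMetadata(...) to stage that change before the copy.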

From source file: org.fcrepo.modeshape.binary.S3BinaryStore.java

License: Apache License

@Override
public String getExtractedText(BinaryValue binaryValue) throws BinaryStoreException {
    try {
        String key = binaryValue.getKey().toString();
        ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key);
        return metadata.getUserMetadata().get(EXTRACTED_TEXT_KEY);
    } catch (AmazonClientException e) {
        throw new BinaryStoreException(e);
    }
}

From source file: org.fcrepo.modeshape.binary.S3BinaryStore.java

License: Apache License

@Override
public BinaryValue storeValue(InputStream stream, boolean markAsUnused) throws BinaryStoreException {
    // Cache file on the file system in order to have SHA-1 hash calculated
    BinaryValue cachedFile = fileSystemCache.storeValue(stream, markAsUnused);
    try {
        // Retrieve SHA-1 hash
        BinaryKey key = new BinaryKey(cachedFile.getKey().toString());

        // If file is NOT already in S3 storage, store it
        if (!s3Client.doesObjectExist(bucketName, key.toString())) {
            ObjectMetadata metadata = new ObjectMetadata();
            // Set Mimetype
            metadata.setContentType(fileSystemCache.getMimeType(cachedFile, key.toString()));
            // Set Unused value
            Map<String, String> userMetadata = metadata.getUserMetadata();
            userMetadata.put(UNUSED_KEY, String.valueOf(markAsUnused));
            metadata.setUserMetadata(userMetadata);
            // Store content in S3
            s3Client.putObject(bucketName, key.toString(), fileSystemCache.getInputStream(key), metadata);
        } else {
            // Set the unused value, if necessary
            if (markAsUnused) {
                markAsUnused(Collections.singleton(key));
            } else {
                markAsUsed(Collections.singleton(key));
            }
        }
        return new StoredBinaryValue(this, key, cachedFile.getSize());
    } catch (AmazonClientException | RepositoryException | IOException e) {
        throw new BinaryStoreException(e);
    } finally {
        // Remove cached file
        fileSystemCache.markAsUnused(Collections.singleton(cachedFile.getKey()));
        fileSystemCache.removeValuesUnusedLongerThan(1, TimeUnit.MICROSECONDS);
    }
}

From source file: org.fcrepo.modeshape.binary.S3BinaryStore.java

License: Apache License

@Override
public void removeValuesUnusedLongerThan(long minimumAge, TimeUnit timeUnit) throws BinaryStoreException {
    Date deadline = new Date(System.currentTimeMillis() - timeUnit.toMillis(minimumAge));

    // There is no capacity in S3 to query on object properties. This must be done
    // by straight iteration, so may take a very long time for large data sets.
    try {
        for (BinaryKey key : getAllBinaryKeys()) {
            ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key.toString());
            String unused = metadata.getUserMetadata().get(UNUSED_KEY);
            if (null != unused && unused.equals(String.valueOf(true))) {
                Date lastMod = metadata.getLastModified();
                if (lastMod.before(deadline)) {
                    try {
                        s3Client.deleteObject(bucketName, key.toString());
                    } catch (AmazonClientException e) {
                        Logger log = Logger.getLogger(getClass());
                        log.warn(e, JcrI18n.unableToDeleteTemporaryFile, e.getMessage());
                    }
                }
            } // Assumes that if no value is set, content is used
        }
    } catch (AmazonClientException e) {
        throw new BinaryStoreException(e);
    }
}

From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java

License: Open Source License

@Override
public void close() {
    this.closed = true;
    try {
        SDFSLogger.getLog().info("############ Closing Bucket##################");
        HashBlobArchive.close();

        ObjectMetadata omd = s3Service.getObjectMetadata(name, binm);
        Map<String, String> md = omd.getUserMetadata();
        ObjectMetadata nmd = new ObjectMetadata();
        md.put("currentsize", Long.toString(HashBlobArchive.currentLength.get()));
        md.put("currentcompressedsize", Long.toString(HashBlobArchive.compressedLength.get()));
        md.put("lastupdate", Long.toString(System.currentTimeMillis()));
        md.put("hostname", InetAddress.getLocalHost().getHostName());
        md.put("port", Integer.toString(Main.sdfsCliPort));
        byte[] sz = Long.toString(System.currentTimeMillis()).getBytes();
        String st = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(sz));
        md.put("md5sum", st);
        nmd.setContentMD5(st);
        nmd.setContentLength(sz.length);
        nmd.setUserMetadata(md);
        try {
            s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
        } catch (AmazonS3Exception e1) {
            if (e1.getStatusCode() == 409) {
                try {
                    s3Service.putObject(this.name, binm, new ByteArrayInputStream(sz), nmd);
                } catch (Exception e2) {
                    throw new IOException(e2);
                }
            } else {
                throw new IOException(e1);
            }
        } catch (Exception e1) {
            // SDFSLogger.getLog().error("error uploading", e1);
            throw new IOException(e1);
        }
    } catch (Exception e) {
        SDFSLogger.getLog().warn("error while closing bucket " + this.name, e);
    } finally {
        try {
            s3Service.shutdown();
        } catch (Exception e) {
            SDFSLogger.getLog().debug("error while closing bucket " + this.name, e);
        }
    }
}