Example usage for com.amazonaws.services.s3.model ObjectMetadata getContentLength

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata getContentLength

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectMetadata getContentLength.

Prototype

public long getContentLength() 

Source Link

Document

<p> Gets the Content-Length HTTP header indicating the size of the associated object in bytes.

Usage

From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java

License:Apache License

/**
 * Returns the size in bytes of the record identified by {@code identifier},
 * as reported by the S3 object's Content-Length metadata.
 *
 * @param identifier the record whose length is requested; mapped to an S3 key
 * @return the object's content length in bytes
 * @throws DataStoreException if the metadata lookup fails
 */
@Override
public long getLength(DataIdentifier identifier) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Use this class's loader for the AWS SDK call so it resolves its own resources.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata object = s3service.getObjectMetadata(bucket, key);
        long length = object.getContentLength();
        LOG.debug("Identifier [{}]'s length = [{}] took [{}]ms.",
                new Object[] { identifier, length, (System.currentTimeMillis() - start) });
        return length;
    } catch (AmazonServiceException e) {
        // Fixed message: original read "Could not length of dataIdentifier".
        throw new DataStoreException("Could not retrieve length of dataIdentifier " + identifier, e);
    } finally {
        // Always restore the caller's context class loader.
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}

From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java

License:Apache License

/**
 * Writes {@code file} to S3 under the key derived from {@code identifier}.
 * If an object with that key already exists, its length is checked against the
 * local file (mismatch is a collision) and only its last-modified time is
 * refreshed via a self-copy; otherwise the file is uploaded, either
 * asynchronously (progress listener reports via {@code callback}) or
 * synchronously.
 *
 * @param identifier  record identity; mapped to the S3 key
 * @param file        local content to upload
 * @param asyncUpload when true, upload in the background
 * @param callback    optional completion callback; may be null
 * @throws DataStoreException on length collision or upload failure
 */
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback)
        throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // Probe for an existing record; 404 simply means "not present yet".
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (ase.getStatusCode() != 404) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException(
                        "Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            // Copy the object onto itself to refresh its last-modified timestamp.
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            s3service.copyObject(copReq);
            LOG.debug("lastModified of [{}] updated successfully.", identifier);
            if (callback != null) {
                callback.onSuccess(new AsyncUploadResult(identifier, file));
            }
        }

        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(new PutObjectRequest(bucket, key, file));
                // wait for upload to finish
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                // Fix: guard against a null callback here too. Every other use of
                // callback in this method is null-checked, but this path was not and
                // threw NPE instead of the intended DataStoreException when a
                // synchronous upload failed with no callback supplied.
                if (!asyncUpload && callback != null) {
                    callback.onAbort(new AsyncUploadResult(identifier, file));
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms",
            new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

/**
 * Looks up the metadata record stored under {@code name} (with the metadata
 * key prefix applied) and wraps its S3 metadata in an {@link S3DataRecord}.
 */
public DataRecord getMetadataRecord(String name) {
    // Swap in this class's loader for the AWS SDK call; restore the caller's afterwards.
    ClassLoader previousLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata meta = s3service.getObjectMetadata(bucket, addMetaKeyPrefix(name));
        long lastModified = meta.getLastModified().getTime();
        long length = meta.getContentLength();
        return new S3DataRecord(s3service, bucket, name, lastModified, length);
    } finally {
        if (previousLoader != null) {
            Thread.currentThread().setContextClassLoader(previousLoader);
        }
    }
}

From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java

License:Apache License

/**
 * Writes {@code file} to S3 under the key derived from {@code identifier}.
 * If an object with that key already exists, its length is compared with the
 * local file (mismatch is a collision) and only its last-modified time is
 * refreshed via a decorated self-copy through the TransferManager; otherwise
 * the file is uploaded, asynchronously (listener reports via callback) or
 * synchronously. On failure the callback, if present, is notified via
 * onAbort with the exception attached.
 *
 * @param identifier  record identity; mapped to the S3 key
 * @param file        local content to upload
 * @param asyncUpload when true, upload in the background
 * @param callback    optional completion callback; may be null
 * @throws DataStoreException on length collision or copy/upload failure
 */
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback)
        throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        // Use this class's loader for AWS SDK calls; restored in the finally block.
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            // 404 (absent) and 403 (HEAD without permission) both mean
            // "treat as not present"; anything else is a real failure.
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException(
                        "Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            // Self-copy refreshes the object's last-modified timestamp without re-uploading.
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
                if (callback != null) {
                    callback.onSuccess(new AsyncUploadResult(identifier, file));
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }

        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // wait for upload to finish
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms",
            new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}

From source file:org.broadleafcommerce.vendor.amazon.s3.S3FileServiceProvider.java

License:Apache License

/**
 * Uploads each file in {@code files} to S3 unless an object with the same
 * resource name and the same byte length already exists (a size-only
 * freshness check — content differences at equal size are not detected).
 * Returns the work-area-relative names of all processed files.
 *
 * @param workArea the work area each file must live under
 * @param files    files to push; each must be inside {@code workArea}
 * @return relative file names for all inputs, uploaded or skipped
 * @throws FileServiceException if a file is outside the work area
 */
protected List<String> addOrUpdateResourcesInternal(S3Configuration s3config, AmazonS3Client s3,
        FileWorkArea workArea, List<File> files, boolean removeFilesFromWorkArea) {
    final List<String> resourcePaths = new ArrayList<String>();
    for (final File srcFile : files) {
        if (!srcFile.getAbsolutePath().startsWith(workArea.getFilePathLocation())) {
            throw new FileServiceException("Attempt to update file " + srcFile.getAbsolutePath()
                    + " that is not in the passed in WorkArea " + workArea.getFilePathLocation());
        }
        final long ts1 = System.currentTimeMillis();
        // Relative name = absolute path with the work-area prefix stripped.
        final String fileName = srcFile.getAbsolutePath().substring(workArea.getFilePathLocation().length());
        final String resourceName = buildResourceName(s3config, fileName);

        ObjectMetadata meta = null;
        try {
            final GetObjectMetadataRequest get = new GetObjectMetadataRequest(s3config.getDefaultBucketName(),
                    resourceName);
            meta = s3.getObjectMetadata(get);
        } catch (AmazonS3Exception ex) {
            // NOTE(review): any S3 error (not just 404) is treated as "object
            // absent" and triggers a re-upload; a transient error therefore
            // causes an extra put rather than a failure — confirm intended.
            meta = null;
        }
        final long ts2 = System.currentTimeMillis();

        // Upload when the object is missing or its size differs from the local file.
        if (meta == null || meta.getContentLength() != srcFile.length()) {
            final PutObjectRequest put = new PutObjectRequest(s3config.getDefaultBucketName(), resourceName,
                    srcFile);

            // Make assets with a configured static extension publicly readable.
            if ((s3config.getStaticAssetFileExtensionPattern() != null) && s3config
                    .getStaticAssetFileExtensionPattern().matcher(getExtension(fileName)).matches()) {
                put.setCannedAcl(CannedAccessControlList.PublicRead);
            }

            s3.putObject(put);
            final long ts3 = System.currentTimeMillis();

            if (LOG.isTraceEnabled()) {
                final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
                final String msg = String.format(
                        "%s copied/updated to %s; queryTime = %dms; uploadTime = %dms; totalTime = %dms",
                        srcFile.getAbsolutePath(), s3Uri, ts2 - ts1, ts3 - ts2, ts3 - ts1);

                LOG.trace(msg);
            }
        } else {
            if (LOG.isTraceEnabled()) {
                final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
                final String msg = String.format(
                        "%s already at %s with same filesize = %dbytes; queryTime = %dms",
                        srcFile.getAbsolutePath(), s3Uri, srcFile.length(), ts2 - ts1);

                LOG.trace(msg);
            }
        }

        resourcePaths.add(fileName);
    }
    return resourcePaths;
}

From source file:org.caboclo.clients.AmazonClient.java

License:Open Source License

/**
 * Returns true when the object looks like a folder marker: zero bytes with
 * the conventional "inode/directory" content type.
 */
private boolean isFolder(ObjectMetadata om) {
    // Constant-first equals: getContentType() may be null when the object has no
    // Content-Type header; the original dereferenced it directly and could NPE.
    return om.getContentLength() == 0 && "inode/directory".equals(om.getContentType());
}

From source file:org.dspace.storage.bitstore.S3BitStoreService.java

License:BSD License

/**
 * Obtain technical metadata about an asset in the asset store.
 *
 * Checksum used is (ETag) hex encoded 128-bit MD5 digest of an object's content as calculated by Amazon S3
 * (Does not use getContentMD5, as that is 128-bit MD5 digest calculated on caller's side)
 *
 * @param bitstream
 *            The asset to describe
 * @param attrs
 *            A Map whose keys consist of desired metadata fields
 *
 * @exception java.io.IOException
 *            If a problem occurs while obtaining metadata
 * @return attrs
 *            A Map with key/value pairs of desired metadata
 *            If file not found, then return null
 */
public Map about(Bitstream bitstream, Map attrs) throws IOException {
    String key = getFullKey(bitstream.getInternalId());
    try {
        ObjectMetadata objectMetadata = s3Service.getObjectMetadata(bucketName, key);

        if (objectMetadata != null) {
            // Only fill in the fields the caller asked for (presence of the key signals interest).
            if (attrs.containsKey("size_bytes")) {
                attrs.put("size_bytes", objectMetadata.getContentLength());
            }
            if (attrs.containsKey("checksum")) {
                attrs.put("checksum", objectMetadata.getETag());
                attrs.put("checksum_algorithm", CSA);
            }
            if (attrs.containsKey("modified")) {
                attrs.put("modified", String.valueOf(objectMetadata.getLastModified().getTime()));
            }
            return attrs;
        }
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
            return null;
        }
        // Fix: previously any non-404 S3 error fell through to "return null",
        // silently masking real failures (auth, throttling) as "file not found".
        // Surface them the same way the generic handler below does.
        log.error("about(" + key + ", attrs)", e);
        throw new IOException(e);
    } catch (Exception e) {
        log.error("about(" + key + ", attrs)", e);
        throw new IOException(e);
    }
    return null;
}

From source file:org.duracloud.s3storage.S3StorageProvider.java

License:Apache License

/**
 * Converts S3 object metadata into a DuraCloud content-properties map:
 * decoded user metadata plus string-valued raw response headers, then the
 * standard MIME type, encoding, size, checksum (ETag), and modified-date
 * entries under both DuraCloud and HTTP-header keys.
 */
private Map<String, String> prepContentProperties(ObjectMetadata objMetadata) {
    Map<String, String> contentProperties = new HashMap<>();

    // Set the user properties.
    // Idiom fix: iterate entrySet() instead of keySet()+get() (one lookup, not two).
    for (Map.Entry<String, String> userEntry : objMetadata.getUserMetadata().entrySet()) {
        contentProperties.put(getWithSpace(decodeHeaderKey(userEntry.getKey())),
                              decodeHeaderValue(userEntry.getValue()));
    }

    // Set the response metadata (string values only)
    for (Map.Entry<String, Object> rawEntry : objMetadata.getRawMetadata().entrySet()) {
        Object metaValue = rawEntry.getValue();
        if (metaValue instanceof String) {
            contentProperties.put(rawEntry.getKey(), (String) metaValue);
        }
    }

    // Set MIMETYPE
    String contentType = objMetadata.getContentType();
    if (contentType != null) {
        contentProperties.put(PROPERTIES_CONTENT_MIMETYPE, contentType);
        contentProperties.put(Headers.CONTENT_TYPE, contentType);
    }

    // Set CONTENT_ENCODING
    String encoding = objMetadata.getContentEncoding();
    if (encoding != null) {
        contentProperties.put(Headers.CONTENT_ENCODING, encoding);
    }

    // Set SIZE (negative length would mean "unknown", so skip it)
    long contentLength = objMetadata.getContentLength();
    if (contentLength >= 0) {
        String size = String.valueOf(contentLength);
        contentProperties.put(PROPERTIES_CONTENT_SIZE, size);
        contentProperties.put(Headers.CONTENT_LENGTH, size);
    }

    // Set CHECKSUM
    String checksum = objMetadata.getETag();
    if (checksum != null) {
        String eTagValue = getETagValue(checksum);
        contentProperties.put(PROPERTIES_CONTENT_CHECKSUM, eTagValue);
        contentProperties.put(PROPERTIES_CONTENT_MD5, eTagValue);
        contentProperties.put(Headers.ETAG, eTagValue);
    }

    // Set MODIFIED
    Date modified = objMetadata.getLastModified();
    if (modified != null) {
        String modDate = formattedDate(modified);
        contentProperties.put(PROPERTIES_CONTENT_MODIFIED, modDate);
        contentProperties.put(Headers.LAST_MODIFIED, modDate);
    }

    return contentProperties;
}

From source file:org.eclipse.hawkbit.artifact.repository.S3Repository.java

License:Open Source License

/**
 * Loads the artifact stored under {@code sha1} from the configured bucket and
 * wraps it as an {@link S3Artifact}, or returns null when no such object exists.
 */
@Override
public DbArtifact getArtifactBySha1(final String sha1) {
    LOG.info("Retrieving S3 object from bucket {} and key {}", s3Properties.getBucketName(), sha1);

    final S3Object object = amazonS3.getObject(s3Properties.getBucketName(), sha1);
    if (object == null) {
        return null;
    }

    final ObjectMetadata metadata = object.getObjectMetadata();
    // The ETag holds the base64-encoded MD5 of the content; convert to lower-case hex.
    final String md5Hex = BaseEncoding.base16().lowerCase()
            .encode(BaseEncoding.base64().decode(metadata.getETag()));

    final S3Artifact artifact = new S3Artifact(amazonS3, s3Properties, sha1);
    artifact.setArtifactId(sha1);
    artifact.setSize(metadata.getContentLength());
    artifact.setHashes(new DbArtifactHash(sha1, md5Hex));
    artifact.setContentType(metadata.getContentType());
    return artifact;
}

From source file:org.finra.dm.dao.impl.S3DaoImpl.java

License:Apache License

/**
 * Verifies that the S3 object addressed by {@code params} exists and, when an
 * expected size is supplied, that the object's content length matches it.
 *
 * @param params          bucket name and key prefix of the object to check
 * @param fileSizeInBytes expected size, or null to skip the size check
 */
@Override
public void validateS3File(S3FileTransferRequestParamsDto params, Long fileSizeInBytes)
        throws RuntimeException {
    final ObjectMetadata objectMetadata = getObjectMetadata(params);

    if (objectMetadata == null) {
        throw new ObjectNotFoundException(String.format("File not found at s3://%s/%s location.",
                params.getS3BucketName(), params.getS3KeyPrefix()));
    }

    // A null expected size means the caller does not want the size verified.
    final boolean sizeMatches = fileSizeInBytes == null
            || fileSizeInBytes.compareTo(objectMetadata.getContentLength()) == 0;
    Assert.isTrue(sizeMatches,
            String.format(
                    "Specified file size (%d bytes) does not match to the actual file size (%d bytes) reported by S3 for s3://%s/%s file.",
                    fileSizeInBytes, objectMetadata.getContentLength(), params.getS3BucketName(),
                    params.getS3KeyPrefix()));
}