Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

This page collects example usages of com.amazonaws.services.s3.model.ObjectMetadata.setContentLength, drawn from open source projects.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
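
Setting the length up front matters most for streaming uploads: when a put request supplies an InputStream without a known content length, the AWS SDK buffers the stream contents in memory to determine the size before sending, which can exhaust the heap for large objects (the S3BackupService example below works around exactly this). A minimal sketch, assuming an existing AmazonS3 client plus bucketName and key values; uploadSmallObject is a hypothetical helper:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

void uploadSmallObject(AmazonS3 s3Client, String bucketName, String key) {
    byte[] payload = "example".getBytes(StandardCharsets.UTF_8);
    ObjectMetadata metadata = new ObjectMetadata();
    // the size is known before the request is sent, so the SDK can stream the body directly
    metadata.setContentLength(payload.length);
    s3Client.putObject(new PutObjectRequest(bucketName, key,
            new ByteArrayInputStream(payload), metadata));
}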

Usage

From source file: net.smartcosmos.plugin.service.aws.storage.AwsS3StorageService.java

License: Apache License

@Override
public StorageResponse store(StorageRequest request) throws IOException {

    final IFile requestFile = request.getFile();

    AmazonS3 s3 = new AmazonS3Client(credentials, new ClientConfiguration().withProtocol(Protocol.HTTPS));

    Map<String, String> fileMetadata = new HashMap<String, String>();

    fileMetadata.put("accountUrn", request.getUser().getAccount().getUrn());
    fileMetadata.put("userUrn", request.getUser().getUrn());
    fileMetadata.put("fileUrn", requestFile.getUrn());
    fileMetadata.put("entityReferenceType", requestFile.getEntityReferenceType().name());
    fileMetadata.put("referenceUrn", requestFile.getReferenceUrn());
    fileMetadata.put("recordedTimestamp", Long.toString(request.getFile().getTimestamp()));
    //            fileMetadata.put("mimeType", request.getVfsObject().getMimeType());

    ObjectMetadata metadata = new ObjectMetadata();
    if (request.getContentLength() > 0) {
        LOG.debug("Including content length : " + request.getContentLength());
        metadata.setContentLength(request.getContentLength());
    }
    //            metadata.setContentMD5(streamMD5);
    metadata.setUserMetadata(fileMetadata);

    try {
        LOG.trace("Bucket name: " + getBucketName());
        LOG.trace("File name: " + request.getFileName());
        LOG.trace("inputStream == null?  " + (request.getInputStream() == null));

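        // wrap the upload stream so a SHA-256 digest is computed as S3 reads the content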
        HashingInputStream his = new HashingInputStream(request.getInputStream(), "SHA-256");

        PutObjectResult putResult = s3
                .putObject(new PutObjectRequest(getBucketName(), request.getFileName(), his, metadata));

        String finalUrl = getUrl(request.getFileName());
        LOG.trace("File URL: " + finalUrl);

        requestFile.setUrl(finalUrl);

        byte[] signature = his.getSignature();

        JSONObject jsonObject = HashUtil.signFile(requestFile, signature);
        LOG.info("File Signature\n\n{}\n\n", jsonObject.toString(3));

        return new StorageResponse(requestFile, finalUrl, jsonObject.toString(3));
    } catch (AmazonS3Exception e) {
        e.printStackTrace();
        throw e;
    } catch (JSONException | NoSuchAlgorithmException e) {
        e.printStackTrace();
        throw new IOException(e);
    }

}

From source file: net.solarnetwork.node.backup.s3.S3BackupService.java

License: Open Source License

private String calculateContentDigest(BackupResource rsrc, MessageDigest digest, byte[] buf,
        ObjectMetadata objectMetadata) throws IOException {
    // S3 client buffers to RAM unless content length set; so since we have to calculate the 
    // SHA256 digest of the content anyway, also calculate the content length at the same time
    long contentLength = 0;
    digest.reset();
    int len = 0;
    try (InputStream rsrcIn = rsrc.getInputStream()) {
        while ((len = rsrcIn.read(buf)) >= 0) {
            digest.update(buf, 0, len);
            contentLength += len;
        }
    }
    objectMetadata.setContentLength(contentLength);
    return new String(Hex.encodeHex(digest.digest()));
}

From source file: net.solarnetwork.node.backup.s3.S3BackupService.java

License: Open Source License

private Backup performBackupInternal(final Iterable<BackupResource> resources, final Calendar now,
        Map<String, String> props) {
    if (resources == null) {
        return null;
    }
    final Iterator<BackupResource> itr = resources.iterator();
    if (!itr.hasNext()) {
        log.debug("No resources provided, nothing to backup");
        return null;
    }
    S3Client client = this.s3Client;
    if (!status.compareAndSet(BackupStatus.Configured, BackupStatus.RunningBackup)) {
        // try to reset from error
        if (!status.compareAndSet(BackupStatus.Error, BackupStatus.RunningBackup)) {
            return null;
        }
    }
    S3BackupMetadata result = null;
    try {
        final Long nodeId = nodeId(props);
        final String metaName = String.format(META_NAME_FORMAT, now, nodeId);
        final String metaObjectKey = objectKeyForPath(META_OBJECT_KEY_PREFIX + metaName);
        log.info("Starting backup to archive {}", metaObjectKey);

        final Set<S3ObjectReference> allDataObjects = client
                .listObjects(objectKeyForPath(DATA_OBJECT_KEY_PREFIX));

        S3BackupMetadata meta = new S3BackupMetadata();
        meta.setNodeId(nodeId);
        MessageDigest digest = DigestUtils.getSha256Digest();
        byte[] buf = new byte[4096];
        for (BackupResource rsrc : resources) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            if (rsrc.getModificationDate() > 0) {
                objectMetadata.setLastModified(new Date(rsrc.getModificationDate()));
            }
            String sha = calculateContentDigest(rsrc, digest, buf, objectMetadata);
            String objectKey = objectKeyForPath(DATA_OBJECT_KEY_PREFIX + sha);

            // see if already exists
            if (!allDataObjects.contains(new S3ObjectReference(objectKey))) {
                log.info("Saving resource to S3: {}", rsrc.getBackupPath());
                client.putObject(objectKey, rsrc.getInputStream(), objectMetadata);
            } else {
                log.info("Backup resource already saved to S3: {}", rsrc.getBackupPath());
            }
            meta.addBackupResource(rsrc, objectKey, sha);
        }

        // now save metadata
        meta.setComplete(true);
        meta.setDate(now.getTime());
        meta.setKey(metaName);
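        // serialize the metadata to JSON bytes first so the exact Content-Length is known before upload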
        byte[] metaJsonBytes = OBJECT_MAPPER.writeValueAsBytes(meta);
        try (ByteArrayInputStream in = new ByteArrayInputStream(metaJsonBytes)) {
            ObjectMetadata metaObjectMetadata = new ObjectMetadata();
            metaObjectMetadata.setContentType("application/json;charset=UTF-8");
            metaObjectMetadata.setContentLength(metaJsonBytes.length);
            metaObjectMetadata.setLastModified(meta.getDate());
            S3ObjectReference metaRef = client.putObject(metaObjectKey, in, metaObjectMetadata);
            result = new S3BackupMetadata(metaRef);
        }

        if (additionalBackupCount < 1) {
            // add this backup to the cached data
            CachedResult<List<Backup>> cached = cachedBackupList.get();
            if (cached != null) {
                List<Backup> list = cached.getResult();
                List<Backup> newList = new ArrayList<>(list);
                newList.add(0, result);
                updateCachedBackupList(newList);
            }
        } else {
            // clean out older backups
            List<Backup> knownBackups = getAvailableBackupsInternal();
            List<String> backupsForNode = knownBackups.stream().filter(b -> nodeId.equals(b.getNodeId()))
                    .map(b -> b.getKey()).collect(Collectors.toList());
            if (backupsForNode.size() > additionalBackupCount + 1) {
                Set<String> keysToDelete = backupsForNode.stream()
                        .limit(backupsForNode.size() - additionalBackupCount - 1).collect(Collectors.toSet());
                log.info("Deleting {} expired backups for node {}: {}", keysToDelete.size(), nodeId,
                        keysToDelete);
                client.deleteObjects(keysToDelete);

                // update cache
                knownBackups = knownBackups.stream().filter(b -> !keysToDelete.contains(b.getKey()))
                        .collect(Collectors.toList());
                updateCachedBackupList(knownBackups);
            }
        }
    } catch (IOException e) {
        log.error("IO error performing backup", e);
    } finally {
        status.compareAndSet(BackupStatus.RunningBackup, BackupStatus.Configured);
    }
    return result;
}

From source file: nl.kpmg.lcm.server.data.s3.S3FileAdapter.java

License: Apache License

@Override
public void write(InputStream stream, Long size) throws IOException {
    if (size == null || size <= 0) {
        throw new LcmExposableException("Error! Unable to transfer file to s3 storage with unknown size.");
    }

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(size);
    if (!s3Client.doesBucketExist(bucketName)) {
        s3Client.createBucket(bucketName);
    }
    s3Client.putObject(new PutObjectRequest(bucketName, fileName, stream, metadata));
    LOGGER.info("Successfully written data in s3 storage. Bucket:  " + bucketName);
}

From source file: nl.nn.adapterframework.filesystem.AmazonS3FileSystem.java

License: Apache License

@Override
public Iterator<S3Object> listFiles(String folder) throws FileSystemException {
    List<S3ObjectSummary> summaries = null;
    String prefix = folder != null ? folder + "/" : "";
    try {
        ObjectListing listing = s3Client.listObjects(bucketName, prefix);
        summaries = listing.getObjectSummaries();
        while (listing.isTruncated()) {
            listing = s3Client.listNextBatchOfObjects(listing);
            summaries.addAll(listing.getObjectSummaries());
        }
    } catch (AmazonServiceException e) {
        throw new FileSystemException("Cannot process requested action", e);
    }

    List<S3Object> list = new ArrayList<S3Object>();
    for (S3ObjectSummary summary : summaries) {
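        // object listings carry only summaries, so rebuild ObjectMetadata with the size from each summary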
        S3Object object = new S3Object();
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(summary.getSize());

        object.setBucketName(summary.getBucketName());
        object.setKey(summary.getKey());
        object.setObjectMetadata(metadata);
        if (!object.getKey().endsWith("/") && !(prefix.isEmpty() && object.getKey().contains("/"))) {
            list.add(object);
        }
    }

    return list.iterator();
}

From source file: nl.nn.adapterframework.filesystem.AmazonS3FileSystem.java

License: Apache License

@Override
public OutputStream createFile(final S3Object f) throws FileSystemException, IOException {
    String fileName = FileUtils.getTempDirectory().getAbsolutePath() + File.separator + "tempFile";

    final File file = new File(fileName);
    final FileOutputStream fos = new FileOutputStream(file);
    final BufferedOutputStream bos = new BufferedOutputStream(fos);

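    // buffer writes in a local temp file; on close(), upload the complete file to S3 and delete it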
    FilterOutputStream filterOutputStream = new FilterOutputStream(bos) {
        boolean isClosed = false;

        @Override
        public void close() throws IOException {
            super.close();
            bos.close();
            if (!isClosed) {
                FileInputStream fis = new FileInputStream(file);
                ObjectMetadata metaData = new ObjectMetadata();
                metaData.setContentLength(file.length());

                s3Client.putObject(bucketName, f.getKey(), fis, metaData);

                fis.close();
                file.delete();
                isClosed = true;
            }
        }
    };
    return filterOutputStream;
}

From source file: onl.area51.filesystem.s3.S3Sender.java

License: Apache License

@Override
public void send(char[] path) throws IOException {
    String pathValue = String.valueOf(path);

    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentLength(getDelegate().size(path));
    meta.setContentType(pathValue);

    try (InputStream is = getDelegate().newInputStream(path)) {
        LOG.log(Level.FINE, () -> "Sending " + getBucketName() + ":" + pathValue);
        getS3().putObject(new PutObjectRequest(getBucketName(), pathValue, is, meta));
        LOG.log(Level.FINE, () -> "Sent " + getBucketName() + ":" + pathValue);
    } catch (AmazonS3Exception ex) {
        LOG.log(Level.FINE, () -> "Send error " + ex.getStatusCode() + " " + getBucketName() + ":" + pathValue);
        throw new IOException(ex.getStatusCode() + ": Failed to put " + pathValue, ex);
    } catch (IOException ex) {
        throw new IOException("Failed to put " + pathValue, ex);
    }
}

From source file: org.adroitlogic.build.aws.maven.SimpleStorageServiceWagon.java

License: Apache License

@Override
protected void putResource(File source, String destination, TransferProgress transferProgress)
        throws TransferFailedException, ResourceDoesNotExistException {
    String key = getKey(destination);

    mkdirs(key, 0);

    InputStream in = null;
    try {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentLength(source.length());
        objectMetadata.setContentType(Mimetypes.getInstance().getMimetype(source));

        in = new TransferProgressFileInputStream(source, transferProgress);

        this.amazonS3.putObject(new PutObjectRequest(this.bucketName, key, in, objectMetadata));
    } catch (AmazonServiceException e) {
        throw new TransferFailedException(String.format("Cannot write file to '%s'", destination), e);
    } catch (FileNotFoundException e) {
        throw new ResourceDoesNotExistException(String.format("Cannot read file from '%s'", source), e);
    } finally {
        IoUtils.closeQuietly(in);
    }
}

From source file: org.adroitlogic.build.aws.maven.SimpleStorageServiceWagon.java

License: Apache License

private PutObjectRequest createDirectoryPutObjectRequest(String key) {
    ByteArrayInputStream inputStream = new ByteArrayInputStream(new byte[0]);

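    // S3 has no real directories; an empty zero-length object serves as the directory marker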
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(0);

    return new PutObjectRequest(this.bucketName, key, inputStream, objectMetadata)
            .withCannedAcl(CannedAccessControlList.PublicRead);
}

From source file: org.akvo.flow.deploy.Deploy.java

License: Open Source License

private static void uploadS3(String accessKey, String secretKey, String s3Path, File file)
        throws AmazonServiceException, AmazonClientException {
    BasicAWSCredentials credentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3 s3 = new AmazonS3Client(credentials);

    PutObjectRequest putRequest = new PutObjectRequest(BUCKET_NAME, s3Path, file);
    ObjectMetadata metadata = new ObjectMetadata();

    // set content type as android package file
    metadata.setContentType("application/vnd.android.package-archive");

    // set content length to length of file
    metadata.setContentLength(file.length());

    putRequest.setMetadata(metadata);

    // set access to public
    putRequest.setCannedAcl(CannedAccessControlList.PublicRead);

    // try to put the apk in S3
    PutObjectResult result = s3.putObject(putRequest);
    System.out.println("Apk uploaded successfully, with result ETag " + result.getETag());
}