Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
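
For orientation, here is a minimal, self-contained sketch of the pattern the examples below share: the length is computed from an in-memory byte array and set on the ObjectMetadata before the object is uploaded. The bucket name, key, and client construction are placeholders for illustration only and are not taken from any of the source files below.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class SetContentLengthExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] bytes = "hello".getBytes(StandardCharsets.UTF_8);

        ObjectMetadata metadata = new ObjectMetadata();
        // Content-Length is the size of the object in bytes.
        metadata.setContentLength(bytes.length);
        metadata.setContentType("text/plain");

        // "my-bucket" and "my-key" are placeholder values.
        s3.putObject("my-bucket", "my-key", new ByteArrayInputStream(bytes), metadata);
    }
}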

Usage

From source file:org.apache.storm.s3.output.S3MemBufferedOutputStream.java

License:Apache License

public void close(T key, String identifier, long rotation) throws IOException {
    String name = fileNameFormat.getName(key, identifier, rotation, System.currentTimeMillis());
    LOG.info("uploading {}/{} to S3", bucketName, name);
    outputStream.close();
    final byte[] buf = outputStream.toByteArray();
    InputStream input = new ByteArrayInputStream(buf);
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentType(contentType);
    meta.setContentLength(buf.length);
    uploader.upload(key, bucketName, name, input, meta);
    input.close();
}

From source file:org.apache.streams.s3.S3OutputStreamWrapper.java

License:Apache License

private void addFile() throws Exception {

    InputStream is = new ByteArrayInputStream(this.outputStream.toByteArray());
    int contentLength = outputStream.size();

    TransferManager transferManager = new TransferManager(amazonS3Client);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setExpirationTime(DateTime.now().plusDays(365 * 3).toDate());
    metadata.setContentLength(contentLength);

    metadata.addUserMetadata("writer", "org.apache.streams");

    for (String s : metaData.keySet())
        metadata.addUserMetadata(s, metaData.get(s));

    String fileNameToWrite = path + fileName;
    Upload upload = transferManager.upload(bucketName, fileNameToWrite, is, metadata);
    try {
        upload.waitForUploadResult();

        is.close();
        transferManager.shutdownNow(false);
        LOGGER.info("S3 File Close[{} kb] - {}", contentLength / 1024, path + fileName);
    } catch (Exception e) {
        // No Op
    }

}

From source file:org.apache.usergrid.services.assets.data.AWSBinaryStore.java

License:Apache License

@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {

    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);

    byte[] data = baos.toByteArray();

    InputStream awsInputStream = new ByteArrayInputStream(data);

    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());

    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);

    Boolean overSizeLimit = false;

    EntityManager em = emf.getEntityManager(appId);

    if (written < FIVE_MB) { // total smaller than 5mb

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);

        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();

        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);

        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);

        em.update(entity);

    } else { // bigger than 5mb... dump 5 mb tmp files and upload from them
        written = 0; // reset written to 0; nothing has actually been written yet
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();

        //get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName,
                uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);

        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);

        // determine max size file allowed, default to 50mb
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }

        // always allow files up to 5mb
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }

        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");

            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));

                if (isFirstChunck == true) {
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;

                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break;
                    //set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }

            FileInputStream chunk = new FileInputStream(tempFile);

            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }

        //check for flag here then abort.
        if (overSizeLimit) {

            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);

            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);

            // update the entity with the error.
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }

            int timesIterated = 20;
            //loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {

                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }

            }
            if (timesIterated == 0) {
                logger.error("Files parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}

From source file:org.apereo.portal.portlets.dynamicskin.storage.s3.AwsS3DynamicSkinService.java

License:Apache License

private void addContentMetadata(final ObjectMetadata metadata, final String content) {
    metadata.setContentMD5(this.calculateBase64EncodedMd5Digest(content));
    metadata.setContentLength(content.length());
    metadata.setContentType("text/css");
    final String cacheControl = this.awsS3BucketConfig.getObjectCacheControl();
    if (StringUtils.isNotEmpty(cacheControl)) {
        metadata.setCacheControl(cacheControl);
    }
}
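
One caveat for the snippet above: setContentLength expects the object size in bytes, while String.length() returns the number of characters, so the two can differ if the CSS contains non-ASCII characters. A byte-based variant, assuming UTF-8 content, would look like this:

    final byte[] cssBytes = content.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    metadata.setContentLength(cssBytes.length);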

From source file:org.broadleafcommerce.vendor.amazon.s3.S3FileServiceProvider.java

License:Apache License

protected void addOrUpdateResourcesInternalStreamVersion(S3Configuration s3config, AmazonS3Client s3,
        InputStream inputStream, String fileName, long fileSizeInBytes) {
    final String bucketName = s3config.getDefaultBucketName();

    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(fileSizeInBytes);
    final String resourceName = buildResourceName(s3config, fileName);
    final PutObjectRequest objToUpload = new PutObjectRequest(bucketName, resourceName, inputStream, metadata);

    if ((s3config.getStaticAssetFileExtensionPattern() != null)
            && s3config.getStaticAssetFileExtensionPattern().matcher(getExtension(fileName)).matches()) {
        objToUpload.setCannedAcl(CannedAccessControlList.PublicRead);
    }

    s3.putObject(objToUpload);

    if (LOG.isTraceEnabled()) {
        final String s3Uri = String.format("s3://%s/%s", s3config.getDefaultBucketName(), resourceName);
        final String msg = String.format("%s copied/updated to %s", fileName, s3Uri);

        LOG.trace(msg);
    }
}

From source file:org.caboclo.clients.AmazonClient.java

License:Open Source License

private void mkdir(String path) {
    ObjectMetadata om = new ObjectMetadata();
    om.setLastModified(new Date());
    om.setContentLength(0);
    om.setContentType("inode/directory");
    ByteArrayInputStream bis = new ByteArrayInputStream(new byte[0]);

    s3.putObject(getBucketName(), path, bis, om);

    System.out.println("Creating Directory: " + path);
}

From source file:org.caboclo.clients.AmazonClient.java

License:Open Source License

@Override
public void putFile(File file, String path) throws IOException {
    ObjectMetadata om = new ObjectMetadata();
    om.setLastModified(new Date());
    om.setContentType(URLConnection.guessContentTypeFromName(file.getName()));
    om.setContentLength(file.length());
    BufferedInputStream stream = new BufferedInputStream(new FileInputStream(file));

    s3.putObject(getBucketName(), path, stream, om);

}

From source file:org.caboclo.clients.AmazonClient.java

License:Open Source License

void createFolder(String bucketName, String path) {
    ObjectMetadata om = new ObjectMetadata();
    om.setLastModified(new Date());
    om.setContentLength(0);
    om.setContentType("inode/directory");
    ByteArrayInputStream bis = new ByteArrayInputStream(new byte[0]);

    s3.putObject(bucketName, path, bis, om);

    System.out.println("Creating folder: " + path);
}

From source file:org.chodavarapu.jgitaws.repositories.PackRepository.java

License:Eclipse Distribution License

public DfsOutputStream savePack(String repositoryName, String packName, long length) throws IOException {
    PipedInputStream pipedInputStream = new PipedInputStream(configuration.getStreamingBlockSize());

    ObjectMetadata metaData = new ObjectMetadata();
    metaData.setContentLength(length);

    String objectName = objectName(repositoryName, packName);

    Async.fromAction(() -> {
        logger.debug("Attempting to save pack {} to S3 bucket", objectName);
        try {
            configuration.getS3Client().putObject(configuration.getPacksBucketName(), objectName,
                    pipedInputStream, metaData);
        } catch (AmazonServiceException e) {
            if ("InvalidBucketName".equals(e.getErrorCode()) || "InvalidBucketState".equals(e.getErrorCode())) {
                logger.debug("S3 packs bucket does not exist yet, creating it");
                configuration.getS3Client()
                        .createBucket(new CreateBucketRequest(configuration.getPacksBucketName()));
                configuration.getS3Client().setBucketVersioningConfiguration(
                        new SetBucketVersioningConfigurationRequest(configuration.getPacksBucketName(),
                                new BucketVersioningConfiguration(BucketVersioningConfiguration.OFF)));

                logger.debug("Created bucket, saving pack {}", objectName);
                configuration.getS3Client().putObject(configuration.getPacksBucketName(), objectName,
                        pipedInputStream, metaData);
            } else {
                throw e;
            }
        }
    }, null, Schedulers.io());

    return new PipedDfsOutputStream(pipedInputStream, objectName, (int) length,
            configuration.getStreamingBlockSize());
}

From source file:org.clothocad.phagebook.adaptors.S3Adapter.java

private static void createS3Folder(String bucketName, String folderName, AmazonS3 client) {
    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    // create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, folderName + "/", emptyContent,
            metadata); //folder name should be clothoID
    // send request to S3 to create folder
    client.putObject(putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead));

}