Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

This page collects example usages of com.amazonaws.services.s3.model.ObjectMetadata#setContentLength from open-source projects.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
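
Before the project examples, here is a minimal sketch of the basic pattern, assuming an already-configured AmazonS3 client (the method, bucket, and key names are hypothetical). When uploading from an InputStream, the content length should be set explicitly; otherwise the client must buffer the whole stream in memory to discover its size.

public static void putBytes(AmazonS3 s3Client, String bucketName, String key, byte[] content) {
    ObjectMetadata metadata = new ObjectMetadata();
    // Tell S3 exactly how many bytes will be sent; without this, the SDK
    // warns that it must buffer the stream contents in memory.
    metadata.setContentLength(content.length);
    s3Client.putObject(bucketName, key, new ByteArrayInputStream(content), metadata);
}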

Usage

From source file:com.cloudbees.plugins.binarydeployer.s3.S3Repository.java

License:Open Source License

private PutObjectRequest prepareUpload(VirtualFile file, String name) throws IOException {
    log.debug("Preparing upload for " + name + " to S3::" + bucketName);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(file.length());

    return new PutObjectRequest(bucketName, name, file.open(), metadata);
}

From source file:com.davidsoergel.s3napback.S3ops.java

License:Apache License

public static void upload(TransferManager tx, String bucket, String filename, int chunkSize)
        throws InterruptedException, IOException {
    // Break the input stream into chunks, fully reading each chunk into
    // memory before sending so that its size and MD5 are known up front.
    // A further improvement would be to prepare the next chunk while the
    // last is sending (e.g. a few concurrent streams), with proper multithreading.

    InputStream in = new BufferedInputStream(System.in);
    int chunkNum = 0;
    // available() is only a heuristic end-of-stream check; it can report 0
    // even when more data is still pending.
    while (in.available() > 0) {
        byte[] buf = new byte[chunkSize];
        int bytesRead = in.read(buf);
        if (bytesRead < 0) {
            break; // end of stream
        }

        // Base64-encoded MD5 over the bytes actually read, the format
        // setContentMD5 expects (computed with the SDK's Md5Utils helper).
        String md5 = BinaryUtils.toBase64(Md5Utils.computeMD5Hash(Arrays.copyOf(buf, bytesRead)));

        // Presume AWS does its own buffering; no extra BufferedInputStream needed.
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(bytesRead);
        meta.setContentMD5(md5);

        // Upload only the bytes that were read, not the whole buffer.
        Upload myUpload = tx.upload(bucket, filename + ":" + chunkNum,
                new ByteArrayInputStream(buf, 0, bytesRead), meta);

        // Poll progress while the transfer runs; waitForUploadResult()
        // blocks until completion, so it comes after this loop.
        while (!myUpload.isDone()) {
            System.out.println("Transfer: " + myUpload.getDescription());
            System.out.println("  - State: " + myUpload.getState());
            System.out.println("  - Progress: " + myUpload.getProgress().getBytesTransfered());
            Thread.sleep(500);
        }
        myUpload.waitForUploadResult();

        chunkNum++;
    }
}

From source file:com.digitalpebble.stormcrawler.aws.s3.S3Cacher.java

License:Apache License

@Override
public void execute(Tuple tuple) {
    // stores the binary content on S3

    byte[] content = tuple.getBinaryByField("content");
    String url = tuple.getStringByField("url");
    final Metadata metadata = (Metadata) tuple.getValueByField("metadata");

    // Determine the content to cache; skip this tuple if there is nothing to store
    byte[] contentToCache = getContentToCache(metadata, content, url);
    if (contentToCache == null) {
        LOG.info("{} had no data to cache", url);
        _collector.emit(tuple, new Values(url, content, metadata));
        // ack it no matter what
        _collector.ack(tuple);
        return;
    }

    // already in the cache
    // don't need to recache it
    if (!shouldOverwrite(metadata)) {
        eventCounter.scope("already_in_cache").incr();
        _collector.emit(tuple, new Values(url, content, metadata));
        // ack it no matter what
        _collector.ack(tuple);
        return;
    }

    // URL-encode the URL to build a safe S3 key
    String key = "";
    try {
        key = URLEncoder.encode(url, "UTF-8");
    } catch (UnsupportedEncodingException e) {
        // ignore it - we know UTF-8 is valid
    }
    // check size of the key
    if (key.length() >= 1024) {
        LOG.info("Key too large : {}", key);
        eventCounter.scope("key_too_large").incr();
        _collector.emit(tuple, new Values(url, content, metadata));
        // ack it no matter what
        _collector.ack(tuple);
        return;
    }

    ByteArrayInputStream input = new ByteArrayInputStream(contentToCache);

    ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(contentToCache.length);
    md.setHeader("x-amz-storage-class", "STANDARD_IA");

    try {
        PutObjectResult result = client.putObject(bucketName, getKeyPrefix() + key, input, md);
        eventCounter.scope("cached").incr();
        // TODO check something with the result?
    } catch (AmazonS3Exception exception) {
        LOG.error("AmazonS3Exception while storing {}", url, exception);
        eventCounter.scope("s3_exception").incr();
    } finally {
        try {
            input.close();
        } catch (IOException e) {
            LOG.error("Error while closing ByteArrayInputStream", e);
        }
    }

    _collector.emit(tuple, new Values(url, content, metadata));
    // ack it no matter what
    _collector.ack(tuple);
}

From source file:com.digitaslbi.helios.mock.utils.ConnectionHelper.java

public static void createFolder(String bucketName, String folderName) {
    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    // create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
            folderName + MocksConstants.AWS_PARENT_DELIMITER.getValue(), emptyContent, metadata);

    // send request to S3 to create folder
    try {
        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file:com.digitaslbi.helios.utils.S3Helper.java

public static void createFolder(String folderName) {
    connect();

    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    // create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = new PutObjectRequest(S3Properties.getInstance().getBucketName(),
            folderName + Constants.AWS_PARENT_DELIMITER.getValue(), emptyContent, metadata);

    // send request to S3 to create folder
    try {
        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        log.error("[S3Helper][createFolder] Caught an AmazonServiceException, which "
                + "means your request made it " + "to Amazon S3, but was rejected with an error response"
                + " for some reason.");
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][createFolder] Caught an AmazonClientException, which "
                + "means the client encountered " + "an internal error while trying to "
                + "communicate with S3, " + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file:com.digitaslbi.helios.utils.S3Helper.java

public static void uploadFile(String fileName, byte[] content) {
    connect();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(content.length);

    try {
        log.info("[S3Helper][uploadFile] Uploading a new object to S3: " + fileName);

        PutObjectRequest putObjectRequest = new PutObjectRequest(S3Properties.getInstance().getBucketName(),
                fileName, new ByteArrayInputStream(content), metadata);
        putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);

        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        log.error("[S3Helper][uploadFile] Caught an AmazonServiceException, which "
                + "means your request made it " + "to Amazon S3, but was rejected with an error response"
                + " for some reason.");
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][uploadFile] Caught an AmazonClientException, which "
                + "means the client encountered " + "an internal error while trying to "
                + "communicate with S3, " + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file:com.dustindoloff.s3websitedeploy.Main.java

License:Apache License

private static boolean upload(final AmazonS3 s3Client, final String bucket, final ZipFile zipFile) {
    boolean failed = false;
    final ObjectMetadata data = new ObjectMetadata();
    final Enumeration<? extends ZipEntry> entries = zipFile.entries();
    while (entries.hasMoreElements()) {
        final ZipEntry entry = entries.nextElement();
        // ZipEntry#getSize() can return -1 when the size is unknown; sizes
        // are known here because ZipFile reads the zip's central directory.
        data.setContentLength(entry.getSize());
        try {
            s3Client.putObject(bucket, entry.getName(), zipFile.getInputStream(entry), data);
        } catch (final AmazonClientException | IOException e) {
            failed = true;
        }
    }
    return !failed;
}
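
A hypothetical invocation of this helper might look like the sketch below; the bucket name and zip path are assumptions, and AmazonS3ClientBuilder requires a recent 1.11.x SDK (older SDKs would construct an AmazonS3Client directly).

AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();
// ZipFile implements Closeable, so try-with-resources closes it;
// the constructor and close() can throw IOException.
try (ZipFile zipFile = new ZipFile("site.zip")) {
    if (!upload(s3Client, "my-website-bucket", zipFile)) {
        System.err.println("One or more entries failed to upload");
    }
}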

From source file:com.easarrive.aws.plugins.common.service.impl.S3Service.java

License:Open Source License

/**
 * {@inheritDoc}
 */
@Override
public PutObjectResult createFolder(AmazonS3 client, String bucketName, String folderName) {
    if (client == null) {
        return null;
    } else if (StringUtil.isEmpty(bucketName)) {
        return null;
    } else if (StringUtil.isEmpty(folderName)) {
        return null;
    }
    // Create metadata for my folder & set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    // Create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
    if (!folderName.endsWith("/")) {
        folderName += "/";
    }
    return this.putObject(client, bucketName, folderName, emptyContent, metadata);
}
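
Note that S3 has no native folder objects: each createFolder example above follows the same convention of writing a zero-byte object (content length 0) whose key ends with the "/" delimiter, which S3 browsers then render as a folder.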

From source file:com.emc.ecs.sync.util.AwsS3Util.java

License:Open Source License

public static ObjectMetadata s3MetaFromSyncMeta(SyncMetadata syncMeta) {
    ObjectMetadata om = new ObjectMetadata();
    if (syncMeta.getCacheControl() != null)
        om.setCacheControl(syncMeta.getCacheControl());
    if (syncMeta.getContentDisposition() != null)
        om.setContentDisposition(syncMeta.getContentDisposition());
    if (syncMeta.getContentEncoding() != null)
        om.setContentEncoding(syncMeta.getContentEncoding());
    om.setContentLength(syncMeta.getContentLength());
    if (syncMeta.getChecksum() != null && syncMeta.getChecksum().getAlgorithm().equals("MD5"))
        om.setContentMD5(syncMeta.getChecksum().getValue());
    if (syncMeta.getContentType() != null)
        om.setContentType(syncMeta.getContentType());
    if (syncMeta.getHttpExpires() != null)
        om.setHttpExpiresDate(syncMeta.getHttpExpires());
    om.setUserMetadata(formatUserMetadata(syncMeta));
    if (syncMeta.getModificationTime() != null)
        om.setLastModified(syncMeta.getModificationTime());
    return om;
}

From source file:com.emc.vipr.services.s3.ViPRS3Client.java

License:Open Source License

/**
 * Executes a (Subclass of) PutObjectRequest.  In particular, we check for subclasses
 * of the UpdateObjectRequest and inject the value of the Range header.  This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers.
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with a progress listener set by either the legacy
     * method PutObjectRequest#setProgressListener or the new method
     * PutObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
                // best-effort close; ignore failures here
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data.  "
                + "Stream contents will be buffered in memory and could result in " + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        // Wrap the input so progress events are reported as bytes are read.
        input = new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }

        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available.  Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload.  "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor, com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}