Example usage for com.amazonaws.services.s3.model.ObjectMetadata: the ObjectMetadata() constructor

Introduction

This page collects example usages of the no-argument constructor of com.amazonaws.services.s3.model.ObjectMetadata.

Prototype

public ObjectMetadata() 
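
The no-argument constructor creates an empty metadata container; callers set whichever fields they need (content length, content type, MD5, user metadata) and attach it to a put request. Here is a minimal sketch of the pattern the examples below share; the bucket name, key, and client setup are illustrative placeholders, not taken from any example on this page:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

public class ObjectMetadataExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] bytes = "hello, s3".getBytes(StandardCharsets.UTF_8);

        // set only what is needed; when uploading from a stream, setting the
        // content length up front keeps the SDK from buffering the whole
        // stream just to compute it
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(bytes.length);
        metadata.setContentType("text/plain");

        s3.putObject("example-bucket", "example-key", new ByteArrayInputStream(bytes), metadata);
    }
}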

Usage

From source file: com.davidsoergel.s3napback.S3ops.java

License: Apache License

public static void upload(TransferManager tx, String bucket, String filename, int chunkSize)
        throws InterruptedException, IOException {
    //throw new NotImplementedException();

    // break input stream into chunks

    // fully read each chunk into memory before sending, in order to know the size and the md5

    // ** prepare the next chunk while the last is sending; need to deal with multithreading properly
    // ** 4 concurrent streams?

    InputStream in = new BufferedInputStream(System.in);
    int chunkNum = 0;
    // note: InputStream.available() is only a heuristic for "more data coming",
    // but it is how this example detects end of input on stdin
    while (in.available() > 0) {
        byte[] buf = new byte[chunkSize];
        int bytesRead = in.read(buf);

        // the final chunk may be short; hash and send only the bytes actually read
        byte[] chunk = (bytesRead == chunkSize) ? buf : Arrays.copyOf(buf, bytesRead);

        // S3 expects Content-MD5 as the base64-encoded MD5 digest of the payload;
        // Md5Utils ships with the SDK (com.amazonaws.util.Md5Utils)
        String md5 = Md5Utils.md5AsBase64(chunk);

        // presume AWS does its own buffering, no need for BufferedInputStream (?)

        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(bytesRead);
        meta.setContentMD5(md5);

        Upload myUpload = tx.upload(bucket, filename + ":" + chunkNum, new ByteArrayInputStream(chunk), meta);

        // poll for progress first; waitForUploadResult() blocks until the
        // transfer completes, so a status loop placed after it would never run
        while (!myUpload.isDone()) {
            System.out.println("Transfer: " + myUpload.getDescription());
            System.out.println("  - State: " + myUpload.getState());
            System.out.println("  - Progress: " + myUpload.getProgress().getBytesTransferred());
            Thread.sleep(500);
        }
        UploadResult result = myUpload.waitForUploadResult();

        // advance the chunk counter so each chunk gets its own key
        chunkNum++;
    }
}

From source file: com.digitalpebble.stormcrawler.aws.s3.S3Cacher.java

License: Apache License

@Override
public void execute(Tuple tuple) {
    // stores the binary content on S3

    byte[] content = tuple.getBinaryByField("content");
    String url = tuple.getStringByField("url");
    final Metadata metadata = (Metadata) tuple.getValueByField("metadata");

    // if there is nothing to cache, pass the tuple through unchanged
    byte[] contentToCache = getContentToCache(metadata, content, url);
    if (contentToCache == null) {
        LOG.info("{} had no data to cache", url);
        _collector.emit(tuple, new Values(url, content, metadata));
        // ack it no matter what
        _collector.ack(tuple);
        return;
    }

    // already in the cache
    // don't need to recache it
    if (!shouldOverwrite(metadata)) {
        eventCounter.scope("already_in_cache").incr();
        _collector.emit(tuple, new Values(url, content, metadata));
        // ack it no matter what
        _collector.ack(tuple);
        return;
    }

    // URL-encode the URL so it can serve as the S3 object key
    String key = "";
    try {
        key = URLEncoder.encode(url, "UTF-8");
    } catch (UnsupportedEncodingException e) {
        // ignore it - we know UTF-8 is valid
    }
    // S3 object keys are limited to 1024 bytes
    if (key.length() >= 1024) {
        LOG.info("Key too large : {}", key);
        eventCounter.scope("key_too_large").incr();
        _collector.emit(tuple, new Values(url, content, metadata));
        // ack it no matter what
        _collector.ack(tuple);
        return;
    }

    ByteArrayInputStream input = new ByteArrayInputStream(contentToCache);

    ObjectMetadata md = new ObjectMetadata();
    md.setContentLength(contentToCache.length);
    md.setHeader("x-amz-storage-class", "STANDARD_IA");

    try {
        PutObjectResult result = client.putObject(bucketName, getKeyPrefix() + key, input, md);
        eventCounter.scope("cached").incr();
        // TODO check something with the result?
    } catch (AmazonS3Exception exception) {
        LOG.error("AmazonS3Exception while storing {}", url, exception);
        eventCounter.scope("s3_exception").incr();
    } finally {
        try {
            input.close();
        } catch (IOException e) {
            LOG.error("Error while closing ByteArrayInputStream", e);
        }
    }

    _collector.emit(tuple, new Values(url, content, metadata));
    // ack it no matter what
    _collector.ack(tuple);
}

From source file: com.digitaslbi.helios.mock.utils.ConnectionHelper.java

public static void createFolder(String bucketName, String folderName) {
    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    // create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
            folderName + MocksConstants.AWS_PARENT_DELIMITER.getValue(), emptyContent, metadata);

    // send request to S3 to create folder
    try {
        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        log.error("Caught an AmazonServiceException, which " + "means your request made it "
                + "to Amazon S3, but was rejected with an error response" + " for some reason.");
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("Caught an AmazonClientException, which " + "means the client encountered "
                + "an internal error while trying to " + "communicate with S3, "
                + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file: com.digitaslbi.helios.utils.S3Helper.java

public static void createFolder(String folderName) {
    connect();

    // create meta-data for your folder and set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);

    // create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);

    // create a PutObjectRequest passing the folder name suffixed by /
    PutObjectRequest putObjectRequest = new PutObjectRequest(S3Properties.getInstance().getBucketName(),
            folderName + Constants.AWS_PARENT_DELIMITER.getValue(), emptyContent, metadata);

    // send request to S3 to create folder
    try {
        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        log.error("[S3Helper][createFolder] Caught an AmazonServiceException, which "
                + "means your request made it " + "to Amazon S3, but was rejected with an error response"
                + " for some reason.");
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][createFolder] Caught an AmazonClientException, which "
                + "means the client encountered " + "an internal error while trying to "
                + "communicate with S3, " + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file: com.digitaslbi.helios.utils.S3Helper.java

public static void uploadFile(String fileName, byte[] content) {
    connect();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(content.length);

    try {
        log.info("[S3Helper][uploadFile] Uploading a new object to S3: " + fileName);

        PutObjectRequest putObjectRequest = new PutObjectRequest(S3Properties.getInstance().getBucketName(),
                fileName, new ByteArrayInputStream(content), metadata);
        putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);

        s3Client.putObject(putObjectRequest);
    } catch (AmazonServiceException ase) {
        log.error("[S3Helper][uploadFile] Caught an AmazonServiceException, which "
                + "means your request made it " + "to Amazon S3, but was rejected with an error response"
                + " for some reason.");
        log.error("Error Message:    " + ase.getMessage());
        log.error("HTTP Status Code: " + ase.getStatusCode());
        log.error("AWS Error Code:   " + ase.getErrorCode());
        log.error("Error Type:       " + ase.getErrorType());
        log.error("Request ID:       " + ase.getRequestId());
    } catch (AmazonClientException ace) {
        log.error("[S3Helper][uploadFile] Caught an AmazonClientException, which "
                + "means the client encountered " + "an internal error while trying to "
                + "communicate with S3, " + "such as not being able to access the network.");
        log.error("Error Message: " + ace.getMessage());
    }
}

From source file: com.dss.sframework.tools.amazon.models.UploadModel.java

License: Open Source License

public void upload() {
    if (mFile != null) {
        try {

            ObjectMetadata metaData = new ObjectMetadata();
            // parenthesize the ternary so the extension is appended for both media types
            metaData.setContentType(
                    (mMediaType == Enums.MEDIA_TYPE.VIDEO ? "video/" : "image/") + mExtension);

            if (mMediaType == Enums.MEDIA_TYPE.VIDEO) {
                mediaURL = AmazonUtil.getPrefix(getContext()) + "videos/" + super.getFileName() + "."
                        + mExtension;
            } else {
                mediaURL = AmazonUtil.getPrefix(getContext()) + "imagens/" + super.getFileName() + "."
                        + mExtension;
            }

            mUpload = getTransferManager().upload(
                    new PutObjectRequest(ConstantAmazon.BUCKET_NAME.toLowerCase(Locale.US), mediaURL, mFile)
                            .withCannedAcl(CannedAccessControlList.PublicRead).withMetadata(metaData));

            Log.i("TAG", "Upload: " + ConstantAmazon.BUCKET_NAME + mediaURL);

            mUpload.addProgressListener(mListener);
        } catch (Exception e) {
            Log.e(TAG, "", e);
            mCallbacks.onErrorListenerUploadFile("Erro no upload");
        }
    }
}

From source file: com.dustindoloff.s3websitedeploy.Main.java

License: Apache License

private static boolean upload(final AmazonS3 s3Client, final String bucket, final ZipFile zipFile) {
    boolean failed = false;
    final Enumeration<? extends ZipEntry> entries = zipFile.entries();
    while (entries.hasMoreElements()) {
        final ZipEntry entry = entries.nextElement();
        // use a fresh ObjectMetadata per entry rather than mutating one shared instance
        final ObjectMetadata data = new ObjectMetadata();
        data.setContentLength(entry.getSize());
        try {
            s3Client.putObject(bucket, entry.getName(), zipFile.getInputStream(entry), data);
        } catch (final AmazonClientException | IOException e) {
            failed = true;
        }
    }
    return !failed;
}

From source file: com.easarrive.aws.plugins.common.service.impl.S3Service.java

License: Open Source License

/**
 * {@inheritDoc}
 */
@Override
public PutObjectResult putObject(AmazonS3 client, String bucketName, String key, InputStream input,
        ObjectMetadata metadata) {
    if (client == null) {
        return null;
    } else if (StringUtil.isEmpty(bucketName)) {
        return null;
    } else if (StringUtil.isEmpty(key)) {
        return null;
    } else if (input == null) {
        return null;
    }
    if (metadata == null) {
        metadata = new ObjectMetadata();
    }
    PutObjectResult result = null;
    if (!client.doesObjectExist(bucketName, key)) {
        result = client.putObject(bucketName, key, input, metadata);
    }
    return result;
}

From source file: com.easarrive.aws.plugins.common.service.impl.S3Service.java

License: Open Source License

/**
 * {@inheritDoc}
 */
@Override
public PutObjectResult createFolder(AmazonS3 client, String bucketName, String folderName) {
    if (client == null) {
        return null;
    } else if (StringUtil.isEmpty(bucketName)) {
        return null;
    } else if (StringUtil.isEmpty(folderName)) {
        return null;
    }
    // Create metadata for my folder & set content-length to 0
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    // Create empty content
    InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
    if (!folderName.endsWith("/")) {
        folderName += "/";
    }
    return this.putObject(client, bucketName, folderName, emptyContent, metadata);
}

From source file: com.emc.ecs.sync.util.AwsS3Util.java

License: Open Source License

public static ObjectMetadata s3MetaFromSyncMeta(SyncMetadata syncMeta) {
    ObjectMetadata om = new ObjectMetadata();
    if (syncMeta.getCacheControl() != null)
        om.setCacheControl(syncMeta.getCacheControl());
    if (syncMeta.getContentDisposition() != null)
        om.setContentDisposition(syncMeta.getContentDisposition());
    if (syncMeta.getContentEncoding() != null)
        om.setContentEncoding(syncMeta.getContentEncoding());
    om.setContentLength(syncMeta.getContentLength());
    if (syncMeta.getChecksum() != null && syncMeta.getChecksum().getAlgorithm().equals("MD5"))
        om.setContentMD5(syncMeta.getChecksum().getValue());
    if (syncMeta.getContentType() != null)
        om.setContentType(syncMeta.getContentType());
    if (syncMeta.getHttpExpires() != null)
        om.setHttpExpiresDate(syncMeta.getHttpExpires());
    om.setUserMetadata(formatUserMetadata(syncMeta));
    if (syncMeta.getModificationTime() != null)
        om.setLastModified(syncMeta.getModificationTime());
    return om;
}