Example usage for com.amazonaws.services.s3.model PutObjectResult getETag

Introduction

This page collects example usage of com.amazonaws.services.s3.model.PutObjectResult.getETag() from open-source projects.

Prototype

public String getETag() 

Document

Gets the server-side ETag value for the newly created object.
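
For orientation, here is a minimal, hypothetical sketch of the call (the bucket, key, and file names are placeholders; imports from com.amazonaws.services.s3 and java.io are assumed):

public String uploadAndGetETag(AmazonS3 s3) {
    // Upload a local file; S3 computes an ETag for the new object and
    // returns it in the PutObjectResult. Names here are illustrative only.
    PutObjectResult result = s3.putObject("example-bucket", "example-key", new File("data.bin"));
    return result.getETag();
}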

Usage

From source file:org.apache.camel.component.aws.s3.S3Producer.java

License:Apache License
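
This Camel producer copies the ETag returned by putObject into the S3Constants.E_TAG header of the response message, along with the version ID when one is present.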

@Override
public void process(final Exchange exchange) throws Exception {
    ObjectMetadata objectMetadata = new ObjectMetadata();

    Long contentLength = exchange.getIn().getHeader(S3Constants.CONTENT_LENGTH, Long.class);
    if (contentLength != null) {
        objectMetadata.setContentLength(contentLength);
    }

    String contentType = exchange.getIn().getHeader(S3Constants.CONTENT_TYPE, String.class);
    if (contentType != null) {
        objectMetadata.setContentType(contentType);
    }

    String cacheControl = exchange.getIn().getHeader(S3Constants.CACHE_CONTROL, String.class);
    if (cacheControl != null) {
        objectMetadata.setCacheControl(cacheControl);
    }

    String contentDisposition = exchange.getIn().getHeader(S3Constants.CONTENT_DISPOSITION, String.class);
    if (contentDisposition != null) {
        objectMetadata.setContentDisposition(contentDisposition);
    }

    String contentEncoding = exchange.getIn().getHeader(S3Constants.CONTENT_ENCODING, String.class);
    if (contentEncoding != null) {
        objectMetadata.setContentEncoding(contentEncoding);
    }

    String contentMD5 = exchange.getIn().getHeader(S3Constants.CONTENT_MD5, String.class);
    if (contentMD5 != null) {
        objectMetadata.setContentMD5(contentMD5);
    }

    Date lastModified = exchange.getIn().getHeader(S3Constants.LAST_MODIFIED, Date.class);
    if (lastModified != null) {
        objectMetadata.setLastModified(lastModified);
    }

    Map<String, String> userMetadata = exchange.getIn().getHeader(S3Constants.USER_METADATA, Map.class);
    if (userMetadata != null) {
        objectMetadata.setUserMetadata(userMetadata);
    }

    File filePayload = null;

    Object obj = exchange.getIn().getMandatoryBody();
    if (obj instanceof File) {
        filePayload = (File) obj;
    }
    PutObjectRequest putObjectRequest = new PutObjectRequest(getConfiguration().getBucketName(),
            determineKey(exchange), exchange.getIn().getMandatoryBody(InputStream.class), objectMetadata);

    String storageClass = determineStorageClass(exchange);
    if (storageClass != null) {
        putObjectRequest.setStorageClass(storageClass);
    }

    String cannedAcl = exchange.getIn().getHeader(S3Constants.CANNED_ACL, String.class);
    if (cannedAcl != null) {
        CannedAccessControlList objectAcl = CannedAccessControlList.valueOf(cannedAcl);
        putObjectRequest.setCannedAcl(objectAcl);
    }

    AccessControlList acl = exchange.getIn().getHeader(S3Constants.ACL, AccessControlList.class);
    if (acl != null) {
        // note: if cannedacl and acl are both specified the last one will be used. refer to
        // PutObjectRequest#setAccessControlList for more details
        putObjectRequest.setAccessControlList(acl);
    }
    LOG.trace("Put object [{}] from exchange [{}]...", putObjectRequest, exchange);

    PutObjectResult putObjectResult = getEndpoint().getS3Client().putObject(putObjectRequest);

    LOG.trace("Received result [{}]", putObjectResult);

    Message message = getMessageForResponse(exchange);
    message.setHeader(S3Constants.E_TAG, putObjectResult.getETag());
    if (putObjectResult.getVersionId() != null) {
        message.setHeader(S3Constants.VERSION_ID, putObjectResult.getVersionId());
    }

    if (getConfiguration().isDeleteAfterWrite() && filePayload != null) {
        IOHelper.close(putObjectRequest.getInputStream());
        FileUtil.deleteFile(filePayload);
    }
}

From source file:org.apache.nifi.processors.aws.s3.PutS3Object.java

License:Apache License
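
This NiFi processor chooses between a single putObject call and a multipart upload based on a size threshold, and in both cases records the result's ETag under the S3_ETAG_ATTR_KEY flow file attribute.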

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    final long startNanos = System.nanoTime();

    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions(flowFile).getValue();
    final String key = context.getProperty(KEY).evaluateAttributeExpressions(flowFile).getValue();
    final String cacheKey = getIdentifier() + "/" + bucket + "/" + key;

    final AmazonS3Client s3 = getClient();
    final FlowFile ff = flowFile;
    final Map<String, String> attributes = new HashMap<>();
    final String ffFilename = ff.getAttributes().get(CoreAttributes.FILENAME.key());
    attributes.put(S3_BUCKET_KEY, bucket);
    attributes.put(S3_OBJECT_KEY, key);

    final Long multipartThreshold = context.getProperty(MULTIPART_THRESHOLD).asDataSize(DataUnit.B).longValue();
    final Long multipartPartSize = context.getProperty(MULTIPART_PART_SIZE).asDataSize(DataUnit.B).longValue();

    final long now = System.currentTimeMillis();

    /*
     * If necessary, run age off for existing uploads in AWS S3 and local state
     */
    ageoffS3Uploads(context, s3, now);

    /*
     * Then upload the FlowFile content, as a single putObject call or as a
     * multipart upload depending on the configured threshold
     */
    try {
        session.read(flowFile, new InputStreamCallback() {
            @Override
            public void process(final InputStream rawIn) throws IOException {
                try (final InputStream in = new BufferedInputStream(rawIn)) {
                    final ObjectMetadata objectMetadata = new ObjectMetadata();
                    objectMetadata.setContentDisposition(ff.getAttribute(CoreAttributes.FILENAME.key()));
                    objectMetadata.setContentLength(ff.getSize());

                    final String contentType = context.getProperty(CONTENT_TYPE)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (contentType != null) {
                        objectMetadata.setContentType(contentType);
                        attributes.put(S3_CONTENT_TYPE, contentType);
                    }

                    final String expirationRule = context.getProperty(EXPIRATION_RULE_ID)
                            .evaluateAttributeExpressions(ff).getValue();
                    if (expirationRule != null) {
                        objectMetadata.setExpirationTimeRuleId(expirationRule);
                    }

                    final Map<String, String> userMetadata = new HashMap<>();
                    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties()
                            .entrySet()) {
                        if (entry.getKey().isDynamic()) {
                            final String value = context.getProperty(entry.getKey())
                                    .evaluateAttributeExpressions(ff).getValue();
                            userMetadata.put(entry.getKey().getName(), value);
                        }
                    }

                    final String serverSideEncryption = context.getProperty(SERVER_SIDE_ENCRYPTION).getValue();
                    if (!serverSideEncryption.equals(NO_SERVER_SIDE_ENCRYPTION)) {
                        objectMetadata.setSSEAlgorithm(serverSideEncryption);
                        attributes.put(S3_SSE_ALGORITHM, serverSideEncryption);
                    }

                    if (!userMetadata.isEmpty()) {
                        objectMetadata.setUserMetadata(userMetadata);
                    }

                    if (ff.getSize() <= multipartThreshold) {
                        //----------------------------------------
                        // single part upload
                        //----------------------------------------
                        final PutObjectRequest request = new PutObjectRequest(bucket, key, in, objectMetadata);
                        request.setStorageClass(
                                StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                        final AccessControlList acl = createACL(context, ff);
                        if (acl != null) {
                            request.setAccessControlList(acl);
                        }
                        final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                        if (cannedAcl != null) {
                            request.withCannedAcl(cannedAcl);
                        }

                        try {
                            final PutObjectResult result = s3.putObject(request);
                            if (result.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, result.getVersionId());
                            }
                            if (result.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, result.getETag());
                            }
                            if (result.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY, result.getExpirationTime().toString());
                            }
                            if (result.getMetadata().getRawMetadata().keySet()
                                    .contains(S3_STORAGECLASS_META_KEY)) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY, result.getMetadata()
                                        .getRawMetadataValue(S3_STORAGECLASS_META_KEY).toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_PUTOBJECT);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    } else {
                        //----------------------------------------
                        // multipart upload
                        //----------------------------------------

                        // load or create persistent state
                        //------------------------------------------------------------
                        MultipartState currentState;
                        try {
                            currentState = getLocalStateIfInS3(s3, bucket, cacheKey);
                            if (currentState != null) {
                                if (currentState.getPartETags().size() > 0) {
                                    final PartETag lastETag = currentState.getPartETags()
                                            .get(currentState.getPartETags().size() - 1);
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' partsLoaded={} lastPart={}/{}",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength(),
                                                    currentState.getPartETags().size(),
                                                    Integer.toString(lastETag.getPartNumber()),
                                                    lastETag.getETag() });
                                } else {
                                    getLogger().info("Resuming upload for flowfile='{}' bucket='{}' key='{}' "
                                            + "uploadID='{}' filePosition='{}' partSize='{}' storageClass='{}' "
                                            + "contentLength='{}' no partsLoaded",
                                            new Object[] { ffFilename, bucket, key, currentState.getUploadId(),
                                                    currentState.getFilePosition(), currentState.getPartSize(),
                                                    currentState.getStorageClass().toString(),
                                                    currentState.getContentLength() });
                                }
                            } else {
                                currentState = new MultipartState();
                                currentState.setPartSize(multipartPartSize);
                                currentState.setStorageClass(
                                        StorageClass.valueOf(context.getProperty(STORAGE_CLASS).getValue()));
                                currentState.setContentLength(ff.getSize());
                                persistLocalState(cacheKey, currentState);
                                getLogger().info("Starting new upload for flowfile='{}' bucket='{}' key='{}'",
                                        new Object[] { ffFilename, bucket, key });
                            }
                        } catch (IOException e) {
                            getLogger().error("IOException initiating cache state while processing flow files: "
                                    + e.getMessage());
                            throw (e);
                        }

                        // initiate multipart upload or find position in file
                        //------------------------------------------------------------
                        if (currentState.getUploadId().isEmpty()) {
                            final InitiateMultipartUploadRequest initiateRequest = new InitiateMultipartUploadRequest(
                                    bucket, key, objectMetadata);
                            initiateRequest.setStorageClass(currentState.getStorageClass());
                            final AccessControlList acl = createACL(context, ff);
                            if (acl != null) {
                                initiateRequest.setAccessControlList(acl);
                            }
                            final CannedAccessControlList cannedAcl = createCannedACL(context, ff);
                            if (cannedAcl != null) {
                                initiateRequest.withCannedACL(cannedAcl);
                            }
                            try {
                                final InitiateMultipartUploadResult initiateResult = s3
                                        .initiateMultipartUpload(initiateRequest);
                                currentState.setUploadId(initiateResult.getUploadId());
                                currentState.getPartETags().clear();
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state while processing flow file: "
                                            + e.getMessage());
                                    throw (new ProcessException("Exception saving cache state", e));
                                }
                                getLogger().info(
                                        "Success initiating upload flowfile={} available={} position={} "
                                                + "length={} bucket={} key={} uploadId={}",
                                        new Object[] { ffFilename, in.available(),
                                                currentState.getFilePosition(), currentState.getContentLength(),
                                                bucket, key, currentState.getUploadId() });
                                if (initiateResult.getUploadId() != null) {
                                    attributes.put(S3_UPLOAD_ID_ATTR_KEY, initiateResult.getUploadId());
                                }
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure initiating upload flowfile={} bucket={} key={} reason={}",
                                        new Object[] { ffFilename, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        } else {
                            if (currentState.getFilePosition() > 0) {
                                try {
                                    final long skipped = in.skip(currentState.getFilePosition());
                                    if (skipped != currentState.getFilePosition()) {
                                        getLogger().info(
                                                "Failure skipping to resume upload flowfile={} "
                                                        + "bucket={} key={} position={} skipped={}",
                                                new Object[] { ffFilename, bucket, key,
                                                        currentState.getFilePosition(), skipped });
                                    }
                                } catch (Exception e) {
                                    getLogger().info(
                                            "Failure skipping to resume upload flowfile={} bucket={} "
                                                    + "key={} position={} reason={}",
                                            new Object[] { ffFilename, bucket, key,
                                                    currentState.getFilePosition(), e.getMessage() });
                                    throw (new ProcessException(e));
                                }
                            }
                        }

                        // upload parts
                        //------------------------------------------------------------
                        long thisPartSize;
                        for (int part = currentState.getPartETags().size() + 1; currentState
                                .getFilePosition() < currentState.getContentLength(); part++) {
                            if (!PutS3Object.this.isScheduled()) {
                                throw new IOException(S3_PROCESS_UNSCHEDULED_MESSAGE + " flowfile=" + ffFilename
                                        + " part=" + part + " uploadId=" + currentState.getUploadId());
                            }
                            thisPartSize = Math.min(currentState.getPartSize(),
                                    (currentState.getContentLength() - currentState.getFilePosition()));
                            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                                    .withKey(key).withUploadId(currentState.getUploadId()).withInputStream(in)
                                    .withPartNumber(part).withPartSize(thisPartSize);
                            try {
                                UploadPartResult uploadPartResult = s3.uploadPart(uploadRequest);
                                currentState.addPartETag(uploadPartResult.getPartETag());
                                currentState.setFilePosition(currentState.getFilePosition() + thisPartSize);
                                try {
                                    persistLocalState(cacheKey, currentState);
                                } catch (Exception e) {
                                    getLogger().info("Exception saving cache state processing flow file: "
                                            + e.getMessage());
                                }
                                getLogger().info(
                                        "Success uploading part flowfile={} part={} available={} "
                                                + "etag={} uploadId={}",
                                        new Object[] { ffFilename, part, in.available(),
                                                uploadPartResult.getETag(), currentState.getUploadId() });
                            } catch (AmazonClientException e) {
                                getLogger().info(
                                        "Failure uploading part flowfile={} part={} bucket={} key={} "
                                                + "reason={}",
                                        new Object[] { ffFilename, part, bucket, key, e.getMessage() });
                                throw (e);
                            }
                        }

                        // complete multipart upload
                        //------------------------------------------------------------
                        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(
                                bucket, key, currentState.getUploadId(), currentState.getPartETags());
                        try {
                            CompleteMultipartUploadResult completeResult = s3
                                    .completeMultipartUpload(completeRequest);
                            getLogger().info("Success completing upload flowfile={} etag={} uploadId={}",
                                    new Object[] { ffFilename, completeResult.getETag(),
                                            currentState.getUploadId() });
                            if (completeResult.getVersionId() != null) {
                                attributes.put(S3_VERSION_ATTR_KEY, completeResult.getVersionId());
                            }
                            if (completeResult.getETag() != null) {
                                attributes.put(S3_ETAG_ATTR_KEY, completeResult.getETag());
                            }
                            if (completeResult.getExpirationTime() != null) {
                                attributes.put(S3_EXPIRATION_ATTR_KEY,
                                        completeResult.getExpirationTime().toString());
                            }
                            if (currentState.getStorageClass() != null) {
                                attributes.put(S3_STORAGECLASS_ATTR_KEY,
                                        currentState.getStorageClass().toString());
                            }
                            if (userMetadata.size() > 0) {
                                StringBuilder userMetaBldr = new StringBuilder();
                                for (String userKey : userMetadata.keySet()) {
                                    userMetaBldr.append(userKey).append("=").append(userMetadata.get(userKey));
                                }
                                attributes.put(S3_USERMETA_ATTR_KEY, userMetaBldr.toString());
                            }
                            attributes.put(S3_API_METHOD_ATTR_KEY, S3_API_METHOD_MULTIPARTUPLOAD);
                        } catch (AmazonClientException e) {
                            getLogger().info("Failure completing upload flowfile={} bucket={} key={} reason={}",
                                    new Object[] { ffFilename, bucket, key, e.getMessage() });
                            throw (e);
                        }
                    }
                }
            }
        });

        if (!attributes.isEmpty()) {
            flowFile = session.putAllAttributes(flowFile, attributes);
        }
        session.transfer(flowFile, REL_SUCCESS);

        final String url = s3.getResourceUrl(bucket, key);
        final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
        session.getProvenanceReporter().send(flowFile, url, millis);

        getLogger().info("Successfully put {} to Amazon S3 in {} milliseconds", new Object[] { ff, millis });
        try {
            removeLocalState(cacheKey);
        } catch (IOException e) {
            getLogger().info("Error trying to delete key {} from cache: {}",
                    new Object[] { cacheKey, e.getMessage() });
        }
    } catch (final ProcessException | AmazonClientException pe) {
        if (pe.getMessage().contains(S3_PROCESS_UNSCHEDULED_MESSAGE)) {
            getLogger().info(pe.getMessage());
            session.rollback();
        } else {
            getLogger().error("Failed to put {} to Amazon S3 due to {}", new Object[] { flowFile, pe });
            flowFile = session.penalize(flowFile);
            session.transfer(flowFile, REL_FAILURE);
        }
    }

}
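
For reference, the multipart branch above reduces to this minimal, hypothetical sketch (s3, bucket, key, in, contentLength, and multipartPartSize are assumed to be defined; state persistence and error handling are omitted):

InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest(bucket, key));
List<PartETag> partETags = new ArrayList<>();
long position = 0;
for (int part = 1; position < contentLength; part++) {
    long partSize = Math.min(multipartPartSize, contentLength - position);
    // Each uploaded part gets its own ETag, which must be kept for completion.
    UploadPartResult partResult = s3.uploadPart(new UploadPartRequest()
            .withBucketName(bucket).withKey(key)
            .withUploadId(init.getUploadId())
            .withInputStream(in)
            .withPartNumber(part).withPartSize(partSize));
    partETags.add(partResult.getPartETag());
    position += partSize;
}
CompleteMultipartUploadResult completeResult = s3.completeMultipartUpload(
        new CompleteMultipartUploadRequest(bucket, key, init.getUploadId(), partETags));
String eTag = completeResult.getETag(); // ETag of the assembled object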

From source file:org.apache.usergrid.services.assets.data.AWSBinaryStore.java

License:Apache License
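
Usergrid's binary store uploads small assets with a single putObject and records the returned ETag in the asset's file metadata; larger assets go through a multipart upload whose CompleteMultipartUploadResult supplies the final ETag.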

@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {

    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);

    byte[] data = baos.toByteArray();

    InputStream awsInputStream = new ByteArrayInputStream(data);

    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());

    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);

    Boolean overSizeLimit = false;

    EntityManager em = emf.getEntityManager(appId);

    if (written < FIVE_MB) { // total smaller than 5mb

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);

        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();

        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);

        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);

        em.update(entity);

    } else { // bigger than 5MB: write 5MB temp files and upload each as a part
        written = 0; // reset written to 0; nothing has actually been written to S3 yet
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();

        //get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName,
                uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);

        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);

        // determine max file size allowed, default to 50mb
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }

        // always allow files up to 5mb
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }

        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");

            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));

                if (isFirstChunck == true) {
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;

                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break;
                    //set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }

            FileInputStream chunk = new FileInputStream(tempFile);

            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }

        //check for flag here then abort.
        if (overSizeLimit) {

            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);

            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);

            //update the entity with the error.
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }

            int timesIterated = 20;
            //loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {

                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    for (MultipartUpload upload : listResult.getMultipartUploads()) {
                        logger.debug("Upload that hasn't been aborted yet: {}", upload.getKey());
                    }
                }

            }
            if (timesIterated == 0) {
                logger.error("Files parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}

From source file:org.apache.usergrid.tools.WarehouseExport.java

License:Apache License
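
This export tool uploads a local file to a freshly created bucket and logs the ETag of the result.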

private void copyToS3(String fileName) {

    String bucketName = (String) properties.get(BUCKET_PROPNAME);
    String accessId = (String) properties.get(ACCESS_ID_PROPNAME);
    String secretKey = (String) properties.get(SECRET_KEY_PROPNAME);

    Properties overrides = new Properties();
    overrides.setProperty("s3" + ".identity", accessId);
    overrides.setProperty("s3" + ".credential", secretKey);

    final Iterable<? extends Module> MODULES = ImmutableSet.of(new JavaUrlHttpCommandExecutorServiceModule(),
            new Log4JLoggingModule(), new NettyPayloadModule());

    AWSCredentials credentials = new BasicAWSCredentials(accessId, secretKey);
    ClientConfiguration clientConfig = new ClientConfiguration();
    clientConfig.setProtocol(Protocol.HTTP);

    AmazonS3Client s3Client = new AmazonS3Client(credentials, clientConfig);

    s3Client.createBucket(bucketName);
    File uploadFile = new File(fileName);
    PutObjectResult putObjectResult = s3Client.putObject(bucketName, uploadFile.getName(), uploadFile);
    logger.info("Uploaded file etag={}", putObjectResult.getETag());
}

From source file:org.dspace.storage.bitstore.S3BitStoreService.java

License:BSD License
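
DSpace stores the returned ETag as the bitstream's checksum.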

/**
 * Store a stream of bits.
 *
 * <p>
 * If this method returns successfully, the bits have been stored.
 * If an exception is thrown, the bits have not been stored.
 * </p>
 *
 * @param in
 *            The stream of bits to store
 * @exception java.io.IOException
 *             If a problem occurs while storing the bits
 *
 * @return Map containing technical metadata (size, checksum, etc)
 */
public void put(Bitstream bitstream, InputStream in) throws IOException {
    String key = getFullKey(bitstream.getInternalId());
    //Copy the input stream to a temp file, then send the file with some metadata
    File scratchFile = File.createTempFile(bitstream.getInternalId(), "s3bs");
    try {
        FileUtils.copyInputStreamToFile(in, scratchFile);
        Long contentLength = Long.valueOf(scratchFile.length());

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, scratchFile);
        PutObjectResult putObjectResult = s3Service.putObject(putObjectRequest);

        bitstream.setSizeBytes(contentLength);
        bitstream.setChecksum(putObjectResult.getETag());
        bitstream.setChecksumAlgorithm(CSA);

        scratchFile.delete();

    } catch (Exception e) {
        log.error("put(" + bitstream.getInternalId() + ", is)", e);
        throw new IOException(e);
    } finally {
        if (scratchFile.exists()) {
            scratchFile.delete();
        }
    }
}

From source file:org.duracloud.s3storage.S3StorageProvider.java

License:Apache License
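
Here the ETag of the newly stored content is returned directly to the caller.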

/**
 * Adds content to a hidden space.
 *
 * @param spaceId         hidden spaceId
 * @param contentId
 * @param contentMimeType
 * @param content
 * @return the ETag returned by S3 for the stored content
 */
public String addHiddenContent(String spaceId, String contentId, String contentMimeType, InputStream content) {
    log.debug("addHiddenContent(" + spaceId + ", " + contentId + ", " + contentMimeType + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    if (contentMimeType == null || contentMimeType.equals("")) {
        contentMimeType = DEFAULT_MIMETYPE;
    }

    ObjectMetadata objMetadata = new ObjectMetadata();
    objMetadata.setContentType(contentMimeType);

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, contentId, content, objMetadata);
    putRequest.setStorageClass(DEFAULT_STORAGE_CLASS);
    putRequest.setCannedAcl(CannedAccessControlList.Private);

    try {
        PutObjectResult putResult = s3Client.putObject(putRequest);
        return putResult.getETag();
    } catch (AmazonClientException e) {
        String err = "Could not add content " + contentId + " with type " + contentMimeType + " to S3 bucket "
                + bucketName + " due to error: " + e.getMessage();
        throw new StorageException(err, e, NO_RETRY);
    }

}

From source file:org.duracloud.s3storage.S3StorageProvider.java

License:Apache License
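
In this variant the returned ETag is compared against an MD5 checksum computed while the content streams in, to verify that the object arrived intact.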

/**
 * {@inheritDoc}
 */
public String addContent(String spaceId, String contentId, String contentMimeType,
        Map<String, String> userProperties, long contentSize, String contentChecksum, InputStream content) {
    log.debug("addContent(" + spaceId + ", " + contentId + ", " + contentMimeType + ", " + contentSize + ", "
            + contentChecksum + ")");

    // Will throw if bucket does not exist
    String bucketName = getBucketName(spaceId);

    // Wrap the content in order to be able to retrieve a checksum
    ChecksumInputStream wrappedContent = new ChecksumInputStream(content, contentChecksum);

    String contentEncoding = removeContentEncoding(userProperties);

    userProperties = removeCalculatedProperties(userProperties);

    if (contentMimeType == null || contentMimeType.equals("")) {
        contentMimeType = DEFAULT_MIMETYPE;
    }

    ObjectMetadata objMetadata = new ObjectMetadata();
    objMetadata.setContentType(contentMimeType);
    if (contentSize > 0) {
        objMetadata.setContentLength(contentSize);
    }
    if (null != contentChecksum && !contentChecksum.isEmpty()) {
        String encodedChecksum = ChecksumUtil.convertToBase64Encoding(contentChecksum);
        objMetadata.setContentMD5(encodedChecksum);
    }

    if (contentEncoding != null) {
        objMetadata.setContentEncoding(contentEncoding);
    }

    if (userProperties != null) {
        for (String key : userProperties.keySet()) {
            String value = userProperties.get(key);

            if (log.isDebugEnabled()) {
                log.debug("[" + key + "|" + value + "]");
            }

            objMetadata.addUserMetadata(getSpaceFree(encodeHeaderKey(key)), encodeHeaderValue(value));
        }
    }

    PutObjectRequest putRequest = new PutObjectRequest(bucketName, contentId, wrappedContent, objMetadata);
    putRequest.setStorageClass(DEFAULT_STORAGE_CLASS);
    putRequest.setCannedAcl(CannedAccessControlList.Private);

    // Add the object
    String etag;
    try {
        PutObjectResult putResult = s3Client.putObject(putRequest);
        etag = putResult.getETag();
    } catch (AmazonClientException e) {
        if (e instanceof AmazonS3Exception) {
            AmazonS3Exception s3Ex = (AmazonS3Exception) e;
            String errorCode = s3Ex.getErrorCode();
            Integer statusCode = s3Ex.getStatusCode();
            String message = MessageFormat.format(
                    "exception putting object {0} into {1}: errorCode={2},"
                            + "  statusCode={3}, errorMessage={4}",
                    contentId, bucketName, errorCode, statusCode, e.getMessage());

            if (errorCode.equals("InvalidDigest") || errorCode.equals("BadDigest")) {
                log.error(message, e);

                String err = "Checksum mismatch detected attempting to add " + "content " + contentId
                        + " to S3 bucket " + bucketName + ". Content was not added.";
                throw new ChecksumMismatchException(err, e, NO_RETRY);
            } else if (errorCode.equals("IncompleteBody")) {
                log.error(message, e);
                throw new StorageException("The content body was incomplete for " + contentId + " to S3 bucket "
                        + bucketName + ". Content was not added.", e, NO_RETRY);
            } else if (!statusCode.equals(HttpStatus.SC_SERVICE_UNAVAILABLE)
                    && !statusCode.equals(HttpStatus.SC_NOT_FOUND)) {
                log.error(message, e);
            } else {
                log.warn(message, e);
            }
        } else {
            String err = MessageFormat.format("exception putting object {0} into {1}: {2}", contentId,
                    bucketName, e.getMessage());
            log.error(err, e);
        }

        // Check to see if file landed successfully in S3, despite the exception
        etag = doesContentExistWithExpectedChecksum(bucketName, contentId, contentChecksum);
        if (null == etag) {
            String err = "Could not add content " + contentId + " with type " + contentMimeType + " and size "
                    + contentSize + " to S3 bucket " + bucketName + " due to error: " + e.getMessage();
            throw new StorageException(err, e, NO_RETRY);
        }
    }

    // Compare checksum
    String providerChecksum = getETagValue(etag);
    String checksum = wrappedContent.getMD5();
    StorageProviderUtil.compareChecksum(providerChecksum, spaceId, contentId, checksum);
    return providerChecksum;
}
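
One caveat when treating getETag() as a checksum, as several of the examples above do: for a single-part PUT without SSE-KMS encryption, the ETag is the hex-encoded MD5 digest of the object data, but for multipart uploads it is derived from the per-part digests and carries a "-N" suffix, so it no longer matches the MD5 of the whole object. A minimal, hypothetical verification sketch (assumes commons-codec's DigestUtils and a data byte array):

// Valid only for single-part, non-KMS uploads; multipart ETags contain "-".
String localMd5 = DigestUtils.md5Hex(data);
String eTag = putObjectResult.getETag().replace("\"", "");
boolean intact = !eTag.contains("-") && eTag.equalsIgnoreCase(localMd5);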