Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
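
Below is a minimal sketch of the typical pattern, assuming an existing AmazonS3 client and a bucket and key of your choosing (the class and method names are illustrative): set the content length on the ObjectMetadata before uploading from an InputStream, so the SDK knows the object size up front instead of buffering the stream to compute it.

import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class SetContentLengthExample {
    // Upload a local file as a stream with an explicit Content-Length.
    public static void upload(AmazonS3 s3, String bucket, String key, File file) throws Exception {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(file.length()); // size of the object in bytes
        try (InputStream in = new FileInputStream(file)) {
            s3.putObject(bucket, key, in, metadata);
        }
    }
}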

Usage

From source file: com.github.jramos.snowplow.RedshiftSinkEmitter.java

License: Apache License

private List<byte[]> emitToS3(final UnmodifiableBuffer<byte[]> buffer) throws IOException {
    List<byte[]> records = buffer.getRecords();

    // Get the S3 filename (used for target bucket and local temp)
    String s3FileName = getS3FileName(buffer.getFirstSequenceNumber(), buffer.getLastSequenceNumber());

    File localS3File = new File(tempDir, s3FileName);
    if (localS3File.exists()) {
        localS3File.delete();
    }

    // write all records to local temp file
    OutputStream out = null;
    try {
        out = new BufferedOutputStream(new FileOutputStream(localS3File));
        for (byte[] record : records) {
            out.write(record, 0, record.length);
        }
        out.flush();
    } catch (IOException ioe) {
        LOG.error(ioe);
        return buffer.getRecords();
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException ioe) {
            }
        }
    }

    // now stream to s3
    ObjectMetadata metaData = new ObjectMetadata();
    metaData.setContentLength(localS3File.length());

    InputStream in = null;
    try {
        in = new FileInputStream(localS3File);

        s3client.putObject(s3Bucket, s3FileName, in, metaData);
        LOG.info("Successfully emitted " + buffer.getRecords().size() + " records to S3 in s3://" + s3Bucket
                + "/" + s3FileName);

        // delete the local temp
        localS3File.delete();

        return Collections.emptyList();
    } catch (AmazonServiceException e) {
        LOG.error(e);
        return buffer.getRecords();
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException ioe) {
            }
        }
    }
}

From source file: com.greglturnquist.springagram.fileservice.s3.FileService.java

License: Apache License

public void saveFile(InputStream input, long length, String filename) throws IOException {

    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(length);
        this.s3Client.putObject(this.bucket, filename, input, metadata);
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == 301) {
            updateEndpoint(e);
            saveFile(input, length, filename);
        }
    }
}

From source file: com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License: MIT License

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("storage={}", storage);
    logger.debug("binary={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);
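        // Up to BINARY_PART_SIZE_5MB bytes were buffered from the stream above; if the whole
        // object fit in that buffer, a single putObject is enough, otherwise fall back to a
        // multipart upload below.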

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024;
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file: com.hpe.caf.worker.datastore.s3.S3DataStore.java

License: Apache License

private String store(InputStream inputStream, String partialReference, Long length) throws DataStoreException {
    try {
        String fullReference = partialReference + UUID.randomUUID().toString();

        ObjectMetadata objectMetadata = new ObjectMetadata();
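        // Set the length only when it is known; with it the SDK can stream the upload,
        // without it the client may have to buffer the stream to determine the size.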
        if (length != null) {
            objectMetadata.setContentLength(length);
        }

        TransferManager transferManager = new TransferManager(amazonS3Client);
        Upload upload = transferManager.upload(bucketName, fullReference, inputStream, objectMetadata);

        upload.waitForCompletion();
        //            amazonS3Client.putObject(bucketName, fullReference, inputStream, objectMetadata);

        transferManager.shutdownNow(false);
        return fullReference;
    } catch (Exception ex) {
        errors.incrementAndGet();
        throw new DataStoreException("Could not store input stream.", ex);
    }
}

From source file: com.ibm.stocator.fs.cos.COSAPIClient.java

License: Apache License

@Override
public FSDataOutputStream createObject(String objName, String contentType, Map<String, String> metadata,
        Statistics statistics) throws IOException {
    try {
        String objNameWithoutBuket = objName;
        if (objName.startsWith(mBucket + "/")) {
            objNameWithoutBuket = objName.substring(mBucket.length() + 1);
        }
        if (blockUploadEnabled) {
            return new FSDataOutputStream(new COSBlockOutputStream(this, objNameWithoutBuket,
                    new SemaphoredDelegatingExecutor(threadPoolExecutor, blockOutputActiveBlocks, true),
                    partSize, blockFactory, contentType, new WriteOperationHelper(objNameWithoutBuket),
                    metadata), null);
        }

        if (!contentType.equals(Constants.APPLICATION_DIRECTORY)) {
            return new FSDataOutputStream(
                    new COSOutputStream(mBucket, objName, mClient, contentType, metadata, transfers, this),
                    statistics);
        } else {
            final InputStream im = new InputStream() {
                @Override
                public int read() throws IOException {
                    return -1;
                }
            };
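            // Directory markers are written as empty objects, so the Content-Length is 0.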
            final ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(0L);
            om.setContentType(contentType);
            om.setUserMetadata(metadata);
            // Remove the bucket name prefix from key path
            if (objName.startsWith(mBucket + "/")) {
                objName = objName.substring(mBucket.length() + 1);
            }
            /*
            if (!objName.endsWith("/")) {
              objName = objName + "/";
            }*/
            LOG.debug("bucket: {}, key {}", mBucket, objName);
            PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objName, im, om);
            Upload upload = transfers.upload(putObjectRequest);
            upload.waitForUploadResult();
            OutputStream fakeStream = new OutputStream() {

                @Override
                public void write(int b) throws IOException {
                }

                @Override
                public void close() throws IOException {
                    super.close();
                }
            };
            return new FSDataOutputStream(fakeStream, statistics);
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted creating " + objName);
    } catch (IOException e) {
        LOG.error(e.getMessage());
        throw e;
    }
}

From source file: com.ibm.stocator.fs.cos.COSOutputStream.java

License: Apache License

@Override
public void close() throws IOException {
    if (closed.getAndSet(true)) {
        return;
    }
    mBackupOutputStream.close();
    LOG.debug("OutputStream for key '{}' closed. Now beginning upload", mKey);
    try {
        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(mBackupFile.length());
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);

        PutObjectRequest putObjectRequest = new PutObjectRequest(mBucketName, mKey, mBackupFile);
        putObjectRequest.setMetadata(om);

        Upload upload = transfers.upload(putObjectRequest);

        upload.waitForUploadResult();
    } catch (InterruptedException e) {
        throw (InterruptedIOException) new InterruptedIOException(e.toString()).initCause(e);
    } catch (AmazonClientException e) {
        throw new IOException(String.format("saving output %s %s", mKey, e));
    } finally {
        if (!mBackupFile.delete()) {
            LOG.warn("Could not delete temporary cos file: {}", mBackupOutputStream);
        }
        super.close();
    }
    LOG.debug("OutputStream for key '{}' upload complete", mKey);
}

From source file: com.igeekinc.indelible.indeliblefs.uniblock.casstore.s3.S3CASStore.java

License: Open Source License

@Override
public void storeSegment(CASIDDataDescriptor segmentDescriptor) throws IOException {
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(segmentDescriptor.getLength());
    PutObjectResult result = s3Client.putObject(storeID.toString(),
            segmentDescriptor.getCASIdentifier().toString(), segmentDescriptor.getInputStream(), metadata);
    // If we get this far we're golden
}

From source file: com.indeed.imhotep.iql.cache.S3QueryCache.java

License: Apache License

@Override
public OutputStream getOutputStream(final String cachedFileName) throws IOException {
    if (!enabled) {
        throw new IllegalStateException("Can't send data to S3 cache as it is disabled");
    }

    final ByteArrayOutputStream os = new ByteArrayOutputStream();
    // Wrap the returned OutputStream so that we can write to buffer and do actual write on close()
    return new OutputStream() {
        private boolean closed = false;

        @Override
        public void write(byte[] b) throws IOException {
            os.write(b);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            os.write(b, off, len);
        }

        @Override
        public void flush() throws IOException {
            os.flush();
        }

        @Override
        public void write(int b) throws IOException {
            os.write(b);
        }

        @Override
        public void close() throws IOException {
            if (closed) {
                return;
            }
            closed = true;
            os.close();

            // do actual write
            byte[] csvData = os.toByteArray();
            ByteArrayInputStream is = new ByteArrayInputStream(csvData);
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(csvData.length);
            client.putObject(bucket, cachedFileName, is, metadata);
        }
    };
}

From source file: com.kittypad.music.game.util.S3StorageManager.java

License: Open Source License

/**
 * Stores a given item on S3
 * @param obj the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(MusicItem obj, byte[] data, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    //checkForAndCreateBucket(this.bucketName);
    String key = obj.getUUID() + obj.getMusicName() + "." + obj.getType();
    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(mimeType);
    omd.setContentLength(obj.getSize());
    ByteArrayInputStream is = new ByteArrayInputStream(data);
    PutObjectRequest request = new PutObjectRequest(bucketName, key, is, omd);
    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }
    s3client.putObject(request);
    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3client.setObjectAcl(bucketName, key, acl);
    }
}

From source file: com.lithium.flow.filer.S3Filer.java

License: Apache License

@Override
@Nonnull
public OutputStream writeFile(@Nonnull String path) throws IOException {
    String key = path.substring(1);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    List<Future<PartETag>> futureTags = new ArrayList<>();
    Lazy<String> uploadId = new Lazy<>(
            () -> s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key)).getUploadId());

    return new OutputStream() {
        @Override
        public void write(int b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b) throws IOException {
            baos.write(b);
            flip(partSize);
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            baos.write(b, off, len);
            flip(partSize);
        }

        @Override
        public void close() throws IOException {
            if (futureTags.size() == 0) {
                InputStream in = new ByteArrayInputStream(baos.toByteArray());
                ObjectMetadata metadata = new ObjectMetadata();
                metadata.setContentLength(baos.size());
                s3.putObject(bucket, key, in, metadata);
            } else {
                flip(1);

                List<PartETag> tags = Lists.newArrayList();
                for (Future<PartETag> futureTag : futureTags) {
                    try {
                        tags.add(futureTag.get());
                    } catch (Exception e) {
                        s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId.get()));
                        throw new IOException("failed to upload: " + path, e);
                    }
                }

                s3.completeMultipartUpload(
                        new CompleteMultipartUploadRequest(bucket, key, uploadId.get(), tags));
            }
        }

        private void flip(long minSize) throws IOException {
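            // Once the buffered bytes reach minSize, spill them to a temp file and submit the
            // file to the executor as the next part of the multipart upload.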
            if (baos.size() < minSize) {
                return;
            }

            File file = new File(tempDir, UUID.randomUUID().toString());
            file.deleteOnExit();

            OutputStream out = new FileOutputStream(file);
            out.write(baos.toByteArray());
            out.close();

            baos.reset();

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(uploadId.get())
                    .withBucketName(bucket).withKey(key).withPartNumber(futureTags.size() + 1)
                    .withPartSize(file.length()).withFile(file);

            futureTags.add(service.submit(() -> {
                try {
                    return s3.uploadPart(uploadRequest).getPartETag();
                } finally {
                    file.delete();
                }
            }));
        }
    };
}