Example usage for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

Introduction

On this page you can find example usage for the com.amazonaws.services.s3.model.ObjectMetadata no-argument constructor, ObjectMetadata().

Prototype

public ObjectMetadata() 
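
For orientation, here is a minimal, self-contained sketch of the no-argument constructor in use, along the lines of the examples below. The bucket name, key, and builder-based client setup are illustrative assumptions (AWS SDK for Java 1.11+ with credentials available in the environment), not taken from any example on this page:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class ObjectMetadataExample {
    public static void main(String[] args) {
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

        // Create empty metadata, then populate the fields S3 should store
        // alongside the object. Setting the content length up front lets the
        // SDK stream the upload without buffering it to compute the size.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        metadata.setContentType("text/plain");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject("example-bucket", "example-key",
                new ByteArrayInputStream(payload), metadata);
    }
}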

Usage

From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java

License:Open Source License

@Override
public PutObjectResult createDirectory(final String bucketName, final String dirName,
        final boolean isPublicAccessible) throws AmazonClientException, AmazonServiceException {
    LOGGER.info("createDirectory invoked, bucketName: {}, dirName: {} and isPublicAccessible: {}", bucketName,
            dirName, isPublicAccessible);
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    // Create empty content, since creating an empty folder requires an empty content body
    final InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
    // Create a PutObjectRequest passing the directory name suffixed by '/'
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
            dirName + AWSUtilConstants.SEPARATOR, emptyContent, metadata);
    if (isPublicAccessible) {
        putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    }
    return s3client.putObject(putObjectRequest);
}

From source file:com.github.abhinavmishra14.aws.s3.service.impl.AwsS3IamServiceImpl.java

License:Open Source License

@Override
public boolean hasWritePermissionOnBucket(final String bucketName) {
    LOGGER.info("Checking bucket write permission..");
    boolean hasWritePermissions = false;
    final ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    // Create empty content
    final InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
    // Create a PutObjectRequest with test object
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName,
            AWSUtilConstants.SAMPLE_FILE_NAME, emptyContent, metadata);
    try {
        if (s3client.putObject(putObjectRequest) != null) {
            LOGGER.info("Permissions validated!");
            // User has write permissions; test passed.
            hasWritePermissions = true;
            //Delete the test object
            deleteObject(bucketName, AWSUtilConstants.SAMPLE_FILE_NAME);
        }
    } catch (AmazonClientException s3Ex) {
        LOGGER.warn("Write permissions not available: {}", s3Ex.getMessage());
    }
    return hasWritePermissions;
}

From source file:com.github.jramos.snowplow.RedshiftSinkEmitter.java

License:Apache License

private List<byte[]> emitToS3(final UnmodifiableBuffer<byte[]> buffer) throws IOException {
    List<byte[]> records = buffer.getRecords();

    // Get the S3 filename (used for target bucket and local temp)
    String s3FileName = getS3FileName(buffer.getFirstSequenceNumber(), buffer.getLastSequenceNumber());

    File localS3File = new File(tempDir, s3FileName);
    if (localS3File.exists()) {
        localS3File.delete();
    }

    // write all records to local temp file
    OutputStream out = null;
    try {
        out = new BufferedOutputStream(new FileOutputStream(localS3File));
        for (byte[] record : records) {
            out.write(record, 0, record.length);
        }
        out.flush();
    } catch (IOException ioe) {
        LOG.error(ioe);
        return buffer.getRecords();
    } finally {
        if (out != null) {
            try {
                out.close();
            } catch (IOException ioe) {
                // Ignore failures while closing the output stream
            }
        }
    }

    // now stream to s3
    ObjectMetadata metaData = new ObjectMetadata();
    metaData.setContentLength(localS3File.length());

    InputStream in = null;
    try {
        in = new FileInputStream(localS3File);

        s3client.putObject(s3Bucket, s3FileName, in, metaData);
        LOG.info("Successfully emitted " + buffer.getRecords().size() + " records to S3 in s3://" + s3Bucket
                + "/" + s3FileName);

        // delete the local temp
        localS3File.delete();

        return Collections.emptyList();
    } catch (AmazonServiceException e) {
        LOG.error(e);
        return buffer.getRecords();
    } finally {
        if (in != null) {
            try {
                in.close();
            } catch (IOException ioe) {
                // Ignore failures while closing the input stream
            }
        }
    }
}

From source file:com.github.rholder.esthree.command.Put.java

License:Apache License

@Override
public Integer call() throws Exception {
    TransferManager t = new TransferManager(amazonS3Client);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadata);

    Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata));

    // TODO this listener spews out garbage >100% on a retry, add a test to verify
    if (progressListener != null) {
        progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress()));
        u.addProgressListener(progressListener);
    }
    try {
        u.waitForCompletion();
    } finally {
        t.shutdownNow();
    }
    return 0;
}

From source file:com.greglturnquist.springagram.fileservice.s3.FileService.java

License:Apache License

public void saveFile(InputStream input, long length, String filename) throws IOException {

    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(length);
        this.s3Client.putObject(this.bucket, filename, input, metadata);
    } catch (AmazonS3Exception e) {
        if (e.getStatusCode() == 301) {
            updateEndpoint(e);
            saveFile(input, length, filename);
        } else {
            // Rethrow anything that is not a redirect to another endpoint
            throw e;
        }
    }
}

From source file:com.gst.infrastructure.documentmanagement.contentrepository.S3ContentRepository.java

License:Apache License

private void uploadDocument(final String filename, final InputStream inputStream, final String s3UploadLocation)
        throws ContentManagementException {
    try {
        logger.info("Uploading a new object to S3 from a file to " + s3UploadLocation);
        this.s3Client.putObject(
                new PutObjectRequest(this.s3BucketName, s3UploadLocation, inputStream, new ObjectMetadata()));
    } catch (final AmazonClientException ace) {
        final String message = ace.getMessage();
        throw new ContentManagementException(filename, message);
    }
}

From source file:com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License:MIT License

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload with the object metadata prepared above
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunk = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunk = new ByteArrayInputStream(data);
            PushbackInputStream chunkableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024L; // 5GB; the long literal guards against int overflow
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunkableInputStream.read())) {
                long partSize = 0;
                chunkableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunk) {
                        partSize = IOUtils.copyLarge(firstChunk, os, 0, BINARY_PART_SIZE_5MB);
                        isFirstChunk = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunkableInputStream, os, 0, BINARY_PART_SIZE_5MB);
                    }
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                boolean isLastPart = -1 == (firstByte = chunkableInputStream.read());
                if (!isLastPart) {
                    chunkableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
                // Ignore failures while closing the input stream
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file:com.hpe.caf.worker.datastore.s3.S3DataStore.java

License:Apache License

private String store(InputStream inputStream, String partialReference, Long length) throws DataStoreException {
    try {
        String fullReference = partialReference + UUID.randomUUID().toString();

        ObjectMetadata objectMetadata = new ObjectMetadata();
        if (length != null) {
            objectMetadata.setContentLength(length);
        }

        TransferManager transferManager = new TransferManager(amazonS3Client);
        Upload upload = transferManager.upload(bucketName, fullReference, inputStream, objectMetadata);

        upload.waitForCompletion();
        // Alternatively: amazonS3Client.putObject(bucketName, fullReference, inputStream, objectMetadata);

        transferManager.shutdownNow(false);
        return fullReference;
    } catch (Exception ex) {
        errors.incrementAndGet();
        throw new DataStoreException("Could not store input stream.", ex);
    }
}

From source file:com.ibm.stocator.fs.cos.COSAPIClient.java

License:Apache License

@Override
public FSDataOutputStream createObject(String objName, String contentType, Map<String, String> metadata,
        Statistics statistics) throws IOException {
    try {
        String objNameWithoutBucket = objName;
        if (objName.startsWith(mBucket + "/")) {
            objNameWithoutBucket = objName.substring(mBucket.length() + 1);
        }
        if (blockUploadEnabled) {
            return new FSDataOutputStream(new COSBlockOutputStream(this, objNameWithoutBucket,
                    new SemaphoredDelegatingExecutor(threadPoolExecutor, blockOutputActiveBlocks, true),
                    partSize, blockFactory, contentType, new WriteOperationHelper(objNameWithoutBucket),
                    metadata), null);
        }
        }

        if (!contentType.equals(Constants.APPLICATION_DIRECTORY)) {
            return new FSDataOutputStream(
                    new COSOutputStream(mBucket, objName, mClient, contentType, metadata, transfers, this),
                    statistics);
        } else {
            final InputStream im = new InputStream() {
                @Override
                public int read() throws IOException {
                    return -1;
                }
            };
            final ObjectMetadata om = new ObjectMetadata();
            om.setContentLength(0L);
            om.setContentType(contentType);
            om.setUserMetadata(metadata);
            // Remove the bucket name prefix from key path
            if (objName.startsWith(mBucket + "/")) {
                objName = objName.substring(mBucket.length() + 1);
            }
            /*
            if (!objName.endsWith("/")) {
              objName = objName + "/";
            }*/
            LOG.debug("bucket: {}, key {}", mBucket, objName);
            PutObjectRequest putObjectRequest = new PutObjectRequest(mBucket, objName, im, om);
            Upload upload = transfers.upload(putObjectRequest);
            upload.waitForUploadResult();
            OutputStream fakeStream = new OutputStream() {

                @Override
                public void write(int b) throws IOException {
                }

                @Override
                public void close() throws IOException {
                    super.close();
                }
            };
            return new FSDataOutputStream(fakeStream, statistics);
        }
    } catch (InterruptedException e) {
        throw new InterruptedIOException("Interrupted creating " + objName);
    } catch (IOException e) {
        LOG.error(e.getMessage());
        throw e;
    }
}

From source file:com.ibm.stocator.fs.cos.COSBlockOutputStream.java

License:Apache License

/**
 * Upload the current block as a single PUT request; if the buffer is empty a
 * 0-byte PUT will be invoked, as it is needed to create an entry at the far
 * end.
 *
 * @throws IOException any problem
 */
private void putObject() throws IOException {
    LOG.debug("Executing regular upload for {}", writeOperationHelper);

    final COSDataBlocks.DataBlock block = getActiveBlock();
    int size = block.dataSize();
    final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
    final PutObjectRequest putObjectRequest = uploadData.hasFile()
            ? writeOperationHelper.newPutRequest(uploadData.getFile())
            : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);

    final ObjectMetadata om = new ObjectMetadata();
    om.setUserMetadata(mMetadata);
    if (contentType != null && !contentType.isEmpty()) {
        om.setContentType(contentType);
    } else {
        om.setContentType("application/octet-stream");
    }
    putObjectRequest.setMetadata(om);
    ListenableFuture<PutObjectResult> putObjectResult = executorService.submit(new Callable<PutObjectResult>() {
        @Override
        public PutObjectResult call() throws Exception {
            PutObjectResult result;
            try {
                // the putObject call automatically closes the input
                // stream afterwards.
                result = writeOperationHelper.putObject(putObjectRequest);
            } finally {
                closeAll(LOG, uploadData, block);
            }
            return result;
        }
    });
    clearActiveBlock();
    // wait for completion
    try {
        putObjectResult.get();
    } catch (InterruptedException ie) {
        LOG.warn("Interrupted object upload", ie);
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        throw extractException("regular upload", key, ee);
    }
}