Example usage for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata

Introduction

On this page you can find example usages of the com.amazonaws.services.s3.model.ObjectMetadata no-argument constructor, ObjectMetadata().

Prototype

public ObjectMetadata() 
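
Before the per-project examples below, here is a minimal, self-contained sketch of the constructor in use. The bucket and key names are placeholders, and the client is built with the SDK's default credential and region resolution:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ObjectMetadataExample {
    public static void main(String[] args) {
        // Placeholder names for illustration only
        String bucket = "my-bucket";
        String key = "hello.txt";

        byte[] payload = "Hello, S3!".getBytes(StandardCharsets.UTF_8);
        InputStream in = new ByteArrayInputStream(payload);

        // ObjectMetadata() starts out empty; set the content length up front so
        // the SDK does not have to buffer the whole stream to compute it.
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(payload.length);
        metadata.setContentType("text/plain");

        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        s3.putObject(new PutObjectRequest(bucket, key, in, metadata));
    }
}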

Usage

From source file:com.rathravane.clerk.impl.s3.S3IamDb.java

License:Apache License

void storeObject(String key, JSONObject o) throws IamSvcException {
    try {
        fCache.put(key, JsonUtil.clone(o));

        // Content length must be the byte length; for multi-byte UTF-8
        // characters the string length would understate the payload size.
        final byte[] data = o.toString().getBytes("UTF-8");
        final InputStream is = new ByteArrayInputStream(data);
        final long length = data.length;

        final ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(length);
        om.setContentType("application/json");
        fDb.putObject(new PutObjectRequest(fBucketId, key, is, om));
    } catch (AmazonS3Exception x) {
        throw new IamSvcException(x);
    } catch (UnsupportedEncodingException e) {
        throw new IamSvcException(e);
    }
}

From source file:com.sangupta.urn.service.impl.AmazonS3UrnStorageServiceImpl.java

License:Apache License

@Override
protected String save(UrnObject urnObject) {
    InputStream stream = new ByteArrayInputStream(urnObject.bytes);

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(urnObject.bytes.length);

    if (AssertUtils.isNotEmpty(urnObject.name)) {
        metadata.setContentDisposition("filename=" + urnObject.name);
    }

    if (AssertUtils.isNotEmpty(urnObject.mime)) {
        metadata.setContentType(urnObject.mime);
    }

    if (urnObject.expiry > 0) {
        metadata.setHttpExpiresDate(new Date(urnObject.expiry));
    }

    PutObjectResult result = this.client.putObject(this.bucketName, urnObject.key, stream, metadata);
    if (result == null) {
        return null;
    }

    return this.client.getResourceUrl(this.bucketName, urnObject.key);
}

From source file:com.shareplaylearn.models.UserItemManager.java

License:Open Source License

private ObjectMetadata makeBasicMetadata(int bufferLength, boolean isPublic, String itemName) {
    ObjectMetadata fileMetadata = new ObjectMetadata();
    // APPLICATION_OCTET_STREAM is a media type, so it belongs in Content-Type, not Content-Encoding.
    fileMetadata.setContentType(MediaType.APPLICATION_OCTET_STREAM);
    if (isPublic) {
        fileMetadata.addUserMetadata(UploadMetadataFields.PUBLIC, UploadMetadataFields.TRUE_VALUE);
    } else {
        fileMetadata.addUserMetadata(UploadMetadataFields.PUBLIC, UploadMetadataFields.FALSE_VALUE);
    }
    fileMetadata.addUserMetadata(UploadMetadataFields.DISPLAY_NAME, itemName);
    fileMetadata.setContentLength(bufferLength);
    return fileMetadata;
}
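
A hypothetical use of this helper when uploading a buffer (the client, bucket, key, and contents names are placeholders, not part of the original class):

byte[] buffer = contents.getBytes(StandardCharsets.UTF_8);
ObjectMetadata md = makeBasicMetadata(buffer.length, true, "my-item");
s3Client.putObject(bucketName, itemKey, new ByteArrayInputStream(buffer), md);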

From source file:com.streamsets.datacollector.bundles.SupportBundleManager.java

License:Apache License

/**
 * Instead of providing the support bundle directly to the user, upload it to StreamSets backend services.
 */
public void uploadNewBundleFromInstances(List<BundleContentGenerator> generators, BundleType bundleType)
        throws IOException {
    // Generate bundle
    SupportBundle bundle = generateNewBundleFromInstances(generators, bundleType);

    boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
    String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
    String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
    String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
    int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

    if (!enabled) {
        throw new IOException("Uploading support bundles was disabled by administrator.");
    }

    AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(
            new BasicAWSCredentials(accessKey, secretKey));
    AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
    s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
    s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

    // Object Metadata
    ObjectMetadata s3Metadata = new ObjectMetadata();
    for (Map.Entry<Object, Object> entry : getMetadata(bundleType).entrySet()) {
        s3Metadata.addUserMetadata((String) entry.getKey(), (String) entry.getValue());
    }

    List<PartETag> partETags;
    InitiateMultipartUploadResult initResponse = null;
    try {
        // Uploading part by part
        LOG.info("Initiating multi-part support bundle upload");
        partETags = new ArrayList<>();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket,
                bundle.getBundleKey());
        initRequest.setObjectMetadata(s3Metadata);
        initResponse = s3Client.initiateMultipartUpload(initRequest);
    } catch (AmazonClientException e) {
        LOG.error("Support bundle upload failed: ", e);
        throw new IOException("Support bundle upload failed", e);
    }

    try {
        byte[] buffer = new byte[bufferSize];
        int partId = 1;
        int size = -1;
        while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
            LOG.debug("Uploading part {} of size {}", partId, size);
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(bucket)
                    .withKey(bundle.getBundleKey()).withUploadId(initResponse.getUploadId())
                    .withPartNumber(partId++).withInputStream(new ByteArrayInputStream(buffer, 0, size))
                    .withPartSize(size);

            partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
        }

        CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucket,
                bundle.getBundleKey(), initResponse.getUploadId(), partETags);

        s3Client.completeMultipartUpload(compRequest);
        LOG.info("Support bundle upload finished");
    } catch (Exception e) {
        LOG.error("Support bundle upload failed", e);
        s3Client.abortMultipartUpload(
                new AbortMultipartUploadRequest(bucket, bundle.getBundleKey(), initResponse.getUploadId()));

        throw new IOException("Can't upload support bundle", e);
    } finally {
        // Close the client
        s3Client.shutdown();
    }
}
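
The upload loop above relies on a readFully helper that is not part of this excerpt. A plausible sketch of such a method (an assumption about the project's code, not a copy of it) is:

// Fill the buffer as far as possible, looping over short reads; return the
// number of bytes read, or -1 once the stream is exhausted. This matches the
// call site above, which treats -1 as end-of-input.
private static int readFully(InputStream is, byte[] buffer) throws IOException {
    int total = 0;
    while (total < buffer.length) {
        int read = is.read(buffer, total, buffer.length - total);
        if (read == -1) {
            break;
        }
        total += read;
    }
    return total == 0 ? -1 : total;
}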

From source file:com.streamsets.pipeline.lib.aws.s3.S3Accessor.java

License:Apache License

public EncryptionMetadataBuilder createEncryptionMetadataBuilder() {
    return () -> {
        ObjectMetadata metadata = null;
        if (sseConfigs != null) {
            switch (sseConfigs.getEncryption()) {
            case NONE:
                metadata = null;
                break;
            case S3:
                metadata = new ObjectMetadata();
                metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
                break;
            case KMS:
                metadata = new ObjectMetadata();
                metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
                metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
                        sseConfigs.getKmsKeyId().get());
                metadata.setHeader("x-amz-server-side-encryption-context",
                        sseConfigs.getEncryptionContext().entrySet().stream().collect(
                                Collectors.toMap(e -> e.getKey(), e -> Caller.call(() -> e.getValue().get()))));
                break;
            case CUSTOMER:
                metadata = new ObjectMetadata();
                metadata.setSSECustomerAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
                metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
                        sseConfigs.getCustomerKey().get());
                metadata.setHeader(Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
                        sseConfigs.getCustomerKeyMd5().get());
                break;
            default:
                throw new IllegalArgumentException(
                        String.format("Invalid encryption option '%s'", sseConfigs.getEncryption()));
            }
        }
        return metadata;
    };
}
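
A hypothetical call site for this builder might look as follows; it assumes the functional interface's single method is named build() and that s3Client, bucket, key, and inputStream are in scope:

// Sketch only: attach the SSE metadata (when present) to an upload.
ObjectMetadata sseMetadata = createEncryptionMetadataBuilder().build();
// PutObjectRequest accepts a null ObjectMetadata, so the NONE case needs no
// special handling here.
s3Client.putObject(new PutObjectRequest(bucket, key, inputStream, sseMetadata));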

From source file:com.streamsets.pipeline.stage.destination.s3.FileHelper.java

License:Apache License

protected ObjectMetadata getObjectMetadata() throws StageException {
    ObjectMetadata metadata = null;
    if (s3TargetConfigBean.sseConfig.useSSE) {
        metadata = new ObjectMetadata();
        switch (s3TargetConfigBean.sseConfig.encryption) {
        case S3:
            metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            break;
        case KMS:
            metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
            metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID,
                    s3TargetConfigBean.sseConfig.kmsKeyId.get());
            if (!s3TargetConfigBean.sseConfig.encryptionContext.isEmpty()) {
                metadata.setHeader("x-amz-server-side-encryption-context",
                        s3TargetConfigBean.sseConfig.resolveEncryptionContext());
            }
            break;
        case CUSTOMER:
            metadata.setSSECustomerAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
            metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
                    s3TargetConfigBean.sseConfig.customerKey.get());
            metadata.setHeader(Headers.COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
                    s3TargetConfigBean.sseConfig.customerKeyMd5.get());
            break;
        default:
            throw new IllegalStateException(
                    Utils.format("Unknown encryption option: '{}'", s3TargetConfigBean.sseConfig.encryption));
        }
    }
    return metadata;
}

From source file:com.streamsets.pipeline.stage.destination.s3.WholeFileHelper.java

License:Apache License

@Override
public List<UploadMetadata> handle(Iterator<Record> recordIterator, String bucket, String keyPrefix)
        throws IOException, StageException {
    List<UploadMetadata> uploads = new ArrayList<>();
    //Only one record per batch if whole file
    if (recordIterator.hasNext()) {
        Record record = recordIterator.next();
        try {
            try {
                FileRefUtil.validateWholeFileRecord(record);
            } catch (IllegalArgumentException e) {
                LOGGER.error("Validation Failed For Record {}", e);
                throw new OnRecordErrorException(record, Errors.S3_52, e);
            }

            String fileName = getFileNameFromFileNameEL(keyPrefix, record);

            checkForWholeFileExistence(bucket, fileName);

            FileRef fileRef = record.get(FileRefUtil.FILE_REF_FIELD_PATH).getValueAsFileRef();

            ObjectMetadata metadata = getObjectMetadata();
            metadata = (metadata == null) ? new ObjectMetadata() : metadata;

            //Mandatory field path specifying size.
            metadata.setContentLength(
                    record.get(FileRefUtil.FILE_INFO_FIELD_PATH + "/" + SIZE).getValueAsLong());

            EventRecord eventRecord = createEventRecordForFileTransfer(record, bucket, fileName);

            //Fyi this gets closed automatically after upload completes.
            InputStream is = FileRefUtil.getReadableStream(context, fileRef, InputStream.class,
                    generatorService.wholeFileIncludeChecksumInTheEvents(),
                    ChecksumAlgorithm.forApi(generatorService.wholeFileChecksumAlgorithm()),
                    new FileRefStreamCloseEventHandler(eventRecord));
            //We are bypassing the generator because S3 has a convenient notion of taking input stream as a parameter.
            Upload upload = doUpload(bucket, fileName, is, metadata);
            uploads.add(new UploadMetadata(upload, bucket, ImmutableList.of(record),
                    ImmutableList.of(eventRecord)));

            //Add event to event lane.
        } catch (OnRecordErrorException e) {
            LOGGER.error("Error on record: {}", e);
            errorRecordHandler.onError(new OnRecordErrorException(record, e.getErrorCode(), e.getParams()));
        }
    }
    return uploads;
}

From source file:com.tfnsnproject.util.S3StorageManager.java

License:Open Source License

/**
 * Stores a given item on S3
 *
 * @param obj               the data to be stored
 * @param reducedRedundancy whether or not to use reduced redundancy storage
 * @param acl               a canned access control list indicating what permissions to store this object with (can be null to leave it set to default)
 */
public void store(S3StorageObject obj, boolean reducedRedundancy, CannedAccessControlList acl) {
    // Make sure the bucket exists before we try to use it
    checkForAndCreateBucket(obj.getBucketName());

    ObjectMetadata omd = new ObjectMetadata();
    omd.setContentType(obj.getMimeType());
    omd.setContentLength(obj.getData().length);

    ByteArrayInputStream is = new ByteArrayInputStream(obj.getData());
    PutObjectRequest request = new PutObjectRequest(obj.getBucketName(), obj.getStoragePath(), is, omd);

    // Check if reduced redundancy is enabled
    if (reducedRedundancy) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    }

    s3client.putObject(request);

    // If we have an ACL, set access permissions for the data on S3
    if (acl != null) {
        s3client.setObjectAcl(obj.getBucketName(), obj.getStoragePath(), acl);
    }

}

From source file:com.tracermedia.maven.plugins.CreateVersionMojo.java

License:Open Source License

protected void copyFileToS3(final String s3Bucket, final String s3Key, final File file) throws IOException {

    final ObjectMetadata meta = new ObjectMetadata();

    // The metadata can be populated straight from the file; no stream needs to be opened for this.
    meta.setContentLength(file.length());
    meta.setContentType(Mimetypes.getInstance().getMimetype(file));

    InputStream in = new ProgressReportingInputStream(new RepeatableFileInputStream(file), new ProgressListener() {
        int lastPercent = -1;
        long bytes = 0;

        public void progressChanged(ProgressEvent progressEvent) {
            bytes += progressEvent.getBytesTransfered();
            double percent = 100.0 * bytes / meta.getContentLength();
            if ((int) percent != lastPercent) {
                System.out.print(
                        String.format("\rCopying file [%s] to S3, bucket: %s, key: %s, progress: %.0f%%   ",
                                file.getName(), s3Bucket, s3Key, percent));
                lastPercent = (int) percent;
            }
        }
    });

    try {
        final PutObjectRequest request = new PutObjectRequest(s3Bucket, s3Key, in, meta);
        getS3Client().putObject(request);
        System.out.println(String.format("\rCopying file [%s] to S3, bucket: %s, key: %s, progress: %.0f%%   ",
                file.getName(), s3Bucket, s3Key, 100.0));
    } finally {
        in.close();
    }
}
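
A hypothetical invocation of this helper (the bucket, key, and file path are placeholders) could be:

copyFileToS3("my-deploy-bucket", "releases/app-1.0.war", new File("target/app-1.0.war"));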

From source file:com.tvarit.plugin.TvaritTomcatDeployerMojo.java

License:Open Source License

@Override
public void execute() throws MojoExecutionException, MojoFailureException {

    final MavenProject project = (MavenProject) this.getPluginContext().getOrDefault("project", null);
    if (templateUrl == null)
        try {
            templateUrl = new TemplateUrlMaker().makeUrl(project, "newinstance.template").toString();
        } catch (MalformedURLException e) {
            throw new MojoExecutionException(
                    "Could not create default url for templates. Please open an issue on github.", e);
        }

    final BasicAWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secretKey);
    AmazonS3Client s3Client = new AmazonS3Client(awsCredentials);
    final File warFile = project.getArtifact().getFile();
    final String key = "deployables/" + project.getGroupId() + "/" + project.getArtifactId() + "/"
            + project.getVersion() + "/" + warFile.getName();
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, warFile);
    final ObjectMetadata metadata = new ObjectMetadata();
    final HashMap<String, String> userMetadata = new HashMap<>();
    userMetadata.put("project_name", projectName);
    userMetadata.put("stack_template_url", templateUrl);
    userMetadata.put("private_key_name", sshKeyName);
    metadata.setUserMetadata(userMetadata);
    putObjectRequest.withMetadata(metadata);
    final PutObjectResult putObjectResult = s3Client.putObject(putObjectRequest);

    /*
            AmazonCloudFormationClient amazonCloudFormationClient = new AmazonCloudFormationClient(awsCredentials);
            final com.amazonaws.services.cloudformation.model.Parameter projectNameParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("projectName").withParameterValue(this.projectName);
            final com.amazonaws.services.cloudformation.model.Parameter publicSubnetsParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("publicSubnets").withParameterValue(commaSeparatedSubnetIds);
            final com.amazonaws.services.cloudformation.model.Parameter tvaritRoleParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("tvaritRole").withParameterValue(tvaritRole);
            final com.amazonaws.services.cloudformation.model.Parameter tvaritInstanceProfileParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("tvaritInstanceProfile").withParameterValue(this.tvaritInstanceProfile);
            final com.amazonaws.services.cloudformation.model.Parameter tvaritBucketNameParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("bucketName").withParameterValue(this.bucketName);
            final com.amazonaws.services.cloudformation.model.Parameter instanceSecurityGroupIdParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("sgId").withParameterValue(this.instanceSecurityGroupId);
            final com.amazonaws.services.cloudformation.model.Parameter sshKeyNameParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("keyName").withParameterValue(this.sshKeyName);
            final String warFileUrl = s3Client.getUrl(bucketName, key).toString();
            final com.amazonaws.services.cloudformation.model.Parameter warFileUrlParameter = new com.amazonaws.services.cloudformation.model.Parameter().withParameterKey("warFileUrl").withParameterValue(warFileUrl);
            final CreateStackRequest createStackRequest = new CreateStackRequest();
            if (templateUrl == null) {
    try {
        templateUrl = new TemplateUrlMaker().makeUrl(project, "newinstance.template").toString();
    } catch (MalformedURLException e) {
        throw new MojoExecutionException("Could not create default url for templates. Please open an issue on github.", e);
    }
            }
            createStackRequest.
        withStackName(projectName + "-instance-" + project.getVersion().replace(".", "-")).
        withParameters(
                projectNameParameter,
                publicSubnetsParameter,
                tvaritInstanceProfileParameter,
                tvaritRoleParameter,
                tvaritBucketNameParameter,
                instanceSecurityGroupIdParameter,
                warFileUrlParameter,
                sshKeyNameParameter
        ).
        withDisableRollback(true).
        withTemplateURL(templateUrl);
            createStackRequest.withDisableRollback(true);
            final Stack stack = new StackMaker().makeStack(createStackRequest, amazonCloudFormationClient, getLog());
            AmazonAutoScalingClient amazonAutoScalingClient = new AmazonAutoScalingClient(awsCredentials);
            final AttachInstancesRequest attachInstancesRequest = new AttachInstancesRequest();
            attachInstancesRequest.withInstanceIds(stack.getOutputs().get(0).getOutputValue(), stack.getOutputs().get(1).getOutputValue()).withAutoScalingGroupName(autoScalingGroupName);
            amazonAutoScalingClient.attachInstances(attachInstancesRequest);
    */

}