List of usage examples for com.amazonaws.http.HttpMethodName.PUT
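Before the real-world examples, here is a minimal, self-contained sketch of how HttpMethodName.PUT is attached to a low-level request. DefaultRequest, Request, and the setters used here are standard AWS SDK for Java v1 classes; the service name, endpoint, and resource path are placeholders, not taken from the examples below.

import java.net.URI;

import com.amazonaws.DefaultRequest;
import com.amazonaws.Request;
import com.amazonaws.http.HttpMethodName;

public class PutRequestSketch {
    public static void main(String[] args) {
        // Build a bare request against a placeholder endpoint and mark it as an HTTP PUT.
        Request<Void> request = new DefaultRequest<Void>("s3");
        request.setEndpoint(URI.create("https://s3.amazonaws.com"));
        request.setHttpMethod(HttpMethodName.PUT);
        request.setResourcePath("/example-bucket/example-key");

        System.out.println(request.getHttpMethod()); // prints: PUT
    }
}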
From source file: com.emc.vipr.services.s3.ViPRS3Client.java
License: Open Source License
public BucketFileAccessModeResult setBucketFileAccessMode(SetBucketFileAccessModeRequest putAccessModeRequest)
        throws AmazonClientException {
    assertParameterNotNull(putAccessModeRequest,
            "The SetBucketFileAccessModeRequest parameter must be specified");
    String bucketName = putAccessModeRequest.getBucketName();
    assertParameterNotNull(bucketName,
            "The bucket name parameter must be specified when changing access mode");

    Request<SetBucketFileAccessModeRequest> request = createRequest(bucketName, null, putAccessModeRequest,
            HttpMethodName.PUT);
    request.addParameter(ViPRConstants.ACCESS_MODE_PARAMETER, null);
    request.addHeader(Headers.CONTENT_TYPE, Mimetypes.MIMETYPE_XML);

    if (putAccessModeRequest.getAccessMode() != null) {
        request.addHeader(ViPRConstants.FILE_ACCESS_MODE_HEADER,
                putAccessModeRequest.getAccessMode().toString());
    }
    if (putAccessModeRequest.getDuration() != 0) {
        request.addHeader(ViPRConstants.FILE_ACCESS_DURATION_HEADER,
                Long.toString(putAccessModeRequest.getDuration()));
    }
    if (putAccessModeRequest.getHostList() != null) {
        request.addHeader(ViPRConstants.FILE_ACCESS_HOST_LIST_HEADER,
                join(",", putAccessModeRequest.getHostList()));
    }
    if (putAccessModeRequest.getUid() != null) {
        request.addHeader(ViPRConstants.FILE_ACCESS_UID_HEADER, putAccessModeRequest.getUid());
    }
    if (putAccessModeRequest.getToken() != null) {
        request.addHeader(ViPRConstants.FILE_ACCESS_TOKEN_HEADER, putAccessModeRequest.getToken());
    }
    if (putAccessModeRequest.isPreserveIngestPaths()) {
        request.addHeader(ViPRConstants.FILE_ACCESS_PRESERVE_INGEST_PATHS, "true");
    }

    return invoke(request, new AbstractS3ResponseHandler<BucketFileAccessModeResult>() {
        public AmazonWebServiceResponse<BucketFileAccessModeResult> handle(HttpResponse response)
                throws Exception {
            BucketFileAccessModeResult result = new BucketFileAccessModeResult();
            Map<String, String> headers = response.getHeaders();

            if (headers.containsKey(ViPRConstants.FILE_ACCESS_MODE_HEADER))
                result.setAccessMode(ViPRConstants.FileAccessMode
                        .valueOf(headers.get(ViPRConstants.FILE_ACCESS_MODE_HEADER)));
            if (headers.containsKey(ViPRConstants.FILE_ACCESS_DURATION_HEADER))
                result.setDuration(Long.parseLong(headers.get(ViPRConstants.FILE_ACCESS_DURATION_HEADER)));
            if (headers.containsKey(ViPRConstants.FILE_ACCESS_HOST_LIST_HEADER))
                result.setHostList(
                        Arrays.asList(headers.get(ViPRConstants.FILE_ACCESS_HOST_LIST_HEADER).split(",")));
            if (headers.containsKey(ViPRConstants.FILE_ACCESS_UID_HEADER))
                result.setUid(headers.get(ViPRConstants.FILE_ACCESS_UID_HEADER));
            if (headers.containsKey(ViPRConstants.FILE_ACCESS_START_TOKEN_HEADER))
                result.setStartToken(headers.get(ViPRConstants.FILE_ACCESS_START_TOKEN_HEADER));
            if (headers.containsKey(ViPRConstants.FILE_ACCESS_END_TOKEN_HEADER))
                result.setEndToken(headers.get(ViPRConstants.FILE_ACCESS_END_TOKEN_HEADER));
            if (headers.containsKey(ViPRConstants.FILE_ACCESS_PRESERVE_INGEST_PATHS))
                result.setPreserveIngestPaths(Boolean
                        .parseBoolean(headers.get(ViPRConstants.FILE_ACCESS_PRESERVE_INGEST_PATHS)));

            AmazonWebServiceResponse<BucketFileAccessModeResult> awsResponse = parseResponseMetadata(response);
            awsResponse.setResult(result);
            return awsResponse;
        }
    }, bucketName, null);
}
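A hypothetical invocation of the method above might look like the following sketch. The setters on SetBucketFileAccessModeRequest are assumed to mirror the getters used in the method and are not confirmed by this listing; the bucket name, duration, and host list are placeholders.

// Hypothetical usage sketch: setter names are assumed from the getters above.
SetBucketFileAccessModeRequest accessModeRequest = new SetBucketFileAccessModeRequest();
accessModeRequest.setBucketName("example-bucket");              // placeholder
accessModeRequest.setDuration(3600L);                           // assumed setter; maps to the duration header
accessModeRequest.setHostList(Arrays.asList("host1", "host2")); // assumed setter; maps to the host-list header
BucketFileAccessModeResult result = client.setBucketFileAccessMode(accessModeRequest);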
From source file: com.emc.vipr.services.s3.ViPRS3Client.java
License: Open Source License
/**
 * Executes a (subclass of) PutObjectRequest. In particular, we check for subclasses
 * of UpdateObjectRequest and inject the value of the Range header. This version
 * also returns the raw ObjectMetadata for the response so callers can construct
 * their own result objects.
 *
 * @param putObjectRequest the request to execute
 * @return an ObjectMetadata containing the response headers
 */
protected ObjectMetadata doPut(PutObjectRequest putObjectRequest) {
    assertParameterNotNull(putObjectRequest,
            "The PutObjectRequest parameter must be specified when uploading an object");

    String bucketName = putObjectRequest.getBucketName();
    String key = putObjectRequest.getKey();
    ObjectMetadata metadata = putObjectRequest.getMetadata();
    InputStream input = putObjectRequest.getInputStream();
    if (metadata == null)
        metadata = new ObjectMetadata();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when uploading an object");
    assertParameterNotNull(key, "The key parameter must be specified when uploading an object");

    /*
     * This is compatible with progress listener set by either the legacy
     * method GetObjectRequest#setProgressListener or the new method
     * GetObjectRequest#setGeneralProgressListener.
     */
    com.amazonaws.event.ProgressListener progressListener = putObjectRequest.getGeneralProgressListener();
    ProgressListenerCallbackExecutor progressListenerCallbackExecutor = ProgressListenerCallbackExecutor
            .wrapListener(progressListener);

    // If a file is specified for upload, we need to pull some additional
    // information from it to auto-configure a few options
    if (putObjectRequest.getFile() != null) {
        File file = putObjectRequest.getFile();

        // Always set the content length, even if it's already set
        metadata.setContentLength(file.length());

        // Only set the content type if it hasn't already been set
        if (metadata.getContentType() == null) {
            metadata.setContentType(Mimetypes.getInstance().getMimetype(file));
        }

        FileInputStream fileInputStream = null;
        try {
            fileInputStream = new FileInputStream(file);
            byte[] md5Hash = Md5Utils.computeMD5Hash(fileInputStream);
            metadata.setContentMD5(BinaryUtils.toBase64(md5Hash));
        } catch (Exception e) {
            throw new AmazonClientException("Unable to calculate MD5 hash: " + e.getMessage(), e);
        } finally {
            try {
                fileInputStream.close();
            } catch (Exception e) {
            }
        }

        try {
            input = new RepeatableFileInputStream(file);
        } catch (FileNotFoundException fnfe) {
            throw new AmazonClientException("Unable to find file to upload", fnfe);
        }
    }

    Request<PutObjectRequest> request = createRequest(bucketName, key, putObjectRequest, HttpMethodName.PUT);

    if (putObjectRequest.getAccessControlList() != null) {
        addAclHeaders(request, putObjectRequest.getAccessControlList());
    } else if (putObjectRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, putObjectRequest.getCannedAcl().toString());
    }

    if (putObjectRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, putObjectRequest.getStorageClass());
    }

    if (putObjectRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, putObjectRequest.getRedirectLocation());
        if (input == null) {
            input = new ByteArrayInputStream(new byte[0]);
        }
    }

    // Use internal interface to differentiate 0 from unset.
    if (metadata.getRawMetadata().get(Headers.CONTENT_LENGTH) == null) {
        /*
         * There's nothing we can do except for let the HTTP client buffer
         * the input stream contents if the caller doesn't tell us how much
         * data to expect in a stream since we have to explicitly tell
         * Amazon S3 how much we're sending before we start sending any of
         * it.
         */
        log.warn("No content length specified for stream data. "
                + "Stream contents will be buffered in memory and could result in "
                + "out of memory errors.");
    }

    if (progressListenerCallbackExecutor != null) {
        com.amazonaws.event.ProgressReportingInputStream progressReportingInputStream =
                new com.amazonaws.event.ProgressReportingInputStream(input, progressListenerCallbackExecutor);
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.STARTED_EVENT_CODE);
    }

    if (!input.markSupported()) {
        int streamBufferSize = Constants.DEFAULT_STREAM_BUFFER_SIZE;
        String bufferSizeOverride = System.getProperty("com.amazonaws.sdk.s3.defaultStreamBufferSize");
        if (bufferSizeOverride != null) {
            try {
                streamBufferSize = Integer.parseInt(bufferSizeOverride);
            } catch (Exception e) {
                log.warn("Unable to parse buffer size override from value: " + bufferSizeOverride);
            }
        }
        input = new RepeatableInputStream(input, streamBufferSize);
    }

    MD5DigestCalculatingInputStream md5DigestStream = null;
    if (metadata.getContentMD5() == null) {
        /*
         * If the user hasn't set the content MD5, then we don't want to
         * buffer the whole stream in memory just to calculate it. Instead,
         * we can calculate it on the fly and validate it with the returned
         * ETag from the object upload.
         */
        try {
            md5DigestStream = new MD5DigestCalculatingInputStream(input);
            input = md5DigestStream;
        } catch (NoSuchAlgorithmException e) {
            log.warn("No MD5 digest algorithm available. Unable to calculate "
                    + "checksum and verify data integrity.", e);
        }
    }

    if (metadata.getContentType() == null) {
        /*
         * Default to the "application/octet-stream" if the user hasn't
         * specified a content type.
         */
        metadata.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
    }

    populateRequestMetadata(request, metadata);
    request.setContent(input);

    if (putObjectRequest instanceof UpdateObjectRequest) {
        request.addHeader(Headers.RANGE, "bytes=" + ((UpdateObjectRequest) putObjectRequest).getUpdateRange());
    }

    ObjectMetadata returnedMetadata = null;
    try {
        returnedMetadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
    } catch (AmazonClientException ace) {
        fireProgressEvent(progressListenerCallbackExecutor,
                com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
        throw ace;
    } finally {
        try {
            input.close();
        } catch (Exception e) {
            log.warn("Unable to cleanly close input stream: " + e.getMessage(), e);
        }
    }

    String contentMd5 = metadata.getContentMD5();
    if (md5DigestStream != null) {
        contentMd5 = BinaryUtils.toBase64(md5DigestStream.getMd5Digest());
    }

    // Can't verify MD5 on appends/update (yet).
    if (!(putObjectRequest instanceof UpdateObjectRequest)) {
        if (returnedMetadata != null && contentMd5 != null) {
            byte[] clientSideHash = BinaryUtils.fromBase64(contentMd5);
            byte[] serverSideHash = BinaryUtils.fromHex(returnedMetadata.getETag());

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                fireProgressEvent(progressListenerCallbackExecutor,
                        com.amazonaws.event.ProgressEvent.FAILED_EVENT_CODE);
                throw new AmazonClientException("Unable to verify integrity of data upload. "
                        + "Client calculated content hash didn't match hash calculated by Amazon S3. "
                        + "You may need to delete the data stored in Amazon S3.");
            }
        }
    }

    fireProgressEvent(progressListenerCallbackExecutor,
            com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE);

    return returnedMetadata;
}
From source file: com.emc.vipr.services.s3.ViPRS3Client.java
License: Open Source License
/**
 * ViPR-specific create bucket command. This version of the command adds some
 * options specific to EMC ViPR, specifically the ability to set the ViPR project ID
 * and Object Virtual Pool ID on the new bucket.
 *
 * @param createBucketRequest the configuration parameters for the new bucket.
 */
public Bucket createBucket(ViPRCreateBucketRequest createBucketRequest)
        throws AmazonClientException, AmazonServiceException {
    assertParameterNotNull(createBucketRequest,
            "The CreateBucketRequest parameter must be specified when creating a bucket");

    String bucketName = createBucketRequest.getBucketName();
    String region = createBucketRequest.getRegion();

    assertParameterNotNull(bucketName, "The bucket name parameter must be specified when creating a bucket");

    if (bucketName != null)
        bucketName = bucketName.trim();
    BucketNameUtils.validateBucketName(bucketName);

    Request<ViPRCreateBucketRequest> request = createRequest(bucketName, null, createBucketRequest,
            HttpMethodName.PUT);

    if (createBucketRequest.getAccessControlList() != null) {
        addAclHeaders(request, createBucketRequest.getAccessControlList());
    } else if (createBucketRequest.getCannedAcl() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, createBucketRequest.getCannedAcl().toString());
    }

    // ViPR specific: projectId, vpoolId and fsAccessEnabled.
    if (createBucketRequest.getProjectId() != null) {
        request.addHeader(ViPRConstants.PROJECT_HEADER, createBucketRequest.getProjectId());
    }
    if (createBucketRequest.getVpoolId() != null) {
        request.addHeader(ViPRConstants.VPOOL_HEADER, createBucketRequest.getVpoolId());
    }
    if (createBucketRequest.isFsAccessEnabled()) {
        request.addHeader(ViPRConstants.FS_ACCESS_ENABLED, "true");
    }

    /*
     * If we're talking to a region-specific endpoint other than the US, we
     * *must* specify a location constraint. Try to derive the region from
     * the endpoint.
     */
    if (!(this.endpoint.getHost().equals(Constants.S3_HOSTNAME)) && (region == null || region.isEmpty())) {
        try {
            region = RegionUtils.getRegionByEndpoint(this.endpoint.getHost()).getName();
        } catch (IllegalArgumentException exception) {
            // Endpoint does not correspond to a known region; send the
            // request with no location constraint and hope for the best.
        }
    }

    /*
     * We can only send the CreateBucketConfiguration if we're *not*
     * creating a bucket in the US region.
     */
    if (region != null && !region.toUpperCase().equals(Region.US_Standard.toString())) {
        XmlWriter xml = new XmlWriter();
        xml.start("CreateBucketConfiguration", "xmlns", Constants.XML_NAMESPACE);
        xml.start("LocationConstraint").value(region).end();
        xml.end();

        request.setContent(new ByteArrayInputStream(xml.getBytes()));
    }

    invoke(request, voidResponseHandler, bucketName, null);

    return new Bucket(bucketName);
}
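A hypothetical invocation sketch follows. The single-argument constructor is assumed to mirror the SDK's CreateBucketRequest(String bucketName), and the setters are assumed to mirror the ViPR-specific getters used above; the bucket name and IDs are placeholders.

// Hypothetical usage sketch: constructor and setters are assumed, not confirmed by this listing.
ViPRCreateBucketRequest createBucketRequest = new ViPRCreateBucketRequest("example-bucket");
createBucketRequest.setProjectId("example-project-id"); // assumed setter; maps to the project header
createBucketRequest.setVpoolId("example-vpool-id");     // assumed setter; maps to the vpool header
Bucket bucket = client.createBucket(createBucketRequest);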
From source file: com.ibm.og.s3.v4.AWSS3V4Signer.java
License: Open Source License
/**
 * Determine whether to use aws-chunked for signing.
 */
private boolean useChunkEncoding(final SignableRequest<?> request) {
    // If chunked encoding is explicitly disabled through client options,
    // return right here.
    if (isChunkedEncodingDisabled(request)) {
        return false;
    }
    // FIXME this may break with POST or part upload
    return this.chunkedEncoding && request.getHttpMethod() == HttpMethodName.PUT;
}
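For context, chunked-encoding signing is typically switched off when the client is built, which is the condition isChunkedEncodingDisabled(request) tests above. A minimal sketch, assuming the standard AmazonS3ClientBuilder from the AWS SDK for Java v1; endpoint and credential configuration are omitted:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

// Disable aws-chunked content encoding at build time; with this option set,
// a check like useChunkEncoding(...) above returns false even for PUT requests.
AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withChunkedEncodingDisabled(true)
        .build();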
From source file: org.apache.nifi.processors.aws.wag.AbstractAWSGatewayApiProcessor.java
License: Apache License
protected GenericApiGatewayRequest configureRequest(final ProcessContext context, final ProcessSession session,
        final String resourcePath, final FlowFile requestFlowFile, final HttpMethodName methodName) {

    GenericApiGatewayRequestBuilder builder = new GenericApiGatewayRequestBuilder()
            .withResourcePath(resourcePath);
    final Map<String, List<String>> parameters = getParameters(context);
    builder = builder.withParameters(parameters);

    InputStream requestBody = null;
    switch (methodName) {
    case GET:
        builder = builder.withHttpMethod(HttpMethodName.GET);
        break;
    case POST:
        requestBody = getRequestBodyToSend(session, context, requestFlowFile);
        builder = builder.withHttpMethod(HttpMethodName.POST).withBody(requestBody);
        break;
    case PUT:
        requestBody = getRequestBodyToSend(session, context, requestFlowFile);
        builder = builder.withHttpMethod(HttpMethodName.PUT).withBody(requestBody);
        break;
    case PATCH:
        requestBody = getRequestBodyToSend(session, context, requestFlowFile);
        builder = builder.withHttpMethod(HttpMethodName.PATCH).withBody(requestBody);
        break;
    case HEAD:
        builder = builder.withHttpMethod(HttpMethodName.HEAD);
        break;
    case DELETE:
        builder = builder.withHttpMethod(HttpMethodName.DELETE);
        break;
    case OPTIONS:
        requestBody = getRequestBodyToSend(session, context, requestFlowFile);
        builder = builder.withHttpMethod(HttpMethodName.OPTIONS).withBody(requestBody);
        break;
    }

    builder = setHeaderProperties(context, builder, methodName, requestFlowFile);
    return builder.build();
}
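The same builder calls can also be exercised directly. A minimal sketch of building a PUT request, using only the builder methods already shown above; the resource path and body are placeholders:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

// Build a PUT request directly; every builder method here appears in the processor code above.
GenericApiGatewayRequest putRequest = new GenericApiGatewayRequestBuilder()
        .withResourcePath("/example/resource")
        .withHttpMethod(HttpMethodName.PUT)
        .withBody(new ByteArrayInputStream("{\"key\":\"value\"}".getBytes(StandardCharsets.UTF_8)))
        .build();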
From source file: org.apache.nifi.processors.aws.wag.AbstractAWSGatewayApiProcessor.java
License: Apache License
protected GenericApiGatewayRequestBuilder setHeaderProperties(final ProcessContext context,
        GenericApiGatewayRequestBuilder requestBuilder, HttpMethodName methodName,
        final FlowFile requestFlowFile) {

    Map<String, String> headers = new HashMap<>();
    for (String headerKey : dynamicPropertyNames) {
        String headerValue = context.getProperty(headerKey).evaluateAttributeExpressions(requestFlowFile)
                .getValue();
        headers.put(headerKey, headerValue);
    }

    // iterate through the flowfile attributes, adding any attribute that
    // matches the attributes-to-send pattern. if the pattern is not set
    // (it's an optional property), ignore that attribute entirely
    if (regexAttributesToSend != null && requestFlowFile != null) {
        Map<String, String> attributes = requestFlowFile.getAttributes();
        Matcher m = regexAttributesToSend.matcher("");
        for (Map.Entry<String, String> entry : attributes.entrySet()) {
            String headerKey = trimToEmpty(entry.getKey());

            // don't include any of the ignored attributes
            if (IGNORED_ATTRIBUTES.contains(headerKey)) {
                continue;
            }

            // check if our attribute key matches the pattern
            // if so, include in the request as a header
            m.reset(headerKey);
            if (m.matches()) {
                String headerVal = trimToEmpty(entry.getValue());
                headers.put(headerKey, headerVal);
            }
        }
    }

    String contentType = context.getProperty(PROP_CONTENT_TYPE).evaluateAttributeExpressions(requestFlowFile)
            .getValue();
    boolean sendBody = context.getProperty(PROP_SEND_BODY).asBoolean();
    contentType = StringUtils.isBlank(contentType) ? DEFAULT_CONTENT_TYPE : contentType;
    if (methodName == HttpMethodName.PUT || methodName == HttpMethodName.POST
            || methodName == HttpMethodName.PATCH) {
        if (sendBody) {
            headers.put("Content-Type", contentType);
        }
    } else {
        headers.put("Content-Type", contentType);
    }

    if (!headers.isEmpty()) {
        requestBuilder = requestBuilder.withHeaders(headers);
    }

    return requestBuilder;
}