Usage examples for com.amazonaws.AmazonServiceException#getStatusCode()
public int getStatusCode()
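Every example below follows the same pattern: catch AmazonServiceException, read the HTTP status code of the error response via getStatusCode(), and branch on it, most often treating 404 (and sometimes 403) as "not found". Here is a minimal, self-contained sketch of that pattern using the AWS SDK for Java v1; the class and method names (StatusCodeExample, objectExists) are illustrative only and do not appear in any of the projects below.

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.ObjectMetadata;

public final class StatusCodeExample {

    // Hypothetical helper: probe for an S3 object, treating HTTP 404 as "absent".
    // Any other status code signals a real failure and is rethrown to the caller
    // (AmazonServiceException is unchecked, so no throws clause is needed).
    static boolean objectExists(AmazonS3 s3, String bucket, String key) {
        try {
            ObjectMetadata metadata = s3.getObjectMetadata(bucket, key);
            return metadata != null;
        } catch (AmazonServiceException ase) {
            if (ase.getStatusCode() == 404) {
                return false; // the service answered: no such key
            }
            throw ase; // auth errors, throttling, 5xx, etc.
        }
    }
}

Several of the S3 examples below also accept 403 as "not found", because S3 reports "forbidden" rather than 404 when the caller lacks list permission on the bucket.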
From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java
License:Apache License
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback)
        throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (ase.getStatusCode() != 404) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException(
                        "Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            s3service.copyObject(copReq);
            LOG.debug("lastModified of [{}] updated successfully.", identifier);
            if (callback != null) {
                callback.onSuccess(new AsyncUploadResult(identifier, file));
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(new PutObjectRequest(bucket, key, file));
                // wait for upload to finish
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                if (!asyncUpload && callback != null) { // guard: callback may be null
                    callback.onAbort(new AsyncUploadResult(identifier, file));
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms",
            new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
/**
 * Check if record identified by identifier exists in Amazon S3.
 */
@Override
public boolean exists(DataIdentifier identifier) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata objectMetaData = s3service.getObjectMetadata(bucket, key);
        if (objectMetaData != null) {
            LOG.trace("exists [{}]: [true] took [{}] ms.", identifier, (System.currentTimeMillis() - start));
            return true;
        }
        return false;
    } catch (AmazonServiceException e) {
        // 404: the key is absent; 403: S3 reports "forbidden" instead of
        // "not found" when the caller lacks list permission on the bucket
        if (e.getStatusCode() == 404 || e.getStatusCode() == 403) {
            LOG.debug("exists [{}]: [false] took [{}] ms.", identifier, (System.currentTimeMillis() - start));
            return false;
        }
        throw new DataStoreException(
                "Error occurred in getObjectMetadata for key [" + identifier.toString() + "]", e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
@Override
public boolean exists(DataIdentifier identifier, boolean touch) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    boolean retVal = false;
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        objectMetaData = s3service.getObjectMetadata(bucket, key);
        if (objectMetaData != null) {
            retVal = true;
            if (touch) {
                // a same-source/same-destination copy refreshes the object's lastModified timestamp
                CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
                copReq.setNewObjectMetadata(objectMetaData);
                Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
                copy.waitForCopyResult();
                LOG.debug("[{}] touched took [{}] ms. ", identifier, (System.currentTimeMillis() - start));
            }
        } else {
            retVal = false;
        }
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404 || e.getStatusCode() == 403) {
            retVal = false;
        } else {
            throw new DataStoreException(
                    "Error occurred in exists check for key [" + identifier.toString() + "]", e);
        }
    } catch (Exception e) {
        throw new DataStoreException("Error occurred in exists check for key " + identifier.toString(), e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("exists [{}]: [{}] took [{}] ms.",
            new Object[] { identifier, retVal, (System.currentTimeMillis() - start) });
    return retVal;
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
@Override
public long getLastModified(DataIdentifier identifier) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata object = s3service.getObjectMetadata(bucket, key);
        long lastModified = object.getLastModified().getTime();
        LOG.debug("Identifier [{}]'s lastModified = [{}] took [{}]ms.",
                new Object[] { identifier, lastModified, (System.currentTimeMillis() - start) });
        return lastModified;
    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404 || e.getStatusCode() == 403) {
            LOG.info("getLastModified: Identifier [{}] not found. Took [{}] ms.", identifier,
                    (System.currentTimeMillis() - start));
        }
        throw new DataStoreException(e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License:Apache License
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback)
        throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException(
                        "Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
                if (callback != null) {
                    callback.onSuccess(new AsyncUploadResult(identifier, file));
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // wait for upload to finish
                if (asyncUpload) {
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms",
            new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}
From source file:org.apache.nifi.processors.aws.dynamodb.AbstractDynamoDBProcessor.java
License:Apache License
protected List<FlowFile> processServiceException(final ProcessSession session, List<FlowFile> flowFiles,
        AmazonServiceException exception) {
    List<FlowFile> failedFlowFiles = new ArrayList<>();
    for (FlowFile flowFile : flowFiles) {
        // surface the exception details as flow-file attributes
        Map<String, String> attributes = new HashMap<>();
        attributes.put(DYNAMODB_ERROR_EXCEPTION_MESSAGE, exception.getMessage());
        attributes.put(DYNAMODB_ERROR_CODE, exception.getErrorCode());
        attributes.put(DYNAMODB_ERROR_MESSAGE, exception.getErrorMessage());
        attributes.put(DYNAMODB_ERROR_TYPE, exception.getErrorType().name());
        attributes.put(DYNAMODB_ERROR_SERVICE, exception.getServiceName());
        attributes.put(DYNAMODB_ERROR_RETRYABLE, Boolean.toString(exception.isRetryable()));
        attributes.put(DYNAMODB_ERROR_REQUEST_ID, exception.getRequestId());
        attributes.put(DYNAMODB_ERROR_STATUS_CODE, Integer.toString(exception.getStatusCode()));
        flowFile = session.putAllAttributes(flowFile, attributes);
        failedFlowFiles.add(flowFile);
    }
    return failedFlowFiles;
}
From source file:org.apache.nifi.processors.aws.lambda.PutLambda.java
License:Apache License
/**
 * Populate exception attributes in the flow file.
 *
 * @param session process session
 * @param flowFile the flow file
 * @param exception exception thrown during invocation
 * @return FlowFile the updated flow file
 */
private FlowFile populateExceptionAttributes(final ProcessSession session, FlowFile flowFile,
        final AmazonServiceException exception) {
    Map<String, String> attributes = new HashMap<>();
    attributes.put(AWS_LAMBDA_EXCEPTION_MESSAGE, exception.getErrorMessage());
    attributes.put(AWS_LAMBDA_EXCEPTION_ERROR_CODE, exception.getErrorCode());
    attributes.put(AWS_LAMBDA_EXCEPTION_REQUEST_ID, exception.getRequestId());
    attributes.put(AWS_LAMBDA_EXCEPTION_STATUS_CODE, Integer.toString(exception.getStatusCode()));
    if (exception.getCause() != null) {
        attributes.put(AWS_LAMBDA_EXCEPTION_CAUSE, exception.getCause().getMessage());
    }
    attributes.put(AWS_LAMBDA_EXCEPTION_ERROR_TYPE, exception.getErrorType().toString());
    flowFile = session.putAllAttributes(flowFile, attributes);
    return flowFile;
}
From source file:org.apache.s4.serializer.dynamodb.EventCountAndReportPE.java
License:Apache License
public void onEvent(TopicEvent event) {
    if (firstEvent) {
        logger.info("Handling new Event [{}]", getId());
        firstEvent = false;
        firstInsert = true;
    }
    count += event.getCount();
    // countUsedEvents++; // SB
    // logger.info("Used Data Events counter [{}]", countUsedEvents); // SB

    if (false) { // BEGINNING OF THE BLOCK!!!!!!!!!!!
        if (firstInsert) {
            firstInsert = false;
            try {
                // Data fusion config file:
                try {
                    // File fusionPropsFile = new File(System.getProperty("user.home") + "/DataFusion.properties");
                    File fusionPropsFile = new File("/home/ec2-user/DataFusion.properties");
                    if (!fusionPropsFile.exists()) {
                        fusionPropsFile = new File(System.getProperty("user.home") + "/DataFusion.properties");
                        if (!fusionPropsFile.exists()) {
                            logger.error(
                                    "Cannot find Data fusion properties file in this location :[{}]. Make sure it is available at this place and includes AWS credentials (accessKey, secretKey)",
                                    fusionPropsFile.getAbsolutePath());
                        }
                    }
                    fusionProperties.load(new FileInputStream(fusionPropsFile));
                    accuracy = Double.parseDouble(fusionProperties.getProperty("accuracy"));
                    confidence = Double.parseDouble(fusionProperties.getProperty("confidence"));
                } catch (Exception e) {
                    logger.error("Cannot find Data fusion config file", e);
                }

                // Create and configure DynamoDB client
                AWSCredentials credentials = new BasicAWSCredentials(awsProperties.getProperty("accessKey"),
                        awsProperties.getProperty("secretKey"));
                AmazonDynamoDBClient dynamoDBClient = new AmazonDynamoDBClient(credentials);
                logger.info("Create DynamoDB client");
                dynamoDBClient.setEndpoint("dynamodb.eu-west-1.amazonaws.com");
                logger.info("DynamoDB client credentials are accepted and endpoint selected");

                // Extracted context, e.g. query, activity
                String searchQueryAPI = "Test KnowledgeDiscovery API Query";
                String object = "Object detected";
                Map<String, AttributeValue> itemRT = new HashMap<String, AttributeValue>();
                Map<String, AttributeValue> itemDQ = new HashMap<String, AttributeValue>();
                Iterable<String> dataSplit = Splitter.on(' ').omitEmptyStrings().trimResults().split(getId());
                // List<String> dataList = Lists.newArrayList(Elements.getElements(dataSplit));
                // String receivedMsgs = dataList.get(dataList.size()-1);
                // countReceivedMsgs = Integer.parseInt(receivedMsgs);
                int i = 0;
                for (String token : dataSplit) {
                    i++;
                    receivedMsgs = token;
                }
                int k = 0;
                for (String token : dataSplit) {
                    k++;
                    if (k == (i - 2)) {
                        receivedAppID = token;
                    } else if (k == (i - 1)) {
                        receivedUserID = token;
                    }
                }
                appID = Double.parseDouble(receivedAppID);
                userID = Double.parseDouble(receivedUserID);

                // STUPID HARDCODE but fast for prototype, should change to class later :)
                if (appID == 0 && userID > 0) { // CV app and serialization table
                    rtEventsTableName = "TableEventVector_CV";
                    tableDataQuality = "EventVectorQuality_CV";
                    db_orig = db_base_dir + "/cv.db";
                    countReceivedMsgs_CV = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_CV;
                    countReceivedMsgsPrev_CV = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_CV++;
                    countReceivedMsgs = countReceivedMsgs_CV;
                    countUsedMsgs = countUsedMsgs_CV;
                } else if (appID == 1 && userID > 0) { // NLP
                    rtEventsTableName = "TableEventVector_NLP";
                    tableDataQuality = "EventVectorSetQuality_NLP";
                    db_orig = db_base_dir + "/nlp.db";
                    countReceivedMsgs_NLP = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_NLP;
                    countReceivedMsgsPrev_NLP = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_NLP++;
                    countReceivedMsgs = countReceivedMsgs_NLP;
                    countUsedMsgs = countUsedMsgs_NLP;
                } else if (appID == 2 && userID > 0) { // Audio
                    rtEventsTableName = "TableEventVector_Audio";
                    tableDataQuality = "EventVectorQuality_Audio";
                    db_orig = db_base_dir + "/audio.db";
                    countReceivedMsgs_Audio = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev_Audio;
                    countReceivedMsgsPrev_Audio = Integer.parseInt(receivedMsgs);
                    countUsedMsgs_Audio++;
                    countReceivedMsgs = countReceivedMsgs_Audio;
                    countUsedMsgs = countUsedMsgs_Audio;
                } else { // all other Events available in DB
                    rtEventsTableName = "TableEventVector";
                    tableDataQuality = "EventVectorQuality";
                    countReceivedMsgs = Integer.parseInt(receivedMsgs) - countReceivedMsgsPrev;
                    countReceivedMsgsPrev = Integer.parseInt(receivedMsgs);
                    countUsedMsgs++;
                }

                try {
                    // Users database connection
                    db_conn = DriverManager.getConnection("jdbc:sqlite:" + db_orig);
                    // Actual invocation of Users DB without "rating" field
                    db_stmt = db_conn.prepareStatement(
                            "SELECT id, title, country, name, surname FROM user WHERE appID = ? AND userID = ?");
                    db_stmt.setDouble(1, userID);
                    db_stmt.setDouble(2, appID);
                    rs = db_stmt.executeQuery();

                    // Index updates/inserts
                    String ID = rs.getString(1);
                    String location = rs.getString(2);
                    String country = rs.getString(3);
                    String name = rs.getString(4);
                    String surname = rs.getString(5);

                    // resultSet adjustment according to the Accuracy and Confidence levels
                    // (1 / number of results, multiplied by 100%)
                    accuracyRT = (1 / rs.getFetchSize()) * 100;
                    confidence = sqrt(accuracyRT * accuracyRT + accuracy * accuracy);

                    // Collect to DynamoDB items (CandidateSet and CandidateSetQuality)
                    itemRT.put("id", new AttributeValue().withS(placesID));
                    itemRT.put("country", new AttributeValue().withS(country));
                    itemRT.put("name", new AttributeValue().withS(String.valueOf(lat)));
                    itemRT.put("surname", new AttributeValue().withS(String.valueOf(lon)));
                    itemRT.put("query", new AttributeValue().withS(searchQueryAPI));
                    itemRT.put("rating", new AttributeValue().withN(String.valueOf(count)));
                    itemRT.put("title", new AttributeValue().withS(location));
                    itemRT.put("topic", new AttributeValue().withS(getId()));
                    itemRT.put("event", new AttributeValue().withS(activity));
                    itemRT.put("ts", new AttributeValue().withS(dateFormatter.format(new Date())));

                    itemDQ.put("TimeStamp", new AttributeValue().withS(dateFormatter.format(new Date())));
                    itemDQ.put("ReceivedMsgs", new AttributeValue().withN(String.valueOf(countReceivedMsgs)));
                    itemDQ.put("UsedMsgs", new AttributeValue().withN(String.valueOf(countUsedMsgs)));
                    itemDQ.put("Accuracy", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Timeliness", new AttributeValue().withS(dateFormatter.format(new Date())));
                    itemDQ.put("Completeness", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Consistency", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Confidence", new AttributeValue().withN(String.valueOf(count)));
                    itemDQ.put("Privacy", new AttributeValue().withS("anonymised"));

                    PutItemRequest itemRequestRT = new PutItemRequest().withTableName(rtEventsTableName)
                            .withItem(itemRT);
                    PutItemRequest itemRequestDQ = new PutItemRequest().withTableName(tableDataQuality)
                            .withItem(itemDQ);
                    dynamoDBClient.putItem(itemRequestRT);
                    dynamoDBClient.putItem(itemRequestDQ);
                    itemRT.clear();
                    itemDQ.clear();
                    logger.info("TableEvent set size [{}], last known size [{}] ", countReceivedMsgs,
                            countReceivedMsgsPrev);
                    logger.info("Wrote EventVector to DynamoDB [{}] ", rtEventsTableName);
                    logger.info("Wrote EventVector Quality measurements to DynamoDB [{}] ", tableDataQuality);
                    // Closing second "try"
                } catch (Exception e) {
                    // logger.error("Cannot close DB file", e);
                } finally {
                    try {
                        rs.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close ResultSet", e);
                    }
                    try {
                        db_stmt.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close Statement", e);
                    }
                    try {
                        db_conn.close();
                    } catch (SQLException e) {
                        logger.error("Cannot close DB file", e);
                    }
                }
                // Closing first "try"
            } catch (AmazonServiceException ase) {
                logger.error(
                        "Caught an AmazonServiceException, which means your request made it to AWS, but was rejected with an error response for some reason.");
                logger.error("Error Message: " + ase.getMessage());
                logger.error("HTTP Status Code: " + ase.getStatusCode());
                logger.error("AWS Error Code: " + ase.getErrorCode());
                logger.error("Error Type: " + ase.getErrorType());
                logger.error("Request ID: " + ase.getRequestId());
            }
        } // end of if (count == 1)
    } // END OF THE BLOCK !!!!!!!!!!!!!!!
}
From source file:org.apache.usergrid.rest.applications.ServiceResource.java
License:Apache License
@CheckPermissionsForPath
@GET
@Produces(MediaType.WILDCARD)
public Response executeStreamGet(@Context UriInfo ui, @PathParam("entityId") PathSegment entityId,
        @HeaderParam("range") String rangeHeader, @HeaderParam("if-modified-since") String modifiedSince)
        throws Exception {

    if (logger.isTraceEnabled()) {
        logger.trace("ServiceResource.executeStreamGet");
    }

    // Needed for testing
    if (properties.getProperty(PROPERTIES_USERGRID_BINARY_UPLOADER).equals("local")) {
        this.binaryStore = localFileBinaryStore;
    } else {
        this.binaryStore = awsSdkS3BinaryStore;
    }

    ApiResponse response = createApiResponse();
    response.setAction("get");
    response.setApplication(services.getApplication());
    response.setParams(ui.getQueryParameters());

    ServiceResults serviceResults = executeServiceRequest(ui, response, ServiceAction.GET, null);
    Entity entity = serviceResults.getEntity();

    if (logger.isTraceEnabled()) {
        logger.trace("In ServiceResource.executeStreamGet with id: {}, range: {}, modifiedSince: {}", entityId,
                rangeHeader, modifiedSince);
    }

    Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);

    // return a 304 if not modified
    Date modified = AssetUtils.fromIfModifiedSince(modifiedSince);
    if (modified != null) {
        Long lastModified = (Long) fileMetadata.get(AssetUtils.LAST_MODIFIED);
        if (lastModified - modified.getTime() < 0) {
            return Response.status(Response.Status.NOT_MODIFIED).build();
        }
    }

    boolean range = StringUtils.isNotBlank(rangeHeader);
    long start = 0, end = 0, contentLength = 0;
    InputStream inputStream;

    if (range) { // honor range request, calculate start & end
        String rangeValue = rangeHeader.trim().substring("bytes=".length());
        contentLength = (Long) fileMetadata.get(AssetUtils.CONTENT_LENGTH);
        end = contentLength - 1;
        if (rangeValue.startsWith("-")) {
            start = contentLength - 1 - Long.parseLong(rangeValue.substring("-".length()));
        } else {
            String[] startEnd = rangeValue.split("-");
            long parsedStart = Long.parseLong(startEnd[0]);
            if (parsedStart > start && parsedStart < end) {
                start = parsedStart;
            }
            if (startEnd.length > 1) {
                long parsedEnd = Long.parseLong(startEnd[1]);
                if (parsedEnd > start && parsedEnd < end) {
                    end = parsedEnd;
                }
            }
        }
        try {
            inputStream = binaryStore.read(getApplicationId(), entity, start, end - start);
        } catch (AwsPropertiesNotFoundException apnfe) {
            logger.error("Amazon Property needed for this operation not found", apnfe);
            return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
        } catch (RuntimeException re) {
            logger.error(re.getMessage());
            return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
        }
    } else { // no range
        try {
            inputStream = binaryStore.read(getApplicationId(), entity);
        } catch (AwsPropertiesNotFoundException apnfe) {
            logger.error("Amazon Property needed for this operation not found", apnfe);
            return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
        } catch (AmazonServiceException ase) {
            // 5xx errors are server-side problems worth logging loudly;
            // 4xx errors are logged at debug and echoed back to the client
            if (ase.getStatusCode() > 499) {
                logger.error(ase.getMessage());
            } else if (logger.isDebugEnabled()) {
                logger.debug(ase.getMessage());
            }
            return Response.status(ase.getStatusCode()).build();
        } catch (RuntimeException re) {
            logger.error(re.getMessage());
            return Response.status(Response.Status.INTERNAL_SERVER_ERROR).build();
        }
    }

    // return 404 if not found
    if (inputStream == null) {
        return Response.status(Response.Status.NOT_FOUND).build();
    }

    Long lastModified = (Long) fileMetadata.get(AssetUtils.LAST_MODIFIED);
    Response.ResponseBuilder responseBuilder = Response.ok(inputStream)
            .type((String) fileMetadata.get(AssetUtils.CONTENT_TYPE)).lastModified(new Date(lastModified));

    if (fileMetadata.get(AssetUtils.E_TAG) != null) {
        responseBuilder.tag((String) fileMetadata.get(AssetUtils.E_TAG));
    }

    if (range) {
        responseBuilder.header("Content-Range", "bytes " + start + "-" + end + "/" + contentLength);
    }

    return responseBuilder.build();
}
From source file:org.apereo.portal.portlets.dynamicskin.storage.s3.AwsS3DynamicSkinService.java
License:Apache License
private ObjectMetadata getMetadataFromAwsS3Bucket(final String objectKey) {
    final GetObjectMetadataRequest request = new GetObjectMetadataRequest(
            this.awsS3BucketConfig.getBucketName(), objectKey);
    try {
        return this.amazonS3Client.getObjectMetadata(request);
    } catch (AmazonServiceException ase) {
        // 404 simply means the object does not exist yet
        if (ase.getStatusCode() == 404) {
            return null;
        }
        this.logAmazonServiceException(ase, request);
        throw new DynamicSkinException("AWS S3 'get object metadata' failure for: " + request, ase);
    } catch (AmazonClientException ace) {
        this.logAmazonClientException(ace, request);
        throw new DynamicSkinException("AWS S3 'get object metadata' failure for: " + request, ace);
    }
}