List of usage examples for org.apache.commons.collections4 CollectionUtils isNotEmpty
public static boolean isNotEmpty(final Collection<?> coll)
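isNotEmpty is the null-safe negation of isEmpty: it returns true only when the collection reference is non-null and holds at least one element, so callers can skip the explicit null check that a plain !coll.isEmpty() would require. A minimal sketch of that contract (the class and variable names below are illustrative, not taken from the examples that follow):

import java.util.Collections;
import java.util.List;
import org.apache.commons.collections4.CollectionUtils;

public class IsNotEmptyContract {
    public static void main(String[] args) {
        List<String> nullList = null;
        List<String> emptyList = Collections.emptyList();
        List<String> populatedList = Collections.singletonList("value");

        // Null-safe: no NullPointerException is thrown for a null reference.
        System.out.println(CollectionUtils.isNotEmpty(nullList));      // false
        System.out.println(CollectionUtils.isNotEmpty(emptyList));     // false
        System.out.println(CollectionUtils.isNotEmpty(populatedList)); // true

        // The equivalent check without the utility needs an explicit null guard:
        System.out.println(populatedList != null && !populatedList.isEmpty()); // true
    }
}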
From source file:org.finra.herd.service.helper.DefaultNotificationMessageBuilderTest.java
/**
 * Validates a business object data status change notification message with JSON payload.
 *
 * @param expectedMessageType the expected message type
 * @param expectedMessageDestination the expected message destination
 * @param expectedBusinessObjectDataKey the expected business object data key
 * @param expectedNewBusinessObjectDataStatus the expected new business object data status
 * @param expectedOldBusinessObjectDataStatus the expected old business object data status
 * @param expectedBusinessObjectDataAttributes the list of expected business object data attributes
 * @param expectedMessageHeaders the list of expected message headers
 * @param notificationMessage the notification message to be validated
 */
private void validateBusinessObjectDataStatusChangeMessageWithJsonPayload(String expectedMessageType,
        String expectedMessageDestination, BusinessObjectDataKey expectedBusinessObjectDataKey,
        String expectedNewBusinessObjectDataStatus, String expectedOldBusinessObjectDataStatus,
        List<Attribute> expectedBusinessObjectDataAttributes, List<MessageHeader> expectedMessageHeaders,
        NotificationMessage notificationMessage) throws IOException {
    assertNotNull(notificationMessage);
    assertEquals(expectedMessageType, notificationMessage.getMessageType());
    assertEquals(expectedMessageDestination, notificationMessage.getMessageDestination());

    BusinessObjectDataStatusChangeJsonMessagePayload businessObjectDataStatusChangeJsonMessagePayload = jsonHelper
            .unmarshallJsonToObject(BusinessObjectDataStatusChangeJsonMessagePayload.class,
                    notificationMessage.getMessageText());

    assertEquals(StringUtils.length(businessObjectDataStatusChangeJsonMessagePayload.eventDate),
            StringUtils.length(HerdDateUtils.now().toString()));
    assertEquals(expectedBusinessObjectDataKey,
            businessObjectDataStatusChangeJsonMessagePayload.businessObjectDataKey);
    assertEquals(expectedNewBusinessObjectDataStatus,
            businessObjectDataStatusChangeJsonMessagePayload.newBusinessObjectDataStatus);
    assertEquals(expectedOldBusinessObjectDataStatus,
            businessObjectDataStatusChangeJsonMessagePayload.oldBusinessObjectDataStatus);

    assertEquals(CollectionUtils.size(expectedBusinessObjectDataAttributes),
            CollectionUtils.size(businessObjectDataStatusChangeJsonMessagePayload.attributes));
    if (CollectionUtils.isNotEmpty(expectedBusinessObjectDataAttributes)) {
        for (Attribute expectedAttribute : expectedBusinessObjectDataAttributes) {
            assertTrue(businessObjectDataStatusChangeJsonMessagePayload.attributes
                    .containsKey(expectedAttribute.getName()));
            assertEquals(expectedAttribute.getValue(), businessObjectDataStatusChangeJsonMessagePayload.attributes
                    .get(expectedAttribute.getName()));
        }
    }

    assertEquals(expectedMessageHeaders, notificationMessage.getMessageHeaders());
}
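The guard in the test above keeps the per-attribute assertions null-safe: CollectionUtils.size(...) already treats a null collection or map as size 0, and isNotEmpty(...) skips the loop entirely when no attributes are expected. A condensed sketch of that pairing, using hypothetical helper and parameter names and the same JUnit asserts as the example:

// Hypothetical test helper illustrating the null-safe assertion pattern above.
private void assertAttributesMatch(List<Attribute> expectedAttributes, Map<String, String> actualAttributes) {
    // CollectionUtils.size() treats null as 0, so this holds even when nothing is expected.
    assertEquals(CollectionUtils.size(expectedAttributes), CollectionUtils.size(actualAttributes));

    // Only iterate when there is something to compare; a null expected list is skipped safely.
    if (CollectionUtils.isNotEmpty(expectedAttributes)) {
        for (Attribute expectedAttribute : expectedAttributes) {
            assertTrue(actualAttributes.containsKey(expectedAttribute.getName()));
            assertEquals(expectedAttribute.getValue(), actualAttributes.get(expectedAttribute.getName()));
        }
    }
}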
From source file:org.finra.herd.service.helper.EmrClusterDefinitionHelper.java
/**
 * Validates an EMR cluster definition configuration.
 *
 * @param emrClusterDefinition the EMR cluster definition configuration
 *
 * @throws IllegalArgumentException if any validation errors were found
 */
public void validateEmrClusterDefinitionConfiguration(EmrClusterDefinition emrClusterDefinition)
        throws IllegalArgumentException {
    Assert.notNull(emrClusterDefinition, "An EMR cluster definition configuration must be specified.");
    Assert.isTrue(StringUtils.isNotBlank(emrClusterDefinition.getSubnetId()), "Subnet ID must be specified");
    for (String token : emrClusterDefinition.getSubnetId().split(",")) {
        Assert.isTrue(StringUtils.isNotBlank(token), "No blank is allowed in the list of subnet IDs");
    }

    Assert.isTrue(!emrHelper.isInstanceDefinitionsEmpty(emrClusterDefinition.getInstanceDefinitions())
            || CollectionUtils.isNotEmpty(emrClusterDefinition.getInstanceFleets()),
            "Instance group definitions or instance fleets must be specified.");

    if (!emrHelper.isInstanceDefinitionsEmpty(emrClusterDefinition.getInstanceDefinitions())) {
        // Check master instances.
        Assert.notNull(emrClusterDefinition.getInstanceDefinitions().getMasterInstances(),
                "Master instances must be specified.");
        validateMasterInstanceDefinition(emrClusterDefinition.getInstanceDefinitions().getMasterInstances());

        // Check core instances.
        if (emrClusterDefinition.getInstanceDefinitions().getCoreInstances() != null) {
            validateInstanceDefinition("core", emrClusterDefinition.getInstanceDefinitions().getCoreInstances(), 0);
            // If instance count is <= 0, remove the entire core instance definition since it is redundant.
            if (emrClusterDefinition.getInstanceDefinitions().getCoreInstances().getInstanceCount() <= 0) {
                emrClusterDefinition.getInstanceDefinitions().setCoreInstances(null);
            }
        }

        // Check task instances.
        if (emrClusterDefinition.getInstanceDefinitions().getTaskInstances() != null) {
            validateInstanceDefinition("task", emrClusterDefinition.getInstanceDefinitions().getTaskInstances(), 1);
        }

        // Check that the total number of instances does not exceed the maximum allowed.
        int maxEmrInstanceCount = configurationHelper.getProperty(ConfigurationValue.MAX_EMR_INSTANCES_COUNT,
                Integer.class);
        if (maxEmrInstanceCount > 0) {
            int instancesRequested = emrClusterDefinition.getInstanceDefinitions().getMasterInstances()
                    .getInstanceCount();
            if (emrClusterDefinition.getInstanceDefinitions().getCoreInstances() != null) {
                instancesRequested += emrClusterDefinition.getInstanceDefinitions().getCoreInstances()
                        .getInstanceCount();
            }
            if (emrClusterDefinition.getInstanceDefinitions().getTaskInstances() != null) {
                instancesRequested += emrClusterDefinition.getInstanceDefinitions().getTaskInstances()
                        .getInstanceCount();
            }
            Assert.isTrue((maxEmrInstanceCount >= instancesRequested),
                    "Total number of instances requested can not exceed : " + maxEmrInstanceCount);
        }
    }

    // Validate node tags, including checking for required tags and detecting duplicate node tag names in a case-sensitive manner.
    Assert.notEmpty(emrClusterDefinition.getNodeTags(), "Node tags must be specified.");
    HashSet<String> nodeTagNameValidationSet = new HashSet<>();
    for (NodeTag nodeTag : emrClusterDefinition.getNodeTags()) {
        Assert.hasText(nodeTag.getTagName(), "A node tag name must be specified.");
        Assert.hasText(nodeTag.getTagValue(), "A node tag value must be specified.");
        Assert.isTrue(!nodeTagNameValidationSet.contains(nodeTag.getTagName()),
                String.format("Duplicate node tag \"%s\" is found.", nodeTag.getTagName()));
        nodeTagNameValidationSet.add(nodeTag.getTagName());
    }

    // Validate that the mandatory AWS tags are present.
    for (String mandatoryTag : herdStringHelper.splitStringWithDefaultDelimiter(
            configurationHelper.getProperty(ConfigurationValue.MANDATORY_AWS_TAGS))) {
        Assert.isTrue(nodeTagNameValidationSet.contains(mandatoryTag),
                String.format("Mandatory AWS tag not specified: \"%s\"", mandatoryTag));
    }

    emrClusterDefinition.setAdditionalMasterSecurityGroups(assertNotBlankAndTrim(
            emrClusterDefinition.getAdditionalMasterSecurityGroups(), "additionalMasterSecurityGroup"));
    emrClusterDefinition.setAdditionalSlaveSecurityGroups(assertNotBlankAndTrim(
            emrClusterDefinition.getAdditionalSlaveSecurityGroups(), "additionalSlaveSecurityGroup"));

    // Fail if a security configuration is specified for an EMR version less than 4.8.0.
    if (StringUtils.isNotBlank(emrClusterDefinition.getSecurityConfiguration())) {
        final DefaultArtifactVersion securityConfigurationMinEmrVersion = new DefaultArtifactVersion("4.8.0");
        Assert.isTrue(StringUtils.isNotBlank(emrClusterDefinition.getReleaseLabel())
                && securityConfigurationMinEmrVersion.compareTo(new DefaultArtifactVersion(
                        emrClusterDefinition.getReleaseLabel().replaceFirst("^(emr-)", ""))) <= 0,
                "EMR security configuration is not supported prior to EMR release 4.8.0.");
    }
}
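Worth noting in the validation above is how isNotEmpty lets an "at least one of the two must be supplied" rule read as a single assertion, even though the instance fleets list may legitimately be null. A reduced sketch of the same idea, with a made-up request type standing in for EmrClusterDefinition:

// Hypothetical request type with two alternative collections; at least one must be supplied.
public void validateCapacityConfiguration(HypotheticalClusterRequest request) {
    Assert.isTrue(CollectionUtils.isNotEmpty(request.getInstanceGroupDefinitions())
                    || CollectionUtils.isNotEmpty(request.getInstanceFleets()),
            "Instance group definitions or instance fleets must be specified.");
}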
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/**
 * Processes partition filters for DDL generation as per generate DDL request.
 *
 * @param generateDdlRequest the generate DDL request
 * @param sb the string builder to be updated with the "alter table add partition" statements
 * @param replacements the hash map of string values to be used to substitute the custom DDL tokens with their actual values
 * @param businessObjectFormat the business object format
 * @param ifNotExistsOption specifies if generated DDL contains "if not exists" option
 */
private void processPartitionFiltersForGenerateDdl(GenerateDdlRequest generateDdlRequest, StringBuilder sb,
        HashMap<String, String> replacements, BusinessObjectFormat businessObjectFormat, String ifNotExistsOption) {
    // Get the business object format key from the entity.
    BusinessObjectFormatKey businessObjectFormatKey = businessObjectFormatHelper
            .getBusinessObjectFormatKey(generateDdlRequest.businessObjectFormatEntity);

    // Override the business object format version with the original (optional) value from the request.
    businessObjectFormatKey.setBusinessObjectFormatVersion(generateDdlRequest.businessObjectFormatVersion);

    // Retrieve a list of storage unit availability DTOs for the specified list of partition filters. The list will be sorted by partition values and
    // storage names. For a non-partitioned table, there should only exist a single business object data entity (with partitionValue equal to "none").
    // We do validate that all specified storage entities are of "S3" storage platform type, so we specify S3 storage platform type in the herdDao call
    // below, so we select storage units only from all S3 storage entities, when the specified list of storage names is empty. We also specify to select
    // only "available" storage units.
    List<StorageUnitAvailabilityDto> storageUnitAvailabilityDtos = storageUnitDao
            .getStorageUnitsByPartitionFilters(businessObjectFormatKey, generateDdlRequest.partitionFilters,
                    generateDdlRequest.businessObjectDataVersion, BusinessObjectDataStatusEntity.VALID,
                    generateDdlRequest.storageNames, StoragePlatformEntity.S3, null, true);

    // Exclude duplicate business object data per specified list of storage names.
    // If storage names are not specified, the method fails on business object data instances registered with multiple storage.
    storageUnitAvailabilityDtos = excludeDuplicateBusinessObjectData(storageUnitAvailabilityDtos,
            generateDdlRequest.storageNames);

    // Build a list of matched partition filters. Please note that each request partition
    // filter might result in multiple available business object data entities.
    List<List<String>> matchedAvailablePartitionFilters = new ArrayList<>();
    List<List<String>> availablePartitions = new ArrayList<>();
    for (StorageUnitAvailabilityDto storageUnitAvailabilityDto : storageUnitAvailabilityDtos) {
        BusinessObjectDataKey businessObjectDataKey = storageUnitAvailabilityDto.getBusinessObjectDataKey();
        matchedAvailablePartitionFilters.add(businessObjectDataHelper.getPartitionFilter(businessObjectDataKey,
                generateDdlRequest.partitionFilters.get(0)));
        availablePartitions.add(businessObjectDataHelper.getPrimaryAndSubPartitionValues(businessObjectDataKey));
    }

    // If the request specifies to include all registered sub-partitions, fail if any "non-available" registered sub-partitions are found.
    if (generateDdlRequest.businessObjectDataVersion == null
            && BooleanUtils.isTrue(generateDdlRequest.includeAllRegisteredSubPartitions)
            && !CollectionUtils.isEmpty(matchedAvailablePartitionFilters)) {
        notAllowNonAvailableRegisteredSubPartitions(businessObjectFormatKey, matchedAvailablePartitionFilters,
                availablePartitions, generateDdlRequest.storageNames);
    }

    // Fail on any missing business object data unless the flag is set to allow missing business object data.
    if (!BooleanUtils.isTrue(generateDdlRequest.allowMissingData)) {
        // Get a list of unmatched partition filters.
        List<List<String>> unmatchedPartitionFilters = new ArrayList<>(generateDdlRequest.partitionFilters);
        unmatchedPartitionFilters.removeAll(matchedAvailablePartitionFilters);

        // Throw an exception if we have any unmatched partition filters.
        if (!unmatchedPartitionFilters.isEmpty()) {
            // Get the first unmatched partition filter and throw exception.
            List<String> unmatchedPartitionFilter = getFirstUnmatchedPartitionFilter(unmatchedPartitionFilters);
            throw new ObjectNotFoundException(String.format(
                    "Business object data {namespace: \"%s\", businessObjectDefinitionName: \"%s\", businessObjectFormatUsage: \"%s\", "
                            + "businessObjectFormatFileType: \"%s\", businessObjectFormatVersion: %d, partitionValue: \"%s\", "
                            + "subpartitionValues: \"%s\", businessObjectDataVersion: %d} is not available in \"%s\" storage(s).",
                    businessObjectFormatKey.getNamespace(),
                    businessObjectFormatKey.getBusinessObjectDefinitionName(),
                    businessObjectFormatKey.getBusinessObjectFormatUsage(),
                    businessObjectFormatKey.getBusinessObjectFormatFileType(),
                    businessObjectFormatKey.getBusinessObjectFormatVersion(), unmatchedPartitionFilter.get(0),
                    StringUtils.join(unmatchedPartitionFilter.subList(1, unmatchedPartitionFilter.size()), ","),
                    generateDdlRequest.businessObjectDataVersion,
                    StringUtils.join(generateDdlRequest.storageNames, ",")));
        }
    }

    // We still need to close/complete the create table statement when there is no custom DDL,
    // the table is non-partitioned, and there is no business object data found.
    if (generateDdlRequest.customDdlEntity == null && !generateDdlRequest.isPartitioned
            && CollectionUtils.isEmpty(storageUnitAvailabilityDtos)) {
        // Add a LOCATION clause with a token.
        sb.append(String.format("LOCATION '%s';", NON_PARTITIONED_TABLE_LOCATION_CUSTOM_DDL_TOKEN));
    }
    // The table is partitioned, custom DDL is specified, or there is at least one business object data instance found.
    else {
        // If the drop partitions flag is set and the table is partitioned, drop partitions specified by the partition filters.
        if (generateDdlRequest.isPartitioned && BooleanUtils.isTrue(generateDdlRequest.includeDropPartitions)) {
            // Generate the beginning of the alter table statement.
            String alterTableFirstToken = String.format("ALTER TABLE `%s` DROP IF EXISTS",
                    generateDdlRequest.tableName);

            // Create a drop partition statement for each partition filter entry.
            List<String> dropPartitionStatements = new ArrayList<>();
            for (List<String> partitionFilter : generateDdlRequest.partitionFilters) {
                // Start building a drop partition statement for this partition filter.
                StringBuilder dropPartitionStatement = new StringBuilder();
                dropPartitionStatement.append(String.format("%s PARTITION (",
                        BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable) ? " "
                                : alterTableFirstToken));

                // Specify all partition column values as per this partition filter.
                List<String> partitionKeyValuePairs = new ArrayList<>();
                for (int i = 0; i < partitionFilter.size(); i++) {
                    if (StringUtils.isNotBlank(partitionFilter.get(i))) {
                        // We cannot hit ArrayIndexOutOfBoundsException on getPartitions() since partitionFilter would
                        // not have a value set at an index that is greater than or equal to the number of partitions in the schema.
                        String partitionColumnName = businessObjectFormat.getSchema().getPartitions().get(i)
                                .getName();
                        partitionKeyValuePairs
                                .add(String.format("`%s`='%s'", partitionColumnName, partitionFilter.get(i)));
                    }
                }

                // Complete the drop partition statement.
                dropPartitionStatement.append(StringUtils.join(partitionKeyValuePairs, ", ")).append(')');

                // Add this drop partition statement to the list.
                dropPartitionStatements.add(dropPartitionStatement.toString());
            }

            // Add all drop partition statements to the main string builder.
            if (CollectionUtils.isNotEmpty(dropPartitionStatements)) {
                // If specified, combine dropping multiple partitions in a single ALTER TABLE statement.
                if (BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable)) {
                    sb.append(alterTableFirstToken).append('\n');
                }
                sb.append(StringUtils.join(dropPartitionStatements,
                        BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable) ? ",\n"
                                : ";\n"))
                        .append(";\n\n");
            }
        }

        // Process storage unit entities.
        if (!CollectionUtils.isEmpty(storageUnitAvailabilityDtos)) {
            processStorageUnitsForGenerateDdl(generateDdlRequest, sb, replacements, businessObjectFormat,
                    ifNotExistsOption, storageUnitAvailabilityDtos);
        }
    }
}
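In the method above, CollectionUtils.isEmpty and isNotEmpty act as complements: isEmpty decides whether the non-partitioned table still needs its placeholder LOCATION clause, while isNotEmpty ensures the joined DROP PARTITION clauses (and the optional shared ALTER TABLE prefix) are only appended when at least one clause was actually built. A stripped-down sketch of that guard, assuming a hypothetical helper that builds the clauses and an illustrative table name:

// Accumulate individual DROP PARTITION clauses, then emit them only if any were built.
StringBuilder sb = new StringBuilder();
List<String> dropPartitionStatements = buildDropPartitionStatements(partitionFilters); // hypothetical helper
if (CollectionUtils.isNotEmpty(dropPartitionStatements)) {
    // Shared prefix for the combined ALTER TABLE form; skipped entirely when the list is empty.
    sb.append("ALTER TABLE `my_table` DROP IF EXISTS").append('\n');
    sb.append(StringUtils.join(dropPartitionStatements, ",\n")).append(";\n\n");
}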
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/**
 * Adds the relative "alter table add partition" statements for each storage unit entity. Please note that each request partition value might result in
 * multiple available storage unit entities (subpartitions).
 *
 * @param sb the string builder to be updated with the "alter table add partition" statements
 * @param replacements the hash map of string values to be used to substitute the custom DDL tokens with their actual values
 * @param businessObjectFormatForSchema the business object format to be used for schema
 * @param ifNotExistsOption specifies if generated DDL contains "if not exists" option
 * @param storageUnitAvailabilityDtos the list of storage unit availability DTOs
 */
private void processStorageUnitsForGenerateDdl(GenerateDdlRequest generateDdlRequest, StringBuilder sb,
        HashMap<String, String> replacements, BusinessObjectFormat businessObjectFormatForSchema,
        String ifNotExistsOption, List<StorageUnitAvailabilityDto> storageUnitAvailabilityDtos) {
    // If the flag is not set to suppress scan for unregistered sub-partitions, retrieve all storage
    // file paths for the relative storage units loaded in a multi-valued map for easy access.
    MultiValuedMap<Integer, String> storageUnitIdToStorageFilePathsMap = BooleanUtils.isTrue(
            generateDdlRequest.suppressScanForUnregisteredSubPartitions) ? new ArrayListValuedHashMap<>()
            : storageFileDao.getStorageFilePathsByStorageUnitIds(
                    storageUnitHelper.getStorageUnitIds(storageUnitAvailabilityDtos));

    // Create a map of storage names in upper case to their relative S3 key prefix velocity templates.
    Map<String, String> s3KeyPrefixVelocityTemplates = new HashMap<>();

    // Create a map of business object format keys to their relative business object format instances.
    Map<BusinessObjectFormatKey, BusinessObjectFormat> businessObjectFormats = new HashMap<>();

    // Get data provider for the business object definition.
    BusinessObjectDefinitionEntity businessObjectDefinitionEntity = businessObjectDefinitionDaoHelper
            .getBusinessObjectDefinitionEntity(
                    new BusinessObjectDefinitionKey(businessObjectFormatForSchema.getNamespace(),
                            businessObjectFormatForSchema.getBusinessObjectDefinitionName()));
    String dataProviderName = businessObjectDefinitionEntity.getDataProvider().getName();

    // Generate the beginning of the alter table statement.
    String alterTableFirstToken = String
            .format("ALTER TABLE `%s` ADD %s", generateDdlRequest.tableName, ifNotExistsOption).trim();

    // Process all available business object data instances.
    List<String> addPartitionStatements = new ArrayList<>();
    for (StorageUnitAvailabilityDto storageUnitAvailabilityDto : storageUnitAvailabilityDtos) {
        // Get storage name in upper case for this storage unit.
        String upperCaseStorageName = storageUnitAvailabilityDto.getStorageName().toUpperCase();

        // Get storage entity for this storage unit.
        StorageEntity storageEntity = getStorageEntity(upperCaseStorageName, generateDdlRequest.storageEntities);

        // Get business object data key for this business object data.
        BusinessObjectDataKey businessObjectDataKey = storageUnitAvailabilityDto.getBusinessObjectDataKey();

        // Get business object format key for this business object data.
        BusinessObjectFormatKey businessObjectFormatKey = businessObjectFormatHelper
                .getBusinessObjectFormatKey(businessObjectDataKey);

        // Retrieve the S3 key prefix velocity template for this storage.
        String s3KeyPrefixVelocityTemplate = getS3KeyPrefixVelocityTemplate(upperCaseStorageName, storageEntity,
                s3KeyPrefixVelocityTemplates);

        // Retrieve business object format for this business object data.
        BusinessObjectFormat businessObjectFormat = getBusinessObjectFormat(businessObjectFormatKey,
                businessObjectFormats);

        // Build the expected S3 key prefix for this storage unit.
        String s3KeyPrefix = s3KeyPrefixHelper.buildS3KeyPrefix(s3KeyPrefixVelocityTemplate, dataProviderName,
                businessObjectFormat, businessObjectDataKey, storageUnitAvailabilityDto.getStorageName());

        // If the flag is set to suppress scan for unregistered sub-partitions, use the directory path or the S3 key prefix
        // as the partition's location; otherwise, use storage files to discover all unregistered sub-partitions.
        Collection<String> storageFilePaths = new ArrayList<>();
        if (BooleanUtils.isTrue(generateDdlRequest.suppressScanForUnregisteredSubPartitions)) {
            // Validate the directory path value if it is present.
            if (storageUnitAvailabilityDto.getStorageUnitDirectoryPath() != null) {
                Assert.isTrue(storageUnitAvailabilityDto.getStorageUnitDirectoryPath().equals(s3KeyPrefix),
                        String.format(
                                "Storage directory path \"%s\" registered with business object data {%s} "
                                        + "in \"%s\" storage does not match the expected S3 key prefix \"%s\".",
                                storageUnitAvailabilityDto.getStorageUnitDirectoryPath(),
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                storageUnitAvailabilityDto.getStorageName(), s3KeyPrefix));
            }

            // Add the S3 key prefix to the list of storage files.
            // We add a trailing '/' character to the prefix, since it represents a directory.
            storageFilePaths.add(StringUtils.appendIfMissing(s3KeyPrefix, "/"));
        } else {
            // Retrieve storage file paths registered with this business object data in the specified storage.
            storageFilePaths = storageUnitIdToStorageFilePathsMap
                    .containsKey(storageUnitAvailabilityDto.getStorageUnitId())
                            ? storageUnitIdToStorageFilePathsMap.get(storageUnitAvailabilityDto.getStorageUnitId())
                            : new ArrayList<>();

            // Validate storage file paths registered with this business object data in the specified storage.
            // The validation check below is required even if we have no storage files registered.
            storageFileHelper.validateStorageFilePaths(storageFilePaths, s3KeyPrefix, businessObjectDataKey,
                    storageUnitAvailabilityDto.getStorageName());

            // If there are no storage files registered for this storage unit, we should use the storage directory path value.
            if (storageFilePaths.isEmpty()) {
                // Validate that the directory path value is present and it matches the S3 key prefix.
                Assert.isTrue(storageUnitAvailabilityDto.getStorageUnitDirectoryPath() != null
                        && storageUnitAvailabilityDto.getStorageUnitDirectoryPath().startsWith(s3KeyPrefix),
                        String.format(
                                "Storage directory path \"%s\" registered with business object data {%s} "
                                        + "in \"%s\" storage does not match the expected S3 key prefix \"%s\".",
                                storageUnitAvailabilityDto.getStorageUnitDirectoryPath(),
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                storageUnitAvailabilityDto.getStorageName(), s3KeyPrefix));

                // Add the storage directory path to the empty storage files list.
                // We add a trailing '/' character to the path, since it represents a directory.
                storageFilePaths.add(storageUnitAvailabilityDto.getStorageUnitDirectoryPath() + "/");
            }
        }

        // Retrieve the S3 bucket name.
        String s3BucketName = getS3BucketName(upperCaseStorageName, storageEntity,
                generateDdlRequest.s3BucketNames);

        // For a partitioned table, add the relative partitions to the generated DDL.
        if (generateDdlRequest.isPartitioned) {
            // If the flag is set to suppress scan for unregistered sub-partitions, validate that the number of primary and sub-partition values specified
            // for the business object data is equal to the number of partition columns defined in schema for the format selected for DDL generation.
            if (BooleanUtils.isTrue(generateDdlRequest.suppressScanForUnregisteredSubPartitions)) {
                int businessObjectDataRegisteredPartitions = 1
                        + CollectionUtils.size(businessObjectDataKey.getSubPartitionValues());
                Assert.isTrue(
                        businessObjectFormatForSchema.getSchema().getPartitions()
                                .size() == businessObjectDataRegisteredPartitions,
                        String.format(
                                "Number of primary and sub-partition values (%d) specified for the business object data is not equal to "
                                        + "the number of partition columns (%d) defined in the schema of the business object format selected for DDL generation. "
                                        + "Business object data: {%s}, business object format: {%s}",
                                businessObjectDataRegisteredPartitions,
                                businessObjectFormatForSchema.getSchema().getPartitions().size(),
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                businessObjectFormatHelper.businessObjectFormatKeyToString(businessObjectFormatHelper
                                        .getBusinessObjectFormatKey(businessObjectFormatForSchema))));
            }
            // Otherwise, since the format version selected for DDL generation might not match the relative business object format version that business
            // object data is registered against, validate that the number of sub-partition values specified for the business object data is less than
            // the number of partition columns defined in schema for the format selected for DDL generation.
            else {
                Assert.isTrue(
                        businessObjectFormatForSchema.getSchema().getPartitions().size() > CollectionUtils
                                .size(businessObjectDataKey.getSubPartitionValues()),
                        String.format(
                                "Number of subpartition values specified for the business object data is greater than or equal to "
                                        + "the number of partition columns defined in the schema of the business object format selected for DDL generation. "
                                        + "Business object data: {%s}, business object format: {%s}",
                                businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey),
                                businessObjectFormatHelper.businessObjectFormatKeyToString(businessObjectFormatHelper
                                        .getBusinessObjectFormatKey(businessObjectFormatForSchema))));
            }

            // Get partition information. For multiple level partitioning, auto-discover subpartitions (subdirectories) not already included into the S3 key
            // prefix. Each discovered partition requires a standalone "add partition" clause. Please note that due to the above validation check, there
            // should be no auto discoverable sub-partition columns when the flag is set to suppress scan for unregistered sub-partitions.
            List<SchemaColumn> autoDiscoverableSubPartitionColumns = businessObjectFormatForSchema.getSchema()
                    .getPartitions()
                    .subList(1 + CollectionUtils.size(businessObjectDataKey.getSubPartitionValues()),
                            businessObjectFormatForSchema.getSchema().getPartitions().size());

            // Get and process Hive partitions.
            for (HivePartitionDto hivePartition : getHivePartitions(businessObjectDataKey,
                    autoDiscoverableSubPartitionColumns, s3KeyPrefix, storageFilePaths,
                    storageUnitAvailabilityDto.getStorageName())) {
                // Build an add partition statement for this hive partition.
                StringBuilder addPartitionStatement = new StringBuilder();
                addPartitionStatement.append(String.format("%s PARTITION (",
                        BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable) ? " "
                                : alterTableFirstToken));

                // Specify all partition column values.
                List<String> partitionKeyValuePairs = new ArrayList<>();
                for (int i = 0; i < businessObjectFormatForSchema.getSchema().getPartitions().size(); i++) {
                    String partitionColumnName = businessObjectFormatForSchema.getSchema().getPartitions().get(i)
                            .getName();
                    String partitionValue = hivePartition.getPartitionValues().get(i);
                    partitionKeyValuePairs.add(String.format("`%s`='%s'", partitionColumnName, partitionValue));
                }
                addPartitionStatement.append(StringUtils.join(partitionKeyValuePairs, ", "));
                addPartitionStatement.append(String.format(") LOCATION 's3n://%s/%s%s'", s3BucketName, s3KeyPrefix,
                        StringUtils.isNotBlank(hivePartition.getPath()) ? hivePartition.getPath() : ""));

                // Add this add partition statement to the list.
                addPartitionStatements.add(addPartitionStatement.toString());
            }
        } else {
            // This is a non-partitioned table.
            // Get location for this non-partitioned table.
            String tableLocation = String.format("s3n://%s/%s", s3BucketName, s3KeyPrefix);

            if (generateDdlRequest.customDdlEntity == null) {
                // Since custom DDL was not specified and this table is not partitioned, add a LOCATION clause.
                // This is the last line in the non-partitioned table DDL.
                sb.append(String.format("LOCATION '%s';", tableLocation));
            } else {
                // Since custom DDL was used for a non-partitioned table, substitute the relative custom DDL token with the actual table location.
                replacements.put(NON_PARTITIONED_TABLE_LOCATION_CUSTOM_DDL_TOKEN, tableLocation);
            }
        }
    }

    // Add all add partition statements to the main string builder.
    if (CollectionUtils.isNotEmpty(addPartitionStatements)) {
        // If specified, combine adding multiple partitions in a single ALTER TABLE statement.
        if (BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable)) {
            sb.append(alterTableFirstToken).append('\n');
        }
        sb.append(StringUtils.join(addPartitionStatements,
                BooleanUtils.isTrue(generateDdlRequest.combineMultiplePartitionsInSingleAlterTable) ? ",\n"
                        : ";\n"))
                .append(";\n");
    }
}
From source file:org.finra.herd.service.helper.Hive13DdlGenerator.java
/**
 * Eliminate storage units that belong to the same business object data by picking storage unit registered in a storage listed earlier in the list of
 * storage names specified in the request. If storage names are not specified, simply fail on business object data instances registered with multiple
 * storage.
 *
 * @param storageUnitAvailabilityDtos the list of storage unit availability DTOs
 * @param storageNames the list of storage names
 *
 * @return the updated list of storage unit availability DTOs
 * @throws IllegalArgumentException on business object data being registered in multiple storage and storage names are not specified to resolve this
 */
protected List<StorageUnitAvailabilityDto> excludeDuplicateBusinessObjectData(
        List<StorageUnitAvailabilityDto> storageUnitAvailabilityDtos, List<String> storageNames)
        throws IllegalArgumentException {
    // Convert the list of storage names to upper case.
    List<String> upperCaseStorageNames = new ArrayList<>();
    if (CollectionUtils.isNotEmpty(storageNames)) {
        for (String storageName : storageNames) {
            upperCaseStorageNames.add(storageName.toUpperCase());
        }
    }

    // If storage names are not specified, fail on business object data instance registered with multiple storage.
    // Otherwise, in a case when the same business object data is registered with multiple storage,
    // pick storage unit registered in a storage listed earlier in the list of storage names specified in the request.
    Map<BusinessObjectDataKey, StorageUnitAvailabilityDto> businessObjectDataToStorageUnitMap = new LinkedHashMap<>();
    for (StorageUnitAvailabilityDto storageUnitAvailabilityDto : storageUnitAvailabilityDtos) {
        BusinessObjectDataKey businessObjectDataKey = storageUnitAvailabilityDto.getBusinessObjectDataKey();

        if (businessObjectDataToStorageUnitMap.containsKey(businessObjectDataKey)) {
            // Duplicate business object data is found, so check if storage names are specified.
            if (CollectionUtils.isEmpty(upperCaseStorageNames)) {
                // Fail on business object data registered in multiple storage.
                throw new IllegalArgumentException(String.format(
                        "Found business object data registered in more than one storage. "
                                + "Please specify storage(s) in the request to resolve this. Business object data {%s}",
                        businessObjectDataHelper.businessObjectDataKeyToString(businessObjectDataKey)));
            } else {
                // Replace the storage unit entity if it belongs to a "higher priority" storage.
                String currentUpperCaseStorageName = businessObjectDataToStorageUnitMap.get(businessObjectDataKey)
                        .getStorageName().toUpperCase();
                int currentStorageIndex = upperCaseStorageNames.indexOf(currentUpperCaseStorageName);
                int newStorageIndex = upperCaseStorageNames
                        .indexOf(storageUnitAvailabilityDto.getStorageName().toUpperCase());
                if (newStorageIndex < currentStorageIndex) {
                    businessObjectDataToStorageUnitMap.put(businessObjectDataKey, storageUnitAvailabilityDto);
                }
            }
        } else {
            businessObjectDataToStorageUnitMap.put(businessObjectDataKey, storageUnitAvailabilityDto);
        }
    }

    return new ArrayList<>(businessObjectDataToStorageUnitMap.values());
}
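Since isNotEmpty already tolerates a null storageNames argument, the normalization step above could equivalently be written with streams; the explicit loop in the source keeps behavior identical while staying closer to the surrounding style. An equivalent sketch, assuming Java 8+ and an import of java.util.stream.Collectors:

// Equivalent normalization: a null or empty storageNames list yields an empty result.
List<String> upperCaseStorageNames = CollectionUtils.isNotEmpty(storageNames)
        ? storageNames.stream().map(String::toUpperCase).collect(Collectors.toList())
        : new ArrayList<>();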
From source file:org.finra.herd.service.helper.notification.AbstractNotificationMessageBuilder.java
/**
 * Builds a list of notification messages for the change event. The result list might be empty if no messages should be sent.
 *
 * @param notificationEvent the notification event
 *
 * @return the list of notification messages
 */
public List<NotificationMessage> buildNotificationMessages(NotificationEvent notificationEvent) {
    // Create a result list.
    List<NotificationMessage> notificationMessages = new ArrayList<>();

    // Get notification message definitions.
    NotificationMessageDefinitions notificationMessageDefinitions = configurationDaoHelper
            .getXmlClobPropertyAndUnmarshallToObject(NotificationMessageDefinitions.class,
                    getMessageDefinitionKey(notificationEvent));

    // Continue processing if notification message definitions are configured.
    if (notificationMessageDefinitions != null
            && CollectionUtils.isNotEmpty(notificationMessageDefinitions.getNotificationMessageDefinitions())) {
        // Create a Velocity context map and initialize it with common keys and values.
        Map<String, Object> velocityContextMap = getBaseVelocityContextMap();

        // Add notification message type specific keys and values to the context map.
        velocityContextMap.putAll(getNotificationMessageVelocityContextMap(notificationEvent));

        // Generate a notification message for each notification message definition.
        for (NotificationMessageDefinition notificationMessageDefinition : notificationMessageDefinitions
                .getNotificationMessageDefinitions()) {
            // Validate the notification message type.
            if (StringUtils.isBlank(notificationMessageDefinition.getMessageType())) {
                throw new IllegalStateException(String.format(
                        "Notification message type must be specified. Please update \"%s\" configuration entry.",
                        getMessageDefinitionKey(notificationEvent)));
            } else if (!notificationMessageDefinition.getMessageType().toUpperCase()
                    .equals(MessageTypeEntity.MessageEventTypes.SNS.toString())) {
                throw new IllegalStateException(String.format(
                        "Only \"%s\" notification message type is supported. Please update \"%s\" configuration entry.",
                        MessageTypeEntity.MessageEventTypes.SNS.toString(),
                        getMessageDefinitionKey(notificationEvent)));
            }

            // Validate the notification message destination.
            if (StringUtils.isBlank(notificationMessageDefinition.getMessageDestination())) {
                throw new IllegalStateException(String.format(
                        "Notification message destination must be specified. Please update \"%s\" configuration entry.",
                        getMessageDefinitionKey(notificationEvent)));
            }

            // Evaluate the template to generate the message text.
            String messageText = evaluateVelocityTemplate(
                    notificationMessageDefinition.getMessageVelocityTemplate(), velocityContextMap,
                    notificationEvent.getClass().getCanonicalName());

            // Build a list of optional message headers.
            List<MessageHeader> messageHeaders = new ArrayList<>();
            if (CollectionUtils.isNotEmpty(notificationMessageDefinition.getMessageHeaderDefinitions())) {
                for (MessageHeaderDefinition messageHeaderDefinition : notificationMessageDefinition
                        .getMessageHeaderDefinitions()) {
                    messageHeaders.add(new MessageHeader(messageHeaderDefinition.getKey(),
                            evaluateVelocityTemplate(messageHeaderDefinition.getValueVelocityTemplate(),
                                    velocityContextMap,
                                    String.format("%s_messageHeader_%s",
                                            notificationEvent.getClass().getCanonicalName(),
                                            messageHeaderDefinition.getKey()))));
                }
            }

            // Create a notification message and add it to the result list.
            notificationMessages.add(new NotificationMessage(notificationMessageDefinition.getMessageType(),
                    notificationMessageDefinition.getMessageDestination(), messageText, messageHeaders));
        }
    }

    return notificationMessages;
}
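One detail in the guard above: isNotEmpty is null-safe for the collection itself but not for the object holding it, so the explicit null check on the unmarshalled wrapper is still required before calling getNotificationMessageDefinitions(). A minimal illustration, with a hypothetical loader standing in for the configuration helper:

// The wrapper may be null when the configuration entry is absent; isNotEmpty only guards the list inside it.
NotificationMessageDefinitions definitions = loadNotificationMessageDefinitions(); // hypothetical lookup
if (definitions != null && CollectionUtils.isNotEmpty(definitions.getNotificationMessageDefinitions())) {
    // Safe to iterate: at least one message definition is configured.
    for (NotificationMessageDefinition definition : definitions.getNotificationMessageDefinitions()) {
        // ... build one notification message per definition ...
    }
}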
From source file:org.finra.herd.service.impl.AttributeValueListServiceImpl.java
@Override
public AttributeValueListKeys getAttributeValueLists() {
    // Get the namespaces which the current user is authorized to READ.
    Set<String> authorizedNamespaces = namespaceSecurityHelper
            .getAuthorizedNamespaces(NamespacePermissionEnum.READ);

    // Create an empty list of keys.
    List<AttributeValueListKey> attributeValueListKeys = new ArrayList<>();

    // Continue the processing only when the list of authorized namespaces is not empty.
    if (CollectionUtils.isNotEmpty(authorizedNamespaces)) {
        attributeValueListKeys.addAll(attributeValueListDao.getAttributeValueLists(authorizedNamespaces));
    }

    // Return the list of keys.
    return new AttributeValueListKeys(attributeValueListKeys);
}
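Because the parameter type is Collection<?>, the same check applies to the Set of authorized namespaces returned by the security helper, and the guard also avoids an unnecessary DAO call when the user has no READ access anywhere. A small sketch of that short-circuit, with illustrative method names in place of the helper and DAO:

// isNotEmpty accepts any Collection implementation, including Set.
Set<String> authorizedNamespaces = getAuthorizedNamespaces(); // hypothetical security lookup
List<AttributeValueListKey> keys = new ArrayList<>();
if (CollectionUtils.isNotEmpty(authorizedNamespaces)) {
    keys.addAll(findAttributeValueLists(authorizedNamespaces)); // hypothetical DAO call
}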
From source file:org.finra.herd.service.impl.BusinessObjectDataServiceImpl.java
@NamespacePermission(fields = "#businessObjectDataSearchRequest.businessObjectDataSearchFilters[0].BusinessObjectDataSearchKeys[0].namespace", permissions = NamespacePermissionEnum.READ) @Override// www. j a va 2s.c o m public BusinessObjectDataSearchResultPagingInfoDto searchBusinessObjectData(Integer pageNum, Integer pageSize, BusinessObjectDataSearchRequest businessObjectDataSearchRequest) { // TODO: Check name space permission for all entries in the request. // Validate the business object data search request. businessObjectDataSearchHelper.validateBusinessObjectDataSearchRequest(businessObjectDataSearchRequest); // Get the maximum number of results that can be returned on any page of data. The "pageSize" query parameter should not be greater than // this value or an HTTP status of 400 (Bad Request) error would be returned. int maxResultsPerPage = configurationHelper .getProperty(ConfigurationValue.BUSINESS_OBJECT_DATA_SEARCH_MAX_PAGE_SIZE, Integer.class); // Validate the page number and page size // Set the defaults if pageNum and pageSize are null // Page number must be greater than 0 // Page size must be greater than 0 and less than maximum page size pageNum = businessObjectDataSearchHelper.validatePagingParameter("pageNum", pageNum, 1, Integer.MAX_VALUE); pageSize = businessObjectDataSearchHelper.validatePagingParameter("pageSize", pageSize, maxResultsPerPage, maxResultsPerPage); // Get the maximum record count that is configured in the system. Integer businessObjectDataSearchMaxResultCount = configurationHelper .getProperty(ConfigurationValue.BUSINESS_OBJECT_DATA_SEARCH_MAX_RESULT_COUNT, Integer.class); // Get the business object data search key. // We assume that the input list contains only one filter with a single search key, since validation should be passed by now. BusinessObjectDataSearchKey businessObjectDataSearchKey = businessObjectDataSearchRequest .getBusinessObjectDataSearchFilters().get(0).getBusinessObjectDataSearchKeys().get(0); // Validate partition keys in partition value filters. if (CollectionUtils.isNotEmpty(businessObjectDataSearchKey.getPartitionValueFilters())) { // Get a count of business object formats that match the business object data search key parameters without the list of partition keys. Long businessObjectFormatRecordCount = businessObjectFormatDao .getBusinessObjectFormatCountByPartitionKeys(businessObjectDataSearchKey.getNamespace(), businessObjectDataSearchKey.getBusinessObjectDefinitionName(), businessObjectDataSearchKey.getBusinessObjectFormatUsage(), businessObjectDataSearchKey.getBusinessObjectFormatFileType(), businessObjectDataSearchKey.getBusinessObjectFormatVersion(), null); // If business object format record count is zero, we return an empty result list. if (businessObjectFormatRecordCount == 0) { return new BusinessObjectDataSearchResultPagingInfoDto(pageNum.longValue(), pageSize.longValue(), 0L, 0L, 0L, (long) maxResultsPerPage, new BusinessObjectDataSearchResult(new ArrayList<>())); } // Get partition keys from the list of partition value filters. List<String> partitionKeys = new ArrayList<>(); for (PartitionValueFilter partitionValueFilter : businessObjectDataSearchKey .getPartitionValueFilters()) { // Get partition key from the partition value filter. Partition key should not be empty, since validation is passed by now. partitionKeys.add(partitionValueFilter.getPartitionKey()); } // Get a count of business object formats that match the business object data search key parameters and the list of partition keys. 
businessObjectFormatRecordCount = businessObjectFormatDao.getBusinessObjectFormatCountByPartitionKeys( businessObjectDataSearchKey.getNamespace(), businessObjectDataSearchKey.getBusinessObjectDefinitionName(), businessObjectDataSearchKey.getBusinessObjectFormatUsage(), businessObjectDataSearchKey.getBusinessObjectFormatFileType(), businessObjectDataSearchKey.getBusinessObjectFormatVersion(), partitionKeys); // Fail if business object formats found that contain specified partition keys in their schema. Assert.isTrue(businessObjectFormatRecordCount > 0, String.format( "There are no registered business object formats with \"%s\" namespace, \"%s\" business object definition name", businessObjectDataSearchKey.getNamespace(), businessObjectDataSearchKey.getBusinessObjectDefinitionName()) + (StringUtils.isNotBlank(businessObjectDataSearchKey.getBusinessObjectFormatUsage()) ? String.format(", \"%s\" business object format usage", businessObjectDataSearchKey.getBusinessObjectFormatUsage()) : "") + (StringUtils.isNotBlank(businessObjectDataSearchKey.getBusinessObjectFormatFileType()) ? String.format(", \"%s\" business object format file type", businessObjectDataSearchKey.getBusinessObjectFormatFileType()) : "") + (businessObjectDataSearchKey.getBusinessObjectFormatVersion() != null ? String.format(", \"%d\" business object format version", businessObjectDataSearchKey.getBusinessObjectFormatVersion()) : "") + String.format(" that have schema with partition columns matching \"%s\" partition key(s).", String.join(", ", partitionKeys))); } // Get the total record count up to to the maximum allowed record count that is configured in the system plus one more record. Integer totalRecordCount = businessObjectDataDao.getBusinessObjectDataLimitedCountBySearchKey( businessObjectDataSearchKey, businessObjectDataSearchMaxResultCount + 1); // Validate the total record count. if (totalRecordCount > businessObjectDataSearchMaxResultCount) { throw new IllegalArgumentException( String.format("Result limit of %d exceeded. Modify filters to further limit results.", businessObjectDataSearchMaxResultCount)); } // If total record count is zero, we return an empty result list. Otherwise, execute the search. List<BusinessObjectData> businessObjectDataList = totalRecordCount == 0 ? new ArrayList<>() : businessObjectDataDao.searchBusinessObjectData(businessObjectDataSearchKey, pageNum, pageSize); // Get the page count. Integer pageCount = totalRecordCount / pageSize + (totalRecordCount % pageSize > 0 ? 1 : 0); // Build and return the business object data search result with the paging information. return new BusinessObjectDataSearchResultPagingInfoDto(pageNum.longValue(), pageSize.longValue(), pageCount.longValue(), (long) businessObjectDataList.size(), totalRecordCount.longValue(), (long) maxResultsPerPage, new BusinessObjectDataSearchResult(businessObjectDataList)); }
From source file:org.finra.herd.service.impl.BusinessObjectDataServiceImpl.java
@NamespacePermission(fields = "#businessObjectDataKey.namespace", permissions = NamespacePermissionEnum.WRITE) @Override/*from ww w . ja v a 2 s . c om*/ public BusinessObjectData updateBusinessObjectDataParents(BusinessObjectDataKey businessObjectDataKey, BusinessObjectDataParentsUpdateRequest businessObjectDataParentsUpdateRequest) { // Validate and trim the business object data key. businessObjectDataHelper.validateBusinessObjectDataKey(businessObjectDataKey, true, true); // Validate the update request. Assert.notNull(businessObjectDataParentsUpdateRequest, "A business object data parents update request must be specified."); // Validate and trim the parents' keys. businessObjectDataDaoHelper.validateBusinessObjectDataKeys( businessObjectDataParentsUpdateRequest.getBusinessObjectDataParents()); // Retrieve the business object data and ensure it exists. BusinessObjectDataEntity businessObjectDataEntity = businessObjectDataDaoHelper .getBusinessObjectDataEntity(businessObjectDataKey); // Fail if this business object data is not in a pre-registration status. if (BooleanUtils.isNotTrue(businessObjectDataEntity.getStatus().getPreRegistrationStatus())) { throw new IllegalArgumentException(String.format( "Unable to update parents for business object data because it has \"%s\" status, which is not one of pre-registration statuses.", businessObjectDataEntity.getStatus().getCode())); } // Update parents. List<BusinessObjectDataEntity> businessObjectDataParents = businessObjectDataEntity .getBusinessObjectDataParents(); // Remove all existing parents. businessObjectDataParents.clear(); // Loop through all business object data parents specified in the request and add them one by one. if (CollectionUtils.isNotEmpty(businessObjectDataParentsUpdateRequest.getBusinessObjectDataParents())) { for (BusinessObjectDataKey businessObjectDataParentKey : businessObjectDataParentsUpdateRequest .getBusinessObjectDataParents()) { // Look up parent business object data. BusinessObjectDataEntity businessObjectDataParentEntity = businessObjectDataDaoHelper .getBusinessObjectDataEntity(businessObjectDataParentKey); // Add business object data entity being updated as a dependent (i.e. child) of the looked up parent. businessObjectDataParentEntity.getBusinessObjectDataChildren().add(businessObjectDataEntity); // Add the looked up parent as a parent of the business object data entity being updated. businessObjectDataParents.add(businessObjectDataParentEntity); } } // Persist and refresh the entity. businessObjectDataEntity = businessObjectDataDao.saveAndRefresh(businessObjectDataEntity); // Create and return the business object data object from the persisted entity. return businessObjectDataHelper.createBusinessObjectDataFromEntity(businessObjectDataEntity); }
From source file:org.finra.herd.service.impl.BusinessObjectDataStorageUnitServiceImpl.java
/**
 * Creates and populates a business object data storage unit create response.
 *
 * @param storageUnitEntity the storage unit entity
 *
 * @return the business object data storage unit create response
 */
protected BusinessObjectDataStorageUnitCreateResponse createBusinessObjectDataStorageUnitCreateResponse(
        StorageUnitEntity storageUnitEntity) {
    // Get business object data key from the business object data entity.
    BusinessObjectDataKey businessObjectDataKey = businessObjectDataHelper
            .createBusinessObjectDataKeyFromEntity(storageUnitEntity.getBusinessObjectData());

    // Create a business object data storage unit create response.
    BusinessObjectDataStorageUnitCreateResponse response = new BusinessObjectDataStorageUnitCreateResponse();

    // Add business object data storage unit key.
    response.setBusinessObjectDataStorageUnitKey(storageUnitHelper.createBusinessObjectDataStorageUnitKey(
            businessObjectDataKey, storageUnitEntity.getStorage().getName()));

    // Add storage directory.
    if (storageUnitEntity.getDirectoryPath() != null) {
        response.setStorageDirectory(new StorageDirectory(storageUnitEntity.getDirectoryPath()));
    }

    // Add storage files.
    if (CollectionUtils.isNotEmpty(storageUnitEntity.getStorageFiles())) {
        response.setStorageFiles(
                storageFileHelper.createStorageFilesFromEntities(storageUnitEntity.getStorageFiles()));
    }

    // Return the response.
    return response;
}