Example usage for java.util.EnumMap: the EnumMap constructor

List of usage examples for the java.util.EnumMap constructor

Introduction

This page collects example usages of the java.util.EnumMap constructor, drawn from open-source projects.

Prototype

public EnumMap(Map<K, ? extends V> m) 

Document

Creates an enum map initialized from the specified map. Note that most of the examples below actually call the companion constructor EnumMap(Class<K> keyType), which creates an empty enum map for the given key type; the MG4J Scan example near the end is the one that uses the Map-based form shown above.
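
Before the project listings, here is a minimal, self-contained sketch of both constructor forms; the Phase enum and its values are hypothetical and exist only for illustration:

import java.util.EnumMap;
import java.util.HashMap;
import java.util.Map;

public class EnumMapConstructorDemo {

    // Hypothetical enum used only for illustration.
    enum Phase { INIT, RUNNING, DONE }

    public static void main(String[] args) {
        // Class-based constructor: creates an empty map keyed by Phase.
        EnumMap<Phase, String> byClass = new EnumMap<>(Phase.class);
        byClass.put(Phase.INIT, "starting up");

        // Map-based constructor (the prototype above): copies an existing map.
        // If the source map is not itself an EnumMap, it must contain at least
        // one entry, otherwise the key type cannot be inferred.
        Map<Phase, String> source = new HashMap<>();
        source.put(Phase.RUNNING, "in progress");
        EnumMap<Phase, String> byCopy = new EnumMap<>(source);

        System.out.println(byClass); // {INIT=starting up}
        System.out.println(byCopy);  // {RUNNING=in progress}
    }
}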

Usage

From source file:org.broadleafcommerce.cms.admin.server.handler.StaticAssetCustomPersistenceHandler.java

@Override
public DynamicResultSet inspect(PersistencePackage persistencePackage, DynamicEntityDao dynamicEntityDao,
        InspectHelper helper) throws ServiceException {
    try {
        Map<MergedPropertyType, Map<String, FieldMetadata>> allMergedProperties = new EnumMap<MergedPropertyType, Map<String, FieldMetadata>>(
                MergedPropertyType.class);
        Class<?>[] entityClasses = dynamicEntityDao.getAllPolymorphicEntitiesFromCeiling(StaticAsset.class);
        if (getMergedProperties() == null) {
            createMergedProperties(persistencePackage, dynamicEntityDao, entityClasses);
        }

        allMergedProperties.put(MergedPropertyType.PRIMARY, getMergedProperties());
        ClassMetadata mergedMetadata = helper.getMergedClassMetadata(entityClasses, allMergedProperties);
        return new DynamicResultSet(mergedMetadata);
    } catch (Exception e) {
        ServiceException ex = new ServiceException("Unable to retrieve inspection results for "
                + persistencePackage.getCeilingEntityFullyQualifiedClassname(), e);
        LOG.error("Unable to retrieve inspection results for "
                + persistencePackage.getCeilingEntityFullyQualifiedClassname(), ex);
        throw ex;
    }
}

From source file:at.ac.tuwien.dsg.quelle.elasticityQuantification.engines.RequirementsMatchingEngine.java

private Set<RequirementsMatchingReport<Quality>> matchOptionalQualityConfiguration(
        List<Unit> optionalConfiguration, List<Requirement> requirementsToMatch) {
    //results will be sorted by the number of matched requirements, largest first
    Set<RequirementsMatchingReport<Quality>> matchedReportSet = new TreeSet<RequirementsMatchingReport<Quality>>(
            new Comparator<RequirementsMatchingReport>() {
                public int compare(RequirementsMatchingReport o1, RequirementsMatchingReport o2) {
                    Integer matched1 = o1.getMatchedResourceCountForMetricType(Metric.MetricType.QUALITY);
                    Integer matched2 = o2.getMatchedResourceCountForMetricType(Metric.MetricType.QUALITY);
                    return -1 * matched1.compareTo(matched2); //multiplied by -1 to have the largest number first
                }
            });

    //1. go through each optional Resource and match their requirements
    for (Unit entity : optionalConfiguration) {
        if (!(entity instanceof Quality)) {
            continue;
        } else {
            Quality quality = (Quality) entity;
            Map<Metric, MetricValue> resourceProperties = quality.getProperties();
            //match as many requirements to resource properties as possible
            List<Requirement> requirementsMatchedForThisResource = matchRequirementsToProperties(
                    resourceProperties, requirementsToMatch);

            if (requirementsMatchedForThisResource.size() > 0) {
                //create the report entry 
                Map<Metric.MetricType, List<Requirement>> matchedRequirementsMap = new EnumMap<Metric.MetricType, List<Requirement>>(
                        Metric.MetricType.class);
                matchedRequirementsMap.put(Metric.MetricType.QUALITY, requirementsMatchedForThisResource);
                RequirementsMatchingReport<Quality> matchingReport = new RequirementsMatchingReport<Quality>(
                        matchedRequirementsMap, quality);

                //add report entry to the report
                matchedReportSet.add(matchingReport);
            }
        }
    }

    return matchedReportSet;

}

From source file:org.omnaest.utils.structure.map.MapUtils.java

/**
 * Returns an {@link EnumMap} filled with all available values of the given {@link Enum} type as keys and the result of the
 * {@link Factory} as value for each {@link Enum} key.
 *
 * @param enumType
 * @param factory
 * @return {@link EnumMap}
 */
public static <K extends Enum<K>, V> EnumMap<K, V> initializedEnumMap(Class<K> enumType, Factory<V> factory) {
    //    
    final EnumMap<K, V> retmap = enumType != null ? new EnumMap<K, V>(enumType) : null;

    //
    if (retmap != null) {
        for (K key : EnumUtils.getEnumList(enumType)) {
            V value = factory != null ? factory.newInstance() : null;
            retmap.put(key, value);
        }
    }

    //
    return retmap;
}
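
The same pre-population idea can be sketched with JDK types only, independent of the Factory and EnumUtils helpers used above; the Status enum and the counter values below are hypothetical:

import java.util.EnumMap;
import java.util.concurrent.atomic.AtomicLong;

public class PrepopulatedEnumMapDemo {

    // Hypothetical enum used only for illustration.
    enum Status { OPEN, IN_PROGRESS, CLOSED }

    // Mirrors the idea of initializedEnumMap: every constant gets a value
    // produced up front, here a fresh AtomicLong counter per key.
    static EnumMap<Status, AtomicLong> newCounters() {
        EnumMap<Status, AtomicLong> counters = new EnumMap<>(Status.class);
        for (Status status : Status.values()) {
            counters.put(status, new AtomicLong());
        }
        return counters;
    }

    public static void main(String[] args) {
        EnumMap<Status, AtomicLong> counters = newCounters();
        counters.get(Status.OPEN).incrementAndGet();
        System.out.println(counters); // {OPEN=1, IN_PROGRESS=0, CLOSED=0}
    }
}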

From source file:org.openecomp.sdc.be.servlets.AbstractValidationsServlet.java

protected void validateMD5(Wrapper<Response> responseWrapper, User user, UploadResourceInfo resourceInfo,
        HttpServletRequest request, String resourceInfoJsonString) {
    boolean isValid;
    String recievedMD5 = request.getHeader(Constants.MD5_HEADER);
    if (recievedMD5 == null) {
        isValid = false;
    } else {
        String calculateMD5 = GeneralUtility.calculateMD5ByString(resourceInfoJsonString);
        isValid = calculateMD5.equals(recievedMD5);
    }
    if (!isValid) {
        ResponseFormat responseFormat = getComponentsUtils()
                .getResponseFormat(ActionStatus.INVALID_RESOURCE_CHECKSUM);
        Response errorResponse = buildErrorResponse(responseFormat);
        EnumMap<AuditingFieldsKeysEnum, Object> additionalParam = new EnumMap<AuditingFieldsKeysEnum, Object>(
                AuditingFieldsKeysEnum.class);
        additionalParam.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_NAME, resourceInfo.getName());
        getComponentsUtils().auditResource(responseFormat, user, null, "", "",
                AuditingActionEnum.IMPORT_RESOURCE, additionalParam);
        responseWrapper.setInnerElement(errorResponse);
    }
}

From source file:gov.nih.nci.firebird.service.annual.registration.AnnualRegistrationServiceBean.java

private void sendRegistrationDeletionEmailToCoordinator(AnnualRegistration registration) {
    if (registration.getProfile().hasCtepRegistrationCoordinator()) {
        String coordinatorEmail = registration.getProfile().getCtepRegistrationCoordinatorMapping().getUser()
                .getPerson().getEmail();
        Map<FirebirdTemplateParameter, Object> parameterValues = new EnumMap<FirebirdTemplateParameter, Object>(
                FirebirdTemplateParameter.class);
        parameterValues.put(FirebirdTemplateParameter.INVESTIGATOR, registration.getProfile().getPerson());
        FirebirdMessage message = getTemplateService().generateMessage(
                FirebirdMessageTemplate.INVESTIGATOR_DELETED_ANNUAL_REGISTRATION_EMAIL_TO_OTHERS,
                parameterValues);
        getEmailService().sendMessage(coordinatorEmail, null, null, message);
    }
}

From source file:org.apache.ambari.server.controller.internal.ServiceResourceProvider.java

protected synchronized RequestStageContainer updateServices(RequestStageContainer requestStages,
        Set<ServiceRequest> requests, Map<String, String> requestProperties, boolean runSmokeTest,
        boolean reconfigureClients, boolean startDependencies) throws AmbariException {

    AmbariManagementController controller = getManagementController();

    if (requests.isEmpty()) {
        LOG.warn("Received an empty requests set");
        return null;
    }

    Map<State, List<Service>> changedServices = new EnumMap<State, List<Service>>(State.class);
    Map<State, List<ServiceComponent>> changedComps = new EnumMap<State, List<ServiceComponent>>(State.class);
    Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts = new HashMap<String, Map<State, List<ServiceComponentHost>>>();
    Collection<ServiceComponentHost> ignoredScHosts = new ArrayList<ServiceComponentHost>();

    Set<String> clusterNames = new HashSet<String>();
    Map<String, Set<String>> serviceNames = new HashMap<String, Set<String>>();
    Set<State> seenNewStates = new HashSet<State>();

    // Determine operation level
    Resource.Type reqOpLvl;
    if (requestProperties.containsKey(RequestOperationLevel.OPERATION_LEVEL_ID)) {
        RequestOperationLevel operationLevel = new RequestOperationLevel(requestProperties);
        reqOpLvl = operationLevel.getLevel();
    } else {
        String message = "Can not determine request operation level. " + "Operation level property should "
                + "be specified for this request.";
        LOG.warn(message);
        reqOpLvl = Resource.Type.Cluster;
    }

    Clusters clusters = controller.getClusters();

    // We don't expect batch requests for different clusters, so nothing bad
    // should happen if this value is overwritten a few times
    String maintenanceCluster = null;

    for (ServiceRequest request : requests) {
        if (request.getClusterName() == null || request.getClusterName().isEmpty()
                || request.getServiceName() == null || request.getServiceName().isEmpty()) {
            throw new IllegalArgumentException("Invalid arguments, cluster name"
                    + " and service name should be provided to update services");
        }

        LOG.info("Received a updateService request" + ", clusterName=" + request.getClusterName()
                + ", serviceName=" + request.getServiceName() + ", request=" + request.toString());

        clusterNames.add(request.getClusterName());

        if (clusterNames.size() > 1) {
            throw new IllegalArgumentException("Updates to multiple clusters is not" + " supported");
        }

        if (!serviceNames.containsKey(request.getClusterName())) {
            serviceNames.put(request.getClusterName(), new HashSet<String>());
        }
        if (serviceNames.get(request.getClusterName()).contains(request.getServiceName())) {
            // TODO throw single exception
            throw new IllegalArgumentException("Invalid request contains duplicate" + " service names");
        }
        serviceNames.get(request.getClusterName()).add(request.getServiceName());

        Cluster cluster = clusters.getCluster(request.getClusterName());
        Service s = cluster.getService(request.getServiceName());
        State oldState = s.getDesiredState();
        State newState = null;
        if (request.getDesiredState() != null) {
            newState = State.valueOf(request.getDesiredState());
            if (!newState.isValidDesiredState()) {
                throw new IllegalArgumentException(
                        "Invalid arguments, invalid" + " desired state, desiredState=" + newState);
            }
        }

        // Setting Maintenance state for service
        if (null != request.getMaintenanceState()) {
            MaintenanceState newMaint = MaintenanceState.valueOf(request.getMaintenanceState());
            if (newMaint != s.getMaintenanceState()) {
                if (newMaint.equals(MaintenanceState.IMPLIED_FROM_HOST)
                        || newMaint.equals(MaintenanceState.IMPLIED_FROM_SERVICE)) {
                    throw new IllegalArgumentException(
                            "Invalid arguments, can only set " + "maintenance state to one of "
                                    + EnumSet.of(MaintenanceState.OFF, MaintenanceState.ON));
                } else {
                    s.setMaintenanceState(newMaint);
                    maintenanceCluster = cluster.getClusterName();
                }
            }
        }

        if (newState == null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Nothing to do for new updateService request" + ", clusterName="
                        + request.getClusterName() + ", serviceName=" + request.getServiceName()
                        + ", newDesiredState=null");
            }
            continue;
        }

        if (!maintenanceStateHelper.isOperationAllowed(reqOpLvl, s)) {
            LOG.info("Operations cannot be applied to service " + s.getName() + " in the maintenance state of "
                    + s.getMaintenanceState());
            continue;
        }

        seenNewStates.add(newState);

        if (newState != oldState) {
            if (!State.isValidDesiredStateTransition(oldState, newState)) {
                throw new AmbariException("Invalid transition for" + " service" + ", clusterName="
                        + cluster.getClusterName() + ", clusterId=" + cluster.getClusterId() + ", serviceName="
                        + s.getName() + ", currentDesiredState=" + oldState + ", newDesiredState=" + newState);

            }
            if (!changedServices.containsKey(newState)) {
                changedServices.put(newState, new ArrayList<Service>());
            }
            changedServices.get(newState).add(s);
        }

        // TODO should we check whether all servicecomponents and
        // servicecomponenthosts are in the required desired state?

        updateServiceComponents(requestStages, changedComps, changedScHosts, ignoredScHosts, reqOpLvl, s,
                newState);
    }

    if (startDependencies && changedServices.containsKey(State.STARTED)) {
        HashSet<Service> depServices = new HashSet<Service>();
        for (Service service : changedServices.get(State.STARTED)) {
            RoleCommandOrder rco = controller.getRoleCommandOrder(service.getCluster());
            Set<Service> dependencies = rco.getTransitiveServices(service, RoleCommand.START);
            for (Service dependency : dependencies) {
                if (!changedServices.get(State.STARTED).contains(dependency)) {
                    depServices.add(dependency);
                }
            }
        }
        for (Service service : depServices) {
            updateServiceComponents(requestStages, changedComps, changedScHosts, ignoredScHosts, reqOpLvl,
                    service, State.STARTED);
            changedServices.get(State.STARTED).add(service);
        }

    }

    if (seenNewStates.size() > 1) {
        // TODO should we handle this scenario
        throw new IllegalArgumentException(
                "Cannot handle different desired state" + " changes for a set of services at the same time");
    }

    Cluster cluster = clusters.getCluster(clusterNames.iterator().next());

    return controller.addStages(requestStages, cluster, requestProperties, null, changedServices, changedComps,
            changedScHosts, ignoredScHosts, runSmokeTest, reconfigureClients);
}

From source file:org.apache.ambari.server.controller.internal.HostComponentResourceProvider.java

/**
 * Update the host components identified by the given request objects with the
 * values those requests carry.
 *
 * @param stages             stages of the associated request
 * @param requests           the request object which defines which host component to
 *                           update and the values to set
 * @param requestProperties  the request properties
 * @param runSmokeTest       indicates whether or not to run a smoke test
 *
 * @return a track action response
 *
 * @throws AmbariException thrown if the resource cannot be updated
 */
//todo: This was moved from AmbariManagementController and needs a lot of refactoring.
//todo: Look into using the predicate instead of Set<ServiceComponentHostRequest>
//todo: change to private access when all AMC tests have been moved.
protected synchronized RequestStageContainer updateHostComponents(RequestStageContainer stages,
        Set<ServiceComponentHostRequest> requests, Map<String, String> requestProperties, boolean runSmokeTest)
        throws AmbariException {

    Clusters clusters = getManagementController().getClusters();

    Map<String, Map<State, List<ServiceComponentHost>>> changedScHosts = new HashMap<String, Map<State, List<ServiceComponentHost>>>();
    Collection<ServiceComponentHost> ignoredScHosts = new ArrayList<ServiceComponentHost>();
    Set<String> clusterNames = new HashSet<String>();
    Map<String, Map<String, Map<String, Set<String>>>> requestClusters = new HashMap<String, Map<String, Map<String, Set<String>>>>();
    Map<ServiceComponentHost, State> directTransitionScHosts = new HashMap<ServiceComponentHost, State>();

    Resource.Type reqOpLvl = determineOperationLevel(requestProperties);

    String clusterName = requestProperties.get(RequestOperationLevel.OPERATION_CLUSTER_ID);
    if (clusterName != null && !clusterName.isEmpty()) {
        clusterNames.add(clusterName);
    }

    for (ServiceComponentHostRequest request : requests) {
        validateServiceComponentHostRequest(request);

        Cluster cluster = clusters.getCluster(request.getClusterName());

        if (StringUtils.isEmpty(request.getServiceName())) {
            request.setServiceName(
                    getManagementController().findServiceName(cluster, request.getComponentName()));
        }

        ServiceComponent sc = getServiceComponent(request.getClusterName(), request.getServiceName(),
                request.getComponentName());

        logRequestInfo("Received a updateHostComponent request", request);

        if ((clusterName == null || clusterName.isEmpty())
                && (request.getClusterName() != null && !request.getClusterName().isEmpty())) {
            clusterNames.add(request.getClusterName());
        }

        if (clusterNames.size() > 1) {
            throw new IllegalArgumentException("Updates to multiple clusters is not" + " supported");
        }

        // maps of cluster->services, services->components, components->hosts
        Map<String, Map<String, Set<String>>> clusterServices = requestClusters.get(request.getClusterName());
        if (clusterServices == null) {
            clusterServices = new HashMap<String, Map<String, Set<String>>>();
            requestClusters.put(request.getClusterName(), clusterServices);
        }

        Map<String, Set<String>> serviceComponents = clusterServices.get(request.getServiceName());
        if (serviceComponents == null) {
            serviceComponents = new HashMap<String, Set<String>>();
            clusterServices.put(request.getServiceName(), serviceComponents);
        }

        Set<String> componentHosts = serviceComponents.get(request.getComponentName());
        if (componentHosts == null) {
            componentHosts = new HashSet<String>();
            serviceComponents.put(request.getComponentName(), componentHosts);
        }

        if (componentHosts.contains(request.getHostname())) {
            throw new IllegalArgumentException("Invalid request contains duplicate hostcomponents");
        }

        componentHosts.add(request.getHostname());

        ServiceComponentHost sch = sc.getServiceComponentHost(request.getHostname());
        State oldState = sch.getState();
        State newState = null;
        if (request.getDesiredState() != null) {
            // set desired state on host component
            newState = State.valueOf(request.getDesiredState());

            // throw exception if desired state isn't a valid desired state (static check)
            if (!newState.isValidDesiredState()) {
                throw new IllegalArgumentException(
                        "Invalid arguments, invalid" + " desired state, desiredState=" + newState.toString());
            }
        }

        // Setting Maintenance state for host component
        if (null != request.getMaintenanceState()) {
            MaintenanceState newMaint = MaintenanceState.valueOf(request.getMaintenanceState());
            MaintenanceState oldMaint = maintenanceStateHelper.getEffectiveState(sch);

            if (newMaint != oldMaint) {
                if (sc.isClientComponent()) {
                    throw new IllegalArgumentException(
                            "Invalid arguments, cannot set maintenance state on a client component");
                } else if (newMaint.equals(MaintenanceState.IMPLIED_FROM_HOST)
                        || newMaint.equals(MaintenanceState.IMPLIED_FROM_SERVICE)) {
                    throw new IllegalArgumentException(
                            "Invalid arguments, can only set maintenance state to one of "
                                    + EnumSet.of(MaintenanceState.OFF, MaintenanceState.ON));
                } else {
                    sch.setMaintenanceState(newMaint);
                }
            }
        }

        if (newState == null) {
            logComponentInfo("Nothing to do for new updateServiceComponentHost", request, oldState, null);
            continue;
        }

        if (sc.isClientComponent() && !newState.isValidClientComponentState()) {
            throw new IllegalArgumentException("Invalid desired state for a client" + " component");
        }

        State oldSchState = sch.getState();
        // Client component reinstall allowed
        if (newState == oldSchState && !sc.isClientComponent()
                && !requestProperties.containsKey(sch.getServiceComponentName().toLowerCase())) {

            ignoredScHosts.add(sch);
            logComponentInfo("Ignoring ServiceComponentHost", request, oldState, newState);
            continue;
        }

        if (!maintenanceStateHelper.isOperationAllowed(reqOpLvl, sch)) {
            ignoredScHosts.add(sch);
            logComponentInfo("Ignoring ServiceComponentHost", request, oldState, newState);
            continue;
        }

        if (!isValidStateTransition(stages, oldSchState, newState, sch)) {
            throw new AmbariException("Invalid state transition for host component" + ", clusterName="
                    + cluster.getClusterName() + ", clusterId=" + cluster.getClusterId() + ", serviceName="
                    + sch.getServiceName() + ", componentName=" + sch.getServiceComponentName() + ", hostname="
                    + sch.getHostName() + ", currentState=" + oldSchState + ", newDesiredState=" + newState);
        }

        if (isDirectTransition(oldSchState, newState)) {
            logComponentInfo("Handling direct transition update to host component", request, oldState,
                    newState);
            directTransitionScHosts.put(sch, newState);
        } else {
            if (!changedScHosts.containsKey(sc.getName())) {
                changedScHosts.put(sc.getName(), new EnumMap<State, List<ServiceComponentHost>>(State.class));
            }
            if (!changedScHosts.get(sc.getName()).containsKey(newState)) {
                changedScHosts.get(sc.getName()).put(newState, new ArrayList<ServiceComponentHost>());
            }
            logComponentInfo("Handling update to host component", request, oldState, newState);
            changedScHosts.get(sc.getName()).get(newState).add(sch);
        }
    }

    doDirectTransitions(directTransitionScHosts);

    // just getting the first cluster
    Cluster cluster = clusters.getCluster(clusterNames.iterator().next());

    return getManagementController().addStages(stages, cluster, requestProperties, null, null, null,
            changedScHosts, ignoredScHosts, runSmokeTest, false);
}
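
The changedScHosts bookkeeping above illustrates a recurring pattern: lazily creating an EnumMap of lists to bucket items by their target enum state. A minimal JDK-only sketch of that pattern, written with computeIfAbsent and a hypothetical TaskState enum and host names standing in for Ambari's State and ServiceComponentHost:

import java.util.ArrayList;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class GroupByEnumStateDemo {

    // Hypothetical enum standing in for Ambari's State.
    enum TaskState { INSTALLED, STARTED, STOPPED }

    public static void main(String[] args) {
        Map<String, TaskState> desired = Map.of(
                "datanode-1", TaskState.STARTED,
                "datanode-2", TaskState.STARTED,
                "namenode-1", TaskState.STOPPED);

        // Bucket host names by their desired state; the EnumMap of lists is
        // created lazily, the same idea as changedScHosts above.
        Map<TaskState, List<String>> changed = new EnumMap<>(TaskState.class);
        for (Map.Entry<String, TaskState> entry : desired.entrySet()) {
            changed.computeIfAbsent(entry.getValue(), state -> new ArrayList<>())
                   .add(entry.getKey());
        }

        System.out.println(changed.keySet()); // [STARTED, STOPPED]
    }
}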

From source file:it.unimi.di.big.mg4j.tool.Scan.java

/**
 * Creates a new scanner instance.
 * 
 * @param ioFactory the factory that will be used to perform I/O.
 * @param basename the basename (usually a global filename followed by the field name, separated
 * by a dash).
 * @param field the field to be indexed.
 * @param termProcessor the term processor for this index.
 * @param indexingType the type of indexing procedure.
 * @param numVirtualDocs the number of virtual documents that will be used, in case of a virtual
 * index; otherwise, immaterial.
 * @param virtualDocumentGap the artificial gap introduced between virtual documents fragments, in case
 * of a virtual index; otherwise, immaterial.
 * @param bufferSize the buffer size used in all I/O.
 * @param builder a builder used to create a compressed document collection on the fly.
 * @param batchDir a directory for batch files; batch names will be relativised to this
 * directory if it is not {@code null}.
 */
public Scan(final IOFactory ioFactory, final String basename, final String field,
        final Completeness completeness, final TermProcessor termProcessor, final IndexingType indexingType,
        final long numVirtualDocs, final int virtualDocumentGap, final int bufferSize,
        final DocumentCollectionBuilder builder, final File batchDir) throws IOException {
    this.ioFactory = ioFactory;
    this.basename = basename;
    this.field = field;
    this.completeness = completeness;
    this.termProcessor = termProcessor;
    this.bufferSize = bufferSize;
    this.builder = builder;
    this.batchDir = batchDir;
    this.virtualDocumentGap = virtualDocumentGap;
    this.cutPoints = new LongArrayList();
    this.cutPoints.add(0);

    termMap = new Object2ReferenceOpenHashMap<MutableString, ByteArrayPostingList>(INITIAL_TERM_MAP_SIZE);

    flags = new EnumMap<Component, Coding>(CompressionFlags.DEFAULT_STANDARD_INDEX);
    if (completeness.compareTo(Completeness.POSITIONS) < 0)
        flags.remove(Component.POSITIONS);
    if (completeness.compareTo(Completeness.COUNTS) < 0)
        flags.remove(Component.COUNTS);

    indexingIsStandard = indexingType == IndexingType.STANDARD;
    indexingIsRemapped = indexingType == IndexingType.REMAPPED;
    indexingIsVirtual = indexingType == IndexingType.VIRTUAL;
    if (indexingIsVirtual && virtualDocumentGap == 0)
        throw new IllegalArgumentException("Illegal virtual document gap: " + virtualDocumentGap);

    if (indexingIsVirtual)
        currSize = IntBigArrays.newBigArray(numVirtualDocs);
    maxDocInBatch = (currSize != null ? IntBigArrays.length(currSize) : 0) - 1;
    openSizeBitStream();
}
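
This Scan constructor is the one listing on this page that uses the Map-copy form from the prototype at the top: it copies MG4J's default compression flags into a private, mutable EnumMap and then removes the components it does not need. A minimal sketch of that copy-then-customize pattern, with hypothetical Component/Coding enums and defaults standing in for CompressionFlags.DEFAULT_STANDARD_INDEX:

import java.util.EnumMap;
import java.util.Map;

public class CopyDefaultsDemo {

    // Hypothetical stand-ins for MG4J's Component and Coding enums.
    enum Component { POSITIONS, COUNTS, POINTERS }
    enum Coding { GAMMA, DELTA }

    // A shared, read-only default configuration (stand-in for
    // CompressionFlags.DEFAULT_STANDARD_INDEX).
    static final Map<Component, Coding> DEFAULTS = Map.of(
            Component.POINTERS, Coding.DELTA,
            Component.COUNTS, Coding.GAMMA,
            Component.POSITIONS, Coding.GAMMA);

    public static void main(String[] args) {
        // Copy the defaults into a private, mutable EnumMap, then trim it
        // down, mirroring what the Scan constructor does with its flags.
        EnumMap<Component, Coding> flags = new EnumMap<>(DEFAULTS);
        flags.remove(Component.POSITIONS);

        System.out.println(flags); // {COUNTS=GAMMA, POINTERS=DELTA}
    }
}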

From source file:org.squashtest.tm.service.internal.requirement.VerifiedRequirementsManagerServiceImpl.java

/**
 * Extract a {@link Map}, key : {@link ExecutionStatus} value : {@link Long}.
 * The goal is to perform arithmetic operation with this map to calculate several rates on {@link RequirementVersion}
 * Constraints from specification Feat 4434 :
 * <code>
 * <ul>
 * <li>Requirement without linked {@link TestStep} must be treated at {@link Execution} level, for last execution.
 * We must also include fast pass so we take the {@link IterationTestPlanItem} status</li>
 * <li>Requirement with linked {@link TestStep} must be treated at {@link ExecutionStep} level</li>
 * <li>Only last execution must be considered for a given {@link IterationTestPlanItem}</li>
 * <li>FastPass must be considered for all cases (ie even if the {@link RequirementVersion} is linked to {@link TestStep})</li>
 * <li>Rate must be calculated on the designated {@link Requirement} and its descendants</li>
 * <li>The descendant list must be filtered by {@link Milestone} and exclude {@link RequirementVersion} with {@link RequirementStatus#OBSOLETE}</li>
 * </ul>
 * </code>
 * @param mainVersion
 * @param descendants
 * @param stats pojo containing the computed stats
 * @param iterationsIds
 */
private void findExecutionRate(RequirementVersion mainVersion, List<RequirementVersion> descendants,
        RequirementCoverageStat stats, List<Long> iterationsIds) {
    boolean hasDescendant = !descendants.isEmpty();
    Rate verificationRate = new Rate();
    Rate validationRate = new Rate();

    //see http://javadude.com/articles/passbyvalue.htm to understand why an array (or any object) is needed here
    Long[] mainUntestedElementsCount = new Long[] { 0L };
    Map<ExecutionStatus, Long> mainStatusMap = new EnumMap<>(ExecutionStatus.class);
    makeStatusMap(mainVersion.getRequirementVersionCoverages(), mainUntestedElementsCount, mainStatusMap,
            iterationsIds);
    verificationRate
            .setRequirementVersionRate(doRateVerifiedCalculation(mainStatusMap, mainUntestedElementsCount[0]));
    validationRate
            .setRequirementVersionRate(doRateValidatedCalculation(mainStatusMap, mainUntestedElementsCount[0]));

    if (hasDescendant) {
        verificationRate.setAncestor(true);
        validationRate.setAncestor(true);
        Set<RequirementVersionCoverage> descendantCoverages = getDescendantCoverages(descendants);
        Long[] descendantUntestedElementsCount = new Long[] { 0L };
        Map<ExecutionStatus, Long> descendantStatusMap = new EnumMap<>(ExecutionStatus.class);
        makeStatusMap(descendantCoverages, descendantUntestedElementsCount, descendantStatusMap, iterationsIds);
        verificationRate.setRequirementVersionChildrenRate(
                doRateVerifiedCalculation(descendantStatusMap, descendantUntestedElementsCount[0]));
        validationRate.setRequirementVersionChildrenRate(
                doRateValidatedCalculation(descendantStatusMap, descendantUntestedElementsCount[0]));

        Long[] allUntestedElementsCount = new Long[] { 0L };
        allUntestedElementsCount[0] = mainUntestedElementsCount[0] + descendantUntestedElementsCount[0];
        Map<ExecutionStatus, Long> allStatusMap = mergeMapResult(mainStatusMap, descendantStatusMap);
        verificationRate.setRequirementVersionGlobalRate(
                doRateVerifiedCalculation(allStatusMap, allUntestedElementsCount[0]));
        validationRate.setRequirementVersionGlobalRate(
                doRateValidatedCalculation(allStatusMap, allUntestedElementsCount[0]));
    }

    stats.addRate("verification", verificationRate);
    stats.addRate("validation", validationRate);
}
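
The status-map bookkeeping described in the comment above (makeStatusMap plus mergeMapResult) amounts to tallying executions per enum status and then merging two tallies. A JDK-only sketch of that idea, with a hypothetical RunStatus enum standing in for ExecutionStatus and invented counts:

import java.util.EnumMap;
import java.util.List;
import java.util.Map;

public class StatusTallyDemo {

    // Hypothetical stand-in for Squash TM's ExecutionStatus.
    enum RunStatus { SUCCESS, FAILURE, BLOCKED }

    // Tally how many executions ended in each status.
    static Map<RunStatus, Long> tally(List<RunStatus> executions) {
        Map<RunStatus, Long> counts = new EnumMap<>(RunStatus.class);
        for (RunStatus status : executions) {
            counts.merge(status, 1L, Long::sum);
        }
        return counts;
    }

    // Combine two tallies, the way mergeMapResult combines the main and
    // descendant status maps before the global rates are computed.
    static Map<RunStatus, Long> combine(Map<RunStatus, Long> a, Map<RunStatus, Long> b) {
        Map<RunStatus, Long> combined = new EnumMap<>(a);
        b.forEach((status, count) -> combined.merge(status, count, Long::sum));
        return combined;
    }

    public static void main(String[] args) {
        Map<RunStatus, Long> main = tally(List.of(RunStatus.SUCCESS, RunStatus.FAILURE));
        Map<RunStatus, Long> children = tally(List.of(RunStatus.SUCCESS, RunStatus.SUCCESS));
        System.out.println(combine(main, children)); // {SUCCESS=3, FAILURE=1}
    }
}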

From source file:org.openecomp.sdc.be.servlets.AbstractValidationsServlet.java

protected void fillToscaTemplateFromJson(Wrapper<Response> responseWrapper, Wrapper<String> yamlStringWrapper,
        User user, UploadResourceInfo resourceInfo) {
    if (resourceInfo.getPayloadData() == null || resourceInfo.getPayloadData().isEmpty()) {
        ResponseFormat responseFormat = getComponentsUtils()
                .getResponseFormat(ActionStatus.INVALID_RESOURCE_PAYLOAD);
        Response errorResponse = buildErrorResponse(responseFormat);
        EnumMap<AuditingFieldsKeysEnum, Object> additionalParam = new EnumMap<AuditingFieldsKeysEnum, Object>(
                AuditingFieldsKeysEnum.class);
        additionalParam.put(AuditingFieldsKeysEnum.AUDIT_RESOURCE_NAME, resourceInfo.getName());
        getComponentsUtils().auditResource(responseFormat, user, null, "", "",
                AuditingActionEnum.IMPORT_RESOURCE, additionalParam);
        responseWrapper.setInnerElement(errorResponse);
    } else {
        String toscaPayload = resourceInfo.getPayloadData();
        String decodedPayload = (GeneralUtility.isBase64Encoded(toscaPayload))
                ? new String(Base64.decodeBase64(toscaPayload))
                : toscaPayload;
        yamlStringWrapper.setInnerElement(decodedPayload);
    }

}