Example usage for java.util Collections disjoint

List of usage examples for java.util Collections disjoint

Introduction

On this page you can find example usage of java.util.Collections.disjoint.

Prototype

public static boolean disjoint(Collection<?> c1, Collection<?> c2) 

Document

Returns true if the two specified collections have no elements in common.
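
Before the real-world usage below, here is a minimal, self-contained sketch of how disjoint behaves. The class name and the collection contents are made up for illustration:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DisjointDemo {
    public static void main(String[] args) {
        List<String> admins = Arrays.asList("alice", "bob");
        Set<String> editors = new HashSet<>(Arrays.asList("carol", "dave"));
        Set<String> reviewers = new HashSet<>(Arrays.asList("bob", "erin"));

        // No elements in common -> true
        System.out.println(Collections.disjoint(admins, editors));   // true
        // "bob" appears in both collections -> false
        System.out.println(Collections.disjoint(admins, reviewers)); // false
        // An empty collection has no elements in common with anything -> true
        System.out.println(Collections.disjoint(Collections.emptyList(), admins)); // true
    }
}

Note that most of the examples that follow use the negated form, !Collections.disjoint(a, b), to ask whether the two collections share at least one element.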

Usage

From source file:uniol.apt.analysis.synthesize.SynthesizePN.java

/**
 * Try to eliminate redundant regions.
 * @param ts The transition system that is being solved.
 * @param requiredRegions Set of regions to minimize. Redundant regions will be removed.
 * @param onlyEventSeparation Should state separation be ignored?
 */
static public void minimizeRegions(TransitionSystem ts, Set<Region> requiredRegions,
        boolean onlyEventSeparation) {
    int numInputRegions = requiredRegions.size();
    Set<Region> remainingRegions = new HashSet<>(requiredRegions);
    requiredRegions.clear();

    // Build a list where each entry is generated from a separation problem and contains all regions that
    // solve this problem.
    Set<Set<Region>> separationProblems = new HashSet<>();
    calculateRequiredRegionsAndProblems(ts, separationProblems, requiredRegions, remainingRegions,
            onlyEventSeparation);

    debug("Required regions after first pass:");
    debug(requiredRegions);
    debug("List of regions that solve each remaining separation problem:");
    debug(separationProblems);

    // Now go through all remaining problems again
    for (Set<Region> problem : separationProblems) {
        // If none of our required regions solve this problem, we pick one arbitrarily that does
        if (Collections.disjoint(requiredRegions, problem))
            requiredRegions.add(problem.iterator().next());
    }

    debug("List of required regions:");
    debug(requiredRegions);
    debugFormat("Picked %d required regions out of %d input regions", requiredRegions.size(), numInputRegions);
}

From source file:org.onosproject.t3.impl.TroubleshootManager.java

/**
 * Computes a trace for a given packet that starts in the network at the given connect point.
 *
 * @param completePath the path traversed by the packet
 * @param in           the input connect point
 * @param trace        the trace to build
 * @param isDualHomed  true if the trace we are doing starts or ends in a dual homed host
 * @return the built trace for that packet.
 */
private StaticPacketTrace getTrace(List<ConnectPoint> completePath, ConnectPoint in, StaticPacketTrace trace,
        boolean isDualHomed) {

    log.debug("------------------------------------------------------------");

    //if the trace already contains the input connect point there is a loop
    if (pathContainsDevice(completePath, in.deviceId())) {
        trace.addResultMessage("Loop encountered in device " + in.deviceId());
        completePath.add(in);
        trace.addCompletePath(completePath);
        trace.setSuccess(false);
        return trace;
    }

    //let's add the input connect point
    completePath.add(in);

    //If the trace has no outputs for the given input we stop here
    if (trace.getGroupOuputs(in.deviceId()) == null) {
        computePath(completePath, trace, null);
        trace.addResultMessage("No output out of device " + in.deviceId() + ". Packet is dropped");
        trace.setSuccess(false);
        return trace;
    }

    //If the trace has outputs we analyze them all
    for (GroupsInDevice outputPath : trace.getGroupOuputs(in.deviceId())) {

        ConnectPoint cp = outputPath.getOutput();
        log.debug("Connect point in {}", in);
        log.debug("Output path {}", cp);
        log.debug("{}", outputPath.getFinalPacket());

        //Hosts for the given output
        Set<Host> hostsList = hostService.getConnectedHosts(cp);
        //Hosts queried from the original ip or mac
        Set<Host> hosts = getHosts(trace);

        if (in.equals(cp) && trace.getInitialPacket().getCriterion(Criterion.Type.VLAN_VID) != null
                && outputPath.getFinalPacket().getCriterion(Criterion.Type.VLAN_VID) != null
                && ((VlanIdCriterion) trace.getInitialPacket().getCriterion(Criterion.Type.VLAN_VID)).vlanId()
                        .equals(((VlanIdCriterion) outputPath.getFinalPacket()
                                .getCriterion(Criterion.Type.VLAN_VID)).vlanId())) {
            if (trace.getGroupOuputs(in.deviceId()).size() == 1
                    && computePath(completePath, trace, outputPath.getOutput())) {
                trace.addResultMessage("Connect point out " + cp + " is same as initial input " + in);
                trace.setSuccess(false);
            }
        } else if (!Collections.disjoint(hostsList, hosts)) {
            //If the two host collections contain the same item it means we reached the proper output
            log.debug("Stopping here because host is expected destination {}, reached through", completePath);
            if (computePath(completePath, trace, outputPath.getOutput())) {
                trace.addResultMessage("Reached required destination Host " + cp);
                trace.setSuccess(true);
            }
            break;
        } else if (cp.port().equals(PortNumber.CONTROLLER)) {

            //Getting the master when the packet gets sent as packet in
            NodeId master = mastershipService.getMasterFor(cp.deviceId());
            trace.addResultMessage(PACKET_TO_CONTROLLER + " " + master.id());
            computePath(completePath, trace, outputPath.getOutput());
            handleVlanToController(outputPath, trace);

        } else if (linkService.getEgressLinks(cp).size() > 0) {

            //TODO this can be optimized if we use a Tree structure for paths.
            //if we already have outputs let's check if the one we are considering starts from one of the devices
            // in any of the ones we have.
            if (trace.getCompletePaths().size() > 0) {
                ConnectPoint inputForOutput = null;
                List<ConnectPoint> previousPath = new ArrayList<>();
                for (List<ConnectPoint> path : trace.getCompletePaths()) {
                    for (ConnectPoint connect : path) {
                        //if the path already contains the input for the output we've found we use it
                        if (connect.equals(in)) {
                            inputForOutput = connect;
                            previousPath = path;
                            break;
                        }
                    }
                }

                //we use the pre-existing path up to the point we fork to a new output
                if (inputForOutput != null && completePath.contains(inputForOutput)) {
                    List<ConnectPoint> temp = new ArrayList<>(previousPath);
                    temp = temp.subList(0, previousPath.indexOf(inputForOutput) + 1);
                    if (completePath.containsAll(temp)) {
                        completePath = temp;
                    }
                }
            }

            //let's add the output for the input
            completePath.add(cp);
            //let's compute the links for the given output
            Set<Link> links = linkService.getEgressLinks(cp);
            log.debug("Egress Links {}", links);
            //For each link we trace the corresponding device
            for (Link link : links) {
                ConnectPoint dst = link.dst();
                //change in-port to the dst link in port
                Builder updatedPacket = DefaultTrafficSelector.builder();
                outputPath.getFinalPacket().criteria().forEach(updatedPacket::add);
                updatedPacket.add(Criteria.matchInPort(dst.port()));
                log.debug("DST Connect Point {}", dst);
                //build the elements for that device
                traceInDevice(trace, updatedPacket.build(), dst, isDualHomed, completePath);
                //continue the trace along the path
                getTrace(completePath, dst, trace, isDualHomed);
            }
        } else if (edgePortService.isEdgePoint(outputPath.getOutput())
                && trace.getInitialPacket().getCriterion(Criterion.Type.ETH_DST) != null
                && ((EthCriterion) trace.getInitialPacket().getCriterion(Criterion.Type.ETH_DST)).mac()
                        .isMulticast()) {
            trace.addResultMessage("Packet is multicast and reached output " + outputPath.getOutput()
                    + " which is enabled and is edge port");
            trace.setSuccess(true);
            computePath(completePath, trace, outputPath.getOutput());
            if (!hasOtherOutput(in.deviceId(), trace, outputPath.getOutput())) {
                return trace;
            }
        } else if (deviceService.getPort(cp) != null && deviceService.getPort(cp).isEnabled()) {
            EthTypeCriterion ethTypeCriterion = (EthTypeCriterion) trace.getInitialPacket()
                    .getCriterion(Criterion.Type.ETH_TYPE);
            //We treat as correct output only if it's not LLDP or BDDP
            if (!(ethTypeCriterion.ethType().equals(EtherType.LLDP.ethType())
                    || ethTypeCriterion.ethType().equals(EtherType.BDDP.ethType()))) {
                if (computePath(completePath, trace, outputPath.getOutput())) {
                    if (hostsList.isEmpty()) {
                        trace.addResultMessage("Packet is "
                                + ((EthTypeCriterion) outputPath.getFinalPacket()
                                        .getCriterion(Criterion.Type.ETH_TYPE)).ethType()
                                + " and reached " + cp + " with no hosts connected ");
                    } else {
                        IpAddress ipAddress = null;
                        if (trace.getInitialPacket().getCriterion(Criterion.Type.IPV4_DST) != null) {
                            ipAddress = ((IPCriterion) trace.getInitialPacket()
                                    .getCriterion(Criterion.Type.IPV4_DST)).ip().address();
                        } else if (trace.getInitialPacket().getCriterion(Criterion.Type.IPV6_DST) != null) {
                            ipAddress = ((IPCriterion) trace.getInitialPacket()
                                    .getCriterion(Criterion.Type.IPV6_DST)).ip().address();
                        }
                        if (ipAddress != null) {
                            IpAddress finalIpAddress = ipAddress;
                            if (hostsList.stream().anyMatch(host -> host.ipAddresses().contains(finalIpAddress))
                                    || hostService.getHostsByIp(finalIpAddress).isEmpty()) {
                                trace.addResultMessage("Packet is "
                                        + ((EthTypeCriterion) outputPath.getFinalPacket()
                                                .getCriterion(Criterion.Type.ETH_TYPE)).ethType()
                                        + " and reached " + cp + " with hosts " + hostsList);
                            } else {
                                trace.addResultMessage(
                                        "Wrong output " + cp + " for required destination ip " + ipAddress);
                                trace.setSuccess(false);
                            }
                        } else {
                            trace.addResultMessage("Packet is "
                                    + ((EthTypeCriterion) outputPath.getFinalPacket()
                                            .getCriterion(Criterion.Type.ETH_TYPE)).ethType()
                                    + " and reached " + cp + " with hosts " + hostsList);
                        }
                    }
                    trace.setSuccess(true);
                }
            }

        } else {
            computePath(completePath, trace, cp);
            trace.setSuccess(false);
            if (deviceService.getPort(cp) == null) {
                //Port does not exist on the device.
                log.warn("Port {} is not available on device.", cp);
                trace.addResultMessage("Port " + cp + " is not available on device. Packet is dropped");
            } else {
                //No links means that the packet gets dropped.
                log.warn("No links out of {}", cp);
                trace.addResultMessage("No links depart from " + cp + ". Packet is dropped");
            }
        }
    }
    return trace;
}

From source file:org.wso2.carbon.identity.application.authentication.framework.handler.request.impl.consent.ConsentMgtPostAuthnHandler.java

private boolean isMandatoryClaimsDisapproved(List<ClaimMetaData> consentMandatoryClaims,
        List<ClaimMetaData> disapprovedClaims) {

    return isNotEmpty(consentMandatoryClaims)
            && !Collections.disjoint(disapprovedClaims, consentMandatoryClaims);
}

From source file:com.amalto.core.delegator.IItemCtrlDelegator.java

public void allowDelete(String clusterName, String concept, ComplexTypeMetadata.DeleteType deleteType)
        throws XtentisException {
    HashSet<String> roles;
    try {
        roles = getLocalUser().getRoles();
    } catch (XtentisException e) {
        String message = "Unable to access user current roles."; //$NON-NLS-1$
        LOGGER.error(message, e);
        throw new RuntimeException(message, e);
    }

    Server server = ServerContext.INSTANCE.get();
    StorageAdmin storageAdmin = server.getStorageAdmin();
    Storage storage = storageAdmin.get(clusterName, storageAdmin.getType(clusterName));
    MetadataRepository repository = storage.getMetadataRepository();
    ComplexTypeMetadata complexTypeMetadata = repository.getComplexType(concept);
    if (roles != null && roles.size() > 0) {
        if (ComplexTypeMetadata.DeleteType.LOGICAL.equals(deleteType)) {
            if (!Collections.disjoint(complexTypeMetadata.getDenyDelete(ComplexTypeMetadata.DeleteType.LOGICAL),
                    roles)) {
                throw new XtentisException("Unauthorized. User '" + LocalUser.getLocalUser().getUsername() //$NON-NLS-1$
                        + "' has no logical delete permission on '" + clusterName + "." + concept + "'"); //$NON-NLS-1$//$NON-NLS-2$ //$NON-NLS-3$
            }
        } else if (ComplexTypeMetadata.DeleteType.PHYSICAL.equals(deleteType)) {
            if (!Collections.disjoint(
                    complexTypeMetadata.getDenyDelete(ComplexTypeMetadata.DeleteType.PHYSICAL), roles)) {
                throw new XtentisException("Unauthorized. User '" + LocalUser.getLocalUser().getUsername() //$NON-NLS-1$
                        + "' has no physical delete permission on '" + clusterName + "." + concept + "'"); //$NON-NLS-1$//$NON-NLS-2$ //$NON-NLS-3$
            }
        }
    }
}

From source file:eu.trentorise.smartcampus.permissionprovider.manager.ResourceManager.java

/**
 * Delete the specified resources and parameters if they are not in use by any client
 * @param resources
 * @param parameters
 * @throws ResourceException
 */
private void cleanServiceResources(List<Resource> resources, List<ResourceParameter> parameters)
        throws ResourceException {
    // check the service resources are in use by the clients
    if (resources != null && !resources.isEmpty()) {
        Set<String> ids = new HashSet<String>();
        for (Resource r : resources) {
            ids.add("" + r.getResourceId());
        }
        List<ClientDetailsEntity> clients = clientDetailsRepository.findAll();
        for (ClientDetailsEntity c : clients) {
            if (!Collections.disjoint(ids, c.getResourceIds())) {
                throw new ResourceException("Resource in use by client: " + c.getClientId());
            }
        }
    }
    resourceRepository.delete(resources);
    resourceParameterRepository.delete(parameters);
}

From source file:org.cbioportal.security.spring.CancerStudyPermissionEvaluator.java

/**
 * Helper function to determine if given user has access to given cancer study.
 *
 * @param cancerStudy cancer study to check for
 * @param authentication Spring Authentication of the logged-in user.
 * @return boolean
 */
private boolean hasPermission(CancerStudy cancerStudy, Authentication authentication) {

    Set<String> grantedAuthorities = getGrantedAuthorities(authentication);

    String stableStudyID = cancerStudy.getCancerStudyIdentifier();

    if (log.isDebugEnabled()) {
        log.debug("hasPermission(), cancer study stable id: " + stableStudyID);
        log.debug("hasPermission(), user: " + authentication.getPrincipal().toString());
        for (String authority : grantedAuthorities) {
            log.debug("hasPermission(), authority: " + authority);
        }
    }

    // everybody has access to the 'all' cancer study
    if (stableStudyID.equalsIgnoreCase(ALL_CANCER_STUDIES_ID)) {
        return true;
    }
    // if a user has access to 'all', simply return true
    if (grantedAuthorities.contains(ALL_CANCER_STUDIES_ID.toUpperCase())) {
        if (log.isDebugEnabled()) {
            log.debug("hasPermission(), user has access to ALL cancer studies, return true");
        }
        return true;
    }
    // if a user has access to 'all_tcga', simply return true for tcga studies
    if (grantedAuthorities.contains(ALL_TCGA_CANCER_STUDIES_ID.toUpperCase())
            && stableStudyID.toUpperCase().endsWith("_TCGA")) {
        if (log.isDebugEnabled()) {
            log.debug("hasPermission(), user has access to ALL_TCGA cancer studies return true");
        }
        return true;
    }
    // if a user has access to 'all_target', simply return true for target studies
    if (grantedAuthorities.contains(ALL_TARGET_CANCER_STUDIES_ID.toUpperCase())
            && (stableStudyID.toUpperCase().endsWith("_TARGET")
                    || stableStudyID.equalsIgnoreCase("ALL_TARGET_PHASE1")
                    || stableStudyID.equalsIgnoreCase("ALL_TARGET_PHASE2"))) {
        if (log.isDebugEnabled()) {
            log.debug("hasPermission(), user has access to ALL_NCI_TARGET cancer studies return true");
        }
        return true;
    }

    // check if user is in study groups
    // performance now takes precedence over group accuracy (minimal risk to caching cancer study groups)
    Set<String> groups = new HashSet<>(Arrays.asList(cancerStudy.getGroups().split(";")));
    if (!Collections.disjoint(groups, grantedAuthorities)) {
        if (log.isDebugEnabled()) {
            log.debug("hasPermission(), user has access by groups return true");
        }
        return true;
    }

    // finally, check if the user has this study specifically listed in his 'groups' (a 'group' of this study only)
    boolean toReturn = grantedAuthorities.contains(stableStudyID.toUpperCase());

    if (log.isDebugEnabled()) {
        if (toReturn == true) {
            log.debug("hasPermission(), user has access to this cancer study: '" + stableStudyID.toUpperCase()
                    + "', returning true.");
        } else {
            log.debug("hasPermission(), user does not have access to the cancer study: '"
                    + stableStudyID.toUpperCase() + "', returning false.");
        }
    }

    // outta here
    return toReturn;
}

From source file:org.craftercms.cstudio.alfresco.dm.service.impl.DmContentTypeServiceImpl.java

@Override
public boolean isUserAllowed(Set<String> userRoles, ContentTypeConfigTO item) {
    if (item != null) {
        String name = item.getName();
        Set<String> allowedRoles = item.getAllowedRoles();
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Checking allowed roles on " + name + ". user roles: " + userRoles
                    + ", allowed roles: " + allowedRoles);
        }
        if (allowedRoles == null || allowedRoles.size() == 0) {
            return true;
        } else {
            boolean notAllowed = Collections.disjoint(userRoles, allowedRoles);
            if (notAllowed) {
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug(name + " is not allowed for the user.");
                }
                return false;
            } else {
                return true;
            }
        }
    } else {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(
                    "No content type config provided. Returning true for the content type access check.");
        }
        return true;
    }
}

From source file:com.sinet.gage.provision.service.impl.DomainFacadeImpl.java

/**
 * @param token
 * @param domainId
 * @return
 */
@Override
public Map<String, List<String>> getSubscriptionsForDomain(String token, String domainId) {
    log.debug("Fetching  subscription for domain with id " + domainId);

    Map<String, List<String>> providerMap = new HashMap<>();
    List<String> providerList = new ArrayList<>();
    List<String> courseList = new ArrayList<>();
    List<CourseResponse> courseResponseList = new ArrayList<>();

    courseResponseList = courseService.getAllCoursesFromDomain(token, domainId);

    for (CourseResponse courseResponse : courseResponseList) {
        courseList.add(courseResponse.getBaseid());
    }

    List<String> allProviderIDList = domainService.findProviderIdsList(token);
    for (String providerId : allProviderIDList) {
        List<String> cList = courseService.getAllCourseIdsForProvider(token, providerId);
        if (!Collections.disjoint(cList, courseList))
            providerList.add(providerId);
    }
    providerList = providerList.stream().distinct().collect(Collectors.toList());

    providerMap.put(Constants.CATALOG_DOMAIN_LIST, providerList);
    providerMap.put(Constants.COURSE_LIST, courseList);

    return providerMap;
}

From source file:org.apache.hadoop.hbase.backup.impl.BackupAdminImpl.java

/**
 * Verifies that backup images are valid for merge.
 *
 * <ul>
 * <li>All backups MUST be in the same destination
 * <li>No FULL backups are allowed - only INCREMENTAL
 * <li>All backups must be in COMPLETE state
 * <li>No holes in backup list are allowed
 * </ul>
 * <p>
 * @param backupIds list of backup ids
 * @param table backup system table
 * @throws IOException if the backup image is not valid for merge
 */
private void checkIfValidForMerge(String[] backupIds, BackupSystemTable table) throws IOException {
    String backupRoot = null;

    final Set<TableName> allTables = new HashSet<>();
    final Set<String> allBackups = new HashSet<>();
    long minTime = Long.MAX_VALUE, maxTime = Long.MIN_VALUE;
    for (String backupId : backupIds) {
        BackupInfo bInfo = table.readBackupInfo(backupId);
        if (bInfo == null) {
            String msg = "Backup session " + backupId + " not found";
            throw new IOException(msg);
        }
        if (backupRoot == null) {
            backupRoot = bInfo.getBackupRootDir();
        } else if (!bInfo.getBackupRootDir().equals(backupRoot)) {
            throw new IOException("Found different backup destinations in a list of a backup sessions "
                    + "\n1. " + backupRoot + "\n" + "2. " + bInfo.getBackupRootDir());
        }
        if (bInfo.getType() == BackupType.FULL) {
            throw new IOException("FULL backup image can not be merged for: \n" + bInfo);
        }

        if (bInfo.getState() != BackupState.COMPLETE) {
            throw new IOException("Backup image " + backupId + " can not be merged becuase of its state: "
                    + bInfo.getState());
        }
        allBackups.add(backupId);
        allTables.addAll(bInfo.getTableNames());
        long time = bInfo.getStartTs();
        if (time < minTime) {
            minTime = time;
        }
        if (time > maxTime) {
            maxTime = time;
        }
    }

    final long startRangeTime = minTime;
    final long endRangeTime = maxTime;
    final String backupDest = backupRoot;
    // Check we have no 'holes' in backup id list
    // Filter 1 : backupRoot
    // Filter 2 : time range filter
    // Filter 3 : table filter
    BackupInfo.Filter destinationFilter = info -> info.getBackupRootDir().equals(backupDest);

    BackupInfo.Filter timeRangeFilter = info -> {
        long time = info.getStartTs();
        return time >= startRangeTime && time <= endRangeTime;
    };

    BackupInfo.Filter tableFilter = info -> {
        List<TableName> tables = info.getTableNames();
        return !Collections.disjoint(allTables, tables);
    };

    BackupInfo.Filter typeFilter = info -> info.getType() == BackupType.INCREMENTAL;
    BackupInfo.Filter stateFilter = info -> info.getState() == BackupState.COMPLETE;

    List<BackupInfo> allInfos = table.getBackupHistory(-1, destinationFilter, timeRangeFilter, tableFilter,
            typeFilter, stateFilter);
    if (allInfos.size() != allBackups.size()) {
        // Yes, we have at least one hole in the backup image sequence
        List<String> missingIds = new ArrayList<>();
        for (BackupInfo info : allInfos) {
            if (allBackups.contains(info.getBackupId())) {
                continue;
            }
            missingIds.add(info.getBackupId());
        }
        String errMsg = "Sequence of backup ids has 'holes'. The following backup images must be added:"
                + org.apache.hadoop.util.StringUtils.join(",", missingIds);
        throw new IOException(errMsg);
    }
}

From source file:com.sinet.gage.provision.service.impl.DomainFacadeImpl.java

/**
 * @param token
 * @param domainId
 * @return
 */
@Override
public Map<String, List<String>> getSubscriptionsForSchool(String token, String domainId) {
    log.debug("Fetching  subscription for domain with id " + domainId);

    Map<String, List<String>> providerMap = new HashMap<>();
    List<String> providerList = new ArrayList<>();
    List<String> schoolCourseList = new ArrayList<>();
    List<CourseResponse> districtCoursesResponse = new ArrayList<>();
    List<String> courseList = new ArrayList<>();

    List<SubscriptionResponse> subResponse = subscriptionService.getSubscriptionsForDomain(token, domainId);

    if (subResponse.size() == 0) {
        return providerMap;
    }

    for (SubscriptionResponse s : subResponse) {
        districtCoursesResponse = courseService.getAllCoursesFromDomain(token, s.getEntityid());
    }

    schoolCourseList = courseService.getAllCoursesFromDomain(token, domainId).stream()
            .map(CourseResponse::getBaseid).collect(Collectors.toList());

    for (CourseResponse response : districtCoursesResponse) {
        if (schoolCourseList.contains(response.getId())) {
            courseList.add(response.getBaseid());
        }
    }

    List<String> allProviderIDList = domainService.findProviderIdsList(token);
    for (String providerId : allProviderIDList) {
        List<String> cList = courseService.getAllCourseIdsForProvider(token, providerId);
        if (!Collections.disjoint(cList, courseList))
            providerList.add(providerId);
    }

    providerMap.put(Constants.CATALOG_DOMAIN_LIST, providerList);
    providerMap.put(Constants.COURSE_LIST, courseList);

    return providerMap;
}