Example usage for java.util.Collections.disjoint

List of usage examples for java.util.Collections.disjoint

Introduction

On this page you can find usage examples for java.util.Collections.disjoint.

Prototype

public static boolean disjoint(Collection<?> c1, Collection<?> c2) 

Document

Returns true if the two specified collections have no elements in common.
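
A minimal, self-contained demonstration of this contract (assuming Java 9+ for Set.of and List.of):

import java.util.Collections;
import java.util.List;
import java.util.Set;

public class DisjointDemo {
    public static void main(String[] args) {
        // No shared elements: disjoint returns true.
        System.out.println(Collections.disjoint(Set.of(1, 2), Set.of(3, 4))); // true
        // The element 2 appears in both collections: disjoint returns false.
        System.out.println(Collections.disjoint(Set.of(1, 2), List.of(2, 3))); // false
        // An empty collection has no elements in common with anything.
        System.out.println(Collections.disjoint(Set.of(), Set.of(1))); // true
        // Per the Javadoc, passing the same collection twice returns true iff it
        // is empty, and a null argument throws NullPointerException.
    }
}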

Usage

From source file:org.jamocha.rating.fraj.RatingProvider.java

private void recursiveRateNode(final Node node, final Map<Node, Double> nodeToCost,
        final Map<Node, Set<Pair<Set<Path>, Set<PathFilterList>>>> preNetwork,
        final StatisticsProvider statProvider) {

    // If this node's costs have already been calculated, return the pre-network filters
    if (preNetwork.containsKey(node)) {
        assert nodeToCost.containsKey(node);
        return;
    }
    // Otherwise calculate the costs and then return the pre-network filters

    // Get the set of PathNodeFilterSets for each edge from the pre-network by recursively
    // calling this method for every parent node
    final Edge[] incomingEdges;
    try {
        incomingEdges = node.getIncomingEdges();
    } catch (final UnsupportedOperationException e) {
        // node instanceof OTN
        preNetwork.put(node,
                node.getPathNodeFilterSets().stream()
                        .map(pnfs -> Pair.of(
                                Lambdas.newIdentityHashSet(
                                        PathCollector.newHashSet().collectAllInLists(pnfs).getPaths()),
                                Collections.<PathFilterList>singleton(pnfs)))
                        .collect(toSet()));
        nodeToCost.put(node, 0.0);
        return;
    }
    PathNodeFilterSet chosenPnfs = null;
    Pair<Set<Path>, Set<PathFilterList>> chosenPair = null;
    Set<Set<PathFilterList>> chosenEdgeSet = null;
    Map<Path, Set<PathFilterList>> chosenPathToComponent = null;
    for (final PathNodeFilterSet localPnfs : node.getPathNodeFilterSets()) {
        chosenEdgeSet = new HashSet<>();
        chosenPathToComponent = new HashMap<>();
        final Set<Path> localPaths = Lambdas
                .newIdentityHashSet(PathCollector.newHashSet().collectAllInLists(localPnfs).getPaths());
        final Set<Path> resultPaths = new HashSet<Path>();
        final Set<PathFilterList> resultFilters = new HashSet<>();
        final Set<Set<PathFilterList>> preNetworkFilters = new HashSet<>();
        for (final Edge edge : incomingEdges) {
            final Node sourceNode = edge.getSourceNode();
            recursiveRateNode(sourceNode, nodeToCost, preNetwork, statProvider);
            final Set<Pair<Set<Path>, Set<PathFilterList>>> set = preNetwork.get(sourceNode);
            for (final Pair<Set<Path>, Set<PathFilterList>> pair : set) {
                final Set<Path> paths = pair.getLeft();
                final Set<PathFilterList> filters = pair.getRight();
                if (!Collections.disjoint(paths, localPaths)) {
                    preNetworkFilters.add(filters);
                    resultFilters.addAll(filters);
                    resultPaths.addAll(paths);
                    chosenEdgeSet.add(filters);
                    for (final Path path : paths) {
                        chosenPathToComponent.put(path, filters);
                    }
                }
            }
        }
        resultFilters.add(localPnfs);
        final Pair<Set<Path>, Set<PathFilterList>> localPair = Pair.of(resultPaths, resultFilters);
        preNetwork.computeIfAbsent(node, Lambdas.newIdentityHashSet()).add(localPair);
        chosenPnfs = localPnfs;
        chosenPair = localPair;
    }
    assert null != chosenPnfs;
    assert null != chosenPair;
    assert null != chosenEdgeSet;
    assert null != chosenPathToComponent;

    // Create a List of all PathFilters in this node
    final List<PathFilter> pathFilters = new ArrayList<>(chosenPnfs.getFilters());
    // Map each incoming edge's Set of PathFilterLists to a singleton list holding a Pair of
    // all the other edges' sets and this node's PathFilter list
    final Map<Set<PathFilterList>, List<Pair<List<Set<PathFilterList>>, List<PathFilter>>>> pathToComponents = new HashMap<>();
    for (final Set<PathFilterList> edge : chosenEdgeSet) {
        final SetView<Set<PathFilterList>> otherEdges = Sets.difference(chosenEdgeSet,
                Collections.singleton(edge));
        pathToComponents.put(edge,
                Collections.singletonList(Pair.of(new ArrayList<>(otherEdges), pathFilters)));
    }

    // Rate the node, depending on the Type of node either Alpha or Beta
    final double cost;
    if (incomingEdges.length > 1) {
        // Create a Map that maps each path to the corresponding Set of PathNodeFilterSets
        cost = rateBeta(statProvider, chosenPnfs, pathToComponents, chosenPathToComponent);
    } else if (!Objects.isNull(node.getMemory())) {
        cost = rateMaterialisedAlpha(statProvider, chosenPnfs,
                Iterables.first(chosenEdgeSet).getOrElse(new HashSet<PathFilterList>()));
    } else {
        cost = rateVirtualAlpha(statProvider, chosenPnfs,
                Iterables.first(chosenEdgeSet).getOrElse(new HashSet<PathFilterList>()));
    }
    nodeToCost.put(node, cost);
    // Return the Set of PathNodeFilterSet that represents the pre-network including this
    // node
}

From source file:org.apache.hadoop.hive.ql.optimizer.SharedWorkOptimizer.java

private static Set<Operator<?>> gatherDPPBranchOps(ParseContext pctx, SharedWorkOptimizerCache optimizerCache,
        Set<Operator<?>> ops, Set<Operator<?>> discardedOps) {
    Set<Operator<?>> dppBranches = new HashSet<>();
    for (Operator<?> op : ops) {
        if (op instanceof TableScanOperator) {
            Collection<Operator<?>> c = optimizerCache.tableScanToDPPSource.get((TableScanOperator) op);
            for (Operator<?> dppSource : c) {
                Set<Operator<?>> ascendants = findAscendantWorkOperators(pctx, optimizerCache, dppSource);
                if (!Collections.disjoint(ascendants, discardedOps)) {
                    // Remove branch
                    Operator<?> currentOp = dppSource;
                    while (currentOp.getNumChild() <= 1) {
                        dppBranches.add(currentOp);
                        currentOp = currentOp.getParentOperators().get(0);
                    }
                }
            }
        }
    }
    return dppBranches;
}

From source file:org.mskcc.cbio.oncokb.util.AlterationUtils.java

public static Boolean hasImportantCuratedOncogenicity(Set<Oncogenicity> oncogenicities) {
    Set<Oncogenicity> curatedOncogenicities = new HashSet<>();
    curatedOncogenicities.add(Oncogenicity.YES);
    curatedOncogenicities.add(Oncogenicity.LIKELY);
    curatedOncogenicities.add(Oncogenicity.LIKELY_NEUTRAL);
    return !Collections.disjoint(curatedOncogenicities, oncogenicities);
}

From source file:org.mskcc.cbio.oncokb.util.AlterationUtils.java

public static Boolean hasOncogenic(Set<Oncogenicity> oncogenicities) {
    Set<Oncogenicity> curatedOncogenicities = new HashSet<>();
    curatedOncogenicities.add(Oncogenicity.YES);
    curatedOncogenicities.add(Oncogenicity.LIKELY);
    return !Collections.disjoint(curatedOncogenicities, oncogenicities);
}
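
Both helpers above use the same idiom: negating disjoint turns "no elements in common" into "at least one element in common". A minimal sketch of that idiom as a reusable helper (containsAny is a hypothetical name, not part of AlterationUtils):

import java.util.Collection;
import java.util.Collections;

// Hypothetical helper illustrating the negated-disjoint idiom.
public static <T> boolean containsAny(Collection<T> haystack, Collection<? extends T> needles) {
    // Semantically equivalent to: needles.stream().anyMatch(haystack::contains)
    return !Collections.disjoint(haystack, needles);
}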

From source file:org.apache.hadoop.hive.ql.optimizer.SharedWorkOptimizer.java

private static boolean validPreConditions(ParseContext pctx, SharedWorkOptimizerCache optimizerCache,
        SharedResult sr) {

    // We check whether merging the works would cause the size of
    // the data in memory to grow too large.
    // TODO: Currently ignores GBY and PTF which may also buffer data in memory.
    if (sr.dataSize > sr.maxDataSize) {
        // Size surpasses limit, we cannot convert
        LOG.debug("accumulated data size: {} / max size: {}", sr.dataSize, sr.maxDataSize);
        return false;
    }

    TableScanOperator tsOp1 = (TableScanOperator) sr.retainableOps.get(0);
    TableScanOperator tsOp2 = (TableScanOperator) sr.discardableOps.get(0);

    // 1) The set of operators in the works of the TS operators need to meet
    // some requirements. In particular:
    // 1.1. None of the works that contain the TS operators can contain a Union
    // operator. This is not supported yet as we might end up with cycles in
    // the Tez DAG.
    // 1.2. There cannot be more than one DummyStore operator in the new resulting
    // work when the TS operators are merged. This is due to an assumption in
    // MergeJoinProc that needs to be further explored.
    // If any of these conditions are not met, we cannot merge.
    // TODO: Extend rule so it can be applied for these cases.
    final Set<Operator<?>> workOps1 = findWorkOperators(optimizerCache, tsOp1);
    final Set<Operator<?>> workOps2 = findWorkOperators(optimizerCache, tsOp2);
    boolean foundDummyStoreOp = false;
    for (Operator<?> op : workOps1) {
        if (op instanceof UnionOperator) {
            // We cannot merge (1.1)
            return false;
        }
        if (op instanceof DummyStoreOperator) {
            foundDummyStoreOp = true;
        }
    }
    for (Operator<?> op : workOps2) {
        if (op instanceof UnionOperator) {
            // We cannot merge (1.1)
            return false;
        }
        if (foundDummyStoreOp && op instanceof DummyStoreOperator) {
            // We cannot merge (1.2)
            return false;
        }
    }
    // 2) We check whether the output works will collide when we merge the operators.
    //
    //   Work1   Work2    (merge TS in W1 & W2)        Work1
    //       \   /                  ->                  | |       X
    //       Work3                                     Work3
    //
    // If we do, we cannot merge. The reason is that Tez currently does
    // not support parallel edges, i.e., multiple edges from same work x
    // into same work y.
    final Set<Operator<?>> outputWorksOps1 = findChildWorkOperators(pctx, optimizerCache, tsOp1);
    final Set<Operator<?>> outputWorksOps2 = findChildWorkOperators(pctx, optimizerCache, tsOp2);
    if (!Collections.disjoint(outputWorksOps1, outputWorksOps2)) {
        // We cannot merge
        return false;
    }
    // 3) We check whether we will end up with the same operators inputting into the same work.
    //
    //       Work1        (merge TS in W2 & W3)        Work1
    //       /   \                  ->                  | |       X
    //   Work2   Work3                                 Work2
    //
    // If we do, we cannot merge. The reason is the same as above: Tez
    // currently does not support parallel edges.
    final Set<Operator<?>> inputWorksOps1 = findParentWorkOperators(pctx, optimizerCache, tsOp1);
    final Set<Operator<?>> inputWorksOps2 = findParentWorkOperators(pctx, optimizerCache, tsOp2,
            sr.discardableInputOps);
    if (!Collections.disjoint(inputWorksOps1, inputWorksOps2)) {
        // We cannot merge
        return false;
    }
    // 4) We check whether one of the operators is part of a work that is an input for
    // the work of the other operator.
    //
    //   Work1            (merge TS in W1 & W3)        Work1
    //     |                        ->                   |        X
    //   Work2                                         Work2
    //     |                                             |
    //   Work3                                         Work1
    //
    // If we do, we cannot merge, as we would end up with a cycle in the DAG.
    final Set<Operator<?>> descendantWorksOps1 = findDescendantWorkOperators(pctx, optimizerCache, tsOp1,
            sr.discardableInputOps);
    final Set<Operator<?>> descendantWorksOps2 = findDescendantWorkOperators(pctx, optimizerCache, tsOp2,
            sr.discardableInputOps);
    if (!Collections.disjoint(descendantWorksOps1, workOps2)
            || !Collections.disjoint(workOps1, descendantWorksOps2)) {
        return false;
    }
    return true;
}
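
All three disjoint checks above answer the same question: do two operator sets overlap? A reduced sketch of the symmetric ancestry test from step 4, where the four sets stand in for the results of findWorkOperators and findDescendantWorkOperators:

import java.util.Collections;
import java.util.Set;

// Sketch: merging would create a cycle in the DAG if either work can
// reach the other through its descendant works.
public static boolean wouldCreateCycle(Set<?> descendantsOf1, Set<?> workOps2,
        Set<?> workOps1, Set<?> descendantsOf2) {
    return !Collections.disjoint(descendantsOf1, workOps2)
            || !Collections.disjoint(workOps1, descendantsOf2);
}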

From source file:net.java.jaspicoil.MSPacSpnegoServerAuthModule.java

private boolean authorizeCaller(HttpServletRequest request, byte[] serviceToken, GSSName name,
        final Subject clientSubject) {

    // create Subject with principals from name
    final Subject kerberosServiceSubject = createSubject(name);

    final Set<Principal> kerberosServicePrincipals = kerberosServiceSubject.getPrincipals();

    if (kerberosServicePrincipals.size() > 0) {
        final Set<Principal> clientPrincipals = clientSubject.getPrincipals();

        clientPrincipals.addAll(kerberosServicePrincipals);

        // Pickup the first Principal as the caller
        final Principal caller = kerberosServicePrincipals.iterator().next();

        if (caller != null) {
            // Fetch the list of extra groups
            final Set<String> extraGroups = fetchExtraGroups(request, this.serviceSubject, this.options);

            // Let's add all the groups as valid Principal as part of the
            // clientSubject
            final String[] groups = buildGroupsFromPAC(serviceToken, this.serviceSubject, extraGroups);

            final List<String> groupList = Arrays.asList(groups);

            if (this.mandatoryGroups != null && this.mandatoryGroups.size() > 0) {
                // There was some mandatory group to check
                if (!groupList.containsAll(this.mandatoryGroups)) {
                    // Not all of the mandatory groups were found, so exiting
                    debug("Not all of the mandatory groups ({1}) were found in the user groups {0}, so failing the authentication.",
                            groupList, this.mandatoryGroups);
                    return false;
                }
            }

            // Check global constraints
            if (this.smartcardSecuredUsersOnly || this.delegatedSecuredUsersOnly) {

                final List<String> contraintGroupList = new ArrayList<String>();
                if (this.smartcardSecuredUsersOnly) {
                    contraintGroupList.add(GROUP_SMARTCARD_AUTHENTICATED);
                }
                if (this.delegatedSecuredUsersOnly) {
                    contraintGroupList.add(GROUP_DELEGATED_AUTHENTICATED);
                }

                // Test whether at least one of the constraints is matched
                if (Collections.disjoint(groupList, contraintGroupList)) {
                    // None of the global constraint groups was found, so exiting
                    debug("The global constraint groups {1} were not found in the user groups {0}, so failing the authentication.",
                            groupList, contraintGroupList);
                    return false;
                }

            }

            final GroupPrincipalCallback groupPrincipalCallback = new GroupPrincipalCallback(clientSubject,
                    groups);
            try {
                // notify caller for the groups
                this.handler.handle(new Callback[] { groupPrincipalCallback });
                debug("Groups found {0}", groupList);
            } catch (final IOException e) {
                LOG.log(Level.WARNING, "Unable to set the groups " + groupList, e);
            } catch (final UnsupportedCallbackException e) {
                LOG.log(Level.WARNING, "Unable to set the groups " + groupList, e);
            }
        }

        // Create the caller principal to pass to caller
        final CallerPrincipalCallback callerPrincipalCallback = new CallerPrincipalCallback(clientSubject,
                caller);

        try {
            // notify caller for the Principal
            this.handler.handle(new Callback[] { callerPrincipalCallback });
            debug("Caller principal is {0}", (Object) caller);
            return true;
        } catch (final IOException e) {
            LOG.log(Level.WARNING, "Unable to set caller principal {0}", e);
        } catch (final UnsupportedCallbackException e) {
            LOG.log(Level.WARNING, "Unable to set caller principal {0}", e);
        }
    }
    return false;
}
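
Note the un-negated call in the constraint check above: disjoint returning true is the failure case, meaning the user holds none of the required groups. A reduced sketch of that pattern (the method and group names are hypothetical):

import java.util.Collections;
import java.util.List;

// Sketch: fail when the user's groups share no element with the set of
// groups of which at least one is required.
public static void requireAnyOf(List<String> userGroups, List<String> requiredAnyOf) {
    if (Collections.disjoint(userGroups, requiredAnyOf)) {
        throw new SecurityException("none of the required groups " + requiredAnyOf + " present");
    }
}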

From source file:com.flexive.tests.embedded.SearchEngineTest.java

private int getExpectedPartialMatches(FxResultSet result, final List<FxSelectListItem> queryItems) {
    return Iterables.size(Iterables.filter(result.getResultRows(), new Predicate<FxResultRow>() {
        @Override
        public boolean apply(FxResultRow row) {
            final SelectMany selectMany = ((FxSelectMany) row.getValue(2)).getBestTranslation();
            return !Collections.disjoint(queryItems, selectMany.getSelected());
        }
    }));
}

From source file:org.jamocha.dn.compiler.pathblocks.PathBlocks.java

protected static List<PathRule> createOutput(final List<Either<Rule, ExistentialProxy>> rules,
        final PathBlockSet resultBlockSet) {
    final Function<? super Block, ? extends Integer> characteristicNumber = block -> block
            .getFlatFilterInstances().size() / block.getRulesOrProxies().size();
    final TreeMap<Integer, CursorableLinkedList<Block>> blockMap = resultBlockSet.getBlocks().stream()
            .collect(groupingBy(characteristicNumber, TreeMap::new, toCollection(CursorableLinkedList::new)));
    // iterate over all the filter proxies ever used
    for (final FilterProxy filterProxy : FilterProxy.getFilterProxies()) {
        final Set<ExistentialProxy> existentialProxies = filterProxy.getProxies();
        // determine the largest characteristic number of the blocks containing filter instances
        // of one of the existential proxies (choice is arbitrary, since the filters and the
        // conflicts are identical if they belong to the same filter).
        final OptionalInt optMax = resultBlockSet.getRuleInstanceToBlocks()
                .computeIfAbsent(Either.right(existentialProxies.iterator().next()), newHashSet()).stream()
                .mapToInt(composeToInt(characteristicNumber, Integer::intValue)).max();
        if (!optMax.isPresent())
            continue;
        final int eCN = optMax.getAsInt();
        // get the list to append the blocks using the existential closure filter INSTANCE to
        final CursorableLinkedList<Block> targetList = blockMap.get(eCN);
        // for every existential part
        for (final ExistentialProxy existentialProxy : existentialProxies) {
            final FilterInstance exClosure = existentialProxy.getExistentialClosure();
            // create a list storing the blocks to move
            final List<Block> toMove = new ArrayList<>();
            for (final CursorableLinkedList<Block> blockList : blockMap.headMap(eCN, true).values()) {
                // iterate over the blocks in the current list
                for (final ListIterator<Block> iterator = blockList.listIterator(); iterator.hasNext();) {
                    final Block current = iterator.next();
                    // if the current block uses the current existential closure filter
                    // INSTANCE, it has to be moved
                    if (current.getFlatFilterInstances().contains(exClosure)) {
                        iterator.remove();
                        toMove.add(current);
                    }
                }
            }
            // append the blocks to be moved (they were only removed so far)
            targetList.addAll(toMove);
        }
    }
    final Set<FilterInstance> constructedFIs = new HashSet<>();
    final Map<Either<Rule, ExistentialProxy>, Map<FilterInstance, Set<FilterInstance>>> ruleToJoinedWith = new HashMap<>();
    final Map<Set<FilterInstance>, PathFilterList> joinedWithToComponent = new HashMap<>();
    // at this point, the network can be constructed
    for (final CursorableLinkedList<Block> blockList : blockMap.values()) {
        for (final Block block : blockList) {
            final List<Either<Rule, ExistentialProxy>> blockRules = Lists
                    .newArrayList(block.getRulesOrProxies());
            final Set<List<FilterInstance>> filterInstanceColumns = Block
                    .getFilterInstanceColumns(block.getFilters(), block.getRuleToFilterToRow(), blockRules);
            // since we are considering blocks, it is either the case that all filter
            // instances of the column have been constructed or none of them have
            final PathSharedListWrapper sharedListWrapper = new PathSharedListWrapper(blockRules.size());
            final Map<Either<Rule, ExistentialProxy>, PathSharedList> ruleToSharedList = IntStream
                    .range(0, blockRules.size()).boxed()
                    .collect(toMap(blockRules::get, sharedListWrapper.getSharedSiblings()::get));
            final List<List<FilterInstance>> columnsToConstruct, columnsAlreadyConstructed;
            {
                final Map<Boolean, List<List<FilterInstance>>> partition = filterInstanceColumns.stream()
                        .collect(partitioningBy(column -> Collections.disjoint(column, constructedFIs)));
                columnsAlreadyConstructed = partition.get(Boolean.FALSE);
                columnsToConstruct = partition.get(Boolean.TRUE);
            }

            if (!columnsAlreadyConstructed.isEmpty()) {
                final Map<PathSharedList, LinkedHashSet<PathFilterList>> sharedPart = new HashMap<>();
                for (final List<FilterInstance> column : columnsAlreadyConstructed) {
                    for (final FilterInstance fi : column) {
                        sharedPart
                                .computeIfAbsent(ruleToSharedList.get(fi.getRuleOrProxy()), newLinkedHashSet())
                                .add(joinedWithToComponent
                                        .get(ruleToJoinedWith.get(fi.getRuleOrProxy()).get(fi)));
                    }
                }
                sharedListWrapper.addSharedColumns(sharedPart);
            }

            for (final List<FilterInstance> column : columnsToConstruct) {
                sharedListWrapper.addSharedColumn(column.stream().collect(
                        toMap(fi -> ruleToSharedList.get(fi.getRuleOrProxy()), FilterInstance::convert)));
            }
            constructedFIs.addAll(block.getFlatFilterInstances());
            for (final Entry<Either<Rule, ExistentialProxy>, Map<Filter, FilterInstancesSideBySide>> entry : block
                    .getRuleToFilterToRow().entrySet()) {
                final Either<Rule, ExistentialProxy> rule = entry.getKey();
                final Set<FilterInstance> joined = entry.getValue().values().stream()
                        .flatMap(sbs -> sbs.getInstances().stream()).collect(toSet());
                final Map<FilterInstance, Set<FilterInstance>> joinedWithMapForThisRule = ruleToJoinedWith
                        .computeIfAbsent(rule, newHashMap());
                joined.forEach(fi -> joinedWithMapForThisRule.put(fi, joined));
                joinedWithToComponent.put(joined, ruleToSharedList.get(rule));
            }
        }
    }
    final List<PathRule> pathRules = new ArrayList<>();
    for (final Either<Rule, ExistentialProxy> either : rules) {
        if (either.isRight()) {
            continue;
        }
        final List<PathFilterList> pathFilterLists = Stream
                .concat(either.left().get().existentialProxies.values().stream().map(p -> Either.right(p)),
                        Stream.of(either))
                .flatMap(e -> ruleToJoinedWith.getOrDefault(e, Collections.emptyMap()).values().stream()
                        .distinct())
                .map(joinedWithToComponent::get).collect(toList());
        pathRules.add(either.left().get().getOriginal().toPathRule(PathFilterList.toSimpleList(pathFilterLists),
                pathFilterLists.size() > 1 ? InitialFactPathsFinder.gather(pathFilterLists)
                        : Collections.emptySet()));
    }
    return pathRules;
}
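
One detail worth pulling out of createOutput: the partitioningBy collector near the top of the block loop uses disjoint as its predicate, splitting the filter-instance columns into those sharing nothing with the already-constructed set and those overlapping it. A reduced sketch of that split, with String standing in for FilterInstance:

import static java.util.stream.Collectors.partitioningBy;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Sketch: partition.get(true) holds the columns disjoint from the
// constructed set (still to build); partition.get(false) holds the rest.
public static Map<Boolean, List<List<String>>> splitByConstructed(
        List<List<String>> columns, Set<String> constructed) {
    return columns.stream()
            .collect(partitioningBy(column -> Collections.disjoint(column, constructed)));
}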

From source file:org.jahia.services.workflow.WorkflowService.java

public Collection<WorkflowRule> getWorkflowRules(JCRNodeWrapper objectNode) {

    try {

        Map<String, WorkflowRule> rules = recurseOnRules(objectNode);
        Map<String, List<String>> perms = new HashMap<>();

        JCRNodeWrapper rootNode = objectNode.getSession().getNode("/");
        JahiaAccessManager accessControlManager = (JahiaAccessManager) rootNode.getRealNode().getSession()
                .getAccessControlManager();
        if (objectNode.getAclEntries() != null) {
            for (List<String[]> list : objectNode.getAclEntries().values()) {
                for (String[] strings : list) {
                    for (Privilege privilege : accessControlManager.getPermissionsInRole(strings[2])) {
                        if (!perms.containsKey(strings[0])) {
                            perms.put(strings[0], new ArrayList<String>());
                        }
                        perms.get(strings[0]).add(JCRContentUtils.getJCRName(privilege.getName(),
                                objectNode.getRealNode().getSession().getWorkspace().getNamespaceRegistry()));
                    }
                }
            }
        }
        Map<String, WorkflowRule> rulesCopy = new HashMap<>(rules);
        for (Map.Entry<String, WorkflowRule> ruleEntry : rules.entrySet()) {
            WorkflowRule rule = ruleEntry.getValue();
            for (Map.Entry<String, List<String>> aclEntry : perms.entrySet()) {
                if (aclEntry.getKey().startsWith(
                        rule.getDefinitionPath().equals("/") ? "/" : rule.getDefinitionPath() + "/")) {
                    if (!Collections.disjoint(aclEntry.getValue(), rule.getPermissions().values())) {
                        rule = new WorkflowRule(aclEntry.getKey(), ruleEntry.getValue().getDefinitionPath(),
                                rule.getProviderKey(), rule.getWorkflowDefinitionKey(), rule.getPermissions());
                        rulesCopy.put(ruleEntry.getKey(), rule);
                    }
                }
            }
        }
        return Collections.unmodifiableCollection(rulesCopy.values());
    } catch (RepositoryException e) {
        logger.error(e.getMessage(), e);
    }
    return null;
}
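
The overlap test above runs a List of permission names against rule.getPermissions().values(); since Map.values() is itself a Collection view, it can be passed to disjoint directly. A reduced sketch (names are illustrative):

import java.util.Collections;
import java.util.List;
import java.util.Map;

// Sketch: a rule applies when the ACL grants at least one of the
// permission names appearing among the rule's permission map values.
public static boolean ruleMatchesAcl(List<String> aclPermissions, Map<String, String> rulePermissions) {
    return !Collections.disjoint(aclPermissions, rulePermissions.values());
}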

From source file:org.apache.calcite.rel.rules.AbstractMaterializedViewRule.java

/**
 * It checks whether the query can be rewritten using the view even though the
 * view uses additional tables. In order to do that, we need to double-check
 * that every join that exists in the view and is not in the query is a
 * cardinality-preserving join, i.e., it only appends columns to the row
 * without changing its multiplicity. Thus, the join needs to be:
 * <ul>
 * <li> Equi-join </li>
 * <li> Between all columns in the keys </li>
 * <li> Foreign-key columns do not allow NULL values </li>
 * <li> Foreign-key </li>
 * <li> Unique-key </li>
 * </ul>
 *
 * <p>If it can be rewritten, it returns true and it inserts the missing equi-join
 * predicates in the input compensationEquiColumns multimap. Otherwise, it returns
 * false.
 */
private static boolean compensateQueryPartial(
        Multimap<RexTableInputRef, RexTableInputRef> compensationEquiColumns, Set<RelTableRef> viewTableRefs,
        EquivalenceClasses vEC, Set<RelTableRef> queryTableRefs) {
    // Create UK-FK graph with view tables
    final DirectedGraph<RelTableRef, Edge> graph = DefaultDirectedGraph.create(Edge.FACTORY);
    final Multimap<List<String>, RelTableRef> tableQNameToTableRefs = ArrayListMultimap.create();
    final Set<RelTableRef> extraTableRefs = new HashSet<>();
    for (RelTableRef tRef : viewTableRefs) {
        // Add tables in view as vertices
        graph.addVertex(tRef);
        tableQNameToTableRefs.put(tRef.getQualifiedName(), tRef);
        if (!queryTableRefs.contains(tRef)) {
            // Add to extra tables if table is not part of the query
            extraTableRefs.add(tRef);
        }
    }
    for (RelTableRef tRef : graph.vertexSet()) {
        // Add edges between tables
        List<RelReferentialConstraint> constraints = tRef.getTable().getReferentialConstraints();
        for (RelReferentialConstraint constraint : constraints) {
            Collection<RelTableRef> parentTableRefs = tableQNameToTableRefs
                    .get(constraint.getTargetQualifiedName());
            if (parentTableRefs == null || parentTableRefs.isEmpty()) {
                continue;
            }
            for (RelTableRef parentTRef : parentTableRefs) {
                boolean canBeRewritten = true;
                Multimap<RexTableInputRef, RexTableInputRef> equiColumns = ArrayListMultimap.create();
                for (int pos = 0; pos < constraint.getNumColumns(); pos++) {
                    int foreignKeyPos = constraint.getColumnPairs().get(pos).source;
                    RelDataType foreignKeyColumnType = tRef.getTable().getRowType().getFieldList()
                            .get(foreignKeyPos).getType();
                    RexTableInputRef foreignKeyColumnRef = RexTableInputRef.of(tRef, foreignKeyPos,
                            foreignKeyColumnType);
                    int uniqueKeyPos = constraint.getColumnPairs().get(pos).target;
                    RexTableInputRef uniqueKeyColumnRef = RexTableInputRef.of(parentTRef, uniqueKeyPos,
                            parentTRef.getTable().getRowType().getFieldList().get(uniqueKeyPos).getType());
                    if (!foreignKeyColumnType.isNullable() && vEC.getEquivalenceClassesMap()
                            .get(uniqueKeyColumnRef).contains(foreignKeyColumnRef)) {
                        equiColumns.put(foreignKeyColumnRef, uniqueKeyColumnRef);
                    } else {
                        canBeRewritten = false;
                        break;
                    }
                }
                if (canBeRewritten) {
                    // Add edge FK -> UK
                    Edge edge = graph.getEdge(tRef, parentTRef);
                    if (edge == null) {
                        edge = graph.addEdge(tRef, parentTRef);
                    }
                    edge.equiColumns.putAll(equiColumns);
                    break;
                }
            }
        }
    }

    // Try to eliminate tables from graph: if we can do it, it means extra tables in
    // view are cardinality-preserving joins
    boolean done = false;
    do {
        List<RelTableRef> nodesToRemove = new ArrayList<>();
        for (RelTableRef tRef : graph.vertexSet()) {
            if (graph.getInwardEdges(tRef).size() == 1 && graph.getOutwardEdges(tRef).isEmpty()) {
                // UK-FK join
                nodesToRemove.add(tRef);
                if (extraTableRefs.contains(tRef)) {
                    // We need to add to compensation columns as the table is not present in the query
                    compensationEquiColumns.putAll(graph.getInwardEdges(tRef).get(0).equiColumns);
                }
            }
        }
        if (!nodesToRemove.isEmpty()) {
            graph.removeAllVertices(nodesToRemove);
        } else {
            done = true;
        }
    } while (!done);

    // After removing them, we check whether all the remaining tables in the graph
    // are tables present in the query: if they are, we can try to rewrite
    if (!Collections.disjoint(graph.vertexSet(), extraTableRefs)) {
        return false;
    }
    return true;
}