Example usage for com.google.common.collect Sets intersection

Introduction

On this page you can find usage examples for com.google.common.collect.Sets.intersection.

Prototype

public static <E> SetView<E> intersection(final Set<E> set1, final Set<?> set2) 

Document

Returns an unmodifiable view of the intersection of two sets.
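
The returned SetView is a live, unmodifiable view backed by the two input sets: it reflects later changes to either set and rejects direct modification. A minimal sketch of this behavior (the sets and names are illustrative, not taken from the examples below):

Set<String> first = Sets.newHashSet("a", "b", "c");
Set<String> second = Sets.newHashSet("b", "c", "d");

Sets.SetView<String> both = Sets.intersection(first, second); // contains b and c
first.remove("b");
// the view tracks the backing sets, so it now contains only c
System.out.println(both);

// copy the view when an independent snapshot is needed
ImmutableSet<String> snapshot = both.immutableCopy();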

Usage

From source file:org.geoserver.gwc.layer.StyleParameterFilter.java

@Override
public List<String> getLegalValues() {
    checkInitialized();
    Set<String> layerStyles = getLayerStyles();
    if (allowedStyles == null) {
        // Values is null so allow any of the backing layer's styles
        return new ArrayList<String>(layerStyles);
    } else {
        // Values is set so only allow the intersection of the specified styles and those of the backing layer.
        return new ArrayList<String>(Sets.intersection(layerStyles, allowedStyles));
    }
}

From source file:org.apache.crunch.impl.mr.plan.MSCRPlanner.java

public MRExecutor plan(Class<?> jarClass, Configuration conf) throws IOException {

    DotfileUtil dotfileUtil = new DotfileUtil(jarClass, conf);

    // Generate the debug lineage dotfiles (if configuration is enabled)
    dotfileUtil.buildLineageDotfile(outputs);

    Map<PCollectionImpl<?>, Set<Target>> targetDeps = Maps.newTreeMap(DEPTH_COMPARATOR);
    for (PCollectionImpl<?> pcollect : outputs.keySet()) {
        targetDeps.put(pcollect, pcollect.getTargetDependencies());
    }

    Multimap<Target, JobPrototype> assignments = HashMultimap.create();

    while (!targetDeps.isEmpty()) {
        Set<Target> allTargets = Sets.newHashSet();
        for (PCollectionImpl<?> pcollect : targetDeps.keySet()) {
            allTargets.addAll(outputs.get(pcollect));
        }
        GraphBuilder graphBuilder = new GraphBuilder();

        // Walk the current plan tree and build a graph in which the vertices are
        // sources, targets, and GBK operations.
        Set<PCollectionImpl<?>> currentStage = Sets.newHashSet();
        for (PCollectionImpl<?> output : targetDeps.keySet()) {
            Set<Target> deps = Sets.intersection(allTargets, targetDeps.get(output));
            if (deps.isEmpty()) {
                graphBuilder.visitOutput(output);
                currentStage.add(output);
            }
        }

        Graph baseGraph = graphBuilder.getGraph();
        boolean hasInputs = false;
        for (Vertex v : baseGraph) {
            if (v.isInput()) {
                hasInputs = true;
                break;
            }
        }
        if (!hasInputs) {
            LOG.warn("No input sources for pipeline, nothing to do...");
            return new MRExecutor(conf, jarClass, outputs, toMaterialize, appendedTargets, pipelineCallables);
        }

        // Create a new graph that splits up dependent GBK nodes.
        Graph graph = prepareFinalGraph(baseGraph);

        // Break the graph up into connected components.
        List<List<Vertex>> components = graph.connectedComponents();

        // Generate the debug graph dotfiles (if configuration is enabled)
        dotfileUtil.buildBaseGraphDotfile(outputs, graph);
        dotfileUtil.buildSplitGraphDotfile(outputs, graph, components);

        // For each component, we will create one or more job prototypes,
        // depending on its profile.
        // For dependency handling, we only need to care about which
        // job prototype a particular GBK is assigned to.
        Multimap<Vertex, JobPrototype> newAssignments = HashMultimap.create();
        for (List<Vertex> component : components) {
            newAssignments.putAll(constructJobPrototypes(component));
        }

        // Add in the job dependency information here.
        for (Map.Entry<Vertex, JobPrototype> e : newAssignments.entries()) {
            JobPrototype current = e.getValue();
            for (Vertex parent : graph.getParents(e.getKey())) {
                for (JobPrototype parentJobProto : newAssignments.get(parent)) {
                    current.addDependency(parentJobProto);
                }
            }
        }

        ImmutableMultimap<Target, JobPrototype> previousStages = ImmutableMultimap.copyOf(assignments);
        for (Map.Entry<Vertex, JobPrototype> e : newAssignments.entries()) {
            if (e.getKey().isOutput()) {
                PCollectionImpl<?> pcollect = e.getKey().getPCollection();
                JobPrototype current = e.getValue();

                // Add in implicit dependencies via SourceTargets that are read into memory
                for (Target pt : pcollect.getTargetDependencies()) {
                    for (JobPrototype parentJobProto : assignments.get(pt)) {
                        current.addDependency(parentJobProto);
                    }
                }

                // Add this to the set of output assignments
                for (Target t : outputs.get(pcollect)) {
                    assignments.put(t, e.getValue());
                }
            } else {
                Source source = e.getKey().getSource();
                if (source != null && source instanceof Target) {
                    JobPrototype current = e.getValue();
                    Collection<JobPrototype> parentJobPrototypes = previousStages.get((Target) source);
                    if (parentJobPrototypes != null) {
                        for (JobPrototype parentJobProto : parentJobPrototypes) {
                            current.addDependency(parentJobProto);
                        }
                    }
                }
            }
        }

        // Remove completed outputs and mark materialized output locations
        // for subsequent job processing.
        for (PCollectionImpl<?> output : currentStage) {
            if (toMaterialize.containsKey(output)) {
                MaterializableIterable mi = toMaterialize.get(output);
                if (mi.isSourceTarget()) {
                    output.materializeAt((SourceTarget) mi.getSource());
                }
            }
            targetDeps.remove(output);
        }
    }

    // Finally, construct the jobs from the prototypes and return.
    MRExecutor exec = new MRExecutor(conf, jarClass, outputs, toMaterialize, appendedTargets,
            pipelineCallables);

    // Generate the debug Plan dotfiles
    dotfileUtil.buildPlanDotfile(exec, assignments, pipeline, lastJobID);

    for (JobPrototype proto : Sets.newHashSet(assignments.values())) {
        exec.addJob(proto.getCrunchJob(jarClass, conf, pipeline, lastJobID));
    }

    // Generate the debug RTNode dotfiles (if configuration is enabled)
    dotfileUtil.buildRTNodesDotfile(exec);

    // Attach the dotfiles to the MRExecutor context
    dotfileUtil.addDotfilesToContext(exec);

    return exec;
}

From source file:com.github.fge.jsonpatch.diff.JsonDiff.java

private static void generateObjectDiffs(final DiffProcessor processor, final JsonPointer pointer,
        final ObjectNode source, final ObjectNode target) {
    final Set<String> firstFields = Sets.newTreeSet(Sets.newHashSet(source.fieldNames()));
    final Set<String> secondFields = Sets.newTreeSet(Sets.newHashSet(target.fieldNames()));

    for (final String field : Sets.difference(firstFields, secondFields))
        processor.valueRemoved(pointer.append(field), source.get(field));

    for (final String field : Sets.difference(secondFields, firstFields))
        processor.valueAdded(pointer.append(field), target.get(field));

    for (final String field : Sets.intersection(firstFields, secondFields))
        generateDiffs(processor, pointer.append(field), source.get(field), target.get(field));
}
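
The pattern above (difference in both directions plus an intersection) is a general way to split two key sets into removed, added, and common entries. A small hypothetical sketch applying it to two maps (the maps are illustrative, not part of JsonDiff):

Map<String, Integer> before = ImmutableMap.of("a", 1, "b", 2);
Map<String, Integer> after = ImmutableMap.of("b", 2, "c", 3);

Set<String> removed = Sets.difference(before.keySet(), after.keySet());  // contains "a"
Set<String> added = Sets.difference(after.keySet(), before.keySet());    // contains "c"
Set<String> common = Sets.intersection(before.keySet(), after.keySet()); // contains "b"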

From source file:co.cask.cdap.etl.planner.Dag.java

/**
 * Validates that the DAG has no cycles and no islands. This should only be called before any
 * mutating operations like {@link #removeSource()} are called.
 *
 * @throws IllegalStateException if there is a cycle in the graph, or an island in the graph
 */
void validate() {
    // if there are no sources, we must have a cycle.
    if (sources.isEmpty()) {
        throw new IllegalStateException("DAG does not have any sources. Please remove cycles from the graph.");
    }
    // similarly, if there are no sinks, we must have a cycle
    if (sinks.isEmpty()) {
        throw new IllegalStateException("DAG does not have any sinks. Please remove cycles from the graph.");
    }

    // check for cycles
    getTopologicalOrder();

    // check for sections of the dag that are on an island by themselves

    // source -> [ nodes accessible by the source ]
    Map<String, Set<String>> nodesAccessibleBySources = new HashMap<>();
    for (String source : sources) {
        nodesAccessibleBySources.put(source, accessibleFrom(source));
    }

    // the algorithm is to keep an island and try to keep adding to it until we can't anymore.
    // the island starts off as the nodes accessible by the first source
    // we then loop through all other sources and add them to the island if they can access any node in the island.
    // we stop if we ever loop through all other sources and can't add them to the island,
    // or if the island has grown to include all sources.
    Set<String> islandNodes = new HashSet<>();
    // seed the island with the first source
    Set<String> potentialIslandSources = new HashSet<>(sources);
    String firstSource = potentialIslandSources.iterator().next();
    islandNodes.addAll(nodesAccessibleBySources.get(firstSource));
    potentialIslandSources.remove(firstSource);

    while (!potentialIslandSources.isEmpty()) {
        Set<String> sourcesAdded = new HashSet<>();
        // for each source not yet a part of the island
        for (String potentialIslandSource : potentialIslandSources) {
            Set<String> accessibleBySource = nodesAccessibleBySources.get(potentialIslandSource);
            // if that source can access the island in any way, add it to the island
            if (!Sets.intersection(islandNodes, accessibleBySource).isEmpty()) {
                islandNodes.addAll(accessibleBySource);
                sourcesAdded.add(potentialIslandSource);
            }
        }
        // if this is empty, no sources were added to the island. That means the island really is an island.
        if (sourcesAdded.isEmpty()) {
            throw new IllegalStateException(String.format(
                    "Invalid DAG. There is an island made up of stages %s (no other stages connect to them).",
                    Joiner.on(',').join(islandNodes)));
        }
        potentialIslandSources.removeAll(sourcesAdded);
    }
}
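
Stripped of the surrounding bookkeeping, the island check in the loop above is simply a test for overlapping reachability sets; a standalone sketch with illustrative node names:

Set<String> reachableFromA = ImmutableSet.of("a", "x", "y");
Set<String> reachableFromB = ImmutableSet.of("b", "y", "z");
// the two sources belong to the same island because their reachable sets share "y"
boolean sameIsland = !Sets.intersection(reachableFromA, reachableFromB).isEmpty();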

From source file:net.sourceforge.jwbf.mediawiki.actions.editing.PostModifyContent.java

/**
 * @return true if both arguments refer to the same set, or if the intersection of the two sets is empty;
 *         false if exactly one of them is <code>null</code>.
 */
boolean isIntersectionEmpty(Set<?> a, Set<?> b) {
    if (a == b) {
        return true;
    }
    if (a == null || b == null) {
        return false;
    }
    SetView<?> intersection = Sets.intersection(a, b);
    return intersection.isEmpty();
}
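
Because the view is lazy, the isEmpty() call above can stop at the first element common to both sets rather than materializing the whole intersection. When only disjointness matters, the plain JDK offers the same test (shown here as an alternative, not what PostModifyContent uses):

boolean disjoint = java.util.Collections.disjoint(a, b);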

From source file:edu.umn.msi.tropix.proteomics.itraqquantitation.impl.ScanIndex.java

public boolean numberChargeAndAlternativeNameMatch(@Nonnull final ScanIndex scanIndex) {
    boolean match = false;
    if (numberAndChargeMatch(scanIndex)) {
        match = !Sets.intersection(alternativeNames, scanIndex.alternativeNames).isEmpty();
    }
    return match;
}

From source file:org.dishevelled.venn.model.QuaternaryVennModelImpl.java

/**
 * Create a new quaternary venn model with the specified sets.
 *
 * @param first first set, must not be null
 * @param second second set, must not be null
 * @param third third set, must not be null
 * @param fourth fourth set, must not be null
 */
public QuaternaryVennModelImpl(final Set<? extends E> first, final Set<? extends E> second,
        final Set<? extends E> third, final Set<? extends E> fourth) {
    if (first == null) {
        throw new IllegalArgumentException("first must not be null");
    }
    if (second == null) {
        throw new IllegalArgumentException("second must not be null");
    }
    if (third == null) {
        throw new IllegalArgumentException("third must not be null");
    }
    if (fourth == null) {
        throw new IllegalArgumentException("fourth must not be null");
    }

    // todo  defensive copy?
    this.first = new ObservableSetImpl(first);
    this.second = new ObservableSetImpl(second);
    this.third = new ObservableSetImpl(third);
    this.fourth = new ObservableSetImpl(fourth);

    // alias
    ObservableSet<E> f = this.first;
    ObservableSet<E> s = this.second;
    ObservableSet<E> t = this.third;
    ObservableSet<E> r = this.fourth;
    firstOnly = Sets.difference(Sets.difference(Sets.difference(f, s), t), r); // f - s - t - r
    secondOnly = Sets.difference(Sets.difference(Sets.difference(s, f), t), r); // s - f - t - r
    thirdOnly = Sets.difference(Sets.difference(Sets.difference(t, f), s), r); // t - f - s - r
    fourthOnly = Sets.difference(Sets.difference(Sets.difference(r, f), s), t); // r - f - s - t
    firstSecond = Sets.difference(Sets.difference(Sets.intersection(f, s), t), r); // f n s - t - r
    firstThird = Sets.difference(Sets.difference(Sets.intersection(f, t), s), r); // f n t - s - r
    firstFourth = Sets.difference(Sets.difference(Sets.intersection(f, r), s), t); // f n r - s - t
    secondThird = Sets.difference(Sets.difference(Sets.intersection(s, t), f), r); // s n t - f - r
    secondFourth = Sets.difference(Sets.difference(Sets.intersection(s, r), f), t); // s n r - f - t
    thirdFourth = Sets.difference(Sets.difference(Sets.intersection(t, r), f), s); // t n r - f - s
    firstSecondThird = Sets.difference(Sets.intersection(f, Sets.intersection(s, t)), r); // f n s n t - r
    firstSecondFourth = Sets.difference(Sets.intersection(f, Sets.intersection(s, r)), t); // f n s n r - t
    firstThirdFourth = Sets.difference(Sets.intersection(f, Sets.intersection(t, r)), s); // f n t n r - s
    secondThirdFourth = Sets.difference(Sets.intersection(s, Sets.intersection(t, r)), f); // s n t n r - f
    intersection = Sets.intersection(f, Sets.intersection(s, Sets.intersection(t, r))); // f n s n t n r
    union = Sets.union(f, Sets.union(s, Sets.union(t, r))); // f u s u t u r
    selection = new SelectionView<E>(union, f, s, t, r);

    exclusives = new HashMap<ImmutableBitSet, Set<E>>(15);

    exclusives.put(toImmutableBitSet(0), firstOnly);
    exclusives.put(toImmutableBitSet(1), secondOnly);
    exclusives.put(toImmutableBitSet(2), thirdOnly);
    exclusives.put(toImmutableBitSet(3), fourthOnly);

    exclusives.put(toImmutableBitSet(0, 1), firstSecond);
    exclusives.put(toImmutableBitSet(0, 2), firstThird);
    exclusives.put(toImmutableBitSet(0, 3), firstFourth);
    exclusives.put(toImmutableBitSet(1, 2), secondThird);
    exclusives.put(toImmutableBitSet(1, 3), secondFourth);
    exclusives.put(toImmutableBitSet(2, 3), thirdFourth);

    exclusives.put(toImmutableBitSet(0, 1, 2), firstSecondThird);
    exclusives.put(toImmutableBitSet(0, 1, 3), firstSecondFourth);
    exclusives.put(toImmutableBitSet(0, 2, 3), firstThirdFourth);
    exclusives.put(toImmutableBitSet(1, 2, 3), secondThirdFourth);

    exclusives.put(toImmutableBitSet(0, 1, 2, 3), intersection);
}

From source file:grakn.core.graql.reasoner.rule.InferenceRule.java

/**
 * @return true if the rule has disconnected head, i.e. head and body do not share any variables
 */
private boolean hasDisconnectedHead() {
    return Sets.intersection(body.getVarNames(), head.getVarNames()).isEmpty();
}

From source file:org.caleydo.view.relationshipexplorer.ui.collection.AEntityCollection.java

@Override
public Set<Object> getElementIDsFromBroadcastingID(Object broadcastingID) {
    Set<Object> elementIDs = getElementIDsFromBroadcastID(broadcastingID);
    return new HashSet<>(Sets.intersection(elementIDs, allElementIDs));
}

From source file:org.apache.cassandra.repair.RepairMessageVerbHandler.java

public void doVerb(final MessageIn<RepairMessage> message, final int id) {
    // TODO add cancel/interrupt message
    RepairJobDesc desc = message.payload.desc;
    try {
        switch (message.payload.messageType) {
        case PREPARE_GLOBAL_MESSAGE:
        case PREPARE_MESSAGE:
            PrepareMessage prepareMessage = (PrepareMessage) message.payload;
            logger.debug("Preparing, {}", prepareMessage);
            List<ColumnFamilyStore> columnFamilyStores = new ArrayList<>(prepareMessage.cfIds.size());
            for (UUID cfId : prepareMessage.cfIds) {
                Pair<String, String> kscf = Schema.instance.getCF(cfId);
                ColumnFamilyStore columnFamilyStore = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);
                columnFamilyStores.add(columnFamilyStore);
            }
            CassandraVersion peerVersion = SystemKeyspace.getReleaseVersion(message.from);
            // note that we default isGlobal to true since old versions always defaulted to true:
            boolean isGlobal = peerVersion == null
                    || peerVersion.compareTo(ActiveRepairService.SUPPORTS_GLOBAL_PREPARE_FLAG_VERSION) < 0
                    || message.payload.messageType.equals(RepairMessage.Type.PREPARE_GLOBAL_MESSAGE);
            logger.debug("Received prepare message: global message = {}, peerVersion = {},",
                    message.payload.messageType.equals(RepairMessage.Type.PREPARE_GLOBAL_MESSAGE), peerVersion);
            ActiveRepairService.instance.registerParentRepairSession(prepareMessage.parentRepairSession,
                    columnFamilyStores, prepareMessage.ranges, prepareMessage.isIncremental, isGlobal);
            MessagingService.instance().sendReply(new MessageOut(MessagingService.Verb.INTERNAL_RESPONSE), id,
                    message.from);
            break;

        case SNAPSHOT:
            logger.debug("Snapshotting {}", desc);
            final ColumnFamilyStore cfs = Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily);
            final Range<Token> repairingRange = desc.range;
            Set<SSTableReader> snapshottedSSSTables = cfs.snapshot(desc.sessionId.toString(),
                    new Predicate<SSTableReader>() {
                        public boolean apply(SSTableReader sstable) {
                            return sstable != null && !(sstable.partitioner instanceof LocalPartitioner) && // exclude SSTables from 2i
                            new Bounds<>(sstable.first.getToken(), sstable.last.getToken())
                                    .intersects(Collections.singleton(repairingRange));
                        }
                    }, true); //ephemeral snapshot, if repair fails, it will be cleaned next startup

            Set<SSTableReader> currentlyRepairing = ActiveRepairService.instance
                    .currentlyRepairing(cfs.metadata.cfId, desc.parentSessionId);
            if (!Sets.intersection(currentlyRepairing, snapshottedSSSTables).isEmpty()) {
                // clear snapshot that we just created
                cfs.clearSnapshot(desc.sessionId.toString());
                logger.error("Cannot start multiple repair sessions over the same sstables");
                MessageOut reply = new MessageOut(MessagingService.Verb.INTERNAL_RESPONSE)
                        .withParameter(MessagingService.FAILURE_RESPONSE_PARAM, MessagingService.ONE_BYTE);
                MessagingService.instance().sendReply(reply, id, message.from);
                return;
            }
            ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId)
                    .addSSTables(cfs.metadata.cfId, snapshottedSSSTables);
            logger.debug("Enqueuing response to snapshot request {} to {}", desc.sessionId, message.from);
            MessagingService.instance().sendReply(new MessageOut(MessagingService.Verb.INTERNAL_RESPONSE), id,
                    message.from);
            break;

        case VALIDATION_REQUEST:
            ValidationRequest validationRequest = (ValidationRequest) message.payload;
            logger.debug("Validating {}", validationRequest);
            // trigger read-only compaction
            ColumnFamilyStore store = Keyspace.open(desc.keyspace).getColumnFamilyStore(desc.columnFamily);

            Validator validator = new Validator(desc, message.from, validationRequest.gcBefore);
            CompactionManager.instance.submitValidation(store, validator);
            break;

        case SYNC_REQUEST:
            // forwarded sync request
            SyncRequest request = (SyncRequest) message.payload;
            logger.debug("Syncing {}", request);
            long repairedAt = ActiveRepairService.UNREPAIRED_SSTABLE;
            if (desc.parentSessionId != null
                    && ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId) != null)
                repairedAt = ActiveRepairService.instance.getParentRepairSession(desc.parentSessionId)
                        .getRepairedAt();

            StreamingRepairTask task = new StreamingRepairTask(desc, request, repairedAt);
            task.run();
            break;

        case ANTICOMPACTION_REQUEST:
            AnticompactionRequest anticompactionRequest = (AnticompactionRequest) message.payload;
            logger.debug("Got anticompaction request {}", anticompactionRequest);
            ListenableFuture<?> compactionDone = ActiveRepairService.instance.doAntiCompaction(
                    anticompactionRequest.parentRepairSession, anticompactionRequest.successfulRanges);
            compactionDone.addListener(new Runnable() {
                @Override
                public void run() {
                    MessagingService.instance().sendReply(
                            new MessageOut(MessagingService.Verb.INTERNAL_RESPONSE), id, message.from);
                }
            }, MoreExecutors.sameThreadExecutor());
            break;

        case CLEANUP:
            logger.debug("cleaning up repair");
            CleanupMessage cleanup = (CleanupMessage) message.payload;
            ActiveRepairService.instance.removeParentRepairSession(cleanup.parentRepairSession);
            MessagingService.instance().sendReply(new MessageOut(MessagingService.Verb.INTERNAL_RESPONSE), id,
                    message.from);
            break;

        default:
            ActiveRepairService.instance.handleMessage(message.from, message.payload);
            break;
        }
    } catch (Exception e) {
        logger.error("Got error, removing parent repair session");
        if (desc != null && desc.parentSessionId != null)
            ActiveRepairService.instance.removeParentRepairSession(desc.parentSessionId);
        throw new RuntimeException(e);
    }
}