Example usage for com.google.common.collect Sets intersection

List of usage examples for com.google.common.collect Sets intersection

Introduction

On this page you can find example usage for com.google.common.collect Sets.intersection.

Prototype

public static <E> SetView<E> intersection(final Set<E> set1, final Set<?> set2) 

Document

Returns an unmodifiable view of the intersection of two sets.
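
A minimal sketch of the view semantics (variable names here are illustrative, not taken from the examples below): the returned SetView is unmodifiable, stays backed by both input sets, and therefore reflects later changes to them.

// assumes: import java.util.Set; import com.google.common.collect.Sets;
Set<String> first = Sets.newHashSet("a", "b", "c");
Set<String> second = Sets.newHashSet("b", "c", "d");

Sets.SetView<String> common = Sets.intersection(first, second);
System.out.println(common);          // typically [b, c]; iteration order follows the first set

first.remove("b");                   // the view is live, so it sees changes in the backing sets
System.out.println(common);          // [c]

// common.add("e");                  // unsupported: the view is unmodifiable
Set<String> snapshot = common.immutableCopy();   // detach a stable copy when one is needed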

Usage

From source file:com.siemens.sw360.portal.tags.CompareProject.java

private void renderLinkedProjects(StringBuilder display) {
    if (ensureSomethingTodoAndNoNullLinkedProjects()) {
        Map<String, ProjectRelationship> oldLinkedProjects = old.getLinkedProjects();
        Map<String, ProjectRelationship> updateLinkedProjects = update.getLinkedProjects();
        Sets.SetView<String> removedProjectIds = Sets.difference(oldLinkedProjects.keySet(),
                updateLinkedProjects.keySet());
        Sets.SetView<String> addedProjectIds = Sets.difference(updateLinkedProjects.keySet(),
                oldLinkedProjects.keySet());
        Sets.SetView<String> commonProjectIds = Sets.intersection(updateLinkedProjects.keySet(),
                oldLinkedProjects.keySet());

        renderProjectLinkList(display, oldLinkedProjects, removedProjectIds, "Removed Project links");
        renderProjectLinkList(display, updateLinkedProjects, addedProjectIds, "Added Project links");
        renderProjectLinkListCompare(display, oldLinkedProjects, updateLinkedProjects, commonProjectIds);
    }
}
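
The snippet above pairs Sets.difference both ways with Sets.intersection to diff two keyed maps into removed, added, and common keys. A standalone sketch of the same pattern, with hypothetical map contents:

// assumes: import java.util.Map; import java.util.Set;
// assumes: import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets;
Map<String, String> before = ImmutableMap.of("p1", "CONTAINED", "p2", "RELATED");
Map<String, String> after = ImmutableMap.of("p2", "CONTAINED", "p3", "RELATED");

Set<String> removed = Sets.difference(before.keySet(), after.keySet());    // [p1]
Set<String> added = Sets.difference(after.keySet(), before.keySet());      // [p3]
Set<String> common = Sets.intersection(after.keySet(), before.keySet());   // [p2]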

From source file:uk.ac.open.kmi.iserve.discovery.disco.impl.GenericLogicDiscoverer.java

/**
 * Generic implementation for finding all the Services or Operations that have ALL the given types as inputs or outputs.
 *
 * @param entityType   the MSM URI of the type of entity we are looking for. Only supports Service and Operation.
 * @param relationship the MSM URI of the relationship we are looking for. Only supports hasInput and hasOutput.
 * @param types        the input/output types (modelReferences that is) we are looking for
 * @return a Map mapping operation/services URIs to MatchResults.
 */
private Map<URI, MatchResult> findAll(URI entityType, URI relationship, Set<URI> types) {

    // Ensure that we have been given correct parameters
    if (types == null || types.isEmpty()
            || (!entityType.toASCIIString().equals(MSM.Service.getURI())
                    && !entityType.toASCIIString().equals(MSM.Operation.getURI()))
            || (!relationship.toASCIIString().equals(MSM.hasInput.getURI())
                    && !relationship.toASCIIString().equals(MSM.hasOutput.getURI())
                    && !relationship.toASCIIString().equals(SAWSDL.modelReference.getURI()))) {

        return ImmutableMap.of();
    }

    // Expand the input types to get all that match enough to be consumed
    // The structure is: <OriginalType, MatchingType, MatchResult>
    Table<URI, URI, MatchResult> expandedTypes;
    if (relationship.toASCIIString().equals(SAWSDL.modelReference.getURI())) {
        expandedTypes = HashBasedTable.create();
        for (URI type : types) {
            expandedTypes.putAll(this.conceptMatcher.listMatchesAtMostOfType(ImmutableSet.of(type),
                    LogicConceptMatchType.Subsume));
            expandedTypes.putAll(
                    this.conceptMatcher.listMatchesOfType(ImmutableSet.of(type), LogicConceptMatchType.Exact));
        }
    } else {
        expandedTypes = this.conceptMatcher.listMatchesAtLeastOfType(types, LogicConceptMatchType.Plugin);
    }

    // Track all the results in a multimap to push the details up the stack
    ListMultimap<URI, MatchResult> result = ArrayListMultimap.create();

    // Do the intersection of those operations that can consume each of the inputs separately
    boolean firstTime = true;
    Map<URI, MatchResult> intermediateMatches;
    Map<URI, Map<URI, MatchResult>> rowMap = expandedTypes.rowMap();
    // For each original type
    for (URI inputType : rowMap.keySet()) {
        // obtain those entities that match any of the expanded matching types
        intermediateMatches = findSome(entityType, relationship, rowMap.get(inputType).keySet());
        if (firstTime) {
            // Add all entries
            firstTime = false;
            for (Map.Entry<URI, MatchResult> entry : intermediateMatches.entrySet()) {
                result.put(entry.getKey(), entry.getValue());
            }
        } else {
            // Put all the values from the intersection
            Set<URI> intersection = Sets.intersection(result.keySet(), intermediateMatches.keySet());
            for (URI opUri : intersection) {
                result.put(opUri, intermediateMatches.get(opUri));
            }

            // Drop all the values from the difference
            // Use an immutable copy since the views will be changed
            Set<URI> difference = Sets.difference(result.keySet(), intermediateMatches.keySet())
                    .immutableCopy();
            for (URI opUri : difference) {
                result.removeAll(opUri);
            }
        }
    }

    // Merge the results into a single map using Union
    return Maps.transformValues(result.asMap(), MatchResultsMerger.INTERSECTION);

}
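
The loop above keeps only the entities that matched every requested type: on each pass it copies values for keys in the intersection of the running result and the new matches, then drops keys in the difference. A reduced sketch of that intersect-then-prune pattern, with made-up keys and values standing in for the original URIs and MatchResults:

// assumes java.util and com.google.common.collect imports
// (List, Map, Sets, ListMultimap, ArrayListMultimap, ImmutableList, ImmutableMap)
ListMultimap<String, Integer> result = ArrayListMultimap.create();
List<Map<String, Integer>> rounds = ImmutableList.of(
        ImmutableMap.of("a", 1, "b", 2),
        ImmutableMap.of("b", 3, "c", 4));

boolean firstTime = true;
for (Map<String, Integer> round : rounds) {
    if (firstTime) {
        firstTime = false;
        round.forEach(result::put);
    } else {
        // keep accumulating values for keys seen in every round so far
        for (String key : Sets.intersection(result.keySet(), round.keySet())) {
            result.put(key, round.get(key));
        }
        // drop keys missing from this round; copy first because removal changes the live view
        for (String key : Sets.difference(result.keySet(), round.keySet()).immutableCopy()) {
            result.removeAll(key);
        }
    }
}
// result now holds only "b" -> [2, 3]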

From source file:org.caleydo.view.crossword.api.model.TypedSet.java

public static TypedSet intersection(BitSetSet a, TypedSet b) {
    if (b.wrappee instanceof BitSetSet) {
        BitSet clone = (BitSet) a.getBitSet().clone();
        clone.and(((BitSetSet) b.wrappee).getBitSet());
        return new TypedSet(new BitSetSet(clone), b.idType);
    }
    // pass b first: the second argument (a) is only used as the containment predicate
    return new TypedSet(ImmutableSet.copyOf(Sets.intersection(b, a)), b.idType);
}
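
The comment above is about argument order: the returned view iterates its first argument and consults the second only for containment checks during that iteration, which is also why the Guava documentation suggests passing the smaller set first when you expect one set to be smaller. A tiny sketch with hypothetical sets:

// assumes: import java.util.HashSet; import java.util.Set;
// assumes: import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets;
Set<Integer> small = ImmutableSet.of(1, 2, 3);
Set<Integer> large = new HashSet<>();
for (int i = 0; i < 1_000_000; i++) {
    large.add(i);
}
// the view walks 'small' and only calls large.contains(), so copying it touches three elements
Set<Integer> copy = Sets.intersection(small, large).immutableCopy();   // [1, 2, 3]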

From source file:org.apache.tephra.hbase.txprune.InvalidListPruningDebugTool.java

/**
 * Returns a set of regions that are live but are neither empty nor have a prune upper bound recorded. These regions
 * will stop the progress of pruning.
 * <p/>
 * Note that this can return false positives in the following case -
 * At time 't' empty regions were recorded, and time 't+1' prune iteration was invoked.
 * Since  a new set of regions was recorded at time 't+1', all regions recorded as empty before time 't + 1' will
 * now be reported as blocking the pruning, even though they are empty. This is because we cannot tell if those
 * regions got any new data between time 't' and 't + 1'.
 *
 * @param numRegions number of regions
 * @param time time in milliseconds or relative time, regions recorded before the given time are returned
 * @return {@link Set} of regions that needs to be compacted and flushed
 */
@Override
@SuppressWarnings("WeakerAccess")
public Set<String> getRegionsToBeCompacted(Integer numRegions, String time) throws IOException {
    // Fetch the live regions at the given time
    RegionsAtTime timeRegion = getRegionsOnOrBeforeTime(time);
    if (timeRegion.getRegions().isEmpty()) {
        return Collections.emptySet();
    }

    Long timestamp = timeRegion.getTime();
    SortedSet<String> regions = timeRegion.getRegions();

    // Get the live regions
    SortedSet<String> liveRegions = getRegionsOnOrBeforeTime(NOW).getRegions();
    // Retain only the live regions
    regions = Sets.newTreeSet(Sets.intersection(liveRegions, regions));

    SortedSet<byte[]> emptyRegions = dataJanitorState.getEmptyRegionsAfterTime(timestamp, null);
    SortedSet<String> emptyRegionNames = new TreeSet<>();
    Iterable<String> regionStrings = Iterables.transform(emptyRegions, TimeRegions.BYTE_ARR_TO_STRING_FN);
    for (String regionString : regionStrings) {
        emptyRegionNames.add(regionString);
    }

    Set<String> nonEmptyRegions = Sets.newHashSet(Sets.difference(regions, emptyRegionNames));

    // Get all pruned regions for the current time and remove them from the nonEmptyRegions,
    // resulting in a set of regions that are not empty and do not have a prune upper bound registered
    List<RegionPruneInfo> prunedRegions = dataJanitorState.getPruneInfoForRegions(null);
    for (RegionPruneInfo prunedRegion : prunedRegions) {
        if (nonEmptyRegions.contains(prunedRegion.getRegionNameAsString())) {
            nonEmptyRegions.remove(prunedRegion.getRegionNameAsString());
        }
    }

    if ((numRegions < 0) || (numRegions >= nonEmptyRegions.size())) {
        return nonEmptyRegions;
    }

    Set<String> subsetRegions = new HashSet<>(numRegions);
    for (String regionName : nonEmptyRegions) {
        if (subsetRegions.size() == numRegions) {
            break;
        }
        subsetRegions.add(regionName);
    }
    return subsetRegions;
}

From source file:org.sosy_lab.cpachecker.util.ci.translators.ApronRequirementsTranslator.java

private Collection<String> getConvexHullRequiredVars(final ApronState pRequirement,
        final @Nullable Collection<String> requiredVars) {
    Set<String> seenRequired = new HashSet<>();
    Set<String> required;
    if (requiredVars == null) {
        required = new HashSet<>();
    } else {
        required = new HashSet<>(requiredVars);
    }
    List<String> varNames = getAllVarNames(pRequirement);
    Tcons0[] constraints = pRequirement.getApronNativeState().toTcons(pRequirement.getManager().getManager());
    List<Set<String>> constraintVars = new ArrayList<>(constraints.length);

    for (Tcons0 constraint : constraints) {
        constraintVars.add(getVarsInConstraint(constraint, varNames));
    }

    Iterator<Set<String>> it = constraintVars.iterator();

    int setSize;
    Set<String> intermediate;

    while (it.hasNext()) {
        intermediate = it.next();
        if (!Sets.intersection(required, intermediate).isEmpty()) {
            setSize = seenRequired.size();
            seenRequired.addAll(intermediate);
            required.addAll(intermediate);

            if (setSize != seenRequired.size()) {
                it = constraintVars.iterator();
            }
        }
    }

    return seenRequired;
}
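
In this example Sets.intersection only serves as an overlap test: the view is computed lazily, so the isEmpty() check can stop as soon as a common element is found. A one-line sketch of that check with hypothetical variable sets:

// assumes: import java.util.Set; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets;
Set<String> required = ImmutableSet.of("x", "y");
Set<String> constraintVars = ImmutableSet.of("y", "z");
boolean sharesVariable = !Sets.intersection(required, constraintVars).isEmpty();   // true, "y" is shared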

From source file:com.facebook.buck.util.Filters.java

/**
 * Takes a list of image files (as paths), and a target density (mdpi, hdpi, xhdpi), and
 * returns a list of files which can be safely left out when building an APK for phones with that
 * screen density. That APK will run on other screens as well but look worse due to scaling.
 * <p>
 * Each combination of non-density qualifiers is processed separately. For example, if we have
 * {@code drawable-hdpi, drawable-mdpi, drawable-xhdpi, drawable-hdpi-ro}, for a target of {@code
 * mdpi}, we'll be keeping {@code drawable-mdpi, drawable-hdpi-ro}.
 * @param candidates list of paths to image files
 * @param targetDensities densities we want to keep
 * @param canDownscale do we have access to an image scaler
 * @return set of files to remove
 */
@VisibleForTesting
static Set<Path> filterByDensity(Collection<Path> candidates, Set<Filters.Density> targetDensities,
        boolean canDownscale) {
    ImmutableSet.Builder<Path> removals = ImmutableSet.builder();

    Table<String, Density, Path> imageValues = HashBasedTable.create();

    // Create mappings for drawables. If candidate == "<base>/drawable-<dpi>-<other>/<filename>",
    // then we'll record a mapping of the form ("<base>/<filename>/<other>", "<dpi>") -> candidate.
    // For example:
    //                                    mdpi                               hdpi
    //                       --------------------------------------------------------------------
    // key: res/some.png/    |  res/drawable-mdpi/some.png          res/drawable-hdpi/some.png
    // key: res/some.png/fr  |                                        res/drawable-fr-hdpi/some.png
    for (Path candidate : candidates) {
        Qualifiers qualifiers = new Qualifiers(candidate);

        String filename = candidate.getFileName().toString();
        Density density = qualifiers.density;
        String resDirectory = candidate.getParent().getParent().toString();
        String key = String.format("%s/%s/%s", resDirectory, filename, qualifiers.others);
        imageValues.put(key, density, candidate);
    }

    for (String key : imageValues.rowKeySet()) {
        Map<Density, Path> options = imageValues.row(key);
        Set<Density> available = options.keySet();

        // This is to make sure we preserve the existing structure of drawable/ files.
        Set<Density> targets = targetDensities;
        if (available.contains(Density.NO_QUALIFIER) && !available.contains(Density.MDPI)) {
            targets = Sets.newHashSet(Iterables.transform(targetDensities, new Function<Density, Density>() {
                @Override
                public Density apply(Density input) {
                    return (input == Density.MDPI) ? Density.NO_QUALIFIER : input;
                }
            }));
        }

        // We intend to keep all available targeted densities.
        Set<Density> toKeep = Sets.newHashSet(Sets.intersection(available, targets));

        // Make sure we have a decent fit for the largest target density.
        Density largestTarget = Density.ORDERING.max(targets);
        if (!available.contains(largestTarget)) {
            Density fallback = null;
            // Downscaling nine-patch drawables would require extra logic, not doing that yet.
            if (canDownscale && !options.values().iterator().next().toString().endsWith(".9.png")) {
                // Highest possible quality, because we'll downscale it.
                fallback = Density.ORDERING.max(available);
            } else {
                // We want to minimize size, so we'll go for the smallest available density that's
                // still larger than the missing one and, missing that, for the largest available.
                for (Density candidate : Density.ORDERING.reverse().sortedCopy(available)) {
                    if (fallback == null || Density.ORDERING.compare(candidate, largestTarget) > 0) {
                        fallback = candidate;
                    }
                }
            }
            toKeep.add(fallback);
        }

        // Mark remaining densities for removal.
        for (Density density : Sets.difference(available, toKeep)) {
            removals.add(options.get(density));
        }
    }

    return removals.build();
}

From source file:com.facebook.presto.sql.planner.optimizations.StreamPreferredProperties.java

public StreamPreferredProperties withPartitioning(Collection<Symbol> partitionSymbols) {
    if (partitionSymbols.isEmpty()) {
        return singleStream();
    }

    Iterable<Symbol> desiredPartitioning = partitionSymbols;
    if (partitioningColumns.isPresent()) {
        if (exactColumnOrder) {
            if (partitioningColumns.get().equals(desiredPartitioning)) {
                return this;
            }
        } else {
            // If there are common columns between our requirements and the desired partitionSymbols, both can be satisfied in one shot
            Set<Symbol> common = Sets.intersection(ImmutableSet.copyOf(desiredPartitioning),
                    ImmutableSet.copyOf(partitioningColumns.get()));

            // If we find common partitioning columns, use them, else use child's partitioning columns
            if (!common.isEmpty()) {
                desiredPartitioning = common;
            }
        }
    }

    return new StreamPreferredProperties(distribution, Optional.of(desiredPartitioning), false);
}

From source file:co.cask.cdap.etl.planner.ConnectorDag.java

/**
 * Insert connector nodes into the dag.
 *
 * A connector node is a boundary at which the pipeline can be split into sub dags.
 * It is treated as a sink within one subdag and as a source in another subdag.
 * A connector is inserted in front of a reduce node (aggregator plugin type, etc)
 * when there is a path from some source to one or more reduce nodes or sinks.
 * This is required because in a single mapper, we can't write to both a sink and do a reduce.
 * We also can't have 2 reducers in a single mapreduce job.
 * A connector is also inserted in front of any node if the inputs into the node come from multiple sources.
 * A connector is also inserted in front of a reduce node that has another reduce node as its input.
 *
 * After splitting, the result will be a collection of subdags, with each subdag representing a single
 * mapreduce job (or possibly map-only job). Or in spark, each subdag would be a series of operations from
 * one rdd to another rdd.
 *
 * @return the nodes that had connectors inserted in front of them
 */
public Set<String> insertConnectors() {
    // none of this is particularly efficient, but this should never be a bottleneck
    // unless we're dealing with very very large dags

    Set<String> addedAlready = new HashSet<>();

    /*
        Isolate the specified node by inserting a connector in front of and behind the node.
        If all inputs into the node are sources, a connector will not be inserted in front.
        If all outputs from the node are sinks, a connector will not be inserted after.
        Other connectors count as both a source and a sink.
     */
    for (String isolationNode : isolationNodes) {
        isolate(isolationNode, addedAlready);
    }

    /*
        Find sections of the dag where a source is writing to both a sink and a reduce node
        or to multiple reduce nodes. a connector counts as both a source and a sink.
            
        for example, if a source is writing to both a sink and a reduce:
            
                    |---> sink1
          source ---|
                    |---> reduce ---> sink2

        we need to split this up into:

                    |---> sink1
          source ---|                    =>     connector ---> reduce ---> sink2
                    |---> connector
            
        The same logic applies if a source is writing to multiple reduce nodes. So if we run into this scenario,
        we will add a connector in front of all reduce nodes accessible from the source.
        When trying to find a path from a source to multiple reduce nodes, we also need to stop searching
        once we see a reduce node or a connector. Otherwise, every single reduce node would end up
        with a connector in front of it.
     */
    for (String node : getTopologicalOrder()) {
        if (!sources.contains(node) && !connectors.contains(node)) {
            continue;
        }

        Set<String> accessibleByNode = accessibleFrom(node, Sets.union(connectors, reduceNodes));
        Set<String> sinksAndReduceNodes = Sets.intersection(accessibleByNode,
                Sets.union(connectors, Sets.union(sinks, reduceNodes)));
        // don't count this node
        sinksAndReduceNodes = Sets.difference(sinksAndReduceNodes, ImmutableSet.of(node));

        if (sinksAndReduceNodes.size() > 1) {
            for (String reduceNodeConnector : Sets.intersection(sinksAndReduceNodes, reduceNodes)) {
                addConnectorInFrontOf(reduceNodeConnector, addedAlready);
            }
        }
    }

    /*
        Find nodes that have input from multiple sources and add them to the connectors set.
        We can probably remove this part once we support multiple sources. Even though we don't support
        multiple sources today, the fact that we support forks means we have to deal with the multi-input case
        and break it down into separate phases. For example:
            
                |---> reduce1 ---|
          n1 ---|                |---> n2
                |---> reduce2 ---|

        From the previous section, both reduces will get a connector inserted in front:

                |---> reduce1.connector          reduce1.connector ---> reduce1 ---|
          n1 ---|                            =>                                     |---> n2
                |---> reduce2.connector          reduce2.connector ---> reduce2 ---|
            
        Since we don't support multi-input yet, we need to convert that further into 3 phases:
            
          reduce1.connector ---> reduce1 ---> n2.connector
                                                            =>       n2.connector ---> n2
          reduce2.connector ---> reduce2 ---> n2.connector
            
        To find these nodes, we traverse the graph in order and keep track of sources that have a path to each node
        with a map of node -> [ sources that have a path to the node ]
        if we find that a node is accessible by more than one source, we insert a connector in front of it and
        reset all sources for that node to its connector
     */
    SetMultimap<String, String> nodeSources = HashMultimap.create();
    for (String source : sources) {
        nodeSources.put(source, source);
    }
    for (String node : getTopologicalOrder()) {
        Set<String> connectedSources = nodeSources.get(node);
        /*
            If this node is a connector, replace all sources for this node with itself, since a connector is a source
            Taking the example above, we end up with:
                
              reduce1.connector ---> reduce1 ---|
                                                |---> n2
              reduce2.connector ---> reduce2 ---|
                
            When we get to n2, we need it to see that it has 2 sources: reduce1.connector and reduce2.connector
            So when we get to reduce1.connector, we need to replace its source (n1) with itself.
            Similarly, when we get to reduce2.connector, we need to replace its source (n1) with itself.
            If we didn't, when we got to n2, it would think its only source is n1, and we would
            miss the connector that should be inserted in front of it.
         */
        if (connectors.contains(node)) {
            connectedSources = new HashSet<>();
            connectedSources.add(node);
            nodeSources.replaceValues(node, connectedSources);
        }
        // if more than one source is connected to this node, then we need to insert a connector in front of this node.
        // its source should then be changed to the connector that was inserted in front of it.
        if (connectedSources.size() > 1) {
            String connectorNode = addConnectorInFrontOf(node, addedAlready);
            connectedSources = new HashSet<>();
            connectedSources.add(connectorNode);
            nodeSources.replaceValues(node, connectedSources);
        }
        for (String nodeOutput : getNodeOutputs(node)) {
            // propagate the source connected to me to all my outputs
            nodeSources.putAll(nodeOutput, connectedSources);
        }
    }

    /*
        Find reduce nodes that are accessible from other reduce nodes. For example:
            
          source ---> reduce1 ---> reduce2 ---> sink
            
        Needs to be broken down into:
            
          source ---> reduce1 ---> reduce2.connector      =>     reduce2.connector ---> reduce2 ---> sink
     */
    for (String reduceNode : reduceNodes) {
        Set<String> accessibleByNode = accessibleFrom(reduceNode, Sets.union(connectors, reduceNodes));
        Set<String> accessibleReduceNodes = Sets.intersection(accessibleByNode, reduceNodes);

        // Sets.difference because we don't want to add ourselves
        accessibleReduceNodes = Sets.difference(accessibleReduceNodes, ImmutableSet.of(reduceNode));
        for (String accessibleReduceNode : accessibleReduceNodes) {
            addConnectorInFrontOf(accessibleReduceNode, addedAlready);
        }
    }

    return addedAlready;
}

From source file:org.apache.heron.scheduler.dryrun.UpdateTableDryRunRenderer.java

private String renderContainerDiffView(int containerId, ContainersDiffView diffView) {
    StringBuilder builder = new StringBuilder();
    Optional<PackingPlan.ContainerPlan> oldPackingPlan = diffView.getOldPlan();
    Optional<PackingPlan.ContainerPlan> newPackingPlan = diffView.getNewPlan();
    String header = String.format("%s ", formatter.renderContainerName(containerId));
    builder.append(header);
    // Container exists in both old and new packing plan
    if (oldPackingPlan.isPresent() && newPackingPlan.isPresent()) {
        PackingPlan.ContainerPlan newContainerPlan = newPackingPlan.get();
        PackingPlan.ContainerPlan oldContainerPlan = oldPackingPlan.get();
        // Container plan did not change
        if (newContainerPlan.equals(oldContainerPlan)) {
            builder.append(formatter.renderContainerChange(ContainerChange.UNAFFECTED) + "\n");
            String resourceUsage = formatter.renderResourceUsage(newContainerPlan.getRequiredResource());
            List<Row> rows = new ArrayList<>();
            for (PackingPlan.InstancePlan plan : newContainerPlan.getInstances()) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.DEFAULT, TextStyle.DEFAULT));
            }
            String containerTable = formatter.renderOneContainer(rows);
            builder.append(resourceUsage + "\n");
            builder.append(containerTable);
        } else {
            // Container plan has changed
            String resourceUsage = formatter.renderResourceUsageChange(oldContainerPlan.getRequiredResource(),
                    newContainerPlan.getRequiredResource());
            Set<PackingPlan.InstancePlan> oldInstancePlans = oldContainerPlan.getInstances();
            Set<PackingPlan.InstancePlan> newInstancePlans = newContainerPlan.getInstances();
            Set<PackingPlan.InstancePlan> unchangedPlans = Sets.intersection(oldInstancePlans, newInstancePlans)
                    .immutableCopy();
            Set<PackingPlan.InstancePlan> newPlans = Sets.difference(newInstancePlans, oldInstancePlans);
            Set<PackingPlan.InstancePlan> removedPlans = Sets.difference(oldInstancePlans, newInstancePlans);
            List<Row> rows = new ArrayList<>();
            for (PackingPlan.InstancePlan plan : unchangedPlans) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.DEFAULT, TextStyle.DEFAULT));
            }
            for (PackingPlan.InstancePlan plan : newPlans) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.GREEN, TextStyle.DEFAULT));
            }
            for (PackingPlan.InstancePlan plan : removedPlans) {
                rows.add(formatter.rowOfInstancePlan(plan, TextColor.RED, TextStyle.STRIKETHROUGH));
            }
            builder.append(formatter.renderContainerChange(ContainerChange.MODIFIED) + "\n");
            builder.append(resourceUsage + "\n");
            String containerTable = formatter.renderOneContainer(rows);
            builder.append(containerTable);
        }
    } else if (oldPackingPlan.isPresent()) {
        // Container has been removed
        PackingPlan.ContainerPlan oldContainerPlan = oldPackingPlan.get();
        List<Row> rows = new ArrayList<>();
        for (PackingPlan.InstancePlan plan : oldContainerPlan.getInstances()) {
            rows.add(formatter.rowOfInstancePlan(plan, TextColor.RED, TextStyle.STRIKETHROUGH));
        }
        builder.append(formatter.renderContainerChange(ContainerChange.REMOVED) + "\n");
        builder.append(formatter.renderResourceUsage(oldContainerPlan.getRequiredResource()) + "\n");
        builder.append(formatter.renderOneContainer(rows));
    } else if (newPackingPlan.isPresent()) {
        // New container has been added
        PackingPlan.ContainerPlan newContainerPlan = newPackingPlan.get();
        List<Row> rows = new ArrayList<>();
        for (PackingPlan.InstancePlan plan : newContainerPlan.getInstances()) {
            rows.add(formatter.rowOfInstancePlan(plan, TextColor.GREEN, TextStyle.DEFAULT));
        }
        builder.append(formatter.renderContainerChange(ContainerChange.NEW) + "\n");
        builder.append(formatter.renderResourceUsage(newContainerPlan.getRequiredResource()) + "\n");
        builder.append(formatter.renderOneContainer(rows));
    } else {
        throw new RuntimeException(
                "Unexpected error: either new container plan or old container plan has to exist");
    }
    return builder.toString();
}

From source file:com.google.security.zynamics.binnavi.debug.models.breakpoints.BreakpointManager.java

/**
 * This function enforces the type hierarchy of breakpoints.
 *
 * @param addresses The set of addresses for the breakpoints to be added.
 * @param type The type of the breakpoints to be added.
 *
 * @return The Set of breakpoints which has been set.
 */
private Set<BreakpointAddress> enforceBreakpointHierarchy(final Set<BreakpointAddress> addresses,
        final BreakpointType type) {
    final SetView<BreakpointAddress> alreadyRegularBreakpoints = Sets.intersection(addresses,
            indexedBreakpointStorage.getBreakPointAddresses());
    final SetView<BreakpointAddress> alreadySteppingBreakpoints = Sets.intersection(addresses,
            stepBreakpointStorage.getBreakPointAddresses());
    final SetView<BreakpointAddress> alreadyEchoBreakpoints = Sets.intersection(addresses,
            echoBreakpointStorage.getBreakPointAddresses());

    Set<BreakpointAddress> addressesSet = null;

    switch (type) {
    case REGULAR:
        final SetView<BreakpointAddress> notInRegularBreakpoints = Sets.difference(addresses,
                indexedBreakpointStorage.getBreakPointAddresses());
        removeBreakpoints(alreadySteppingBreakpoints, stepBreakpointStorage);
        removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
        addressesSet = notInRegularBreakpoints;
        break;

    case STEP:
        final SetView<BreakpointAddress> notInSteppingBreakpoints = Sets.difference(addresses,
                stepBreakpointStorage.getBreakPointAddresses());
        removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
        addressesSet = Sets.difference(notInSteppingBreakpoints, alreadyRegularBreakpoints);
        break;

    case ECHO:
        final SetView<BreakpointAddress> notInEchoBreakPoints = Sets.difference(addresses,
                echoBreakpointStorage.getBreakPointAddresses());
        addressesSet = Sets.difference(notInEchoBreakPoints,
                Sets.union(alreadySteppingBreakpoints, alreadyRegularBreakpoints));
        break;
    default:
        throw new IllegalStateException("IE00722: Breakpoint of invalid type");

    }
    return addressesSet;
}