Example usage for com.google.common.collect Multimap values

List of usage examples for com.google.common.collect Multimap values

Introduction

On this page you can find example usage for com.google.common.collect.Multimap.values().

Prototype

Collection<V> values();

Document

Returns a view collection containing the value from each key-value pair contained in this multimap, without collapsing duplicates (so values().size() == size()).
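
Before the project listings, here is a minimal, self-contained sketch (not taken from any of the source files below; the class name MultimapValuesDemo and the example data are illustrative only) of the view semantics described above: values() keeps duplicates and writes through to the underlying multimap.

import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Multimap;

public class MultimapValuesDemo {
    public static void main(String[] args) {
        // LinkedListMultimap keeps entry insertion order, so the output below is deterministic
        Multimap<String, Integer> scores = LinkedListMultimap.create();
        scores.put("alice", 10);
        scores.put("alice", 10); // duplicate key-value pair is kept
        scores.put("bob", 7);

        // values() is a view over every key-value pair, so duplicates are not collapsed
        System.out.println(scores.values());        // [10, 10, 7]
        System.out.println(scores.values().size()); // 3, same as scores.size()

        // Because it is a view, removing from it updates the underlying multimap
        scores.values().remove(10);
        System.out.println(scores);                 // {alice=[10], bob=[7]}
    }
}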

Usage

From source file:edu.harvard.med.screensaver.ui.cherrypickrequests.CherryPickRequestDetailViewer.java

private boolean doWarnOnDeprecatedWells(CherryPickRequest cherryPickRequest) {
    Multimap<AdministrativeActivity, WellKey> wellDeprecations = TreeMultimap.create();
    for (LabCherryPick labCherryPick : cherryPickRequest.getLabCherryPicks()) {
        Well well = labCherryPick.getSourceWell();
        if (well.isDeprecated()) {
            wellDeprecations.put(well.getDeprecationActivity(), well.getWellKey());
        }
        well = labCherryPick.getScreenerCherryPick().getScreenedWell();
        if (well.isDeprecated()) {
            wellDeprecations.put(well.getDeprecationActivity(), well.getWellKey());
        }
    }
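    // note: wellDeprecations.values() below contains the well keys for every deprecation activity, not only the current one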
    for (AdministrativeActivity deprecationActivity : wellDeprecations.keySet()) {
        showMessage("cherryPicks.deprecatedWells", deprecationActivity.getComments(),
                Joiner.on(", ").join(wellDeprecations.values()));
        return true;
    }
    return false;
}

From source file:com.puppetlabs.geppetto.graph.dependency.DependencyDataCalculator.java

/**
 * Calculates dependency data and returns a map from Modulefiles to ModuleNodeData.
 *
 * @param moduleData
 * @param exportData
 * @return
 */
public Map<File, ModuleNodeData> calculateDependencyData(File root,
        Multimap<ModuleName, MetadataInfo> moduleData, AllModuleReferences exportData) {

    // create node data for all existing modules and check if there are ambiguities
    Multimap<ModuleName, ModuleNodeData> processedModules = ArrayListMultimap.create();
    for (MetadataInfo mi : moduleData.values()) {
        Metadata m = mi.getMetadata();
        ModuleNodeData mnd = ModuleNodeData.existing(m.getName(), m.getVersion(), mi.isRole(), toHREF_URL(mi));
        moduleNodeData.put(mi, mnd);
        processedModules.put(m.getName(), mnd);
    }
    for (ModuleName key : processedModules.keySet()) {
        Collection<ModuleNodeData> modules = processedModules.get(key);
        if (modules.size() > 1) {
            int counter = 0;
            for (ModuleNodeData mnd : modules)
                mnd.setAmbiguous(++counter);
        }
    }
    // moduleData is keyed by "fullName" to lower case

    // Create pseudo module for non modular content
    nonModularNode = ModuleNodeData.root(root);
    pptpNode = new ModuleNodeData(ModuleName.create("root", "puppet", false), null, ModuleType.PPTP, ""); // will not be rendered

    // create module nodes for missing (unsatisfied) dependencies,
    // unless the dependency is to a represented module name but the version is not matched
    // (in which case the unmatched but existing node is used);
    // if a dependency appears more than once, use the first (skip the rest with the same name)
    for (MetadataInfo mi : moduleData.values()) {
        final ModuleNodeData a = moduleNodeData.get(mi);
        Set<ModuleName> processed = Sets.newHashSet();
        for (Dependency d : mi.getUnresolvedDependencies()) {
            final ModuleName name = d.getName();
            if (!processed.add(name))
                continue;
            Collection<MetadataInfo> existingVersions = moduleData.get(name);
            ModuleNodeData b = null;
            if (existingVersions == null || existingVersions.size() < 1) {
                b = moduleNodeData.get(name);
                if (b == null) {
                    // need a node for the missing module
                    b = ModuleNodeData.unresolved(name);
                    // need to generate one that can not be found if name is null
                    moduleNodeData.put(name == null ? ModuleName.create("no", "name", false) : name, b);
                }
            } else {
                // pick (one of) the existing versions (it is actually illegal to have more
                // than one, so just pick the first one).
                MetadataInfo first = Iterables.get(existingVersions, 0);
                b = moduleNodeData.get(first);
            }
            createUnresolvedEdge(a, b, d);
        }
        // Add edges for all resolved dependencies
        for (MetadataInfo.Resolution r : mi.getResolvedDependencies()) {
            createResolvedEdge(a, moduleNodeData.get(r.metadata), r.dependency);
        }
    }
    Map<File, ModuleNodeData> fileIndex = Maps.newHashMap();
    for (Map.Entry<Object, ModuleNodeData> m : moduleNodeData.entrySet()) {
        if (!(m.getKey() instanceof ModuleName)) {
            MetadataInfo mi = (MetadataInfo) m.getKey();
            fileIndex.put(mi.getFile(), m.getValue());
        }
    }
    Map<File, Multimap<File, Export>> ambiguities = exportData.getAmbiguityMap();
    for (Map.Entry<File, Multimap<File, AllModuleReferences.Export>> x : exportData.getImportMap().entrySet()) {
        // get the imported files and their exports
        File fromFile = x.getKey();
        Multimap<File, Export> m = x.getValue();

        // get any ambiguities
        Multimap<File, Export> ambiguitiesForFile = ambiguities.get(fromFile);
        for (File toFile : m.keySet()) {
            createImportEdge(file2Module(fromFile, fileIndex), file2Module(toFile, fileIndex), m.get(toFile),
                    ambiguitiesForFile != null ? ambiguitiesForFile.get(toFile) : null);
        }
    }

    for (File fromFile : exportData.getUnresolvedMap().keySet()) {
        createUnresolvedEdge(file2Module(fromFile, fileIndex), exportData.getUnresolvedMap().get(fromFile));

    }
    return fileIndex;
}

From source file:org.cloudsmith.geppetto.graph.dependency.DependencyDataCalculator.java

/**
 * Calculates dependency data and returns a map from Modulefiles to ModuleNodeData.
 *
 * @param moduleData
 * @param exportData
 * @return
 */
public Map<File, ModuleNodeData> calculateDependencyData(File root,
        Multimap<ModuleName, MetadataInfo> moduleData, AllModuleReferences exportData) {

    // create node data for all existing modules and check if there are ambiguities
    Multimap<ModuleName, ModuleNodeData> processedModules = ArrayListMultimap.create();
    for (MetadataInfo mi : moduleData.values()) {
        Metadata m = mi.getMetadata();
        ModuleNodeData mnd = ModuleNodeData.existing(m.getName(), m.getVersion(), mi.isRole(), toHREF_URL(mi));
        moduleNodeData.put(mi, mnd);
        processedModules.put(m.getName(), mnd);
    }
    for (ModuleName key : processedModules.keySet()) {
        Collection<ModuleNodeData> modules = processedModules.get(key);
        if (modules.size() > 1) {
            int counter = 0;
            for (ModuleNodeData mnd : modules)
                mnd.setAmbiguous(++counter);
        }
    }
    // moduleData is keyed by "fullName" to lower case

    // Create pseudo module for non modular content
    nonModularNode = ModuleNodeData.root(root);
    pptpNode = new ModuleNodeData(new ModuleName("root", "puppet", false), null, ModuleType.PPTP, ""); // will not be rendered

    // create module nodes for missing (unsatisfied) dependencies,
    // unless the dependency is to a represented module name but the version is not matched
    // (in which case the unmatched but existing node is used);
    // if a dependency appears more than once, use the first (skip the rest with the same name)
    for (MetadataInfo mi : moduleData.values()) {
        final ModuleNodeData a = moduleNodeData.get(mi);
        Set<ModuleName> processed = Sets.newHashSet();
        for (Dependency d : mi.getUnresolvedDependencies()) {
            final ModuleName name = d.getName();
            if (!processed.add(name))
                continue;
            Collection<MetadataInfo> existingVersions = moduleData.get(name);
            ModuleNodeData b = null;
            if (existingVersions == null || existingVersions.size() < 1) {
                b = moduleNodeData.get(name);
                if (b == null) {
                    // need a node for the missing module
                    b = ModuleNodeData.unresolved(name);
                    // need to generate one that can not be found if name is null
                    moduleNodeData.put(name == null ? new ModuleName("no", "name", false) : name, b);
                }
            } else {
                // pick (one of) the existing versions (it is actually illegal to have more
                // than one, so just pick the first one).
                MetadataInfo first = Iterables.get(existingVersions, 0);
                b = moduleNodeData.get(first);
            }
            createUnresolvedEdge(a, b, d);
        }
        // Add edges for all resolved dependencies
        for (MetadataInfo.Resolution r : mi.getResolvedDependencies()) {
            createResolvedEdge(a, moduleNodeData.get(r.metadata), r.dependency);
        }
    }
    Map<File, ModuleNodeData> fileIndex = Maps.newHashMap();
    for (Map.Entry<Object, ModuleNodeData> m : moduleNodeData.entrySet()) {
        if (!(m.getKey() instanceof ModuleName)) {
            MetadataInfo mi = (MetadataInfo) m.getKey();
            fileIndex.put(mi.getFile(), m.getValue());
        }
    }
    Map<File, Multimap<File, Export>> ambiguities = exportData.getAmbiguityMap();
    for (Map.Entry<File, Multimap<File, AllModuleReferences.Export>> x : exportData.getImportMap().entrySet()) {
        // get the imported files and their exports
        File fromFile = x.getKey();
        Multimap<File, Export> m = x.getValue();

        // get any ambiguities
        Multimap<File, Export> ambiguitiesForFile = ambiguities.get(fromFile);
        for (File toFile : m.keySet()) {
            createImportEdge(file2Module(fromFile, fileIndex), file2Module(toFile, fileIndex), m.get(toFile),
                    ambiguitiesForFile != null ? ambiguitiesForFile.get(toFile) : null);
        }
    }

    for (File fromFile : exportData.getUnresolvedMap().keySet()) {
        createUnresolvedEdge(file2Module(fromFile, fileIndex), exportData.getUnresolvedMap().get(fromFile));

    }
    return fileIndex;
}

From source file:org.codeqinvest.codechanges.scm.svn.DefaultSvnRevisionsRetriever.java

/**
 * {@inheritDoc}
 */
@Override
@Cacheable("svnRevisions")
public Revisions retrieveRevisions(ScmConnectionSettings connectionSettings, int numberOfCommits)
        throws SVNException {
    log.info("Retrieve revisions on last {} commits for {}", numberOfCommits, connectionSettings);
    final SVNRepository repository = SvnRepositoryFactory.create(connectionSettings);

    final Multimap<String, SvnFileRevision> revisions = ArrayListMultimap.create();
    repository.log(null, repository.getLatestRevision(), 0L, true, true, numberOfCommits,
            new ISVNLogEntryHandler() {

                @Override
                public void handleLogEntry(SVNLogEntry logEntry) throws SVNException {
                    log.debug("Process revision {}", logEntry.getRevision());
                    for (SVNLogEntryPath logEntryPath : logEntry.getChangedPaths().values()) {
                        if (logEntryPath.getCopyPath() != null) {
                            revisions.put(logEntryPath.getPath(), new SvnFileRevision(logEntry.getRevision(),
                                    logEntryPath.getCopyPath(), logEntryPath.getPath()));
                        } else {
                            revisions.put(logEntryPath.getPath(), new SvnFileRevision(logEntry.getRevision(),
                                    logEntryPath.getPath(), logEntryPath.getPath()));
                        }
                    }
                }
            });

    log.info("Found {} changes for last {} commits with connection {}", revisions.values().size(),
            numberOfCommits, connectionSettings);
    return new Revisions(revisions);
}

From source file:org.jboss.errai.ioc.rebind.ioc.graph.impl.DefaultQualifierFactory.java

private Qualifier combineNormal(final NormalQualifier q1, final NormalQualifier q2) {
    final Multimap<Class<? extends Annotation>, AnnotationWrapper> allAnnosByType = HashMultimap.create();
    for (final AnnotationWrapper wrapper : q1.annotations) {
        allAnnosByType.put(wrapper.anno.annotationType(), wrapper);
    }
    for (final AnnotationWrapper wrapper : q2.annotations) {
        allAnnosByType.put(wrapper.anno.annotationType(), wrapper);
    }

    for (final Class<? extends Annotation> annoType : allAnnosByType.keySet()) {
        if (allAnnosByType.get(annoType).size() == 2) {
            final Iterator<AnnotationWrapper> iter = allAnnosByType.get(annoType).iterator();
            throw new RuntimeException("Found two annotations of same type but with different values:\n\t"
                    + iter.next() + "\n\t" + iter.next());
        }
    }

    return getOrCreateQualifier(
            new TreeSet<DefaultQualifierFactory.AnnotationWrapper>(allAnnosByType.values()));
}

From source file:org.opennms.features.topology.plugins.topo.linkd.internal.OspfLinkStatusProvider.java

@Override
protected List<EdgeAlarmStatusSummary> getEdgeAlarmSummaries(List<Integer> linkIds) {
    org.opennms.core.criteria.Criteria criteria = new org.opennms.core.criteria.Criteria(OspfLink.class);
    criteria.addRestriction(new InRestriction("id", linkIds));

    List<OspfLink> links = getOspfLinkDao().findMatching(criteria);
    Multimap<String, EdgeAlarmStatusSummary> summaryMap = HashMultimap.create();
    for (OspfLink sourceLink : links) {
        OnmsNode sourceNode = sourceLink.getNode();
        for (OspfLink targetLink : links) {
            boolean ipAddrCheck = sourceLink.getOspfRemIpAddr().equals(targetLink.getOspfIpAddr())
                    && targetLink.getOspfRemIpAddr().equals(sourceLink.getOspfIpAddr());
            if (ipAddrCheck) {
                summaryMap.put(sourceNode.getNodeId() + ":" + sourceLink.getOspfIfIndex(),
                        new EdgeAlarmStatusSummary(sourceLink.getId(), targetLink.getId(), null));
            }
        }
    }

    List<OnmsAlarm> alarms = getLinkDownAlarms();

    for (OnmsAlarm alarm : alarms) {
        String key = alarm.getNodeId() + ":" + alarm.getIfIndex();
        if (summaryMap.containsKey(key)) {

            Collection<EdgeAlarmStatusSummary> summaries = summaryMap.get(key);
            for (EdgeAlarmStatusSummary summary : summaries) {
                summary.setEventUEI(alarm.getUei());
            }

        }

    }

    return new ArrayList<EdgeAlarmStatusSummary>(summaryMap.values());

}

From source file:org.fenixedu.academic.dto.resourceAllocationManager.PeriodsManagementBean.java

private void populatePeriodsForExecutionYear() {

    periods.clear();

    Multimap<OccupationPeriodType, OccupationPeriodBean> map = HashMultimap.create();

    setDegrees(new ArrayList<ExecutionDegree>(executionYear.getExecutionDegreesSet()));

    Collections.sort(degrees, ExecutionDegree.EXECUTION_DEGREE_COMPARATORY_BY_DEGREE_TYPE_AND_NAME);

    for (ExecutionDegree degree : degrees) {

        Collection<OccupationPeriodReference> references = degree.getOccupationPeriodReferencesSet();

        for (OccupationPeriodReference reference : references) {

            OccupationPeriodBean bean = null;

            for (OccupationPeriodBean periodBean : map.get(reference.getPeriodType())) {
                if (periodBean.getOccupationPeriod().isEqualTo(reference.getOccupationPeriod())) {
                    bean = periodBean;
                    break;
                }
            }

            if (bean == null) {
                bean = new OccupationPeriodBean(reference, idCounter++);
                map.put(reference.getPeriodType(), bean);
            }

            bean.addReference(reference);
        }

    }

    periods.addAll(map.values());

    Collections.sort(periods);

}

From source file:com.puppetlabs.geppetto.validation.impl.DirectoryValidatorImpl.java

/**
 * Calculates containers and sets up an iterable over all files, including the pptp.
 */
private void configureContainers(Multimap<ModuleName, MetadataInfo> moduleData) {
    List<URI> uris = Lists.newArrayList();
    for (File f : ppFiles)
        uris.add(URI.createFileURI(f.getPath()));
    for (File f : rbFiles)
        uris.add(URI.createFileURI(f.getPath()));

    PuppetTarget target = PuppetTarget.forComplianceLevel(options.getComplianceLevel(), false);
    uris.add(target.getPlatformURI());
    URI typesURI = target.getTypesURI();
    if (typesURI != null)
        uris.add(typesURI);

    ppRunner.configureContainers(root, moduleData.values(), uris);
}

From source file:com.ikanow.aleph2.analytics.spark.utils.RddDependencyUtils.java

/** Builds an RDD pipeline
 * @param inputs
 * @param enrichment_pipeline_config
 * @return a validation that, if successful, contains (all generated RDDs, output RDDs only); normally only the second element is needed
 */
public static Validation<String, //(error)
        Tuple2<Map<String, Either<JavaRDD<Tuple2<Long, IBatchRecord>>, JavaRDD<Tuple2<IBatchRecord, Tuple2<Long, IBatchRecord>>>>>, //(list of all RDDs)
                Map<String, JavaRDD<Tuple2<Long, IBatchRecord>>> //(just outputs)
>> buildEnrichmentPipeline(final IAnalyticsContext context, final JavaSparkContext jsc,
        final Multimap<String, JavaPairRDD<Object, Tuple2<Long, IBatchRecord>>> inputs,
        final Collection<EnrichmentControlMetadataBean> enrichment_pipeline_config) {
    // Build the pipeline
    final Validation<String, LinkedHashMap<String, Tuple2<Set<String>, List<EnrichmentControlMetadataBean>>>> maybe_enrichment_pipeline = DependencyUtils
            .buildPipelineOfContainers(inputs.keySet(), enrichment_pipeline_config);

    return maybe_enrichment_pipeline.bind(enrichment_pipeline -> {

        // (2 types of RDD - before and after...)
        final HashMap<String, Either<JavaRDD<Tuple2<Long, IBatchRecord>>, JavaRDD<Tuple2<IBatchRecord, Tuple2<Long, IBatchRecord>>>>> mutable_rdds = new HashMap<>();

        // Insert all the inputs:
        inputs.asMap().entrySet().stream().forEach(kv -> mutable_rdds.put(kv.getKey(), Either.left(
                kv.getValue().stream().reduce((acc1, acc2) -> acc1.union(acc2)).get().map(t2 -> t2._2()))));

        // First pass, find all the groupings:
        // (if _any_ immediately downstream element needs grouping then treat as all do and map the extra element away)
        final Map<String, Collection<String>> jobs_that_need_to_group = enrichment_pipeline.values().stream()
                .distinct().<Tuple2<String, Collection<String>>>flatMap(t2 -> {
                    return t2._2().stream().findFirst().map(e -> Optionals.ofNullable(e.grouping_fields()))
                            .<Stream<Tuple2<String, Collection<String>>>>map(
                                    groupings -> t2._1().stream().map(input -> Tuples._2T(input, groupings)))
                            .orElseGet(Stream::empty);
                }).collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2()));

        // Second pass, do we need $inputs:
        if (enrichment_pipeline.values().stream().distinct()
                .anyMatch(t2 -> t2._1().contains(EnrichmentControlMetadataBean.PREVIOUS_STEP_ALL_INPUTS))) {
            inputs.put(EnrichmentControlMetadataBean.PREVIOUS_STEP_ALL_INPUTS, inputs.values().stream()
                    .reduce((acc1, acc2) -> acc1.union(acc2)).orElse(jsc.emptyRDD().flatMapToPair(__ -> null)));
        }

        // Third/fourth pass, create another mutable state that tells us which enrichers are the furthest downstream
        final Set<String> mutable_enricher_set = new HashSet<>(enrichment_pipeline.values().stream().distinct()
                .<EnrichmentControlMetadataBean>flatMap(t2 -> StreamUtils.stream(t2._2().stream().findFirst())) // (discard inputs)
                .map(e -> e.name()).collect(Collectors.toSet()));
        enrichment_pipeline.values().stream().distinct().forEach(t2 -> {
            final EnrichmentControlMetadataBean control = t2._2().stream().findFirst().get();
            mutable_enricher_set.removeAll(control.dependencies());
        });

        // Fifth (!) pass actually does all the work:

        enrichment_pipeline.values().stream().distinct().filter(t2 -> t2._2().stream().findFirst().isPresent()) // (discard inputs)
                .forEach(t2 -> {
                    final EnrichmentControlMetadataBean control = t2._2().stream().findFirst().get();
                    final boolean upstream_is_grouped = !Optionals.ofNullable(control.grouping_fields())
                            .isEmpty();
                    final Collection<String> downstream_grouping = jobs_that_need_to_group.get(control.name());
                    final boolean downstream_is_grouped = null != downstream_grouping;

                    final boolean to_emit = mutable_enricher_set.contains(control.name());

                    // Get all the inputs:
                    // 4 cases depending on whether upstream/downstream are grouped

                    if (upstream_is_grouped) {
                        // (ignore any inputs that haven't been grouped)
                        final JavaRDD<Tuple2<IBatchRecord, Tuple2<Long, IBatchRecord>>> rdd_inputs = t2._1()
                                .stream().map(dep -> mutable_rdds.get(dep))
                                .filter(rdd_choice -> rdd_choice.isRight())
                                .map(rdd_choice -> rdd_choice.right().value())
                                .reduce((acc1, acc2) -> acc1.union(acc2)).orElseGet(() -> jsc.emptyRDD());

                        if (!downstream_is_grouped) {
                            mutable_rdds.put(control.name(),
                                    Either.left(EnrichmentPipelineService.javaGroupOf(rdd_inputs).mapPartitions(
                                            EnrichmentPipelineService.create(context, to_emit, t2._2())
                                                    .javaInMapPartitionsPostGroup())));
                        } else {
                            mutable_rdds.put(control.name(), Either.right(EnrichmentPipelineService
                                    .javaGroupOf(rdd_inputs)
                                    .mapPartitions(EnrichmentPipelineService.create(context, to_emit, t2._2())
                                            .javaInMapPartitionsPrePostGroup(
                                                    new ArrayList<>(downstream_grouping)))));
                        }
                    } else {
                        // (convert any grouped inputs to ungrouped)
                        final JavaRDD<Tuple2<Long, IBatchRecord>> rdd_inputs = t2._1().stream()
                                .map(dep -> mutable_rdds.get(dep))
                                .map(rdd_choice -> rdd_choice.<JavaRDD<Tuple2<Long, IBatchRecord>>>either(
                                        ungrouped -> ungrouped, grouped -> grouped.map(tt2 -> tt2._2())))
                                .reduce((acc1, acc2) -> acc1.union(acc2)).orElseGet(() -> jsc.emptyRDD());

                        if (!downstream_is_grouped) {
                            mutable_rdds.put(control.name(),
                                    Either.left(rdd_inputs.mapPartitions(EnrichmentPipelineService
                                            .create(context, to_emit, t2._2()).javaInMapPartitions())));
                        } else {
                            mutable_rdds.put(control.name(),
                                    Either.right(rdd_inputs.mapPartitions(EnrichmentPipelineService
                                            .create(context, to_emit, t2._2()).javaInMapPartitionsPreGroup(
                                                    new ArrayList<>(downstream_grouping)))));
                        }
                    }
                });

        return Validation.success(Tuples._2T(mutable_rdds,
                mutable_enricher_set.stream().map(e_name -> Tuples._2T(e_name, mutable_rdds.get(e_name)))
                        .filter(name_rdd -> null != name_rdd._2())
                        .<Tuple2<String, JavaRDD<Tuple2<Long, IBatchRecord>>>>map(
                                name__rdd_choice -> Tuples._2T(
                                        name__rdd_choice._1(),
                                        name__rdd_choice
                                                ._2().either(
                                                        ungrouped -> ungrouped,
                                                        grouped -> grouped.map(t2 -> t2._2))))
                        .collect(Collectors
                                .<Tuple2<String, JavaRDD<Tuple2<Long, IBatchRecord>>>, String, JavaRDD<Tuple2<Long, IBatchRecord>>>toMap(
                                        t2 -> t2._1(), t2 -> t2._2()))));
    });
}

From source file:edu.uci.ics.sourcerer.tools.java.extractor.missing.MissingTypeResolver.java

private Set<Integer> matchClustersToLibraryVersions(Set<Integer> clusters) {
    TaskProgressLogger task = TaskProgressLogger.get();

    task.start("Matching clusters to library versions");
    // Ideally we'd want to minimize the number of extra clusters included and the overlap of clusters between libraries
    // TODO augment this with some measure of how well the cluster matches the missing types: weight the clusters by how many types they contain
    final Multimap<Integer, Integer> lv2c = HashMultimap.create();
    final Multimap<Integer, Integer> c2lv = HashMultimap.create();
    final Map<Integer, Integer> clusterSizes = new HashMap<>();

    // Build the maps
    task.start("Building the maps");
    for (Integer cluster : clusters) {
        for (Integer libraryVersion : findLibraryVersionByCluster.select(cluster)) {
            c2lv.put(cluster, libraryVersion);
            for (Integer clus : findClusterByLibraryVersion.select(libraryVersion)) {
                lv2c.put(libraryVersion, clus);
            }
        }
    }
    task.report("Library versions by cluster: " + c2lv.keySet().size() + " keys, " + c2lv.size() + " entries");
    task.report("Cluster by library version: " + lv2c.keySet().size() + " keys, " + lv2c.size() + " entries");
    for (Integer clusterID : lv2c.values()) {
        if (!clusterSizes.containsKey(clusterID)) {
            clusterSizes.put(clusterID, findTypeCountByCluster.select(clusterID));
        }
    }
    task.finish();

    Set<Integer> coreLibraryVersions = new HashSet<>();
    Set<Integer> coveredClusters = new HashSet<>();

    // Start by picking all the library versions that don't contain extra clusters
    task.start("Checking for core library versions");
    for (Integer libraryVersionID : lv2c.keySet()) {
        boolean noExtra = true;
        for (Integer clusterID : lv2c.get(libraryVersionID)) {
            if (!clusters.contains(clusterID)) {
                noExtra = false;
            }
        }
        if (noExtra) {
            coreLibraryVersions.add(libraryVersionID);
            coveredClusters.addAll(lv2c.get(libraryVersionID));
        }
    }
    if (coveredClusters.retainAll(clusters)) {
        task.report("Retaining should have done nothing");
    }
    task.report(coveredClusters.size() + " of " + clusters.size() + " covered");
    task.finish();

    Set<Integer> finalLibraryVersions = new HashSet<>();
    Set<Integer> clustersToBeCovered = new HashSet<>(clusters);

    // If we covered all the clusters, skip this step
    if (coveredClusters.size() < clusters.size()) {
        task.start("Checking for additional library versions");
        final Set<Integer> missingClusters = new HashSet<>();
        for (Integer clusterID : clusters) {
            if (!coveredClusters.contains(clusterID)) {
                missingClusters.add(clusterID);
            }
        }
        task.report(missingClusters.size() + " missing clusters");
        Set<Integer> additionalLibraryVersions = new HashSet<>();
        // Find each library that can provide missing clusters, 
        // and measure their "cost per cluster" (number of extra clusters - number of clusters provided)
        // let's try measuring cost instead by number of extra types - number of types provided
        for (Integer clusterID : missingClusters) {
            Integer bestLibraryVersionID = null;
            int bestCost = Integer.MAX_VALUE;
            for (Integer libraryVersionID : c2lv.get(clusterID)) {
                Collection<Integer> clus = lv2c.get(libraryVersionID);
                int provided = 0;
                int extra = 0;
                for (Integer cluster : clus) {
                    if (clusters.contains(cluster)) {
                        provided += clusterSizes.get(cluster);
                    } else {
                        extra += clusterSizes.get(cluster);
                    }
                }
                int cost = extra - provided;
                if (cost < bestCost) {
                    bestLibraryVersionID = libraryVersionID;
                    bestCost = cost;
                }
            }
            additionalLibraryVersions.add(bestLibraryVersionID);
        }
        task.report(additionalLibraryVersions.size() + " additional library versions identified");
        task.start("Sorting additional library versions");
        // Sort the additional library versions by the number of additional types they contain
        Integer[] arr = additionalLibraryVersions.toArray(new Integer[additionalLibraryVersions.size()]);
        Arrays.sort(arr, new Comparator<Integer>() {
            @Override
            public int compare(Integer o1, Integer o2) {
                return -Integer.compare(CollectionUtils.intersectionSize(lv2c.get(o1), missingClusters),
                        CollectionUtils.intersectionSize(lv2c.get(o2), missingClusters));
            }
        });
        task.finish();
        task.start("Picking additional library versions");
        // Pick the libraries to actually add
        for (Integer libraryVersionID : arr) {
            Collection<Integer> clus = lv2c.get(libraryVersionID);
            if (CollectionUtils.containsAny(missingClusters, clus)) {
                finalLibraryVersions.add(libraryVersionID);
                missingClusters.removeAll(clus);
                clustersToBeCovered.removeAll(clus);
            }
        }
        task.report("Added " + finalLibraryVersions.size() + " library versions");
        task.finish();
        task.finish();
    }

    task.start("Sorting core library versions");
    // Now order the core libraries by the number of clusters they contain
    Integer[] arr = coreLibraryVersions.toArray(new Integer[coreLibraryVersions.size()]);
    Arrays.sort(arr, new Comparator<Integer>() {
        @Override
        public int compare(Integer o1, Integer o2) {
            return -Integer.compare(lv2c.get(o1).size(), lv2c.get(o2).size());
        }
    });
    task.finish();

    task.start("Picking core library versions");
    // Pick the core libraries to actually add
    for (Integer libraryVersionID : arr) {
        Collection<Integer> clus = lv2c.get(libraryVersionID);
        if (CollectionUtils.containsAny(clustersToBeCovered, clus)) {
            finalLibraryVersions.add(libraryVersionID);
            clustersToBeCovered.removeAll(clus);
        }
    }
    task.finish();

    task.report(finalLibraryVersions.size() + " library versions matched.");
    task.finish();

    return finalLibraryVersions;
}