Example usage for com.google.common.collect Multimap keySet

Introduction

This page collects example usages of com.google.common.collect Multimap.keySet() from open-source projects.

Prototype

Set<K> keySet();

Document

Returns a view collection of all distinct keys contained in this multimap.
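
The returned set is a view of the multimap: each distinct key appears exactly once, and removing a key from the set removes all of that key's values from the backing multimap (adding to the set is not supported). A minimal sketch of both behaviors, using a HashMultimap:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Set;

public class MultimapKeySetDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 1);
        scores.put("alice", 2);
        scores.put("bob", 3);

        // Each distinct key appears once, no matter how many values it maps to.
        Set<String> keys = scores.keySet();
        System.out.println(keys.size()); // 2

        // The set is a live view: removing a key drops all of its entries.
        keys.remove("alice");
        System.out.println(scores); // {bob=[3]}
    }
}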

Usage

From source file:org.gradle.plugins.ide.internal.IdeDependenciesExtractor.java

private static void downloadAuxiliaryArtifacts(DependencyHandler dependencyHandler,
        Multimap<ComponentIdentifier, IdeExtendedRepoFileDependency> dependencies,
        List<Class<? extends Artifact>> artifactTypes) {
    if (artifactTypes.isEmpty()) {
        return;
    }

    ArtifactResolutionQuery query = dependencyHandler.createArtifactResolutionQuery();
    query.forComponents(dependencies.keySet());

    @SuppressWarnings("unchecked")
    Class<? extends Artifact>[] artifactTypesArray = (Class<? extends Artifact>[]) new Class<?>[artifactTypes
            .size()];
    query.withArtifacts(JvmLibrary.class, artifactTypes.toArray(artifactTypesArray));
    Set<ComponentArtifactsResult> componentResults = query.execute().getResolvedComponents();
    for (ComponentArtifactsResult componentResult : componentResults) {
        for (IdeExtendedRepoFileDependency dependency : dependencies.get(componentResult.getId())) {
            for (ArtifactResult sourcesResult : componentResult.getArtifacts(SourcesArtifact.class)) {
                if (sourcesResult instanceof ResolvedArtifactResult) {
                    dependency.addSourceFile(((ResolvedArtifactResult) sourcesResult).getFile());
                }
            }

            for (ArtifactResult javadocResult : componentResult.getArtifacts(JavadocArtifact.class)) {
                if (javadocResult instanceof ResolvedArtifactResult) {
                    dependency.addJavadocFile(((ResolvedArtifactResult) javadocResult).getFile());
                }
            }
        }
    }
}

From source file:io.covert.dns.storage.accumulo.mutgen.EdgeMutationGeneratorFactory.java

public static void configure(Job job, String table, String dataType, boolean bidirectional,
        boolean univariateStats, Multimap<String, String> edges) {
    Configuration conf = job.getConfiguration();

    conf.set("edge.mutation.generator.table", table);
    conf.set("edge.mutation.generator.data.type", dataType);

    conf.setBoolean("edge.mutation.generator.bidirection", bidirectional);
    conf.setBoolean("edge.mutation.generator.univar.stats", univariateStats);

    StringBuilder s = new StringBuilder();
    boolean first = true;
    for (String name1 : edges.keySet()) {
        for (String name2 : edges.get(name1)) {
            if (first) {
                first = false;
                s.append(name1).append(":").append(name2);
            } else {
                s.append(",").append(name1).append(":").append(name2);
            }
        }
    }
    conf.set("edge.mutation.generator.edges", s.toString());
}

From source file:com.torodb.torod.db.backends.meta.routines.DeleteDocuments.java

public static int execute(Configuration configuration, CollectionSchema colSchema,
        Multimap<DocStructure, Integer> didsByStructure, @Nonnull DatabaseInterface databaseInterface)
        throws SQLException, RetryTransactionException {
    TableProvider tableProvider = new TableProvider(colSchema);

    DSLContext dsl = DSL.using(configuration);

    Set<SubDocTable> tables = Sets.newHashSet();
    for (DocStructure structure : didsByStructure.keySet()) {
        tables.clear();
        structure.accept(tableProvider, tables);

        executeDeleteSubDocuments(dsl, tables, didsByStructure.get(structure), databaseInterface);
    }

    Set<Integer> dids = Sets.newHashSet(didsByStructure.values());
    return executeDeleteRoots(dsl, colSchema, dids, databaseInterface);
}

From source file:org.lealone.cluster.dht.RangeStreamer.java

/**
 * @param rangesWithSources The ranges we want to fetch (key) and their potential sources (value)
 * @param sourceFilters A (possibly empty) collection of source filters to apply. 
 *                      In addition to any filters given here, we always exclude ourselves.
 * @param dbName database name
 * @return Map of source endpoint to collection of ranges
 */
private static Multimap<InetAddress, Range<Token>> getRangeFetchMap(
        Multimap<Range<Token>, InetAddress> rangesWithSources, Collection<ISourceFilter> sourceFilters,
        String dbName) {
    Multimap<InetAddress, Range<Token>> rangeFetchMapMap = HashMultimap.create();
    for (Range<Token> range : rangesWithSources.keySet()) {
        boolean foundSource = false;

        outer: for (InetAddress address : rangesWithSources.get(range)) {
            if (address.equals(Utils.getBroadcastAddress())) {
                // If localhost is a source, we have found one, but we don't add it to the map to avoid streaming
                // locally
                foundSource = true;
                continue;
            }

            for (ISourceFilter filter : sourceFilters) {
                if (!filter.shouldInclude(address))
                    continue outer;
            }

            rangeFetchMapMap.put(address, range);
            foundSource = true;
            break; // ensure we only stream from one other node for each range
        }

        if (!foundSource)
            throw new IllegalStateException("unable to find sufficient sources for streaming range " + range
                    + " in database " + dbName);
    }

    return rangeFetchMapMap;
}

From source file:org.apache.cassandra.dht.RangeStreamer.java

/**
 * @param rangesWithSources The ranges we want to fetch (key) and their potential sources (value)
 * @param sourceFilters A (possibly empty) collection of source filters to apply. In addition to any filters given
 *                      here, we always exclude ourselves.
 * @param keyspace keyspace name
 * @return Map of source endpoint to collection of ranges
 */
private static Multimap<InetAddress, Range<Token>> getRangeFetchMap(
        Multimap<Range<Token>, InetAddress> rangesWithSources, Collection<ISourceFilter> sourceFilters,
        String keyspace) {
    Multimap<InetAddress, Range<Token>> rangeFetchMapMap = HashMultimap.create();
    for (Range<Token> range : rangesWithSources.keySet()) {
        boolean foundSource = false;

        outer: for (InetAddress address : rangesWithSources.get(range)) {
            if (address.equals(FBUtilities.getBroadcastAddress())) {
                // If localhost is a source, we have found one, but we don't add it to the map to avoid streaming locally
                foundSource = true;
                continue;
            }

            for (ISourceFilter filter : sourceFilters) {
                if (!filter.shouldInclude(address))
                    continue outer;
            }

            rangeFetchMapMap.put(address, range);
            foundSource = true;
            break; // ensure we only stream from one other node for each range
        }

        if (!foundSource)
            throw new IllegalStateException("unable to find sufficient sources for streaming range " + range
                    + " in keyspace " + keyspace);
    }

    return rangeFetchMapMap;
}

From source file:com.liveramp.hank.storage.cueball.Cueball.java

public static DiskPartitionAssignment getDataDirectoryAssignments(DataDirectoriesConfigurator configurator,
        Collection<Integer> partitionNumbers) {

    ArrayList<String> sortedDataDirectories = new ArrayList<String>(configurator.getDataDirectories());
    Collections.sort(sortedDataDirectories);

    LinkedList<Integer> sortedPartitions = new LinkedList<>(partitionNumbers);
    Collections.sort(sortedPartitions);

    //  TODO we can make this dynamic based on disk size, but not urgent
    double numPartitionsPerDisk = (double) partitionNumbers.size() / sortedDataDirectories.size();

    Multimap<String, Integer> partitionsPerDisk = HashMultimap.create();
    for (String dataDirectory : sortedDataDirectories) {

        int numToAssign = (int) Math.ceil(numPartitionsPerDisk * (partitionsPerDisk.keySet().size() + 1))
                - partitionsPerDisk.values().size();

        for (int i = 0; i < numToAssign && !sortedPartitions.isEmpty(); i++) {
            partitionsPerDisk.put(dataDirectory, sortedPartitions.pop());
        }

    }

    Map<Integer, String> inverse = Maps.newHashMap();
    for (Map.Entry<String, Integer> entry : partitionsPerDisk.entries()) {
        inverse.put(entry.getValue(), entry.getKey());
    }

    return new DiskPartitionAssignment(inverse);
}

From source file:com.google.javascript.jscomp.lint.CheckRequiresSorted.java

/**
 * Canonicalizes a list of import statements by deduplicating and merging imports for the same
 * namespace, and sorting the result.
 */
private static List<ImportStatement> canonicalizeImports(Multimap<String, ImportStatement> importsByNamespace) {
    List<ImportStatement> canonicalImports = new ArrayList<>();

    for (String namespace : importsByNamespace.keySet()) {
        Collection<ImportStatement> allImports = importsByNamespace.get(namespace);

        // Find the strongest primitive across all existing imports. Every emitted import for this
        // namespace will use this primitive. This makes the logic simpler and cannot change runtime
        // behavior, but may produce spurious changes when multiple aliasing imports of differing
        // strength exist (which are already in violation of the style guide).
        ImportPrimitive strongestPrimitive = allImports.stream().map(ImportStatement::primitive)
                .reduce(ImportPrimitive.WEAKEST, ImportPrimitive::stronger);

        // Emit each aliasing import separately, as deduplicating them would require code references
        // to be rewritten.
        boolean hasAliasing = false;
        for (ImportStatement stmt : Iterables.filter(allImports, ImportStatement::isAliasing)) {
            canonicalImports.add(stmt.upgrade(strongestPrimitive));
            hasAliasing = true;
        }

        // Emit a single destructuring import with a non-empty pattern, merged from the existing
        // destructuring imports.
        boolean hasDestructuring = false;
        ImmutableList<Node> destructuringNodes = allImports.stream().filter(ImportStatement::isDestructuring)
                .flatMap(i -> i.nodes().stream()).collect(toImmutableList());
        ImmutableList<DestructuringBinding> destructures = allImports.stream()
                .filter(ImportStatement::isDestructuring).flatMap(i -> i.destructures().stream()).distinct()
                .sorted().collect(toImmutableList());
        if (!destructures.isEmpty()) {
            canonicalImports.add(ImportStatement.of(destructuringNodes, strongestPrimitive, namespace,
                    /* alias= */ null, destructures));
            hasDestructuring = true;
        }

        // Emit a standalone import unless an aliasing or destructuring one already exists.
        if (!hasAliasing && !hasDestructuring) {
            ImmutableList<Node> standaloneNodes = allImports.stream().filter(ImportStatement::isStandalone)
                    .flatMap(i -> i.nodes().stream()).collect(toImmutableList());
            canonicalImports.add(ImportStatement.of(standaloneNodes, strongestPrimitive, namespace,
                    /* alias= */ null, /* destructures= */ null));
        }
    }

    // Sorting by natural order yields the correct result due to the implementation of
    // ImportStatement#compareTo.
    Collections.sort(canonicalImports);

    return canonicalImports;
}

From source file:com.foundationdb.util.Strings.java

public static <T> String toString(Multimap<T, ?> map) {
    StringBuilder sb = new StringBuilder();
    for (Iterator<T> keysIter = map.keySet().iterator(); keysIter.hasNext();) {
        T key = keysIter.next();
        sb.append(key).append(" => ");
        for (Iterator<?> valsIter = map.get(key).iterator(); valsIter.hasNext();) {
            sb.append(valsIter.next());
            if (valsIter.hasNext())
                sb.append(", ");
        }
        if (keysIter.hasNext())
            sb.append(nl());
    }
    return sb.toString();
}
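
Given a multimap such as {a=[1, 2], b=[3]}, this renders one line per key, e.g. "a => 1, 2" and "b => 3". Key and value ordering depend on the multimap implementation, and nl() is presumably the class's newline helper.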

From source file:ai.grakn.graql.internal.reasoner.utils.ReasonerUtils.java

/**
 * Calculates the multimap intersection: intersects the two key sets and accumulates,
 * from both operands, the entries whose keys are in that intersection.
 * @param m1 first operand
 * @param m2 second operand
 * @param <K> map key type
 * @param <V> map value type
 * @return map intersection
 */
public static <K, V> Multimap<K, V> multimapIntersection(Multimap<K, V> m1, Multimap<K, V> m2) {
    Multimap<K, V> intersection = HashMultimap.create();
    Sets.SetView<K> keyIntersection = Sets.intersection(m1.keySet(), m2.keySet());
    Stream.concat(m1.entries().stream(), m2.entries().stream())
            .filter(e -> keyIntersection.contains(e.getKey()))
            .forEach(e -> intersection.put(e.getKey(), e.getValue()));
    return intersection;
}
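
A brief usage sketch with illustrative values:

Multimap<String, Integer> m1 = HashMultimap.create();
m1.put("a", 1);
m1.put("b", 2);
Multimap<String, Integer> m2 = HashMultimap.create();
m2.put("b", 3);
m2.put("c", 4);

// Only "b" appears in both key sets, so both of its entries are kept:
Multimap<String, Integer> result = multimapIntersection(m1, m2); // {b=[2, 3]}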

From source file:com.google.devtools.build.lib.collect.ImmutableSortedKeyListMultimap.java

@SuppressWarnings("unchecked")
public static <K extends Comparable<K>, V> ImmutableSortedKeyListMultimap<K, V> copyOf(Multimap<K, V> data) {
    if (data.isEmpty()) {
        return EMPTY_MULTIMAP;
    }
    if (data instanceof ImmutableSortedKeyListMultimap) {
        return (ImmutableSortedKeyListMultimap<K, V>) data;
    }
    Set<K> keySet = data.keySet();
    int size = keySet.size();
    K[] sortedKeys = (K[]) new Comparable<?>[size];
    int index = 0;
    for (K key : keySet) {
        sortedKeys[index++] = Preconditions.checkNotNull(key);
    }
    Arrays.sort(sortedKeys);
    List<V>[] values = (List<V>[]) new List<?>[size];
    for (int i = 0; i < size; i++) {
        values[i] = ImmutableList.copyOf(data.get(sortedKeys[i]));
    }
    return new ImmutableSortedKeyListMultimap<>(sortedKeys, values);
}