Example usage for com.google.common.collect Multimap putAll

List of usage examples for com.google.common.collect Multimap putAll

Introduction

This page collects example usages of com.google.common.collect Multimap putAll.

Prototype

boolean putAll(@Nullable K key, Iterable<? extends V> values);

Document

Stores a key-value pair in this multimap for each element of values, all using the same key, key.
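
As a quick illustration of that contract, here is a minimal, self-contained sketch; it assumes only Guava on the classpath, and the key and values are invented for the example.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Arrays;

public class PutAllExample {
    public static void main(String[] args) {
        Multimap<String, String> multimap = ArrayListMultimap.create();

        // Each element of the Iterable is stored as a separate entry under the same key.
        boolean changed = multimap.putAll("fruit", Arrays.asList("apple", "banana"));

        System.out.println(changed);               // true - the multimap was modified
        System.out.println(multimap.get("fruit")); // [apple, banana]
    }
}

The boolean return value reports whether the multimap changed as a result of the call, mirroring Collection#addAll.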

Usage

From source file: org.robotframework.ide.eclipse.main.plugin.project.build.validation.KeywordSettingsValidator.java

private Multimap<String, RobotToken> extractArgumentVariables(final UserKeyword keyword,
        final VariableExtractor extractor, final String fileName) {
    final Multimap<String, RobotToken> arguments = ArrayListMultimap.create();

    // first add arguments embedded in name, then from [Arguments] setting
    final Multimap<String, RobotToken> embeddedArguments = VariableNamesSupport
            .extractUnifiedVariables(newArrayList(keyword.getKeywordName()), extractor, fileName);
    for (final String argName : embeddedArguments.keySet()) {
        arguments.putAll(EmbeddedKeywordNamesSupport.removeRegex(argName), embeddedArguments.get(argName));
    }
    for (final KeywordArguments argument : keyword.getArguments()) {
        for (final RobotToken token : argument.getArguments()) {

            final boolean hasDefault = token.getText().contains("=");
            if (hasDefault) {
                final List<String> splitted = Splitter.on('=').limit(2).splitToList(token.getText());
                final String def = splitted.get(0);
                final String unifiedDefinitionName = VariableNamesSupport.extractUnifiedVariableName(def);
                final Multimap<String, RobotToken> usedVariables = VariableNamesSupport
                        .extractUnifiedVariables(newArrayList(token), new VariableExtractor(), null);
                arguments.put(unifiedDefinitionName,
                        Iterables.getFirst(usedVariables.get(unifiedDefinitionName), null));
            } else {
                arguments.putAll(
                        VariableNamesSupport.extractUnifiedVariables(newArrayList(token), extractor, fileName));
            }
        }
    }
    return arguments;
}
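
The keyword validator above also uses the other overload, putAll(Multimap), which copies every entry of another multimap while keeping that multimap's keys. A short fragment contrasting the two overloads (reusing the imports from the sketch above; the variable names are illustrative):

Multimap<String, String> target = ArrayListMultimap.create();
Multimap<String, String> other = ArrayListMultimap.create();
other.put("x", "1");
other.put("y", "2");

// putAll(key, Iterable): all values end up under the single given key.
target.putAll("args", Arrays.asList("a", "b"));

// putAll(Multimap): entries keep the keys they had in the source multimap.
target.putAll(other);

System.out.println(target); // e.g. {args=[a, b], x=[1], y=[2]} (key order is not guaranteed)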

From source file: org.apache.archiva.redback.common.ldap.role.DefaultLdapRoleMapperConfiguration.java

public Map<String, Collection<String>> getLdapGroupMappings() {
    Multimap<String, String> map = ArrayListMultimap.create();

    Collection<String> keys = userConf.getKeys();

    for (String key : keys) {
        if (key.startsWith(UserConfigurationKeys.LDAP_GROUPS_ROLE_START_KEY)) {
            String val = userConf.getString(key);
            String[] roles = StringUtils.split(val, ',');
            for (String role : roles) {
                map.put(StringUtils.substringAfter(key, UserConfigurationKeys.LDAP_GROUPS_ROLE_START_KEY),
                        role);
            }
        }
    }

    for (Map.Entry<String, List<String>> entry : this.ldapMappings.entrySet()) {
        map.putAll(entry.getKey(), entry.getValue());
    }

    Map<String, Collection<String>> mappings = map.asMap();
    return mappings;
}

From source file: org.apache.cassandra.dht.BootStrapper.java

/** get potential sources for each range, ordered by proximity (as determined by EndpointSnitch) */
Multimap<Range, InetAddress> getRangesWithSources(String table) {
    assert tokenMetadata.sortedTokens().size() > 0;
    final AbstractReplicationStrategy strat = Table.open(table).getReplicationStrategy();
    Collection<Range> myRanges = strat.getPendingAddressRanges(tokenMetadata, token, address);

    Multimap<Range, InetAddress> myRangeAddresses = ArrayListMultimap.create();
    Multimap<Range, InetAddress> rangeAddresses = strat.getRangeAddresses(tokenMetadata);
    for (Range myRange : myRanges) {
        for (Range range : rangeAddresses.keySet()) {
            if (range.contains(myRange)) {
                List<InetAddress> preferred = DatabaseDescriptor.getEndpointSnitch()
                        .getSortedListByProximity(address, rangeAddresses.get(range));
                myRangeAddresses.putAll(myRange, preferred);
                break;
            }
        }
        assert myRangeAddresses.keySet().contains(myRange);
    }
    return myRangeAddresses;
}

From source file: org.eclipse.emf.compare.ide.ui.internal.logical.resolver.ResourceDependencyLocalResolver.java

/**
 * Checks the current state of our {@link #resourceListener} and updates the dependency graph for all
 * resources that have been changed since we last checked.
 *
 * @param resourceSet
 *            The resource set in which to load our temporary resources.
 * @param diagnostic
 *            The diagnostic.
 * @param tspm
 *            Monitor on which to report progress to the user.
 */
protected void updateChangedResources(SynchronizedResourceSet resourceSet, DiagnosticSupport diagnostic,
        ThreadSafeProgressMonitor tspm) {
    // this.diagnostic = createDiagnostic();
    final Set<URI> removedURIs = Sets.difference(resourceListener.popRemovedURIs(),
            scheduler.getComputedElements());
    final Set<URI> changedURIs = Sets.difference(resourceListener.popChangedURIs(),
            scheduler.getComputedElements());

    eventBus.post(new ResourceRemovedEvent<URI>(removedURIs));

    // We need to re-resolve the changed resources, along with their direct parents
    final Set<URI> recompute = new LinkedHashSet<URI>(changedURIs);
    final Multimap<URI, URI> parentToGrandParents = ArrayListMultimap.create();
    for (URI changed : changedURIs) {
        if (dependencyGraph.contains(changed)) {
            Set<URI> directParents = dependencyGraph.getDirectParents(changed);
            recompute.addAll(directParents);
            for (URI uri : directParents) {
                Set<URI> grandParents = dependencyGraph.getDirectParents(uri);
                parentToGrandParents.putAll(uri, grandParents);
            }
        }
    }

    eventBus.post(new ResourceRemovedEvent<URI>(recompute));

    demandResolveAll(recompute, diagnostic, resourceSet, tspm);

    // Re-connect changed resources parents' with their parents
    Set<URI> toResolve = new LinkedHashSet<URI>();
    for (URI parentURI : parentToGrandParents.keySet()) {
        if (dependencyGraph.contains(parentURI)) {
            toResolve.addAll(parentToGrandParents.get(parentURI));
        }
    }
    demandResolveAll(toResolve, diagnostic, resourceSet, tspm);
}

From source file: com.spotify.helios.client.HeliosClient.java

/**
 * Returns a list of all hosts registered in the Helios cluster that match both the given hostname
 * pattern and set of host selectors.
 *
 * @see #listHosts(Set)
 */
public ListenableFuture<List<String>> listHosts(final String namePattern,
        final Set<String> unparsedHostSelectors) {

    final Multimap<String, String> query = HashMultimap.create();
    query.put("namePattern", namePattern);
    query.putAll("selector", unparsedHostSelectors);

    return listHosts(query);
}
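
One detail worth noting for this kind of query building: per the contract quoted at the top of the page, putAll with an empty Iterable stores nothing and returns false, so an empty unparsedHostSelectors set contributes no "selector" parameter at all. A minimal sketch (assumes the same Guava dependency; com.google.common.collect.HashMultimap and java.util.Collections are the only extra imports):

Multimap<String, String> query = HashMultimap.create();
query.put("namePattern", "agent-.*");

// An empty Iterable adds nothing and reports that the multimap is unchanged.
System.out.println(query.putAll("selector", Collections.<String>emptySet())); // false
System.out.println(query.containsKey("selector"));                            // false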

From source file: org.apache.shindig.gadgets.http.HttpResponse.java

/**
 * Expected layout:
 *
 * int - status code
 * Map<String, List<String>> - headers
 * int - length of body
 * byte array - body, of previously specified length
 */
@SuppressWarnings("unchecked")
public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
    httpStatusCode = in.readInt();

    // We store the multimap as a Map<String,List<String>> to insulate us from google-collections API churn
    // And to remain backwards compatible

    Map<String, List<String>> headerCopyMap = (Map<String, List<String>>) in.readObject();
    Multimap headerCopy = newHeaderMultimap();

    for (Map.Entry<String, List<String>> entry : headerCopyMap.entrySet()) {
        headerCopy.putAll(entry.getKey(), entry.getValue());
    }

    int bodyLength = in.readInt();
    responseBytes = new byte[bodyLength];
    int cnt, offset = 0;
    while ((cnt = in.read(responseBytes, offset, bodyLength)) > 0) {
        offset += cnt;
        bodyLength -= cnt;
    }
    if (offset != responseBytes.length) {
        throw new IOException(
                "Invalid body! Expected length = " + responseBytes.length + ", bytes readed = " + offset + '.');
    }

    date = getAndUpdateDate(headerCopy);
    encoding = getAndUpdateEncoding(headerCopy, responseBytes);
    headers = Multimaps.unmodifiableMultimap(headerCopy);
    metadata = Collections.emptyMap();
}

From source file: org.reficio.p2.P2Mojo.java

private Multimap<P2Artifact, ResolvedArtifact> resolveArtifacts(List<P2Artifact> artifacts) {
    Multimap<P2Artifact, ResolvedArtifact> resolvedArtifacts = ArrayListMultimap.create();
    if (null != artifacts) {
        for (P2Artifact p2Artifact : artifacts) {
            logResolving(p2Artifact);
            ArtifactResolutionResult resolutionResult = resolveArtifact(p2Artifact);
            resolvedArtifacts.putAll(p2Artifact, resolutionResult.getResolvedArtifacts());
        }
    }
    return resolvedArtifacts;
}

From source file: org.apache.hadoop.hive.ql.exec.tez.CustomPartitionVertex.java

@Override
public void onRootVertexInitialized(String inputName, InputDescriptor inputDescriptor, List<Event> events) {
    numInputsSeenSoFar++;
    LOG.info("On root vertex initialized " + inputName);
    try {
        // This is using the payload from the RootVertexInitializer corresponding
        // to InputName. Ideally it should be using its own configuration class,
        // but that means serializing another instance.
        MRInputUserPayloadProto protoPayload = MRInputHelpers
                .parseMRInputPayload(inputDescriptor.getUserPayload());
        this.conf = TezUtils.createConfFromByteString(protoPayload.getConfigurationBytes());

        /*
         * Currently in tez, the flow of events is thus:
         * "Generate Splits -> Initialize Vertex" (with parallelism info obtained
         * from the generate splits phase). The generate splits phase groups
         * splits using the TezGroupedSplitsInputFormat. However, for bucket map
         * joins the grouping done by this input format results in incorrect
         * results as the grouper has no knowledge of buckets. So, we initially
         * set the input format to be HiveInputFormat (in DagUtils) for the case
         * of bucket map joins so as to obtain un-grouped splits. We then group
         * the splits corresponding to buckets using the tez grouper which returns
         * TezGroupedSplits.
         */

        // This assumes that Grouping will always be used.
        // Enabling grouping on the payload.
        MRInputUserPayloadProto updatedPayload = MRInputUserPayloadProto.newBuilder(protoPayload)
                .setGroupingEnabled(true).build();
        inputDescriptor
                .setUserPayload(UserPayload.create(updatedPayload.toByteString().asReadOnlyByteBuffer()));
    } catch (IOException e) {
        e.printStackTrace();
        throw new RuntimeException(e);
    }

    boolean dataInformationEventSeen = false;
    Map<String, Set<FileSplit>> pathFileSplitsMap = new TreeMap<String, Set<FileSplit>>();

    for (Event event : events) {
        if (event instanceof InputConfigureVertexTasksEvent) {
            // No tasks should have been started yet. Checked by initial state
            // check.
            LOG.info("Got a input configure vertex event for input: " + inputName);
            Preconditions.checkState(dataInformationEventSeen == false);
            InputConfigureVertexTasksEvent cEvent = (InputConfigureVertexTasksEvent) event;

            // The vertex cannot be configured until all DataEvents are seen - to
            // build the routing table.
            configureVertexTaskEvent = cEvent;
            LOG.info("Configure task for input name: " + inputName + " num tasks: "
                    + configureVertexTaskEvent.getNumTasks());
        }
        if (event instanceof InputUpdatePayloadEvent) {
            // this event can never occur. If it does, fail.
            Preconditions.checkState(false);
        } else if (event instanceof InputDataInformationEvent) {
            dataInformationEventSeen = true;
            InputDataInformationEvent diEvent = (InputDataInformationEvent) event;
            FileSplit fileSplit;
            try {
                fileSplit = getFileSplitFromEvent(diEvent);
            } catch (IOException e) {
                throw new RuntimeException("Failed to get file split for event: " + diEvent, e);
            }
            Set<FileSplit> fsList = pathFileSplitsMap
                    .get(Utilities.getBucketFileNameFromPathSubString(fileSplit.getPath().getName()));
            if (fsList == null) {
                fsList = new TreeSet<FileSplit>(new PathComparatorForSplit());
                pathFileSplitsMap.put(
                        Utilities.getBucketFileNameFromPathSubString(fileSplit.getPath().getName()), fsList);
            }
            fsList.add(fileSplit);
        }
    }

    LOG.info("Path file splits map for input name: " + inputName + " is " + pathFileSplitsMap);

    Multimap<Integer, InputSplit> bucketToInitialSplitMap = getBucketSplitMapForPath(pathFileSplitsMap);

    try {
        int totalResource = context.getTotalAvailableResource().getMemory();
        int taskResource = context.getVertexTaskResource().getMemory();
        float waves = conf.getFloat(TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES,
                TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_WAVES_DEFAULT);

        int availableSlots = totalResource / taskResource;

        LOG.info("Grouping splits. " + availableSlots + " available slots, " + waves
                + " waves. Bucket initial splits map: " + bucketToInitialSplitMap);
        JobConf jobConf = new JobConf(conf);
        ShimLoader.getHadoopShims().getMergedCredentials(jobConf);

        Multimap<Integer, InputSplit> bucketToGroupedSplitMap = HashMultimap.<Integer, InputSplit>create();
        boolean secondLevelGroupingDone = false;
        if ((mainWorkName.isEmpty()) || (inputName.compareTo(mainWorkName) == 0)) {
            SplitLocationProvider splitLocationProvider = Utils.getSplitLocationProvider(conf, LOG);
            for (Integer key : bucketToInitialSplitMap.keySet()) {
                InputSplit[] inputSplitArray = (bucketToInitialSplitMap.get(key).toArray(new InputSplit[0]));
                Multimap<Integer, InputSplit> groupedSplit = grouper.generateGroupedSplits(jobConf, conf,
                        inputSplitArray, waves, availableSlots, inputName, mainWorkName.isEmpty(),
                        splitLocationProvider);
                if (mainWorkName.isEmpty() == false) {
                    Multimap<Integer, InputSplit> singleBucketToGroupedSplit = HashMultimap
                            .<Integer, InputSplit>create();
                    singleBucketToGroupedSplit.putAll(key, groupedSplit.values());
                    groupedSplit = grouper.group(jobConf, singleBucketToGroupedSplit, availableSlots,
                            HiveConf.getFloatVar(conf, HiveConf.ConfVars.TEZ_SMB_NUMBER_WAVES), null);
                    secondLevelGroupingDone = true;
                }
                bucketToGroupedSplitMap.putAll(key, groupedSplit.values());
            }
            processAllEvents(inputName, bucketToGroupedSplitMap, secondLevelGroupingDone);
        } else {
            SplitLocationProvider splitLocationProvider = Utils.getSplitLocationProvider(conf, LOG);
            // do not group across files in case of side work because there is only 1 KV reader per
            // grouped split. This would affect SMB joins where we want to find the smallest key in
            // all the bucket files.
            for (Integer key : bucketToInitialSplitMap.keySet()) {
                InputSplit[] inputSplitArray = (bucketToInitialSplitMap.get(key).toArray(new InputSplit[0]));
                Multimap<Integer, InputSplit> groupedSplit = grouper.generateGroupedSplits(jobConf, conf,
                        inputSplitArray, waves, availableSlots, inputName, false, splitLocationProvider);
                bucketToGroupedSplitMap.putAll(key, groupedSplit.values());
            }
            /*
             * this is the small table side. In case of SMB join, we need to send each split to the
             * corresponding bucket-based task on the other side. In case a split needs to go to
             * multiple downstream tasks, we need to clone the event and send it to the right
             * destination.
             */
            LOG.info("This is the side work - multi-mr work.");
            processAllSideEventsSetParallelism(inputName, bucketToGroupedSplitMap);
        }
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}

From source file: io.bazel.rules.closure.webfiles.Webset.java

/**
 * Loads graph of web files from proto manifests.
 *
 * @param manifests set of web rule target proto files in reverse topological order
 * @return set of web files and relationships between them, which could be mutated, although
 *     adding a single key will most likely result in a full rehash
 */
public static Webset load(Map<Path, WebfileManifestInfo> manifests, WebpathInterner interner) {
    int webfileCapacity = 0;
    int unlinkCapacity = 16; // LinkedHashMultimap#DEFAULT_KEY_CAPACITY
    for (WebfileManifestInfo manifest : manifests.values()) {
        webfileCapacity += manifest.getWebfileCount();
        unlinkCapacity = Math.max(unlinkCapacity, manifest.getUnlinkCount());
    }
    Map<Webpath, Webfile> webfiles = Maps.newLinkedHashMapWithExpectedSize(webfileCapacity);
    Multimap<Webpath, Webpath> links = LinkedHashMultimap.create(webfileCapacity, 4);
    Multimap<Webpath, Webpath> unlinks = LinkedHashMultimap.create(unlinkCapacity, 4);
    for (Map.Entry<Path, WebfileManifestInfo> entry : manifests.entrySet()) {
        Path manifestPath = entry.getKey();
        Path zipPath = WebfilesUtils.getIncrementalZipPath(manifestPath);
        WebfileManifestInfo manifest = entry.getValue();
        String label = manifest.getLabel();
        for (WebfileInfo info : manifest.getWebfileList()) {
            Webpath webpath = interner.get(info.getWebpath());
            webfiles.put(webpath, Webfile.create(webpath, zipPath, label, info));
        }
        for (MultimapInfo mapping : manifest.getLinkList()) {
            Webpath from = interner.get(mapping.getKey());
            for (Webpath to : Iterables.transform(mapping.getValueList(), interner)) {
                // When compiling web_library rules, if the strict dependency checking invariant holds
                // true, we can choose to only load adjacent manifests, rather than transitive ones. The
                // adjacent manifests may contain links to transitive web files which will not be in the
                // webfiles map.
                if (webfiles.containsKey(to)) {
                    links.put(from, to);
                    checkArgument(!unlinks.containsEntry(from, to),
                            "Has a use case for resurrected links been discovered? %s -> %s", from, to);
                }
            }
        }
        for (MultimapInfo mapping : manifest.getUnlinkList()) {
            unlinks.putAll(interner.get(mapping.getKey()),
                    Collections2.transform(mapping.getValueList(), interner));
        }
    }
    for (Map.Entry<Webpath, Webpath> entry : unlinks.entries()) {
        links.remove(entry.getKey(), entry.getValue());
    }
    unlinks.clear();
    return new AutoValue_Webset(webfiles, links, interner);
}

From source file: org.elasticsearch.cassandra.cluster.routing.LocalFirstSearchStrategy.java

@Override
public AbstractSearchStrategy.Result topology(String ksName, Collection<InetAddress> staredShard) {
    Keyspace.open(ksName);

    Set<InetAddress> liveNodes = Gossiper.instance.getLiveTokenOwners();
    InetAddress localAddress = FBUtilities.getBroadcastAddress();
    Map<Range<Token>, List<InetAddress>> allRanges = StorageService.instance
            .getRangeToAddressMapInLocalDC(ksName);

    Multimap<InetAddress, Range<Token>> topo = ArrayListMultimap.create();
    boolean consistent = true;

    Collection<Range<Token>> localRanges = new ArrayList<Range<Token>>();
    for (Entry<Range<Token>, List<InetAddress>> entry : allRanges.entrySet()) {
        List<InetAddress> addrList = entry.getValue();
        if (addrList.contains(localAddress)) {
            localRanges.add(entry.getKey());
            entry.getValue().remove(localAddress);
        }
    }
    logger.debug("{} localRanges for keyspace {} on address {} = {}", localRanges.size(), ksName,
            FBUtilities.getBroadcastAddress(), localRanges);

    topo.putAll(localAddress, localRanges);

    // remove localRanges from allRanges.
    for (Range<Token> range : localRanges) {
        allRanges.remove(range);
    }

    // remove dead nodes from allRanges values.
    for (Entry<Range<Token>, List<InetAddress>> entry : allRanges.entrySet()) {
        List<InetAddress> addrList = entry.getValue();
        for (Iterator<InetAddress> i = addrList.iterator(); i.hasNext();) {
            InetAddress addr = i.next();
            if (!liveNodes.contains(addr)) {
                i.remove();
            }
        }
        if (addrList.size() == 0) {
            consistent = false;
            logger.warn("Inconsistent search for keyspace {}, no alive node for range {}", ksName,
                    entry.getKey());
        }
    }

    // pick a random address for non-local ranges
    Random rnd = new Random();
    for (Entry<Range<Token>, List<InetAddress>> entry : allRanges.entrySet()) {
        List<InetAddress> addrList = entry.getValue();
        InetAddress addr = addrList.get(rnd.nextInt(addrList.size()));
        topo.put(addr, entry.getKey());
    }
    if (logger.isDebugEnabled()) {
        logger.debug("topology for keyspace {} = {}", ksName, topo.asMap());
    }
    return null;
    // return new AbstractSearchStrategy.Result(topo.asMap(), consistent,
    // Gossiper.instance.getUnreachableTokenOwners().size());
}