Example usage for com.google.common.collect Multimap asMap

Introduction

On this page you can find example usages of com.google.common.collect.Multimap asMap, drawn from open-source projects.

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
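
Before the project examples below, a minimal self-contained sketch of the view semantics (the class name AsMapDemo and the sample keys are illustrative only, not taken from any project on this page):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class AsMapDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> multimap = ArrayListMultimap.create();
        multimap.put("a", 1);
        multimap.put("a", 2);
        multimap.put("b", 3);

        // asMap() exposes the multimap as a Map from each distinct key to its value collection.
        Map<String, Collection<Integer>> view = multimap.asMap();
        System.out.println(view); // {a=[1, 2], b=[3]} (key order is not guaranteed here)

        // The result is a live view, not a copy: later changes to the multimap show through.
        multimap.put("b", 4);
        System.out.println(view.get("b")); // [3, 4]
    }
}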

Usage

From source file:org.apache.cassandra.dht.RangeStreamer.java
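
This Cassandra bootstrap helper iterates addressRanges.asMap().entrySet() so that each candidate range is examined together with its complete collection of replica endpoints when choosing a single strict streaming source per desired range.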

/**
 * Get a map of all ranges and the source that will be cleaned up once this bootstrapped node is added for the given ranges.
 * For each range, the list should only contain a single source. This allows us to consistently migrate data without violating
 * consistency.
 *
 * @throws java.lang.IllegalStateException when there is no source from which to stream data, or when more than one source is found.
 */
private Multimap<Range<Token>, InetAddress> getAllRangesWithStrictSourcesFor(String keyspace,
        Collection<Range<Token>> desiredRanges) {
    assert tokens != null;
    AbstractReplicationStrategy strat = Keyspace.open(keyspace).getReplicationStrategy();

    // Active ranges
    TokenMetadata metadataClone = metadata.cloneOnlyTokenMap();
    Multimap<Range<Token>, InetAddress> addressRanges = strat.getRangeAddresses(metadataClone);

    // Pending ranges
    metadataClone.updateNormalTokens(tokens, address);
    Multimap<Range<Token>, InetAddress> pendingRangeAddresses = strat.getRangeAddresses(metadataClone);

    // Collects the source that will have its range moved to the new node
    Multimap<Range<Token>, InetAddress> rangeSources = ArrayListMultimap.create();

    for (Range<Token> desiredRange : desiredRanges) {
        for (Map.Entry<Range<Token>, Collection<InetAddress>> preEntry : addressRanges.asMap().entrySet()) {
            if (preEntry.getKey().contains(desiredRange)) {
                Set<InetAddress> oldEndpoints = Sets.newHashSet(preEntry.getValue());
                Set<InetAddress> newEndpoints = Sets.newHashSet(pendingRangeAddresses.get(desiredRange));

                // Due to CASSANDRA-5953 we can have a higher RF than we have endpoints.
                // So we need to be careful to only be strict when endpoints == RF
                if (oldEndpoints.size() == strat.getReplicationFactor()) {
                    oldEndpoints.removeAll(newEndpoints);
                    assert oldEndpoints.size() == 1 : "Expected 1 endpoint but found " + oldEndpoints.size();
                }

                rangeSources.put(desiredRange, oldEndpoints.iterator().next());
            }
        }

        // Validate
        Collection<InetAddress> addressList = rangeSources.get(desiredRange);
        if (addressList == null || addressList.isEmpty())
            throw new IllegalStateException("No sources found for " + desiredRange);

        if (addressList.size() > 1)
            throw new IllegalStateException("Multiple endpoints found for " + desiredRange);

        InetAddress sourceIp = addressList.iterator().next();
        EndpointState sourceState = Gossiper.instance.getEndpointStateForEndpoint(sourceIp);
        if (Gossiper.instance.isEnabled() && (sourceState == null || !sourceState.isAlive()))
            throw new RuntimeException("A node required to move the data consistently is down (" + sourceIp
                    + "). "
                    + "If you wish to move the data from a potentially inconsistent replica, restart the node with -Dcassandra.consistent.rangemovement=false");
    }

    return rangeSources;
}

From source file:org.atlasapi.persistence.content.mongo.MongoDBQueryBuilder.java
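
Here asMap() turns the multimap of attribute constraints into a Map that can be copied into a TreeMap ordered by key length, so that entity paths are processed in length order (versions before broadcasts, per the inline comment).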

DBObject buildQuery(ContentQuery query) {

    // handle attributes that are not part of a list structure
    Multimap<List<String>, ConstrainedAttribute> attributeConstraints = HashMultimap.create();
    for (ConstrainedAttribute constraint : buildQueries(query)) {
        if (constraint == null) {
            continue;
        }
        attributeConstraints.put(entityPath(constraint.attribute), constraint);
    }

    // sort the keys by length so that versions are dealt with before broadcasts etc.
    TreeMap<List<String>, Collection<ConstrainedAttribute>> map = Maps.newTreeMap(LENGTH_ORDER);
    map.putAll(attributeConstraints.asMap());

    DBObject finalQuery = new BasicDBObject();

    Map<List<String>, DBObject> queries = Maps.newHashMap();
    for (Entry<List<String>, Collection<ConstrainedAttribute>> entry : map.entrySet()) {

        List<String> entityPath = entry.getKey();

        Collection<ConstrainedAttribute> constraints = entry.getValue();

        if (entityPath.isEmpty()) {
            finalQuery.putAll(buildQueryForSingleLevelEntity(constraints));
            continue;
        }

        DBObject parentDbObject = null;

        List<String> parentPath = entityPath;
        while (!parentPath.isEmpty()) {
            parentPath = parentPath.subList(0, parentPath.size() - 1);
            if (queries.get(parentPath) != null) {
                parentDbObject = queries.get(parentPath);
                break;
            }
        }
        if (parentDbObject == null) {
            parentDbObject = finalQuery;
            parentPath = ImmutableList.of();
        }

        DBObject rhs = buildQueryForSingleLevelEntity(constraints);
        String key = DOTTED_MONGO_ATTRIBUTE_PATH.join(entityPath.subList(parentPath.size(), entityPath.size()));
        DBObject attrObj = new BasicDBObject(key, new BasicDBObject(MongoConstants.ELEM_MATCH, rhs));
        parentDbObject.putAll(attrObj);
        queries.put(entityPath, rhs);
    }
    return finalQuery;
}

From source file:org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.java
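
This HBase bulk-load check walks regionGroups.asMap().entrySet() to count, for each region, how many HFiles target each column family, rejecting the load when any family exceeds maxFilesPerRegionPerFamily.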

private boolean checkHFilesCountPerRegionPerFamily(final Multimap<ByteBuffer, LoadQueueItem> regionGroups) {
    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
        final Collection<LoadQueueItem> lqis = e.getValue();
        HashMap<byte[], MutableInt> filesMap = new HashMap<byte[], MutableInt>();
        for (LoadQueueItem lqi : lqis) {
            MutableInt count = filesMap.get(lqi.family);
            if (count == null) {
                count = new MutableInt();
                filesMap.put(lqi.family, count);
            }
            count.increment();
            if (count.intValue() > maxFilesPerRegionPerFamily) {
                LOG.error("Trying to load more than " + maxFilesPerRegionPerFamily + " hfiles to family "
                        + Bytes.toStringBinary(lqi.family) + " of region with start key "
                        + Bytes.toStringBinary(e.getKey()));
                return false;
            }
        }
    }
    return true;
}

From source file:org.opendaylight.protocol.bgp.rib.impl.ExportPolicyPeerTracker.java
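
The BGP export-policy tracker builds a role-to-identifier multimap and then iterates roleToIds.asMap().entrySet() to create one PeerExportGroup per peer role.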

private Map<PeerRole, PeerExportGroup> createGroups(final Map<YangInstanceIdentifier, PeerRole> peerPathRoles) {
    if (peerPathRoles.isEmpty()) {
        return Collections.emptyMap();
    }

    // Index things nicely for easy access
    final Multimap<PeerRole, YangInstanceIdentifier> roleToIds = ArrayListMultimap
            .create(PeerRole.values().length, 2);
    final Map<PeerId, PeerRole> idToRole = new HashMap<>();
    for (final Entry<YangInstanceIdentifier, PeerRole> e : peerPathRoles.entrySet()) {
        roleToIds.put(e.getValue(), e.getKey());
        idToRole.put(IdentifierUtils.peerId((NodeIdentifierWithPredicates) e.getKey().getLastPathArgument()),
                e.getValue());
    }

    // Optimized immutable copy, reused for all PeerGroups
    final Map<PeerId, PeerRole> allPeerRoles = ImmutableMap.copyOf(idToRole);

    final Map<PeerRole, PeerExportGroup> ret = new EnumMap<>(PeerRole.class);
    for (final Entry<PeerRole, Collection<YangInstanceIdentifier>> e : roleToIds.asMap().entrySet()) {
        final AbstractExportPolicy policy = this.policyDatabase.exportPolicyForRole(e.getKey());
        final Collection<Entry<PeerId, YangInstanceIdentifier>> peers = ImmutableList
                .copyOf(Collections2.transform(e.getValue(), GENERATE_PEERID));

        ret.put(e.getKey(), new PeerExportGroup(peers, allPeerRoles, policy));
    }

    return ret;
}

From source file:fr.inria.eventcloud.overlay.can.StaticLoadBalancingTestBuilder.java
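
Quadruples are grouped by graph node in a multimap; mmap.asMap().values() then yields one collection per graph, each of which is wrapped into a CompoundEvent.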

private List<Quadruple> loadEvents(File file) {
    QuadrupleIterator iterator;

    Multimap<Node, Quadruple> mmap = ArrayListMultimap.create();

    try {
        iterator = RDFReader.pipe(new BufferedInputStream(new FileInputStream(file)), SerializationFormat.TriG);

        Quadruple q;

        while (iterator.hasNext()) {
            q = iterator.next();
            mmap.put(q.getGraph(), q);
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }

    Collection<Collection<Quadruple>> compoundEvents = mmap.asMap().values();

    List<Quadruple> result = new ArrayList<Quadruple>();

    for (Collection<Quadruple> ce : compoundEvents) {
        result.addAll(ce);
        result.add(CompoundEvent.createMetaQuadruple(new CompoundEvent(ce)));
    }

    return result;
}

From source file:com.facebook.buck.cli.AuditInputCommand.java
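
Inputs are collected into a TreeMultimap keyed by build target; serializing targetInputs.asMap() gives Jackson a Map with sorted keys, as the inline comment points out.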

@VisibleForTesting
int printJsonInputs(PartialGraph partialGraph) throws IOException {
    final Multimap<String, String> targetInputs = TreeMultimap.create();

    new AbstractBottomUpTraversal<BuildRule, Void>(partialGraph.getDependencyGraph()) {

        @Override
        public void visit(BuildRule rule) {
            for (Path input : rule.getInputs()) {
                // TODO(user) remove `toString` once Jackson supports serializing Path instances
                targetInputs.put(rule.getFullyQualifiedName(), input.toString());
            }
        }

        @Override
        public Void getResult() {
            return null;
        }

    }.traverse();
    ObjectMapper mapper = new ObjectMapper();

    // Note: using `asMap` here ensures that the keys are sorted
    mapper.writeValue(console.getStdOut(), targetInputs.asMap());

    return 0;
}

From source file:com.b2international.snowowl.snomed.datastore.id.cis.CisSnomedIdentifierService.java
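
Assigned component IDs are grouped by namespace, and componentIdsByNamespace.asMap().entrySet() drives one bulk publication request per namespace, partitioned into batches of at most BULK_LIMIT IDs.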

@Override
public void publish(final Set<String> componentIds) {
    LOGGER.debug("Publishing {} component IDs.", componentIds.size());

    final Map<String, SctId> sctIds = getSctIds(componentIds);
    final Map<String, SctId> problemSctIds = ImmutableMap.copyOf(Maps.filterValues(sctIds,
            Predicates.<SctId>not(Predicates.or(SctId::isAssigned, SctId::isPublished))));

    HttpPut deprecateRequest = null;
    String currentNamespace = null;

    try {

        final Map<String, SctId> assignedSctIds = ImmutableMap
                .copyOf(Maps.filterValues(sctIds, SctId::isAssigned));
        if (!assignedSctIds.isEmpty()) {
            if (assignedSctIds.size() > 1) {
                final Multimap<String, String> componentIdsByNamespace = toNamespaceMultimap(
                        assignedSctIds.keySet());
                for (final Entry<String, Collection<String>> entry : componentIdsByNamespace.asMap()
                        .entrySet()) {
                    currentNamespace = entry.getKey();

                    for (final Collection<String> bulkIds : Iterables.partition(entry.getValue(), BULK_LIMIT)) {
                        LOGGER.debug(
                                String.format("Sending bulk publication request for namespace %s with size %d.",
                                        currentNamespace, bulkIds.size()));
                        deprecateRequest = httpPut(String.format("sct/bulk/publish?token=%s", getToken()),
                                createBulkPublishData(currentNamespace, bulkIds));
                        execute(deprecateRequest);
                    }
                }

            } else {

                final String componentId = Iterables.getOnlyElement(assignedSctIds.keySet());
                currentNamespace = SnomedIdentifiers.getNamespace(componentId);
                deprecateRequest = httpPut(String.format("sct/publish?token=%s", getToken()),
                        createPublishData(componentId));
                execute(deprecateRequest);
            }
        }

        if (!problemSctIds.isEmpty()) {
            throw new SctIdStatusException(
                    "Cannot publish %s component IDs because they are not assigned or already published.",
                    problemSctIds);
        }

    } catch (IOException e) {
        throw new SnowowlRuntimeException(
                String.format("Exception while publishing IDs for namespace %s.", currentNamespace), e);
    } finally {
        release(deprecateRequest);
    }
}

From source file:com.b2international.snowowl.snomed.datastore.id.cis.CisSnomedIdentifierService.java
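
The release path follows the same pattern as publish() above: asMap().entrySet() iterates the namespace groups so that release requests can be batched per namespace.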

@Override
public void release(final Set<String> componentIds) {
    LOGGER.debug("Releasing {} component IDs.", componentIds.size());

    final Map<String, SctId> sctIds = getSctIds(componentIds);
    final Map<String, SctId> problemSctIds = ImmutableMap.copyOf(Maps.filterValues(sctIds,
            Predicates.<SctId>not(Predicates.or(SctId::isAssigned, SctId::isReserved, SctId::isAvailable))));

    if (!problemSctIds.isEmpty()) {
        throw new SctIdStatusException(
                "Cannot release %s component IDs because they are not assigned, reserved, or already available.",
                problemSctIds);
    }

    final Map<String, SctId> assignedOrReservedSctIds = ImmutableMap
            .copyOf(Maps.filterValues(sctIds, Predicates.or(SctId::isAssigned, SctId::isReserved)));

    if (assignedOrReservedSctIds.isEmpty()) {
        return;
    }

    HttpPut releaseRequest = null;
    String currentNamespace = null;

    try {

        if (assignedOrReservedSctIds.size() > 1) {
            final Multimap<String, String> componentIdsByNamespace = toNamespaceMultimap(
                    assignedOrReservedSctIds.keySet());
            for (final Entry<String, Collection<String>> entry : componentIdsByNamespace.asMap().entrySet()) {
                currentNamespace = entry.getKey();

                for (final Collection<String> bulkIds : Iterables.partition(entry.getValue(), BULK_LIMIT)) {
                    LOGGER.debug(String.format("Sending bulk release request for namespace %s with size %d.",
                            currentNamespace, bulkIds.size()));
                    releaseRequest = httpPut(String.format("sct/bulk/release?token=%s", getToken()),
                            createBulkReleaseData(currentNamespace, bulkIds));
                    execute(releaseRequest);
                }
            }

        } else {

            final String componentId = Iterables.getOnlyElement(assignedOrReservedSctIds.keySet());
            currentNamespace = SnomedIdentifiers.getNamespace(componentId);
            releaseRequest = httpPut(String.format("sct/release?token=%s", getToken()),
                    createReleaseData(componentId));
            execute(releaseRequest);
        }

    } catch (IOException e) {
        throw new SnowowlRuntimeException(
                String.format("Exception while releasing IDs for namespace %s.", currentNamespace), e);
    } finally {
        release(releaseRequest);
    }
}

From source file:com.facebook.swift.codec.metadata.AbstractThriftMetadataBuilder.java
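
fieldsByName.asMap().entrySet() presents each field-name group as a single collection, letting the builder detect conflicting IDs within a group or propagate a single inferred ID to all of its members.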

protected final void inferThriftFieldIds(Multimap<String, FieldMetadata> fieldsByName,
        Set<String> fieldsWithConflictingIds) {
    // for each name group, set the ids on the fields without ids
    for (Entry<String, Collection<FieldMetadata>> entry : fieldsByName.asMap().entrySet()) {
        Collection<FieldMetadata> fields = entry.getValue();
        String fieldName = entry.getKey();

        // skip all entries without a name or singleton groups... we'll deal with these later
        if (fields.size() <= 1) {
            continue;
        }

        // all ids used by this named field
        Set<Short> ids = ImmutableSet.copyOf(Optional.presentInstances(transform(fields, getThriftFieldId())));

        // multiple conflicting ids
        if (ids.size() > 1) {
            if (!fieldsWithConflictingIds.contains(fieldName)) {
                metadataErrors.addError("Thrift class '%s' field '%s' has multiple ids: %s", structName,
                        fieldName, ids.toString());
                fieldsWithConflictingIds.add(fieldName);
            }
            continue;
        }

        // single id, so set on all fields in this group (groups with no id are handled later),
        // and validate isLegacyId is consistent and correct.
        if (ids.size() == 1) {
            short id = Iterables.getOnlyElement(ids);

            boolean isLegacyId = extractFieldIsLegacyId(id, fieldName, fields);

            // propagate the id data to all fields in this group
            for (FieldMetadata field : fields) {
                field.setId(id);
                field.setIsLegacyId(isLegacyId);
            }
        }
    }
}

From source file:org.gradle.api.internal.artifacts.resolution.DefaultArtifactResolutionQuery.java
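
Resolved artifacts are accumulated per component in a multimap; jvmLibraryArtifacts.asMap().entrySet() then supplies the per-component collections from which immutable JvmLibrary instances are built.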

public ArtifactResolutionQueryResult execute() {
    final List<Dependency> artifactDependencies = createArtifactDependencies();
    Configuration configuration = configurationContainer
            .detachedConfiguration(artifactDependencies.toArray(new Dependency[artifactDependencies.size()]));

    Multimap<ComponentIdentifier, JvmLibraryArtifact> jvmLibraryArtifacts = ArrayListMultimap.create();
    LenientConfiguration lenientConfiguration = configuration.getResolvedConfiguration()
            .getLenientConfiguration();
    Set<ResolvedArtifact> resolvedArtifacts = lenientConfiguration.getArtifacts(Specs.satisfyAll());
    // TODO: handle resolution failures (lenientConfiguration.getUnresolvedModuleDependencies)

    for (ResolvedArtifact artifact : resolvedArtifacts) {
        ModuleComponentIdentifier componentId = toComponentIdentifier(artifact.getModuleVersion().getId());
        jvmLibraryArtifacts.put(componentId, toJvmLibraryArtifact(artifact));
    }

    Set<JvmLibrary> jvmLibraries = Sets.newHashSet();
    for (Map.Entry<ComponentIdentifier, Collection<JvmLibraryArtifact>> entry : jvmLibraryArtifacts.asMap()
            .entrySet()) {
        jvmLibraries.add(new DefaultJvmLibrary(entry.getKey(), ImmutableList.copyOf(entry.getValue())));
    }

    return new DefaultArtifactResolutionQueryResult(jvmLibraries);
}