Example usage for com.google.common.collect Multimap asMap

Introduction

This page collects usage examples for com.google.common.collect.Multimap#asMap taken from open-source projects.

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
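
As a minimal, self-contained sketch (the class and variable names here are illustrative, not taken from any of the projects below), the following shows what the asMap() view provides: one map entry per distinct key, grouping all of that key's values, backed live by the multimap.

import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class MultimapAsMapExample {
    public static void main(String[] args) {
        // LinkedListMultimap keeps keys in insertion order, so the output below is deterministic
        Multimap<String, Integer> scoresByPlayer = LinkedListMultimap.create();
        scoresByPlayer.put("alice", 10);
        scoresByPlayer.put("alice", 12);
        scoresByPlayer.put("bob", 7);

        // asMap() exposes the multimap as a Map<K, Collection<V>>, one entry per distinct key
        Map<String, Collection<Integer>> view = scoresByPlayer.asMap();
        for (Map.Entry<String, Collection<Integer>> entry : view.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
        // alice -> [10, 12]
        // bob -> [7]

        // The map is a view, not a copy: later changes to the multimap are visible through it
        scoresByPlayer.put("bob", 9);
        System.out.println(view.get("bob")); // [7, 9]
    }
}

Each of the project examples below builds on the same idea, typically iterating asMap().entrySet() to process a key together with all of its associated values at once.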

Usage

From source file:ai.grakn.graql.internal.reasoner.atom.binary.RelationAtom.java

private Multimap<Role, RelationPlayer> getRoleRelationPlayerMap() {
    Multimap<Role, RelationPlayer> roleRelationPlayerMap = ArrayListMultimap.create();
    Multimap<Role, Var> roleVarTypeMap = getRoleVarMap();
    List<RelationPlayer> relationPlayers = getRelationPlayers();
    roleVarTypeMap.asMap().entrySet().forEach(e -> {
        Role role = e.getKey();
        Label roleLabel = role.getLabel();
        relationPlayers.stream().filter(rp -> rp.getRole().isPresent()).forEach(rp -> {
            VarPatternAdmin roleTypeVar = rp.getRole().orElse(null);
            Label rl = roleTypeVar != null ? roleTypeVar.getTypeLabel().orElse(null) : null;
            if (roleLabel != null && roleLabel.equals(rl)) {
                roleRelationPlayerMap.put(role, rp);
            }
        });
    });
    return roleRelationPlayerMap;
}

From source file:com.facebook.swift.codec.metadata.AbstractThriftMetadataBuilder.java

protected final void normalizeThriftFields(ThriftCatalog catalog) {
    // assign all fields an id (if possible)
    Set<String> fieldsWithConflictingIds = inferThriftFieldIds();

    // group fields by id
    Multimap<Optional<Short>, FieldMetadata> fieldsById = Multimaps.index(fields, getThriftFieldId());
    for (Entry<Optional<Short>, Collection<FieldMetadata>> entry : fieldsById.asMap().entrySet()) {
        Collection<FieldMetadata> fields = entry.getValue();

        // fields must have an id
        if (!entry.getKey().isPresent()) {
            for (String fieldName : newTreeSet(transform(fields, getOrExtractThriftFieldName()))) {
                // only report errors for fields that don't have conflicting ids
                if (!fieldsWithConflictingIds.contains(fieldName)) {
                    metadataErrors.addError("Thrift class '%s' fields %s do not have an id", structName,
                            newTreeSet(transform(fields, getOrExtractThriftFieldName())));
                }
            }
            continue;
        }

        short fieldId = entry.getKey().get();

        // ensure all fields for this ID have the same name
        String fieldName = extractFieldName(fieldId, fields);
        for (FieldMetadata field : fields) {
            field.setName(fieldName);
        }

        // ensure all fields for this ID have the same requiredness
        Requiredness requiredness = extractFieldRequiredness(fieldId, fieldName, fields);
        for (FieldMetadata field : fields) {
            field.setRequiredness(requiredness);
        }

        // We need to do the isLegacyId check in two places. We've already done this
        // process for fields which had multiple `@ThriftField` annotations when we
        // assigned them all the same ID. It doesn't hurt to do it again. On the other
        // hand, we need to do it now to catch the fields which only had a single
        // @ThriftAnnotation, because inferThriftFieldIds skipped them.
        boolean isLegacyId = extractFieldIsLegacyId(fieldId, fieldName, fields);
        for (FieldMetadata field : fields) {
            field.setIsLegacyId(isLegacyId);
        }

        // verify fields have a supported java type and all fields
        // for this ID have the same thrift type
        verifyFieldType(fieldId, fieldName, fields, catalog);
    }
}

From source file:com.facebook.buck.cli.AuditClasspathCommand.java

@VisibleForTesting
int printJsonClasspath(PartialGraph partialGraph) throws IOException {
    DependencyGraph graph = partialGraph.getDependencyGraph();
    List<BuildTarget> targets = partialGraph.getTargets();
    Multimap<String, String> targetClasspaths = LinkedHashMultimap.create();

    for (BuildTarget target : targets) {
        BuildRule rule = graph.findBuildRuleByTarget(target);
        HasClasspathEntries hasClasspathEntries = getHasClasspathEntriesFrom(rule);
        if (hasClasspathEntries == null) {
            continue;
        }
        targetClasspaths.putAll(target.getFullyQualifiedName(), Iterables.transform(
                hasClasspathEntries.getTransitiveClasspathEntries().values(), Functions.toStringFunction()));
    }

    ObjectMapper mapper = new ObjectMapper();

    // Note: `asMap` exposes the multimap as a Map<String, Collection<String>> view;
    // with a LinkedHashMultimap the keys keep their insertion order
    mapper.writeValue(console.getStdOut(), targetClasspaths.asMap());

    return 0;
}

From source file:io.prestosql.plugin.accumulo.index.IndexLookup.java

private List<Range> getIndexRanges(String indexTable,
        Multimap<AccumuloColumnConstraint, Range> constraintRanges, Collection<Range> rowIDRanges,
        Authorizations auths) {
    Set<Range> finalRanges = new HashSet<>();
    // For each column/constraint pair we submit a task to scan the index ranges
    List<Future<Set<Range>>> tasks = new ArrayList<>();
    CompletionService<Set<Range>> executor = new ExecutorCompletionService<>(executorService);
    for (Entry<AccumuloColumnConstraint, Collection<Range>> constraintEntry : constraintRanges.asMap()
            .entrySet()) {
        tasks.add(executor.submit(() -> {
            // Create a batch scanner against the index table, setting the ranges
            BatchScanner scan = connector.createBatchScanner(indexTable, auths, 10);
            scan.setRanges(constraintEntry.getValue());

            // Fetch the column family for this specific column
            scan.fetchColumnFamily(
                    new Text(Indexer.getIndexColumnFamily(constraintEntry.getKey().getFamily().getBytes(),
                            constraintEntry.getKey().getQualifier().getBytes()).array()));

            // For each entry in the scanner
            Text tmpQualifier = new Text();
            Set<Range> columnRanges = new HashSet<>();
            for (Entry<Key, Value> entry : scan) {
                entry.getKey().getColumnQualifier(tmpQualifier);

                // Add to our column ranges if it is in one of the row ID ranges
                if (inRange(tmpQualifier, rowIDRanges)) {
                    columnRanges.add(new Range(tmpQualifier));
                }
            }

            LOG.debug("Retrieved %d ranges for index column %s", columnRanges.size(),
                    constraintEntry.getKey().getName());
            scan.close();
            return columnRanges;
        }));
    }
    tasks.forEach(future -> {
        try {
            // If finalRanges is empty, we have not yet added any column ranges
            if (finalRanges.isEmpty()) {
                finalRanges.addAll(future.get());
            } else {
                // Retain only the row IDs for this column that have already been added
                // This is your set intersection operation!
                finalRanges.retainAll(future.get());
            }
        } catch (ExecutionException | InterruptedException e) {
            if (e instanceof InterruptedException) {
                Thread.currentThread().interrupt();
            }
            throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting index ranges",
                    e.getCause());
        }
    });
    return ImmutableList.copyOf(finalRanges);
}

From source file:org.solovyev.android.messenger.realms.sms.SmsAccountConnection.java

private void onSmsReceived(@Nonnull BroadcastReceiver broadcastReceiver, @Nonnull Intent intent)
        throws AccountException {
    final SmsAccount account = getAccount();
    final Multimap<String, SmsData> messagesByPhoneNumber = getMessagesByPhoneNumber(intent);

    if (!messagesByPhoneNumber.isEmpty()) {
        final User user = account.getUser();
        final UserService userService = getUserService();
        final ChatService chatService = getChatService();

        final List<User> contacts = userService.getContacts(user.getEntity());

        for (Map.Entry<String, Collection<SmsData>> entry : messagesByPhoneNumber.asMap().entrySet()) {
            final User contact = findOrCreateContact(entry.getKey(), contacts);
            final Chat chat = chatService.getOrCreatePrivateChat(user.getEntity(), contact.getEntity());

            final List<Message> messages = new ArrayList<Message>(entry.getValue().size());
            for (SmsData smsData : entry.getValue()) {
                final Message message = toMessage(smsData, account, contact, chat);
                if (message != null) {
                    messages.add(message);
                }
            }

            chatService.saveMessages(chat.getEntity(), messages);
        }
    }

    if (account.getConfiguration().isStopFurtherProcessing()) {
        broadcastReceiver.abortBroadcast();
    }
}

From source file:org.ambraproject.wombat.controller.ArticleMetadata.java

Map<String, Collection<Object>> getContainingArticleLists() throws IOException {
    List<Map<?, ?>> articleListObjects = factory.articleApi.requestObject(
            ApiAddress.builder("articles").embedDoi(articleId.getDoi()).addParameter("lists").build(),
            List.class);
    Multimap<String, Object> result = LinkedListMultimap.create(articleListObjects.size());
    for (Map<?, ?> articleListObject : articleListObjects) {
        String listType = Preconditions.checkNotNull((String) articleListObject.get("type"));
        result.put(listType, articleListObject);
    }
    return result.asMap();
}

From source file:org.lealone.cluster.dht.RangeStreamer.java

/**
 * Get a map of all ranges and the source that will be cleaned up 
 * once this bootstrapped node is added for the given ranges.
 * For each range, the list should only contain a single source. 
 * This allows us to consistently migrate data without violating consistency.
 *
 * @throws java.lang.IllegalStateException when there is no source to get data streamed, or more than 1 source found.
 */
// Maps each desiredRange to the source InetAddress it will be streamed from
private Multimap<Range<Token>, InetAddress> getAllRangesWithStrictSourcesFor(Database db,
        Collection<Range<Token>> desiredRanges) {
    assert tokens != null;
    AbstractReplicationStrategy strat = ClusterMetaData.getReplicationStrategy(db);

    // Active ranges
    TokenMetaData metadataClone = metadata.cloneOnlyTokenMap();
    Multimap<Range<Token>, InetAddress> rangeAddresses = strat.getRangeAddresses(metadataClone);

    // Pending ranges
    metadataClone.updateNormalTokens(tokens, address);
    Multimap<Range<Token>, InetAddress> pendingRangeAddresses = strat.getRangeAddresses(metadataClone);

    // Collects the source that will have its range moved to the new node
    Multimap<Range<Token>, InetAddress> rangeSources = ArrayListMultimap.create();

    for (Range<Token> desiredRange : desiredRanges) {
        for (Map.Entry<Range<Token>, Collection<InetAddress>> preEntry : rangeAddresses.asMap().entrySet()) {
            if (preEntry.getKey().contains(desiredRange)) {
                Set<InetAddress> oldEndpoints = Sets.newHashSet(preEntry.getValue());
                Set<InetAddress> newEndpoints = Sets.newHashSet(pendingRangeAddresses.get(desiredRange));

                // Due to CASSANDRA-5953 we can have a higher RF than we have endpoints.
                // So we need to be careful to only be strict when endpoints == RF
                if (oldEndpoints.size() == strat.getReplicationFactor()) {
                    oldEndpoints.removeAll(newEndpoints);
                    assert oldEndpoints.size() == 1 : "Expected 1 endpoint but found " + oldEndpoints.size();
                }

                rangeSources.put(desiredRange, oldEndpoints.iterator().next());
            }
        }

        // Validate
        Collection<InetAddress> addressList = rangeSources.get(desiredRange);
        if (addressList == null || addressList.isEmpty())
            throw new IllegalStateException("No sources found for " + desiredRange);

        if (addressList.size() > 1)
            throw new IllegalStateException("Multiple endpoints found for " + desiredRange);

        InetAddress sourceIp = addressList.iterator().next();
        EndpointState sourceState = Gossiper.instance.getEndpointStateForEndpoint(sourceIp);
        if (Gossiper.instance.isEnabled() && (sourceState == null || !sourceState.isAlive()))
            throw new RuntimeException("A node required to move the data consistently is down (" + sourceIp
                    + "). If you wish to move the data from a potentially inconsistent replica,"
                    + " restart the node with -Dlealone.consistent.rangemovement=false");
    }

    return rangeSources;
}

From source file:com.google.javascript.jscomp.J2clConstantHoisterPass.java

@Override
public void process(Node externs, Node root) {
    if (!J2clSourceFileChecker.shouldRunJ2clPasses(compiler)) {
        return;
    }

    final Multimap<String, Node> fieldAssignments = ArrayListMultimap.create();
    final Set<Node> hoistableFunctions = new HashSet<>();
    NodeTraversal.traversePostOrder(compiler, root, (NodeTraversal t, Node node, Node parent) -> {
        // TODO(stalcup): don't gather assignments ourselves, switch to a persistent
        // DefinitionUseSiteFinder instead.
        if (parent != null && NodeUtil.isLValue(node)) {
            fieldAssignments.put(node.getQualifiedName(), parent);
        }

        // TODO(stalcup): convert to a persistent index of hoistable functions.
        if (isHoistableFunction(t, node)) {
            hoistableFunctions.add(node);
        }
    });

    for (Collection<Node> assignments : fieldAssignments.asMap().values()) {
        maybeHoistClassField(assignments, hoistableFunctions);
    }
}

From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java

private void flushDeleteEntries(Connector connector, AccumuloTable table, long start, BatchWriter indexWriter,
        Multimap<ByteBuffer, Mutation> queryIndexEntries, Map<ByteBuffer, RowStatus> rowIdStatuses)
        throws MutationsRejectedException, TableNotFoundException {
    if (queryIndexEntries.size() > 0) {
        setRowIdStatuses(connector, table, start, queryIndexEntries, rowIdStatuses);

        AtomicLong numDeleteRows = new AtomicLong(0);
        ImmutableList.Builder<Mutation> builder = ImmutableList.builder();
        queryIndexEntries.asMap().entrySet().forEach(entry -> {
            if (rowIdStatuses.get(entry.getKey()) == RowStatus.ABSENT) {
                builder.addAll(entry.getValue());
                numDeleteRows.incrementAndGet();
            }
        });
        List<Mutation> deleteMutations = builder.build();

        numDeletedIndexEntries += deleteMutations.size();

        if (!dryRun) {
            indexWriter.addMutations(deleteMutations);
        }
    }
}

From source file:org.apache.hadoop.hbase.index.mapreduce.IndexLoadIncrementalHFile.java

/**
 * This takes the LQIs grouped by likely regions and attempts to bulk load them. Any failures are
 * re-queued for another pass with the groupOrSplitPhase.
 */
protected void bulkLoadPhase(final HTable table, final HConnection conn, ExecutorService pool,
        Deque<LoadQueueItem> queue, final Multimap<ByteBuffer, LoadQueueItem> regionGroups) throws IOException {
    // atomically bulk load the groups.
    Set<Future<List<LoadQueueItem>>> loadingFutures = new HashSet<Future<List<LoadQueueItem>>>();
    for (Entry<ByteBuffer, ? extends Collection<LoadQueueItem>> e : regionGroups.asMap().entrySet()) {
        final byte[] first = e.getKey().array();
        final Collection<LoadQueueItem> lqis = e.getValue();

        final Callable<List<LoadQueueItem>> call = new Callable<List<LoadQueueItem>>() {
            public List<LoadQueueItem> call() throws Exception {
                List<LoadQueueItem> toRetry = tryAtomicRegionLoad(conn, table.getTableName(), first, lqis);
                return toRetry;
            }
        };
        loadingFutures.add(pool.submit(call));
    }

    // get all the results.
    for (Future<List<LoadQueueItem>> future : loadingFutures) {
        try {
            List<LoadQueueItem> toRetry = future.get();

            // LQIs that are requeued to be regrouped.
            queue.addAll(toRetry);

        } catch (ExecutionException e1) {
            Throwable t = e1.getCause();
            if (t instanceof IOException) {
                // At this point something unrecoverable has happened.
                // TODO Implement bulk load recovery
                throw new IOException("BulkLoad encountered an unrecoverable problem", t);
            }
            LOG.error("Unexpected execution exception during bulk load", e1);
            throw new IllegalStateException(t);
        } catch (InterruptedException e1) {
            LOG.error("Unexpected interrupted exception during bulk load", e1);
            throw new IllegalStateException(e1);
        }
    }
}