Example usage for com.google.common.collect Multimap asMap

Introduction

This page collects usage examples for com.google.common.collect Multimap asMap.

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
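
As a quick orientation before the real-world examples below, here is a minimal, self-contained sketch of the view's behavior (the class and variable names are illustrative, not part of Guava):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class AsMapDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 10);
        scores.put("alice", 20);
        scores.put("bob", 5);

        // asMap() exposes the multimap as a Map from each key to its value collection.
        Map<String, Collection<Integer>> view = scores.asMap();
        System.out.println(view.get("alice")); // [10, 20]
        System.out.println(view.get("carol")); // null -- absent keys have no entry

        // The view is live: removals write through to the underlying multimap.
        view.remove("bob");
        System.out.println(scores.containsKey("bob")); // false
    }
}

Because the view never contains empty collections, view.get(key) != null is equivalent to multimap.containsKey(key).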

Usage

From source file: edu.cmu.lti.oaqa.baseqa.passage.rerank.scorers.LuceneInMemoryPassageScorer.java

@Override
public void prepare(JCas jcas) throws AnalysisEngineProcessException {
    uri2conf2score = HashBasedTable.create();
    uri2conf2rank = HashBasedTable.create();
    // index
    List<Passage> passages = TypeUtil.getRankedPassages(jcas);
    RAMDirectory index = new RAMDirectory();
    try (IndexWriter writer = new IndexWriter(index, new IndexWriterConfig(analyzer))) {
        for (Passage passage : passages) {
            Document doc = new Document();
            doc.add(new StringField("uri", TypeUtil.getUriOffsets(passage, ":"), Field.Store.YES));
            doc.add(new TextField("text", passage.getText(), Field.Store.NO));
            writer.addDocument(doc);
        }
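        // close the writer explicitly so the documents are committed before the
        // reader is opened; the try-with-resources close that follows is a no-op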
        writer.close();
        reader = DirectoryReader.open(index);
        searcher = new IndexSearcher(reader);
    } catch (IOException e) {
        throw new AnalysisEngineProcessException(e);
    }
    // queries
    List<String> tokens = TypeUtil.getOrderedTokens(jcas).stream().map(Token::getCoveredText)
            .map(QueryParser::escape).filter(name -> !name.isEmpty() && !stoplist.contains(name.toLowerCase()))
            .collect(toList());
    Multimap<String, String> ctype2names = HashMultimap.create();
    for (Concept concept : TypeUtil.getConcepts(jcas)) {
        Set<String> ctypes = TypeUtil.getConceptTypes(concept).stream().map(ConceptType::getAbbreviation)
                .collect(toSet());
        String cnames = TypeUtil.getConceptNames(concept).stream()
                .map(LuceneInMemoryPassageScorer::normalizeQuoteName).distinct().collect(joining(" "));
        ctypes.stream().filter(t -> !FORBIDDEN_CTYPES.contains(t))
                .forEach(ctype -> ctype2names.put(ctype, cnames));
    }
    Multimap<String, String> ctypepre2names = HashMultimap.create();
    ctype2names.asMap().entrySet().forEach(e -> ctypepre2names.putAll(e.getKey().split(":")[0], e.getValue()));
    Multimap<String, String> ctype2mentions = HashMultimap.create();
    for (Concept concept : TypeUtil.getConcepts(jcas)) {
        Set<String> ctypes = TypeUtil.getConceptTypes(concept).stream().map(ConceptType::getAbbreviation)
                .collect(toSet());
        String cmentions = TypeUtil.getConceptMentions(concept).stream().map(ConceptMention::getMatchedName)
                .map(LuceneInMemoryPassageScorer::normalizeQuoteName).distinct().collect(joining(" "));
        ctypes.stream().filter(t -> !FORBIDDEN_CTYPES.contains(t))
                .forEach(ctype -> ctype2mentions.put(ctype, cmentions));
    }
    Multimap<String, String> ctypepre2mentions = HashMultimap.create();
    ctype2mentions.asMap().entrySet()
            .forEach(e -> ctypepre2mentions.putAll(e.getKey().split(":")[0], e.getValue()));
    LOG.debug("Query strings");
    ExecutorService service = Executors.newCachedThreadPool();
    // execute against all tokens
    service.submit(() -> {
        String concatTokens = String.join(" ", tokens);
        LOG.debug(" - Concatenated tokens: {}", concatTokens);
        search(concatTokens, "tokens_concatenated@all");
    });
    // execute against concatenated concept names
    service.submit(() -> {
        String concatCnames = String.join(" ", ctype2names.values());
        LOG.debug(" - Concatenated concept names: {}", concatCnames);
        search(concatCnames, "cnames_concatenated@all");
    });
    // execute against concatenated concept mentions
    service.submit(() -> {
        String concatCmentions = String.join(" ", ctype2mentions.values());
        LOG.debug(" - Concatenated concept mentions: {}", concatCmentions);
        search(concatCmentions, "cmentions_concatenated@all");
    });
    // execute against concept names for each concept
    service.submit(() -> {
        for (String cnames : ImmutableSet.copyOf(ctype2names.values())) {
            LOG.debug(" - Concatenated concept names: {}", cnames);
            search(cnames, "cnames_individual@all");
        }
    });
    // execute against concept names for each concept type
    service.submit(() -> {
        for (String ctype : ctype2names.keySet()) {
            String concatCnames = String.join(" ", ctype2names.get(ctype));
            LOG.debug(" - Concatenated concept names for {}: {}", ctype, concatCnames);
            search(concatCnames, "cnames@" + ctype + "@all");
        }
    });
    // execute against concept names for each concept type prefix
    service.submit(() -> {
        for (String ctypepre : ctypepre2names.keySet()) {
            String concatCnames = String.join(" ", ctypepre2names.get(ctypepre));
            LOG.debug(" - Concatenated concept names for {}: {}", ctypepre, concatCnames);
            search(concatCnames, "cnames@" + ctypepre + "@all");
        }
    });
    // execute against concept mentions for each concept
    service.submit(() -> {
        for (String cmentions : ImmutableSet.copyOf(ctype2mentions.values())) {
            LOG.debug(" - Concatenated concept mentions: {}", cmentions);
            search(cmentions, "cmentions_individual@all");
        }
    });
    // execute against concept mentions for each concept type
    service.submit(() -> {
        for (String ctype : ctype2mentions.keySet()) {
            String concatCmentions = String.join(" ", ctype2mentions.get(ctype));
            LOG.debug(" - Concatenated concept mentions for {}: {}", ctype, concatCmentions);
            search(concatCmentions, "cmentions@" + ctype + "@all");
        }
    });
    // execute against concept mentions for each concept type prefix
    service.submit(() -> {
        for (String ctypepre : ctypepre2mentions.keySet()) {
            String concatCmentions = String.join(" ", ctypepre2mentions.get(ctypepre));
            LOG.debug(" - Concatenated concept mentions for {}: {}", ctypepre, concatCmentions);
            search(concatCmentions, "cmentions@" + ctypepre + "@all");
        }
    });
    service.shutdown();
    try {
        service.awaitTermination(1, TimeUnit.MINUTES);
    } catch (InterruptedException e) {
        throw new AnalysisEngineProcessException(e);
    }
    confs = uri2conf2score.columnKeySet();
}

From source file: com.tysanclan.site.projectewok.util.FinancialTimeline.java

public IModel<List<BaseSeries<String, BigDecimal>>> getParticipation(DateTime start, DateTime end) {
    BaseSeries<String, BigDecimal> series = new BaseSeries<String, BigDecimal>();

    Multimap<String, BigDecimal> amounts = HashMultimap.create();

    for (Entry<DateTime, Contribution> e : contributionsByDate.entries()) {
        if (!e.getKey().isBefore(start) && !e.getKey().isAfter(end)) {
            amounts.put(e.getValue().getUser() != null ? e.getValue().getUser().getUsername() : "Anonymous",
                    e.getValue().getAmount());
        }
    }

    Map<String, BigDecimal> sums = Maps.transformValues(amounts.asMap(), SumFunction.INSTANCE);

    for (Entry<String, BigDecimal> e : sums.entrySet()) {
        series.addEntry(e.getKey(), e.getValue());
    }

    return listOf(series);
}
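
The Maps.transformValues(amounts.asMap(), ...) call above turns a Multimap into a per-key aggregate without copying the data. A minimal sketch of the same pattern, assuming only Guava (SumFunction is project-specific, so a stream reduction stands in for it):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;

import java.math.BigDecimal;
import java.util.Map;

public class SumPerKeyDemo {
    public static void main(String[] args) {
        Multimap<String, BigDecimal> amounts = ArrayListMultimap.create();
        amounts.put("alice", new BigDecimal("2.50"));
        amounts.put("alice", new BigDecimal("1.00"));
        amounts.put("bob", new BigDecimal("4.00"));

        // Lazily map each key's Collection<BigDecimal> to its sum.
        Map<String, BigDecimal> sums = Maps.transformValues(amounts.asMap(),
                values -> values.stream().reduce(BigDecimal.ZERO, BigDecimal::add));

        sums.forEach((user, total) -> System.out.println(user + " -> " + total));
    }
}

Two caveats apply to the original example as well: Maps.transformValues returns a lazy view, so the aggregation reruns on every lookup (copy it with ImmutableMap.copyOf if it is read repeatedly), and HashMultimap collapses duplicate key-value pairs, so two identical contributions from the same user would be counted once; ArrayListMultimap preserves duplicates.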

From source file: co.cask.cdap.data.tools.HBaseQueueDebugger.java

/**
 * Only works for {@link co.cask.cdap.data2.transaction.queue.hbase.ShardedHBaseQueueStrategy}.
 */
public QueueStatistics scanQueue(final QueueName queueName, @Nullable Long consumerGroupId) throws Exception {
    HBaseConsumerStateStore stateStore;
    try {
        stateStore = queueAdmin.getConsumerStateStore(queueName);
    } catch (IllegalStateException e) {
        throw new NotFoundException(queueName);
    }

    TransactionExecutor txExecutor = Transactions.createTransactionExecutor(txExecutorFactory, stateStore);
    Multimap<Long, QueueBarrier> barriers = txExecutor
            .execute(new TransactionExecutor.Function<HBaseConsumerStateStore, Multimap<Long, QueueBarrier>>() {
                @Override
                public Multimap<Long, QueueBarrier> apply(HBaseConsumerStateStore input) throws Exception {
                    return input.getAllBarriers();
                }
            }, stateStore);
    System.out.printf("Got %d barriers\n", barriers.size());

    QueueStatistics stats = new QueueStatistics();

    if (consumerGroupId != null) {
        barriers = Multimaps.filterKeys(barriers, Predicates.equalTo(consumerGroupId));
    }

    for (Map.Entry<Long, Collection<QueueBarrier>> entry : barriers.asMap().entrySet()) {
        long groupId = entry.getKey();
        Collection<QueueBarrier> groupBarriers = entry.getValue();

        System.out.printf("Scanning barriers for group %d\n", groupId);

        int currentSection = 1;
        PeekingIterator<QueueBarrier> barrierIterator = Iterators.peekingIterator(groupBarriers.iterator());
        while (barrierIterator.hasNext()) {
            QueueBarrier start = barrierIterator.next();
            QueueBarrier end = barrierIterator.hasNext() ? barrierIterator.peek() : null;

            System.out.printf("Scanning section %d/%d...\n", currentSection, groupBarriers.size());
            scanQueue(txExecutor, stateStore, queueName, start, end, stats);
            System.out.printf("Current results: %s\n", stats.getReport());
            currentSection++;
        }
        System.out.println("Scanning complete");
    }

    System.out.printf("Total results: %s\n", stats.getReport());
    return stats;
}

From source file: org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl.java

/**
 * As the leader broker, select bundles for the namespace service to unload so that they may be reassigned to new
 * brokers.
 */
@Override
public synchronized void doLoadShedding() {
    if (!LoadManagerShared.isLoadSheddingEnabled(pulsar)) {
        return;
    }
    if (getAvailableBrokers().size() <= 1) {
        log.info("Only 1 broker available: no load shedding will be performed");
        return;
    }
    // Remove bundles who have been unloaded for longer than the grace period from the recently unloaded
    // map.
    final long timeout = System.currentTimeMillis()
            - TimeUnit.MINUTES.toMillis(conf.getLoadBalancerSheddingGracePeriodMinutes());
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    recentlyUnloadedBundles.keySet().removeIf(e -> recentlyUnloadedBundles.get(e) < timeout);

    for (LoadSheddingStrategy strategy : loadSheddingPipeline) {
        final Multimap<String, String> bundlesToUnload = strategy.findBundlesForUnloading(loadData, conf);

        bundlesToUnload.asMap().forEach((broker, bundles) -> {
            bundles.forEach(bundle -> {
                final String namespaceName = LoadManagerShared.getNamespaceNameFromBundleName(bundle);
                final String bundleRange = LoadManagerShared.getBundleRangeFromBundleName(bundle);
                if (!shouldAntiAffinityNamespaceUnload(namespaceName, bundleRange, broker)) {
                    return;
                }

                log.info("[Overload shedder] Unloading bundle: {} from broker {}", bundle, broker);
                try {
                    pulsar.getAdminClient().namespaces().unloadNamespaceBundle(namespaceName, bundleRange);
                    loadData.getRecentlyUnloadedBundles().put(bundle, System.currentTimeMillis());
                } catch (PulsarServerException | PulsarAdminException e) {
                    log.warn("Error when trying to perform load shedding on {} for broker {}", bundle, broker,
                            e);
                }
            });
        });
    }
}

From source file: com.twitter.aurora.scheduler.thrift.SchedulerThriftInterface.java

@Override
public Response getJobs(@Nullable String maybeNullRole) {
    Optional<String> ownerRole = Optional.fromNullable(maybeNullRole);

    // Ensure we only return one JobConfiguration for each JobKey.
    Map<IJobKey, IJobConfiguration> jobs = Maps.newHashMap();

    // Query the task store, find immediate jobs, and synthesize a JobConfiguration for them.
    // This is necessary because the ImmediateJobManager doesn't store jobs directly and
    // ImmediateJobManager#getJobs always returns an empty Collection.
    Query.Builder scope = ownerRole.isPresent() ? Query.roleScoped(ownerRole.get()) : Query.unscoped();
    Multimap<IJobKey, IScheduledTask> tasks = Tasks
            .byJobKey(Storage.Util.weaklyConsistentFetchTasks(storage, scope.active()));

    jobs.putAll(Maps.transformEntries(tasks.asMap(),
            new Maps.EntryTransformer<IJobKey, Collection<IScheduledTask>, IJobConfiguration>() {
                @Override
                public IJobConfiguration transformEntry(IJobKey jobKey, Collection<IScheduledTask> tasks) {

                    // Pick an arbitrary task for each immediate job. The chosen task might not be the most
                    // recent if the job is in the middle of an update or some shards have been selectively
                    // created.
                    TaskConfig firstTask = tasks.iterator().next().getAssignedTask().getTask().newBuilder();
                    return IJobConfiguration.build(
                            new JobConfiguration().setKey(jobKey.newBuilder()).setOwner(firstTask.getOwner())
                                    .setTaskConfig(firstTask).setInstanceCount(tasks.size()));
                }
            }));

    // Get cron jobs directly from the manager. Do this after querying the task store so the real
    // template JobConfiguration for a cron job will overwrite the synthesized one that could have
    // been created above.
    Predicate<IJobConfiguration> configFilter = ownerRole.isPresent()
            ? Predicates.compose(Predicates.equalTo(ownerRole.get()), JobKeys.CONFIG_TO_ROLE)
            : Predicates.<IJobConfiguration>alwaysTrue();
    jobs.putAll(Maps.uniqueIndex(FluentIterable.from(cronJobManager.getJobs()).filter(configFilter),
            JobKeys.FROM_CONFIG));

    return new Response().setResponseCode(OK).setResult(Result
            .getJobsResult(new GetJobsResult().setConfigs(IJobConfiguration.toBuildersSet(jobs.values()))));
}

From source file: com.continuuity.http.BasicHttpResponder.java

/**
 * Send response back to client.
 * @param status Status of the response.
 * @param content Content to be sent back.
 * @param contentType Type of content.
 * @param headers Headers to be sent back.
 */
@Override
public void sendContent(HttpResponseStatus status, ChannelBuffer content, String contentType,
        Multimap<String, String> headers) {
    HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);

    if (content != null) {
        response.setContent(content);
        response.setHeader(HttpHeaders.Names.CONTENT_TYPE, contentType);
        response.setHeader(HttpHeaders.Names.CONTENT_LENGTH, content.readableBytes());
    } else {
        response.setHeader(HttpHeaders.Names.CONTENT_LENGTH, 0);
    }

    if (keepalive) {
        response.setHeader(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.KEEP_ALIVE);
    }

    // Add headers; note that these override any headers already set by the framework
    if (headers != null) {
        for (Map.Entry<String, Collection<String>> entry : headers.asMap().entrySet()) {
            response.setHeader(entry.getKey(), entry.getValue());
        }
    }

    ChannelFuture future = channel.write(response);
    if (!keepalive) {
        future.addListener(ChannelFutureListener.CLOSE);
    }
}
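
Iterating headers.asMap().entrySet(), as above, emits one header per distinct name with all of its values, rather than one header per name-value pair. A minimal, Netty-free sketch of that idiom (the header names are illustrative):

import com.google.common.collect.LinkedListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class HeaderGroupingDemo {
    public static void main(String[] args) {
        Multimap<String, String> headers = LinkedListMultimap.create();
        headers.put("Set-Cookie", "a=1");
        headers.put("Set-Cookie", "b=2");
        headers.put("Content-Type", "text/plain");

        // asMap() yields one entry per distinct header name with all of its values.
        for (Map.Entry<String, Collection<String>> e : headers.asMap().entrySet()) {
            System.out.println(e.getKey() + ": " + String.join(", ", e.getValue()));
        }
    }
}

LinkedListMultimap is used here so that both keys and values keep their insertion order, which matters for HTTP headers.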

From source file: io.prestosql.server.remotetask.HttpRemoteTask.java

@Override
public synchronized void addSplits(Multimap<PlanNodeId, Split> splitsBySource) {
    requireNonNull(splitsBySource, "splitsBySource is null");

    // only add pending split if not done
    if (getTaskStatus().getState().isDone()) {
        return;
    }

    boolean needsUpdate = false;
    for (Entry<PlanNodeId, Collection<Split>> entry : splitsBySource.asMap().entrySet()) {
        PlanNodeId sourceId = entry.getKey();
        Collection<Split> splits = entry.getValue();

        checkState(!noMoreSplits.containsKey(sourceId), "noMoreSplits has already been set for %s", sourceId);
        int added = 0;
        for (Split split : splits) {
            if (pendingSplits.put(sourceId,
                    new ScheduledSplit(nextSplitId.getAndIncrement(), sourceId, split))) {
                added++;
            }
        }
        if (planFragment.isPartitionedSources(sourceId)) {
            pendingSourceSplitCount += added;
            partitionedSplitCountTracker.setPartitionedSplitCount(getPartitionedSplitCount());
        }
        needsUpdate = true;
    }
    updateSplitQueueSpace();

    if (needsUpdate) {
        this.needsUpdate.set(true);
        scheduleUpdate();
    }
}

From source file: uk.ac.ebi.mnb.dialog.tools.ChokePoint.java

public void actionPerformed(ActionEvent ae) {

    int n = DefaultReconstructionManager.getInstance().active().getMetabolome().size();

    final Multimap<Metabolite, MetabolicReaction> reactants = HashMultimap.create(n, 5);
    final Multimap<Metabolite, MetabolicReaction> products = HashMultimap.create(n, 5);

    // count number of reactions producing and consuming each metabolite
    for (final MetabolicReaction rxn : getSelection().get(MetabolicReaction.class)) {

        for (final MetabolicParticipant reactant : rxn.getReactants()) {
            Metabolite metabolite = reactant.getMolecule();
            reactants.put(metabolite, rxn);
        }

        for (final MetabolicParticipant product : rxn.getProducts()) {
            Metabolite metabolite = product.getMolecule();
            products.put(metabolite, rxn);
        }

    }

    final List<MetabolicReaction> chokePoints = new ArrayList<MetabolicReaction>();

    // unique consumed metabolites
    for (Entry<Metabolite, Collection<MetabolicReaction>> entry : reactants.asMap().entrySet()) {
        if (entry.getValue().size() == 1) {
            Metabolite metabolite = entry.getKey();
            // use a better annotation
            Annotation note = new Note("Reaction uniquely consumes " + metabolite.getName() + " ("
                    + metabolite.getIdentifier() + ")");
            entry.getValue().iterator().next().addAnnotation(note);
            chokePoints.addAll(entry.getValue());
        }
    }

    // unique produced metabolites
    for (Entry<Metabolite, Collection<MetabolicReaction>> entry : products.asMap().entrySet()) {
        if (entry.getValue().size() == 1) {
            Metabolite metabolite = entry.getKey();
            // use a better annotation
            Annotation note = new Note("Reaction uniquely produces " + metabolite.getName() + " ("
                    + metabolite.getIdentifier() + ")");
            entry.getValue().iterator().next().addAnnotation(note);
            chokePoints.addAll(entry.getValue());
        }
    }

    LOGGER.debug("identified " + chokePoints.size() + " choke points");

    EntityMap map = new EntityMap(DefaultEntityFactory.getInstance());
    map.addAll(chokePoints);
    setSelection(map);

}
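
The choke-point detection above rests on a common asMap() idiom: filtering keys by the size of their value collection. A minimal sketch of just that idiom (the metabolite and reaction names are illustrative):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class UniquelyConsumedDemo {
    public static void main(String[] args) {
        Multimap<String, String> consumers = HashMultimap.create();
        consumers.put("atp", "rxn1");
        consumers.put("atp", "rxn2");
        consumers.put("nadh", "rxn3");

        // Keys associated with exactly one value.
        List<String> unique = consumers.asMap().entrySet().stream()
                .filter(e -> e.getValue().size() == 1)
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());

        System.out.println(unique); // [nadh]
    }
}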

From source file: org.sosy_lab.cpachecker.util.cwriter.CExpressionInvariantExporter.java

/**
 * @return Mapping from line numbers to states associated with the given line.
 */
private Map<Integer, BooleanFormula> getInvariantsForFile(ReachedSet pReachedSet, String filename) {

    // One formula per reported state.
    Multimap<Integer, BooleanFormula> byState = HashMultimap.create();

    for (AbstractState state : pReachedSet) {

        CFANode loc = AbstractStates.extractLocation(state);
        if (loc != null && loc.getNumEnteringEdges() > 0) {
            CFAEdge edge = loc.getEnteringEdge(0);
            FileLocation location = edge.getFileLocation();
            FluentIterable<FormulaReportingState> reporting = AbstractStates.asIterable(state)
                    .filter(FormulaReportingState.class);

            if (location.getFileName().equals(filename) && !reporting.isEmpty()) {
                BooleanFormula reported = bfmgr
                        .and(reporting.transform(s -> s.getFormulaApproximation(fmgr)).toList());
                byState.put(location.getStartingLineInOrigin(), reported);
            }
        }
    }
    return Maps.transformValues(byState.asMap(), invariants -> bfmgr.or(invariants));
}

From source file: sf.net.experimaestro.manager.plans.Plan.java

/**
 * Returns the graph corresponding to this plan
 *
 * @param map The current plan path (containing joins in input and operators in output)
 * @return The node that is the root (sink) of the DAG
 */
public synchronized Operator prepare(Map<Operator, Operator> map, OperatorMap opMap) {
    // Check if a plan was not already generated
    Operator old = map.get(this);
    if (old != null)
        return old;

    // Outputs will contain the list of operators that have
    // to be merged (because we have a series of different inputs)
    ArrayList<Operator> outputs = new ArrayList<>();

    for (Multimap<DotName, Operator> inputs : inputsList) {
        TaskOperator self = new TaskOperator(this);

        if (inputs.isEmpty()) {
            self.addParent(new Constant(JsonNull.getSingleton()));
            self.setMappings(ImmutableMap.of());
            outputs.add(self);
        } else {
            // --- Loop over the cartesian product of the inputs
            DotName ids[] = new DotName[inputs.keySet().size()];
            OperatorIterable inputValues[] = new OperatorIterable[inputs.keySet().size()];
            {

                int index = 0;
                for (Map.Entry<DotName, Collection<Operator>> input : inputs.asMap().entrySet()) {
                    ids[index] = input.getKey();
                    inputValues[index] = new OperatorIterable(input.getValue(), map, opMap);
                    index++;
                }
                assert index == ids.length;
            }

            // Create a new operator
            Operator inputOperators[] = new Operator[inputValues.length];

            for (int i = inputValues.length; --i >= 0;) {
                OperatorIterable values = inputValues[i];
                Union union = new Union();
                for (Operator operator : values) {
                    union.addParent(operator);
                }

                if (union.getParents().size() == 1)
                    inputOperators[i] = union.getParent(0);
                else
                    inputOperators[i] = union;

                opMap.add(inputOperators[i]);

            }

            // Find LCAs and store them in a map operator ID -> inputs
            // joins contains the list of pairwise LCAs in the operator
            // graph above
            BitSet[] joins = new BitSet[inputOperators.length];
            for (int i = 0; i < joins.length; i++) {
                joins[i] = new BitSet();
            }

            for (int i = 0; i < ids.length - 1; i++) {
                for (int j = i + 1; j < ids.length; j++) {
                    ArrayList<Operator> lca = opMap.findLCAs(inputOperators[i], inputOperators[j]);
                    for (Operator operator : lca) {
                        int key = opMap.get(operator);
                        joins[i].set(key);
                        joins[j].set(key);
                    }
                }
            }

            Lattice lattice = new Lattice(opMap);
            for (int i = 0; i < joins.length; i++) {
                lattice.add(joins[i], inputOperators[i]);
            }
            LatticeNode.MergeResult merge = lattice.merge();

            self.addParent(merge.operator);

            // Associate streams with names
            Map<DotName, Integer> mappings = new TreeMap<>();
            for (int i = 0; i < ids.length; i++) {
                mappings.put(ids[i], merge.map.get(inputOperators[i]));
            }
            self.setMappings(mappings);

            // --- Handle group by

            outputs.add(self);
        }
    }

    // End of loop over inputs

    Operator planOperator;
    if (outputs.size() == 1) {
        map.put(this, outputs.get(0));
        planOperator = outputs.get(0);
    } else {
        Union union = new Union();
        map.put(this, union);
        for (Operator output : outputs)
            union.addParent(output);
        planOperator = union;
    }

    return planOperator;

}