Example usage for com.google.common.collect Multimap asMap

List of usage examples for com.google.common.collect Multimap asMap

Introduction

On this page you can find example usage for the com.google.common.collect Multimap asMap method.

Prototype

Map<K, Collection<V>> asMap();

Document

Returns a view of this multimap as a Map from each distinct key to the nonempty collection of that key's associated values.
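
For orientation, here is a minimal, self-contained sketch (the class and variable names are illustrative, not taken from the projects below) showing that asMap() exposes the multimap as a Map<K, Collection<V>> view whose value collections are never empty:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;
import java.util.Map;

public class MultimapAsMapExample {
    public static void main(String[] args) {
        Multimap<String, String> phonesByPerson = ArrayListMultimap.create();
        phonesByPerson.put("alice", "555-0100");
        phonesByPerson.put("alice", "555-0101");
        phonesByPerson.put("bob", "555-0199");

        // asMap() is a view backed by the multimap, not a copy.
        Map<String, Collection<String>> view = phonesByPerson.asMap();
        for (Map.Entry<String, Collection<String>> entry : view.entrySet()) {
            // Key iteration order is not guaranteed for ArrayListMultimap.
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // Removing the last value for a key also removes the key from the view,
        // so the view never maps a key to an empty collection.
        phonesByPerson.remove("bob", "555-0199");
        System.out.println(view.containsKey("bob")); // false
    }
}

Most of the examples below follow this same pattern: iterate over asMap().entrySet() to process each key's values as a group.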

Usage

From source file:org.robotninjas.commandbus.CommandBus.java

/**
 * Unregisters all handler methods on a registered class {@code clazz}.
 *
 * @param clazz  class whose handler methods should be unregistered.
 * @throws IllegalArgumentException if the class was not previously registered.
 */
public void unregister(Class<?> clazz) {
    Multimap<Class<?>, EventHandler> methodsInListener = finder.findAllHandlers(clazz);
    for (Entry<Class<?>, Collection<EventHandler>> entry : methodsInListener.asMap().entrySet()) {
        Class<?> eventType = entry.getKey();
        Collection<EventHandler> eventMethodsInListener = entry.getValue();

        handlersByTypeLock.writeLock().lock();
        try {
            Set<EventHandler> currentHandlers = handlersByType.get(eventType);
            if (!currentHandlers.containsAll(eventMethodsInListener)) {
                throw new IllegalArgumentException(
                        "missing event handler for an annotated method. Is " + clazz + " registered?");
            }
            currentHandlers.removeAll(eventMethodsInListener);
        } finally {
            handlersByTypeLock.writeLock().unlock();
        }
    }
}

From source file:org.voltdb.sysprocs.saverestore.StreamSnapshotWritePlan.java

private void placeTasksForTable(Table table, Multimap<Long, SnapshotTableTask> tasks) {
    for (Entry<Long, Collection<SnapshotTableTask>> tasksEntry : tasks.asMap().entrySet()) {
        // Stream snapshots need to write all partitioned tables to all selected partitions
        // and all replicated tables to all selected partitions
        if (table.getIsreplicated()) {
            placeReplicatedTasks(tasksEntry.getValue(), Arrays.asList(tasksEntry.getKey()));
        } else {
            placePartitionedTasks(tasksEntry.getValue(), Arrays.asList(tasksEntry.getKey()));
        }
    }
}

From source file:com.ning.atlas.bus.EventBus.java

/**
 * Unregisters all handler methods on a registered {@code object}.
 *
 * @param object  object whose handler methods should be unregistered.
 * @throws IllegalArgumentException if the object was not previously registered.
 */
public void unregister(Object object) {
    Multimap<Class<?>, EventHandler> methodsInListener = finder.findAllHandlers(object);
    for (Entry<Class<?>, Collection<EventHandler>> entry : methodsInListener.asMap().entrySet()) {
        Set<EventHandler> currentHandlers = getHandlersForEventType(entry.getKey());
        Collection<EventHandler> eventMethodsInListener = entry.getValue();

        if (currentHandlers == null || !currentHandlers.containsAll(eventMethodsInListener)) {
            throw new IllegalArgumentException(
                    "missing event handler for an annotated method. Is " + object + " registered?");
        }
        currentHandlers.removeAll(eventMethodsInListener);
    }
}

From source file:org.solovyev.android.messenger.users.ContactFragment.java

@Override
public void onViewCreated(@Nonnull View root, Bundle savedInstanceState) {
    super.onViewCreated(root, savedInstanceState);

    final User contact = getUser();
    final FragmentActivity activity = getActivity();

    final ImageView contactIcon = (ImageView) root.findViewById(R.id.mpp_contact_icon_imageview);
    getUserService().getIconsService().setUserPhoto(contact, contactIcon);

    final ViewGroup propertiesViewGroup = (ViewGroup) root.findViewById(R.id.mpp_contact_properties_viewgroup);

    final Multimap<String, String> properties = ArrayListMultimap.create();
    for (AProperty property : getAccountService().getUserProperties(contact, activity)) {
        properties.put(property.getName(), property.getValue());
    }

    for (Map.Entry<String, Collection<String>> entry : properties.asMap().entrySet()) {
        final View propertyView = ViewFromLayoutBuilder.newInstance(R.layout.mpp_property).build(activity);

        final TextView propertyLabel = (TextView) propertyView.findViewById(R.id.mpp_property_label);
        propertyLabel.setText(entry.getKey());

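        // Note: each iteration looks up the same R.id.mpp_property_value view inside propertyView,
        // so when a key has several values only the last one ends up displayed.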
        for (String propertyValue : entry.getValue()) {
            final TextView propertyValueTextView = (TextView) propertyView
                    .findViewById(R.id.mpp_property_value);
            propertyValueTextView.setText(propertyValue);
        }

        propertiesViewGroup.addView(propertyView);
    }

    root.findViewById(R.id.mpp_save_button).setVisibility(GONE);
    root.findViewById(R.id.mpp_remove_button).setVisibility(GONE);

    getMultiPaneManager().showTitle(getSherlockActivity(), this, contact.getDisplayName());
}

From source file:org.chaston.oakfunds.storage.mgmt.SchemaUpdater.java

private Map<String, Collection<TableDefDiscrepancy>> groupByTable(Iterable<TableDefDiscrepancy> discrepancies) {
    Multimap<String, TableDefDiscrepancy> discrepanciesByTable = MultimapBuilder.hashKeys().arrayListValues()
            .build();
    for (TableDefDiscrepancy discrepancy : discrepancies) {
        discrepanciesByTable.put(discrepancy.getTableName(), discrepancy);
    }
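    // Note: asMap() returns a live Map view backed by the multimap, not a defensive copy.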
    return discrepanciesByTable.asMap();
}

From source file:org.apache.phoenix.hbase.index.write.recovery.TrackingParallelWriterIndexCommitter.java

@Override
public void write(Multimap<HTableInterfaceReference, Mutation> toWrite) throws MultiIndexWriteFailureException {
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = toWrite.asMap().entrySet();
    TaskBatch<Boolean> tasks = new TaskBatch<Boolean>(entries.size());
    List<HTableInterfaceReference> tables = new ArrayList<HTableInterfaceReference>(entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // get the mutations for each table. We leak the implementation here a little bit to save
        // doing a complete copy over of all the index updates for each table.
        final List<Mutation> mutations = (List<Mutation>) entry.getValue();
        // track each reference so we can get at it easily later, when determining failures
        final HTableInterfaceReference tableReference = entry.getKey();
        final RegionCoprocessorEnvironment env = this.env;
        tables.add(tableReference);

        /*
         * Write a batch of index updates to an index table. This operation stops (is cancelable) via two
         * mechanisms: (1) setting aborted or stopped on the IndexWriter or, (2) interrupting the running thread.
         * The former will only work if we are not in the midst of writing the current batch to the table, though we
         * do check these status variables before starting and before writing the batch. The latter usage,
         * interrupting the thread, will work in the previous situations as well as at some points while writing
         * the batch, depending on the underlying writer implementation (HTableInterface#batch is blocking, but
         * doesn't elaborate on when it supports an interrupt).
         */
        tasks.add(new Task<Boolean>() {

            /**
             * Do the actual write to the primary table. We don't need to worry about closing the table because that
             * is handled by the {@link CachingHTableFactory}.
             */
            @SuppressWarnings("deprecation")
            @Override
            public Boolean call() throws Exception {
                try {
                    // this may have been queued, but there was an abort/stop so we try to early exit
                    throwFailureIfDone();

                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Writing index update:" + mutations + " to table: " + tableReference);
                    }

                    try {
                        // TODO: Once HBASE-11766 is fixed, reexamine whether this is necessary.
                        // Also, checking the prefix of the table name to determine if this is a local
                        // index is pretty hacky. If we're going to keep this, we should revisit that
                        // as well.
                        if (tableReference.getTableName().startsWith(MetaDataUtil.LOCAL_INDEX_TABLE_PREFIX)) {
                            Region indexRegion = IndexUtil.getIndexRegion(env);
                            if (indexRegion != null) {
                                throwFailureIfDone();
                                indexRegion.batchMutate(mutations.toArray(new Mutation[mutations.size()]),
                                        HConstants.NO_NONCE, HConstants.NO_NONCE);
                                return Boolean.TRUE;
                            }
                        }
                    } catch (IOException ignored) {
                        // if the region-local write fails, we fall back to the standard (slower) HTable path below
                        if (LOG.isDebugEnabled()) {
                            LOG.debug(
                                    "indexRegion.batchMutate failed, falling back to HTable.batch(). Got error="
                                            + ignored);
                        }
                    }

                    HTableInterface table = factory.getTable(tableReference.get());
                    throwFailureIfDone();
                    table.batch(mutations);
                } catch (InterruptedException e) {
                    // reset the interrupt status on the thread
                    Thread.currentThread().interrupt();
                    throw e;
                } catch (Exception e) {
                    throw e;
                }
                return Boolean.TRUE;
            }

            private void throwFailureIfDone() throws SingleIndexWriteFailureException {
                if (stopped.isStopped() || abortable.isAborted() || Thread.currentThread().isInterrupted()) {
                    throw new SingleIndexWriteFailureException(
                            "Pool closed, not attempting to write to the index!", null);
                }

            }
        });
    }

    List<Boolean> results = null;
    try {
        LOG.debug("Waiting on index update tasks to complete...");
        results = this.pool.submitUninterruptible(tasks);
    } catch (ExecutionException e) {
        throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner",
                e);
    } catch (EarlyExitFailure e) {
        throw new RuntimeException("Stopped while waiting for batch, quitting!", e);
    }

    // track the failures. We only ever access this on return from our calls, so no extra
    // synchronization is needed. We could update all the failures as we find them, but that adds a
    // lot of locking overhead, and just doing the copy later is about as efficient.
    List<HTableInterfaceReference> failures = new ArrayList<HTableInterfaceReference>();
    int index = 0;
    for (Boolean result : results) {
        // there was a failure
        if (result == null) {
            // we know which table failed by the index of the result
            failures.add(tables.get(index));
        }
        index++;
    }

    // if any of the tasks failed, then we need to propagate the failure
    if (failures.size() > 0) {
        // make the list unmodifiable to avoid any more synchronization concerns
        throw new MultiIndexWriteFailureException(Collections.unmodifiableList(failures));
    }
    return;
}

From source file:uk.ac.ebi.atlas.search.baseline.BaselineExperimentAssayGroupSearchService.java

SortedSet<BaselineExperimentAssayGroup> buildResults(
        SetMultimap<String, String> assayGroupsWithExpressionByExperiment, boolean conditionSearch,
        String searchSpecies) {
    SortedSet<BaselineExperimentAssayGroup> results = Sets.newTreeSet();

    for (Map.Entry<String, Collection<String>> exprAssayGroups : assayGroupsWithExpressionByExperiment.asMap()
            .entrySet()) {

        String experimentAccession = exprAssayGroups.getKey();
        Collection<String> assayGroupIds = exprAssayGroups.getValue();

        BaselineExperiment experiment = (BaselineExperiment) experimentTrader
                .getPublicExperiment(experimentAccession);

        Multimap<FactorGroup, String> assayGroupIdsByFilterFactors = experiment.getExperimentalFactors()
                .getAssayGroupIdsGroupedByNonDefaultFactors(assayGroupIds);

        for (Map.Entry<FactorGroup, Collection<String>> filterFactorAssayGroupIds : assayGroupIdsByFilterFactors
                .asMap().entrySet()) {
            FactorGroup filterFactor = filterFactorAssayGroupIds.getKey();
            Collection<String> assayGroupIdsForFilterFactor = filterFactorAssayGroupIds.getValue();

            String experimentSpecies = experiment.isMultiOrganismExperiment()
                    ? filterFactor.getOrganismFactorValue()
                    : experiment.getFirstOrganism();

            if (StringUtils.isBlank(searchSpecies) || Species.sameSpecies(experimentSpecies, searchSpecies)) {
                BaselineExperimentAssayGroup result = new BaselineExperimentAssayGroup(
                        experiment.getAccession(), experiment.getDisplayName(), experimentSpecies,
                        experiment.getExperimentalFactors().getDefaultQueryFactorType(),
                        experiment.isTissueExperiment());
                result.setFilterFactors(filterFactor);
                if (conditionSearch) {
                    result.setAssayGroupsWithCondition(ImmutableSet.copyOf(assayGroupIdsForFilterFactor),
                            experiment);
                }
                results.add(result);
            }
        }

    }
    return results;
}

From source file:com.google.devtools.build.lib.skyframe.serialization.MultimapCodec.java

@Override
public void serialize(SerializationContext context, Multimap<K, V> obj, CodedOutputStream codedOut)
        throws SerializationException, IOException {
    if (obj instanceof ListMultimap) {
        codedOut.writeBoolNoTag(true);
    } else if (obj instanceof SetMultimap) {
        codedOut.writeBoolNoTag(false);
    } else {
        throw new SerializationException("Unexpected multimap type: " + obj.getClass());
    }
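    // asMap().size() is the number of distinct keys, not the total number of key-value pairs.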
    codedOut.writeInt32NoTag(obj.asMap().size());
    for (Map.Entry<K, Collection<V>> entry : obj.asMap().entrySet()) {
        context.serialize(entry.getKey(), codedOut);
        context.serialize(entry.getValue(), codedOut);
    }
}

From source file:com.facebook.presto.raptor.storage.BucketBalancer.java

private int updateAssignments(Multimap<String, BucketAssignment> sourceToAllocationChanges) {
    // perform moves in decreasing order of source node total assigned buckets
    List<String> sourceNodes = sourceToAllocationChanges.asMap().entrySet().stream()
            .sorted((a, b) -> Integer.compare(b.getValue().size(), a.getValue().size())).map(Map.Entry::getKey)
            .collect(toList());

    int moves = 0;
    for (String source : sourceNodes) {
        for (BucketAssignment reassignment : sourceToAllocationChanges.get(source)) {
            // todo: rate-limit new assignments
            shardManager.updateBucketAssignment(reassignment.getDistributionId(),
                    reassignment.getBucketNumber(), reassignment.getNodeIdentifier());
            bucketsBalanced.update(1);
            moves++;
            log.info("Distribution %s: Moved bucket %s from %s to %s", reassignment.getDistributionId(),
                    reassignment.getBucketNumber(), source, reassignment.getNodeIdentifier());
        }
    }

    return moves;
}

From source file:org.apache.ambari.server.state.stack.upgrade.RepositoryVersionHelper.java

/**
 * Serializes repository info to JSON for storing in the DB.
 * Produces JSON like:
 * <pre>
 * [
 *    {
 *       "repositories":[
 *          {
 *             "Repositories/base_url":"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0",
 *             "Repositories/repo_name":"HDP-UTILS",
 *             "Repositories/repo_id":"HDP-UTILS-1.1.0.20"
 *          },
 *          {
 *             "Repositories/base_url":"http://s3.amazonaws.com/dev.hortonworks.com/HDP/centos5/2.x/updates/2.2.0.0",
 *             "Repositories/repo_name":"HDP",
 *             "Repositories/repo_id":"HDP-2.2"
 *          }
 *       ],
 *       "OperatingSystems/os_type":"redhat5"
 *    }
 * ]
 * </pre>
 *
 * @param repositories list of repository infos
 * @return serialized list of operating systems
 */
public String serializeOperatingSystems(List<RepositoryInfo> repositories) {
    final JsonArray rootJson = new JsonArray();
    final Multimap<String, RepositoryInfo> operatingSystems = ArrayListMultimap.create();
    for (RepositoryInfo repository : repositories) {
        operatingSystems.put(repository.getOsType(), repository);
    }
    for (Entry<String, Collection<RepositoryInfo>> operatingSystem : operatingSystems.asMap().entrySet()) {
        final JsonObject operatingSystemJson = new JsonObject();
        final JsonArray repositoriesJson = new JsonArray();
        for (RepositoryInfo repository : operatingSystem.getValue()) {
            final JsonObject repositoryJson = new JsonObject();
            repositoryJson.addProperty(RepositoryResourceProvider.REPOSITORY_BASE_URL_PROPERTY_ID,
                    repository.getBaseUrl());
            repositoryJson.addProperty(RepositoryResourceProvider.REPOSITORY_REPO_NAME_PROPERTY_ID,
                    repository.getRepoName());
            repositoryJson.addProperty(RepositoryResourceProvider.REPOSITORY_REPO_ID_PROPERTY_ID,
                    repository.getRepoId());
            repositoriesJson.add(repositoryJson);
        }
        operatingSystemJson.add(RepositoryVersionResourceProvider.SUBRESOURCE_REPOSITORIES_PROPERTY_ID,
                repositoriesJson);
        operatingSystemJson.addProperty(OperatingSystemResourceProvider.OPERATING_SYSTEM_OS_TYPE_PROPERTY_ID,
                operatingSystem.getKey());
        rootJson.add(operatingSystemJson);
    }
    return gson.toJson(rootJson);
}