Example usage for com.google.common.collect Multimap values

List of usage examples for com.google.common.collect Multimap values

Introduction

On this page you can find example usages of the com.google.common.collect Multimap values() method, collected from open-source projects.

Prototype

Collection<V> values();

Document

Returns a view collection containing the value from each key-value pair contained in this multimap, without collapsing duplicates (so values().size() == size()).
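
Two properties are worth noting before the examples: duplicates are not collapsed, and the returned collection is a live view backed by the multimap. Below is a minimal sketch illustrating both; the MultimapValuesDemo class and its contents are made up for illustration.

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Collection;

public class MultimapValuesDemo {
    public static void main(String[] args) {
        // Hypothetical contents, for illustration only.
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 90);
        scores.put("bob", 90);
        scores.put("bob", 85);

        // Duplicates are not collapsed: 90 appears twice,
        // so values().size() equals size().
        Collection<Integer> values = scores.values();
        System.out.println(values.size() == scores.size()); // true (3 == 3)

        // values() is a live view: removing an element through it also
        // removes the corresponding entry from the backing multimap.
        values.remove(90);
        System.out.println(scores.size()); // 2
    }
}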

Usage

From source file:org.eclipse.xtext.xbase.typesystem.override.ResolvedFeatures.java

protected List<IResolvedOperation> computeAllOperations() {
    JvmType rawType = getRawType();
    if (!(rawType instanceof JvmDeclaredType)) {
        return Collections.emptyList();
    }
    Multimap<String, AbstractResolvedOperation> processedOperations = LinkedHashMultimap.create();
    for (IResolvedOperation resolvedOperation : getDeclaredOperations()) {
        processedOperations.put(resolvedOperation.getDeclaration().getSimpleName(),
                (AbstractResolvedOperation) resolvedOperation);
    }
    if (targetVersion.isAtLeast(JavaVersion.JAVA8)) {
        computeAllOperationsFromSortedSuperTypes((JvmDeclaredType) rawType, processedOperations);
    } else {
        Set<JvmType> processedTypes = Sets.newHashSet(rawType);
        computeAllOperationsFromSuperTypes((JvmDeclaredType) rawType, processedOperations, processedTypes);
    }
    // make sure the declared operations are the first in the list
    List<IResolvedOperation> result = new ArrayList<IResolvedOperation>(processedOperations.size());
    result.addAll(getDeclaredOperations());
    for (AbstractResolvedOperation operation : processedOperations.values()) {
        if (operation.getDeclaration().getDeclaringType() != rawType) {
            result.add(operation);
        }
    }
    return Collections.unmodifiableList(result);
}

From source file:org.cloudsmith.geppetto.validation.impl.ValidationServiceImpl.java

/**
 * @param moduleData
 *            - resolved module data
 * @param root
 *            - root file for relativization
 * @param diagnostics
 *            - where to report issues
 */
private void checkCircularDependencies(Multimap<ModuleName, MetadataInfo> moduleData, Diagnostic diagnostics,
        File root) {
    // problems: multiple versions of the same module, etc. Use an identity set
    for (MetadataInfo mi : moduleData.values()) {
        Set<MetadataInfo> checkedModules = Sets.newIdentityHashSet();
        List<MetadataInfo> circle = Lists.newLinkedList();
        checkCircularity(mi, mi, circle, checkedModules);
    }
}

From source file:org.onosproject.intentperf.IntentPerfInstaller.java

/**
 * Creates a specified number of intents for testing purposes.
 *
 * @param numberOfKeys number of intents
 * @param pathLength   path depth
 * @param firstKey     first key to attempt
 * @return set of intents
 */
private Set<Intent> createIntents(int numberOfKeys, int pathLength, int firstKey) {
    List<NodeId> neighbors = getNeighbors();

    Multimap<NodeId, Device> devices = ArrayListMultimap.create();
    deviceService.getAvailableDevices()
            .forEach(device -> devices.put(mastershipService.getMasterFor(device.id()), device));

    // ensure that we have at least one device per neighbor
    neighbors.forEach(node -> checkState(devices.get(node).size() > 0, "There are no devices for %s", node));

    // TODO pull this outside so that createIntent can use it
    // prefix based on node id for keys generated on this instance
    long keyPrefix = ((long) clusterService.getLocalNode().ip().getIp4Address().toInt()) << 32;

    int maxKeysPerNode = (int) Math.ceil((double) numberOfKeys / neighbors.size());
    Multimap<NodeId, Intent> intents = ArrayListMultimap.create();

    for (int count = 0, k = firstKey; count < numberOfKeys; k++) {
        Key key = Key.of(keyPrefix + k, appId);

        NodeId leader = partitionService.getLeader(key);
        if (!neighbors.contains(leader) || intents.get(leader).size() >= maxKeysPerNode) {
            // Bail if we are not sending to this node or we have enough for this node
            continue;
        }
        intents.put(leader, createIntent(key, keyPrefix + k, leader, devices));

        // Bump up the counter and remember this as the last key used.
        count++;
        lastKey = k;
        if (count % 1000 == 0) {
            log.info("Building intents... {} (attempt: {})", count, lastKey);
        }
    }
    checkState(intents.values().size() == numberOfKeys, "Generated wrong number of intents");
    log.info("Created {} intents", numberOfKeys);
    intents.keySet().forEach(node -> log.info("\t{}\t{}", node, intents.get(node).size()));

    return Sets.newHashSet(intents.values());
}

From source file:org.opennms.features.topology.plugins.topo.linkd.internal.IsIsLinkStatusProvider.java

@Override
protected List<EdgeAlarmStatusSummary> getEdgeAlarmSummaries(List<Integer> linkIds) {
    Criteria criteria = new Criteria(IsIsLink.class);
    criteria.addRestriction(new InRestriction("id", linkIds));

    List<IsIsLink> links = getIsisLinkDao().findMatching(criteria);
    Multimap<String, EdgeAlarmStatusSummary> summaryMap = HashMultimap.create();
    for (IsIsLink sourceLink : links) {
        OnmsNode sourceNode = sourceLink.getNode();
        IsIsElement sourceElement = sourceNode.getIsisElement();
        for (IsIsLink targetLink : links) {
            boolean isisAdjIndexCheck = sourceLink.getIsisISAdjIndex() == targetLink.getIsisISAdjIndex();
            boolean isisSysIdCheck = targetLink.getIsisISAdjNeighSysID().equals(sourceElement.getIsisSysID());
            if (isisAdjIndexCheck && isisSysIdCheck) {
                summaryMap.put(sourceNode.getNodeId() + ":" + sourceLink.getIsisCircIfIndex(),
                        new EdgeAlarmStatusSummary(sourceLink.getId(), targetLink.getId(), null));
            }
        }
    }

    List<OnmsAlarm> alarms = getLinkDownAlarms();
    for (OnmsAlarm alarm : alarms) {
        String key = alarm.getNodeId() + ":" + alarm.getIfIndex();
        if (summaryMap.containsKey(key)) {
            Collection<EdgeAlarmStatusSummary> summaries = summaryMap.get(key);
            for (EdgeAlarmStatusSummary summary : summaries) {
                summary.setEventUEI(alarm.getUei());
            }

        }
    }
    return new ArrayList<EdgeAlarmStatusSummary>(summaryMap.values());
}

From source file:org.activityinfo.legacy.shared.impl.GetSitesHandler.java

private boolean weAreFetchingAllSitesForAnActivityAndThereAreNoLinkedSites(GetSites command,
        Multimap<Integer, SiteDTO> siteMap) {

    // are we limiting the number of rows to return?
    if (command.getLimit() >= 0) {
        return false;
    }

    // are we filtering on a SINGLE dimension??
    Filter filter = command.getFilter();
    if (filter.getRestrictedDimensions().size() != 1) {
        return false;
    }

    // is that dimension the Activity dimension?
    if (!filter.getRestrictedDimensions().contains(DimensionType.Activity)) {
        return false;
    }

    // are there any linked sites?
    if (command.isFetchLinks()) {
        for (SiteDTO site : siteMap.values()) {
            if (site.isLinked()) {
                return false;
            }
        }
    }

    // RETURN ALL SITES for filtered Activity
    return true;
}

From source file:io.prestosql.execution.scheduler.SourcePartitionedScheduler.java

@Override
public synchronized ScheduleResult schedule() {
    dropListenersFromWhenFinishedOrNewLifespansAdded();

    int overallSplitAssignmentCount = 0;
    ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
    List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();
    boolean anyBlockedOnPlacements = false;
    boolean anyBlockedOnNextSplitBatch = false;
    boolean anyNotBlocked = false;

    for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
        Lifespan lifespan = entry.getKey();
        ScheduleGroup scheduleGroup = entry.getValue();
        Set<Split> pendingSplits = scheduleGroup.pendingSplits;

        if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS
                || scheduleGroup.state == ScheduleGroupState.DONE) {
            verify(scheduleGroup.nextSplitBatchFuture == null);
        } else if (pendingSplits.isEmpty()) {
            // try to get the next batch
            if (scheduleGroup.nextSplitBatchFuture == null) {
                scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle,
                        lifespan, splitBatchSize - pendingSplits.size());

                long start = System.nanoTime();
                addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start));
            }

            if (scheduleGroup.nextSplitBatchFuture.isDone()) {
                SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
                scheduleGroup.nextSplitBatchFuture = null;
                pendingSplits.addAll(nextSplits.getSplits());
                if (nextSplits.isLastBatch()) {
                    if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && pendingSplits.isEmpty()) {
                        // Add an empty split in case no splits have been produced for the source.
                        // For source operators, they never take input, but they may produce output.
                        // This is well handled by Presto execution engine.
                        // However, there are certain non-source operators that may produce output without any input,
                        // for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
                        // Scheduling an empty split kicks off necessary driver instantiation to make this work.
                        pendingSplits
                                .add(new Split(splitSource.getConnectorId(), splitSource.getTransactionHandle(),
                                        new EmptySplit(splitSource.getConnectorId()), lifespan));
                    }
                    scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
                }
            } else {
                overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
                anyBlockedOnNextSplitBatch = true;
                continue;
            }
        }

        Multimap<Node, Split> splitAssignment = ImmutableMultimap.of();
        if (!pendingSplits.isEmpty()) {
            if (!scheduleGroup.placementFuture.isDone()) {
                anyBlockedOnPlacements = true;
                continue;
            }

            if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
                scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
            }
            if (state == State.INITIALIZED) {
                state = State.SPLITS_ADDED;
            }

            // calculate placements for splits
            SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(pendingSplits);
            splitAssignment = splitPlacementResult.getAssignments();

            // remove splits with successful placements
            splitAssignment.values().forEach(pendingSplits::remove); // AbstractSet.removeAll performs terribly here.
            overallSplitAssignmentCount += splitAssignment.size();

            // if not completely placed, mark scheduleGroup as blocked on placement
            if (!pendingSplits.isEmpty()) {
                scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
                overallBlockedFutures.add(scheduleGroup.placementFuture);
                anyBlockedOnPlacements = true;
            }
        }

        // if no new splits will be assigned, update state and attach completion event
        Multimap<Node, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
        if (pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
            scheduleGroup.state = ScheduleGroupState.DONE;
            if (!lifespan.isTaskWide()) {
                Node node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy)
                        .getNodeForBucket(lifespan.getId());
                noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
            }
        }

        // assign the splits with successful placements
        overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));

        // Assert that "placement future is not done" implies "pendingSplits is not empty".
        // The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line.
        // However, there are other reasons that could lead to this.
        // Note that `computeAssignments` is quite broken:
        // 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked.
        // 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion.
        // As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
        if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty()
                && scheduleGroup.state != ScheduleGroupState.DONE) {
            anyNotBlocked = true;
        }
    }

    // * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked.
    //   If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now.
    //   * Even if `splitSource.isFinished()` returns true, it is not necessarily safe to tear down the split source.
    //   * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures,
    //     which may contain recently published splits. We must not ignore those.
    //   * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits.
    //     Next time it invokes getNextBatch, it will realize that. However, the invocation will fail if we tear down splitSource now.
    if ((state == State.NO_MORE_SPLITS || state == State.FINISHED)
            || (noMoreScheduleGroups && scheduleGroups.isEmpty() && splitSource.isFinished())) {
        switch (state) {
        case INITIALIZED:
            // We have not scheduled a single split so far.
            // But this shouldn't be possible. See usage of EmptySplit in this method.
            throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
        case SPLITS_ADDED:
            state = State.NO_MORE_SPLITS;
            splitSource.close();
            // fall through
        case NO_MORE_SPLITS:
            state = State.FINISHED;
            whenFinishedOrNewLifespanAdded.set(null);
            // fall through
        case FINISHED:
            return new ScheduleResult(true, overallNewTasks.build(), overallSplitAssignmentCount);
        default:
            throw new IllegalStateException("Unknown state");
        }
    }

    if (anyNotBlocked) {
        return new ScheduleResult(false, overallNewTasks.build(), overallSplitAssignmentCount);
    }

    if (anyBlockedOnPlacements || groupedExecution) {
        // In a broadcast join, output buffers of the tasks in build source stage have to
        // hold onto all data produced before probe side task scheduling finishes,
        // even if the data is acknowledged by all known consumers. This is because
        // new consumers may be added until the probe side task scheduling finishes.
        //
        // As a result, the following line is necessary to prevent a deadlock
        // in which neither the build nor the probe side can make any progress.
        // The build side blocks due to a full output buffer.
        // In the meantime the probe side split cannot be consumed since
        // the build side hash table construction has not finished.
        overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
    }

    ScheduleResult.BlockedReason blockedReason;
    if (anyBlockedOnNextSplitBatch) {
        blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE
                : WAITING_FOR_SOURCE;
    } else {
        blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
    }

    overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
    return new ScheduleResult(false, overallNewTasks.build(),
            nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)), blockedReason,
            overallSplitAssignmentCount);
}

From source file:org.wso2.carbon.identity.application.authenticator.fido.dao.DeviceStoreDAO.java

/**
 * Retrieves Device Registration data from store.
 *
 * @param username The username of the Device Registration.
 * @return Collection of Device Registration.
 * @throws FIDOAuthenticatorServerException when SQL statement can not be executed.
 */
public Collection getDeviceRegistration(String username, String tenantDomain, String userStoreDomain)
        throws FIDOAuthenticatorServerException {

    if (log.isDebugEnabled()) {
        log.debug("getDeviceRegistration inputs {username: " + username + ", tenantDomain: " + tenantDomain
                + ", userStoreDomain : " + userStoreDomain + "}");
    }
    Connection connection = IdentityDatabaseUtil.getDBConnection();
    PreparedStatement preparedStatement = null;
    ResultSet resultSet = null;
    Multimap<String, String> devices = ArrayListMultimap.create();

    try {
        preparedStatement = connection
                .prepareStatement(FIDOAuthenticatorConstants.SQLQueries.GET_DEVICE_REGISTRATION_QUERY);
        preparedStatement.setInt(1, IdentityTenantUtil.getTenantId(tenantDomain));
        preparedStatement.setString(2, userStoreDomain);
        preparedStatement.setString(3, username);
        resultSet = preparedStatement.executeQuery();
        while (resultSet.next()) {
            String keyHandle = resultSet.getString(FIDOAuthenticatorConstants.U2F_KEY_HANDLE);
            String deviceData = resultSet.getString(FIDOAuthenticatorConstants.U2F_DEVICE_DATA);
            devices.put(keyHandle, deviceData);

        }
    } catch (SQLException e) {
        throw new FIDOAuthenticatorServerException("Error executing get device registration SQL : "
                + FIDOAuthenticatorConstants.SQLQueries.GET_DEVICE_REGISTRATION_QUERY, e);
    } finally {
        IdentityDatabaseUtil.closeAllConnections(connection, resultSet, preparedStatement);
    }

    return devices.values();
}

From source file:org.eclipse.xtext.serializer.sequencer.ContextFinder.java

protected Iterable<ISerializationContext> findContextsByContainer(EObject sem,
        Iterable<ISerializationContext> contextCandidates) {
    if (sem.eResource() != null && sem.eResource().getContents().contains(sem))
        return Collections.singleton(getRootContext(sem));
    EReference ref = sem.eContainmentFeature();
    if (ref == null || (contextCandidates != null && Iterables.size(contextCandidates) < 2))
        return contextCandidates;
    Multimap<IConstraint, ISerializationContext> containerConstraints = getConstraints(sem.eContainer());
    int refID = sem.eContainer().eClass().getFeatureID(ref);
    Set<ISerializationContext> childContexts = Sets.newLinkedHashSet();
    for (Entry<IConstraint, Collection<ISerializationContext>> e : Lists
            .newArrayList(containerConstraints.asMap().entrySet())) {
        IConstraint constraint = e.getKey();
        Collection<ISerializationContext> contexts = e.getValue();
        if (constraint.getFeatures()[refID] == null)
            containerConstraints.removeAll(constraint);
        else {
            childContexts.addAll(createContextsForFeatures(contexts, constraint.getFeatures()[refID], sem));
        }
    }
    Set<ISerializationContext> result;
    if (contextCandidates != null) {
        result = Sets.newLinkedHashSet(contextCandidates);
        result.retainAll(childContexts);
    } else
        result = childContexts;
    if (result.size() < 2)
        return result;
    Iterable<ISerializationContext> filteredContexts = findContextsByContainer(sem.eContainer(),
            containerConstraints.values());
    childContexts = Sets.newLinkedHashSet();
    for (Entry<IConstraint, Collection<ISerializationContext>> e : Lists
            .newArrayList(containerConstraints.asMap().entrySet()))
        if (intersect(filteredContexts, e.getValue()))
            childContexts.addAll(createContextsForFeatures(e.getValue(), e.getKey().getFeatures()[refID], sem));
    result.retainAll(childContexts);
    return result;
}

From source file:com.yahoo.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl.java

private boolean isBrokerAvailableForRebalancing(String bundleName, long maxLoadLevel) {

    NamespaceName namespaceName = new NamespaceName(getNamespaceNameFromBundleName(bundleName));
    Map<Long, Set<ResourceUnit>> availableBrokers = sortedRankings.get();
    // this does not have "http://" in front, hacky but no time to pretty up
    Multimap<Long, ResourceUnit> brokers = getFinalCandidates(namespaceName, availableBrokers);

    for (Object broker : brokers.values()) {
        ResourceUnit underloadedRU = (ResourceUnit) broker;
        LoadReport currentLoadReport = currentLoadReports.get(underloadedRU);
        if (isBelowLoadLevel(currentLoadReport.getSystemResourceUsage(), maxLoadLevel)) {
            return true;
        }
    }
    return false;
}

From source file:com.giaybac.traprange.extractor.PDFTableExtractor.java

public List<Table> extract() {
    List<Table> retVal = new ArrayList<>();
    Multimap<Integer, Range<Integer>> pageIdNLineRangesMap = LinkedListMultimap.create();
    Multimap<Integer, TextPosition> pageIdNTextsMap = LinkedListMultimap.create();
    try {
        this.document = PDDocument.load(inputStream);
        for (int pageId = 0; pageId < document.getNumberOfPages(); pageId++) {
            boolean b = !exceptedPages.contains(pageId)
                    && (extractedPages.isEmpty() || extractedPages.contains(pageId));
            if (b) {
                PDPage pdPage = (PDPage) document.getDocumentCatalog().getAllPages().get(pageId);
                List<TextPosition> texts = extractTextPositions(pdPage);//sorted by .getY() ASC
                //extract line ranges
                List<Range<Integer>> lineRanges = getLineRanges(pageId, texts);
                //extract column ranges
                List<TextPosition> textsByLineRanges = getTextsByLineRanges(lineRanges, texts);

                pageIdNLineRangesMap.putAll(pageId, lineRanges);
                pageIdNTextsMap.putAll(pageId, textsByLineRanges);
            }
        }
        //Calculate columnRanges
        List<Range<Integer>> columnRanges = getColumnRanges(pageIdNTextsMap.values());
        for (int pageId : pageIdNTextsMap.keySet()) {
            Table table = buildTable(pageId, (List) pageIdNTextsMap.get(pageId),
                    (List) pageIdNLineRangesMap.get(pageId), columnRanges);
            retVal.add(table);
            //debug
            logger.debug("Found " + table.getRows().size() + " row(s) and " + columnRanges.size()
                    + " column(s) of a table in page " + pageId);
        }
    } catch (IOException ex) {
        throw new RuntimeException("Failed to parse PDF file", ex);
    }
    //return
    return retVal;
}