Example usage for com.google.common.collect.Multimap.size()

Introduction

This page lists usage examples for com.google.common.collect.Multimap.size().

Prototype

int size();

Documentation

Returns the number of key-value pairs in this multimap.
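
Note that size() counts key-value pairs (entries), not distinct keys; the number of distinct keys is given by keySet().size(). The following minimal, self-contained sketch (class name and values are illustrative, not taken from the examples below) shows the distinction:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizeDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> multimap = HashMultimap.create();
        multimap.put("a", 1);
        multimap.put("a", 2);
        multimap.put("b", 3);

        // size() counts every key-value pair across all keys.
        System.out.println(multimap.size());          // 3
        // keySet().size() counts distinct keys instead.
        System.out.println(multimap.keySet().size()); // 2
        // For emptiness checks, isEmpty() reads better than size() == 0.
        System.out.println(multimap.isEmpty());       // false
    }
}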

Usage

From source file:org.reficio.p2.P2Mojo.java

private void processFeatures() {
    // artifacts should already have been resolved by processArtifacts()
    Multimap<P2FeatureArtifact, ResolvedArtifact> resolvedFeatures = resolveFeatures();
    // then bundle the features including the transitive dependencies (if specified so)
    log.info("Resolved " + resolvedFeatures.size() + " feature artifacts");
    if (null != featureArtifacts) {
        for (P2FeatureArtifact p2Feature : featureArtifacts) {
            for (ResolvedArtifact resolvedArtifact : resolvedFeatures.get(p2Feature)) {
                handleFeatureArtifact(p2Feature, resolvedArtifact);
            }
        }
    }
    if (null != featureDefinitions) {
        for (P2FeatureDefinition p2Feature : featureDefinitions) {
            this.createFeature(p2Feature);
        }
    }
}

From source file:com.vectrace.MercurialEclipse.model.HgFolder.java

/**
 * Parse the list of files and then apply the filter
 */
private void parseListing(List<IPath> listing, SortedSet<String> filter) {
    Multimap<String, IPath> sublisting = HashMultimap.create();

    for (IPath line : listing) {
        assert line.isPrefixOf(path);

        String segment = line.segment(path.segmentCount());

        if (line.segmentCount() == path.segmentCount() + 1) {
            if (filter == null || filter.contains(line.toOSString())) {
                IHgResource file = new HgFile(root, changeset, line);
                this.members.add(file);
            }
        } else {
            sublisting.put(segment, line);
        }
    }

    if (sublisting.size() != 0) {
        Set<String> folderNames = sublisting.keySet();
        for (Iterator<String> it = folderNames.iterator(); it.hasNext();) {
            String folderName = it.next();
            Collection<IPath> folder = sublisting.get(folderName);

            HgFolder hgFolder = new HgFolder(root, changeset, path.append(folderName),
                    new ArrayList<IPath>(folder), filter);

            if (hgFolder.members().length != 0) {
                this.members.add(hgFolder);
            }
        }
    }

}

From source file:com.palantir.atlasdb.keyvalue.impl.StatsTrackingKeyValueService.java

@Override
public void putWithTimestamps(String tableName, Multimap<Cell, Value> values) {
    TableStats s = getTableStats(tableName);

    long start = System.currentTimeMillis();
    super.putWithTimestamps(tableName, values);
    long finish = System.currentTimeMillis();
    s.totalPutMillis.addAndGet(finish - start);
    s.totalPutCalls.incrementAndGet();

    // Only update stats after put was successful.
    s.totalPutCells.addAndGet(values.size());
    for (Entry<Cell, Value> e : values.entries()) {
        s.totalPutCellBytes.addAndGet(e.getKey().getRowName().length);
        s.totalPutCellBytes.addAndGet(e.getKey().getColumnName().length);
        s.totalPutValueBytes.addAndGet(e.getValue().getContents().length);
    }
}

From source file:gg.pistol.sweeper.core.Analyzer.java

private SweeperCountImpl computeCount(TargetImpl root, Multimap<String, TargetImpl> hashDups)
        throws SweeperAbortException {
    log.trace("Counting {} hash duplicates.", hashDups.size());

    int totalTargets = root.getTotalTargets();
    int totalTargetFiles = root.getTotalTargetFiles();
    long totalSize = root.getSize();

    int duplicateTargets = 0;
    int duplicateTargetFiles = 0;
    long duplicateSize = 0;

    // Filter the upper targets in order to have correct aggregate counting of duplicates. The hashDups can contain
    // targets that are children of other targets.
    Collection<TargetImpl> hashDupUpperTargets = filterUpperTargets(hashDups.values());

    // Group the duplicate targets by hash.
    Multimap<String, TargetImpl> dups = filterDuplicateHash(hashDupUpperTargets);

    for (String key : dups.keySet()) {
        Iterator<TargetImpl> iterator = dups.get(key).iterator();

        // Jump over the first value from a duplicate group because deleting all the others will make this one
        // a non-duplicate.
        iterator.next();

        while (iterator.hasNext()) {
            TargetImpl target = iterator.next();
            duplicateTargets += target.getTotalTargets();
            duplicateTargetFiles += target.getTotalTargetFiles();
            duplicateSize += target.getSize();

            checkAbortFlag();
        }
    }

    SweeperCountImpl count = new SweeperCountImpl(totalTargets, totalTargetFiles, totalSize, duplicateTargets,
            duplicateTargetFiles, duplicateSize);

    return count;
}

From source file:org.mqnaas.core.impl.ApplicationInstance.java

public Multimap<Class<? extends IApplication>, IService> getServices() {
    Multimap<Class<? extends IApplication>, IInternalService> internalServices = proxyHolder.getServices();
    Multimap<Class<? extends IApplication>, IService> services = ArrayListMultimap
            .create(internalServices.size(), 3);
    services.putAll(internalServices);
    return services;
}
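
In the example above, size() (the total number of key-value pairs) is passed as the expectedKeys hint to ArrayListMultimap.create(int expectedKeys, int expectedValuesPerKey), which over-allocates slightly but is harmless; keySet().size() is the closer estimate for distinct keys. A minimal sketch of that sizing idiom, using a hypothetical helper that is not part of the source above:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public final class MultimapCopyUtil {
    // Hypothetical helper: copy a multimap into a pre-sized ArrayListMultimap.
    // The first hint is the expected number of distinct keys (keySet().size()),
    // while size() counts all key-value pairs.
    public static <K, V> Multimap<K, V> copyPreSized(Multimap<K, V> source) {
        Multimap<K, V> copy = ArrayListMultimap.create(source.keySet().size(), 3);
        copy.putAll(source);
        return copy; // copy.size() == source.size(): putAll copies every entry
    }
}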

From source file:com.android.tools.idea.templates.RepositoryUrlManager.java

/**
 * Resolves multiple dynamic dependencies on artifacts distributed in the SDK.
 *
 * <p>This method doesn't check any remote repositories, just the already downloaded SDK "extras" repositories.
 */
public List<GradleCoordinate> resolveDynamicSdkDependencies(
        @NotNull Multimap<String, GradleCoordinate> dependencies, @Nullable String supportLibVersionFilter,
        @NotNull AndroidSdkData sdk, @NotNull FileOp fileOp) {
    List<GradleCoordinate> result = Lists.newArrayListWithCapacity(dependencies.size());
    String supportFilter = findExistingExplicitVersion(dependencies.values());
    if (supportFilter != null) {
        supportLibVersionFilter = supportFilter;
    }

    for (String key : dependencies.keySet()) {
        GradleCoordinate highest = Collections.max(dependencies.get(key), COMPARE_PLUS_LOWER);
        if (highest.getGroupId() == null || highest.getArtifactId() == null) {
            return null;
        }

        // For test consistency, don't depend on installed SDK state while testing
        if (myForceRepositoryChecksInTests || !ApplicationManager.getApplication().isUnitTestMode()) {
            // If this coordinate points to an artifact in one of our repositories, check to see if there is a static version
            // that we can add instead of a plus revision.
            String filter = highest.getRevision();
            if (filter.endsWith("+")) {
                filter = filter.length() > 1 ? filter.substring(0, filter.length() - 1) : null;
                boolean includePreviews = false;
                if (filter == null && ImportModule.SUPPORT_GROUP_ID.equals(highest.getGroupId())) {
                    filter = supportLibVersionFilter;
                    includePreviews = true;
                }
                String version = getLibraryRevision(highest.getGroupId(), highest.getArtifactId(), filter,
                        includePreviews, sdk.getLocation(), fileOp);
                if (version == null && filter != null) {
                    // No library found at the support lib version filter level, so look for any match
                    version = getLibraryRevision(highest.getGroupId(), highest.getArtifactId(), null,
                            includePreviews, sdk.getLocation(), fileOp);
                }
                if (version == null && !includePreviews) {
                    // Still no library found, check preview versions
                    version = getLibraryRevision(highest.getGroupId(), highest.getArtifactId(), null, true,
                            sdk.getLocation(), fileOp);
                }
                if (version != null) {
                    String libraryCoordinate = highest.getId() + ":" + version;
                    GradleCoordinate available = GradleCoordinate.parseCoordinateString(libraryCoordinate);
                    if (available != null) {
                        File archiveFile = getArchiveForCoordinate(available, sdk.getLocation(), fileOp);
                        if (((archiveFile != null && fileOp.exists(archiveFile))
                                // Not a known library hardcoded in RepositoryUrlManager?
                                || SupportLibrary.forGradleCoordinate(available) == null)
                                && COMPARE_PLUS_LOWER.compare(available, highest) >= 0) {
                            highest = available;
                        }
                    }
                }
            }
        }
        result.add(highest);
    }
    return result;
}

From source file:org.splevo.vpm.analyzer.DefaultVPMAnalyzerService.java

/**
 * Check the refinements to determine whether the aggregated variation points, or at least some
 * of them, can be merged instead of just grouped.
 *
 * If only a subgroup of the VPs can be merged, the refinement will be split to merge and later
 * on refine the VPs.
 *
 * @param detectedRefinements the refinements detected by the analyzers
 * @return the resulting list of refinements, with merges applied where possible
 */
private List<Refinement> mergeDetection(List<Refinement> detectedRefinements) {

    logger.info("Run merge detection");

    List<Refinement> refinedRefinements = Lists.newLinkedList();

    for (Refinement origRefinement : detectedRefinements) {
        EList<VariationPoint> variationPoints = origRefinement.getVariationPoints();

        Multimap<VariationPoint, VariationPoint> mergeVPBuckets = identifyMergeableVPs(variationPoints);
        checkDistinctBuckets(mergeVPBuckets);

        if (mergeVPBuckets.size() == 0) {
            refinedRefinements.add(origRefinement);
        } else {
            Refinement updatedRefinements = createMergeRefinements(origRefinement, mergeVPBuckets);
            refinedRefinements.add(updatedRefinements);
        }

    }

    return refinedRefinements;
}

From source file:com.palantir.atlasdb.cleaner.Scrubber.java

/**
 * @return number of cells read from _scrub table
 */
private int scrubSomeCells(SortedMap<Long, Multimap<String, Cell>> scrubTimestampToTableNameToCell,
        final TransactionManager txManager, long maxScrubTimestamp) {

    // Don't call expensive toString() if trace logging is off
    if (log.isTraceEnabled()) {
        log.trace("Attempting to scrub cells: " + scrubTimestampToTableNameToCell);
    }

    if (log.isInfoEnabled()) {
        int numCells = 0;
        Set<String> tables = Sets.newHashSet();
        for (Multimap<String, Cell> v : scrubTimestampToTableNameToCell.values()) {
            tables.addAll(v.keySet());
            numCells += v.size();
        }
        log.info("Attempting to scrub " + numCells + " cells from tables " + tables);
    }

    if (scrubTimestampToTableNameToCell.size() == 0) {
        return 0; // No cells left to scrub
    }

    Multimap<Long, Cell> toRemoveFromScrubQueue = HashMultimap.create();

    int numCellsReadFromScrubTable = 0;
    List<Future<Void>> scrubFutures = Lists.newArrayList();
    for (Map.Entry<Long, Multimap<String, Cell>> entry : scrubTimestampToTableNameToCell.entrySet()) {
        final long scrubTimestamp = entry.getKey();
        final Multimap<String, Cell> tableNameToCell = entry.getValue();

        numCellsReadFromScrubTable += tableNameToCell.size();

        long commitTimestamp = getCommitTimestampRollBackIfNecessary(scrubTimestamp, tableNameToCell);
        if (commitTimestamp >= maxScrubTimestamp) {
            // We cannot scrub this yet because not all transactions can read this value.
            continue;
        } else if (commitTimestamp != TransactionConstants.FAILED_COMMIT_TS) {
            // This is CRITICAL; don't scrub if the hard delete transaction didn't actually finish
            // (we still remove it from the _scrub table with the call to markCellsAsScrubbed though),
            // or else we could cause permanent data loss if the hard delete transaction failed after
            // queuing cells to scrub but before successfully committing
            for (final List<Entry<String, Cell>> batch : Iterables.partition(tableNameToCell.entries(),
                    batchSizeSupplier.get())) {
                final Multimap<String, Cell> batchMultimap = HashMultimap.create();
                for (Entry<String, Cell> e : batch) {
                    batchMultimap.put(e.getKey(), e.getValue());
                }
                scrubFutures.add(exec.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        scrubCells(txManager, batchMultimap, scrubTimestamp,
                                aggressiveScrub ? TransactionType.AGGRESSIVE_HARD_DELETE
                                        : TransactionType.HARD_DELETE);
                        return null;
                    }
                }));
            }
        }
        toRemoveFromScrubQueue.putAll(scrubTimestamp, tableNameToCell.values());
    }

    for (Future<Void> future : scrubFutures) {
        Futures.getUnchecked(future);
    }

    Multimap<Cell, Long> cellToScrubTimestamp = HashMultimap.create();
    scrubberStore.markCellsAsScrubbed(Multimaps.invertFrom(toRemoveFromScrubQueue, cellToScrubTimestamp),
            batchSizeSupplier.get());

    if (log.isTraceEnabled()) {
        log.trace("Finished scrubbing cells: " + scrubTimestampToTableNameToCell);
    }

    if (log.isInfoEnabled()) {
        Set<String> tables = Sets.newHashSet();
        for (Multimap<String, Cell> v : scrubTimestampToTableNameToCell.values()) {
            tables.addAll(v.keySet());
        }
        long minTimestamp = Collections.min(scrubTimestampToTableNameToCell.keySet());
        long maxTimestamp = Collections.max(scrubTimestampToTableNameToCell.keySet());
        log.info("Finished scrubbing " + numCellsReadFromScrubTable + " cells at "
                + scrubTimestampToTableNameToCell.size() + " timestamps (" + minTimestamp + "..." + maxTimestamp
                + ") from tables " + tables);
    }

    return numCellsReadFromScrubTable;
}
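
The scrubber above batches its work by partitioning tableNameToCell.entries() with Iterables.partition and tallies progress via size(). A standalone sketch of that pattern (the multimap contents and batch size here are made up for illustration, not drawn from the Palantir code):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;

import java.util.List;
import java.util.Map.Entry;

public class MultimapBatchingDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> work = HashMultimap.create();
        for (int i = 0; i < 10; i++) {
            work.put("table-" + (i % 3), i);
        }

        int batchSize = 4;
        int processed = 0;
        // entries() has exactly size() elements, so the batches cover the whole multimap.
        for (List<Entry<String, Integer>> batch : Iterables.partition(work.entries(), batchSize)) {
            processed += batch.size();
        }
        System.out.println(processed == work.size()); // true
    }
}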

From source file:com.facebook.presto.execution.BenchmarkNodeScheduler.java

@Benchmark
@OperationsPerInvocation(SPLITS)
public Object benchmark(BenchmarkData data) throws Throwable {
    List<RemoteTask> remoteTasks = ImmutableList.copyOf(data.getTaskMap().values());
    Iterator<MockRemoteTaskFactory.MockRemoteTask> finishingTask = Iterators.cycle(data.getTaskMap().values());
    Iterator<Split> splits = data.getSplits().iterator();
    Set<Split> batch = new HashSet<>();
    while (splits.hasNext() || !batch.isEmpty()) {
        Multimap<Node, Split> assignments = data.getNodeSelector().computeAssignments(batch, remoteTasks)
                .getAssignments();
        for (Node node : assignments.keySet()) {
            MockRemoteTaskFactory.MockRemoteTask remoteTask = data.getTaskMap().get(node);
            remoteTask.addSplits(ImmutableMultimap.<PlanNodeId, Split>builder()
                    .putAll(new PlanNodeId("sourceId"), assignments.get(node)).build());
            remoteTask.startSplits(MAX_SPLITS_PER_NODE);
        }
        if (assignments.size() == batch.size()) {
            batch.clear();
        } else {
            batch.removeAll(assignments.values());
        }
        while (batch.size() < SPLIT_BATCH_SIZE && splits.hasNext()) {
            batch.add(splits.next());
        }
        finishingTask.next().finishSplits((int) Math.ceil(MAX_SPLITS_PER_NODE / 50.0));
    }

    return remoteTasks;
}
