Example usage for com.google.common.collect Multimap clear

Introduction

On this page you can find example usages of com.google.common.collect.Multimap#clear().

Prototype

void clear();

Document

Removes all key-value pairs from the multimap, leaving it empty (see isEmpty()).
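
As a minimal, self-contained sketch of the call itself (the class and variable names here are illustrative and not taken from any of the projects below), clear() drops every key-value pair and leaves the multimap empty:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapClearDemo {
    public static void main(String[] args) {
        // Populate a multimap with a few key-value pairs.
        Multimap<String, Integer> scores = ArrayListMultimap.create();
        scores.put("alice", 10);
        scores.put("alice", 12);
        scores.put("bob", 7);

        System.out.println(scores.size());     // 3
        scores.clear();                         // removes all key-value pairs
        System.out.println(scores.isEmpty());  // true
    }
}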

Usage

From source file:com.github.haixing_hu.lang.Assignment.java

@SuppressWarnings("unchecked")
public static <K, V extends Assignable<? super V>> Multimap<K, V> deepAssign(@Nullable Multimap<K, V> left,
        @Nullable final Multimap<K, V> right) {
    if (right == null) {
        return null;
    } else {
        if (left == null) {
            left = LinkedHashMultimap.create();
        } else {
            left.clear();
        }
        final Collection<Map.Entry<K, V>> entries = right.entries();
        for (final Map.Entry<K, V> entry : entries) {
            final K key = entry.getKey();
            final V value = entry.getValue();
            if (value == null) {
                left.put(key, null);
            } else {
                left.put(key, (V) value.clone());
            }
        }
        return left;
    }
}

From source file:org.crypto.sse.Partition.java

public static Multimap<Integer, String> partitioning(Multimap<String, String> lookup) {

    // Partitions Creation
    Set<String> keys = lookup.keySet();

    int partitionId = 0;
    Multimap<Integer, String> partitions = ArrayListMultimap.create();
    int counter2 = 0;

    for (String key : keys) {
        Set<Integer> keys2 = partitions.keySet();
        List<String> inter = (List<String>) lookup.get(key);
        List<String> interTMP = new ArrayList<String>(inter);

        Printer.debugln("Step number: " + counter2++ + "Number of keywords " + keys.size());

        Set<String> set = new HashSet<String>(interTMP);
        Multimap<Integer, String> partitionsTMP = ArrayListMultimap.create();

        for (Integer key2 : keys2) {

            if (!set.isEmpty()) {
                Set<String> tmp = new HashSet<String>(partitions.get(key2));

                Set<String> intersection = Sets.intersection(tmp, set);

                Set<String> difference;

                if (intersection.isEmpty()) {
                    difference = tmp;
                } else {
                    difference = Sets.difference(tmp, intersection);
                    set = Sets.difference(set, intersection);

                }

                if (!difference.isEmpty()) {
                    partitionId = partitionId + 1;
                    partitionsTMP.putAll(partitionId, difference);
                }

                if (!intersection.isEmpty()) {
                    partitionId = partitionId + 1;
                    partitionsTMP.putAll(partitionId, intersection);
                }

            } else {
                partitionId = partitionId + 1;
                partitionsTMP.putAll(partitionId, new HashSet<String>(partitions.get(key2)));
            }

        }

        interTMP = new ArrayList<String>(set);

        if (!interTMP.isEmpty()) {

            partitionId = partitionId + 1;
            partitionsTMP.putAll(partitionId, interTMP);

        }

        partitions = ArrayListMultimap.create(partitionsTMP);
        partitionsTMP.clear();
        interTMP.clear();

    }

    Printer.debugln("Partitions size " + partitions.keySet().size());
    Printer.debugln("\n");

    return partitions;
}

From source file:com.mycompany.swapriori.ConvertToObjectInfo.java

public Map<String, Multimap> convert(List<Triple> listOfTriples) {
    Map<String, Multimap> ObjectInfo = new LinkedHashMap(100);
    Map<String, Multimap> ObjectInfo1 = new LinkedHashMap(100);
    //List<Triple> listOfTriples = new ArrayList<>(100);
    Set<String> objectSet = new LinkedHashSet(100);
    for (Triple t : listOfTriples) {
        objectSet.add(t.object);
    }
    //System.out.println(objectSet);
    List<Triple> pullOut = null;

    //Multimap<String, String> multiMap = ArrayListMultimap.create();
    //remove all triples with some object
    for (String iterObject : objectSet) {
        Multimap<String, String> multiMap = ArrayListMultimap.create();
        Multimap<String, String> multiMap1 = ArrayListMultimap.create();
        for (Triple t1 : listOfTriples) {
            if (t1.getObject().equals(iterObject)) {
                multiMap.put(t1.getPredicate(), t1.getSubject());
                multiMap1.put(t1.getPredicate(), t1.getSubject());
            }
            //System.out.println(multiMap);
        }
        //System.out.println(multiMap);
        ObjectInfo.put(iterObject, multiMap1);
        ObjectInfo1 = new LinkedHashMap(ObjectInfo);
        //System.out.println(ObjectInfo);
        //System.out.println("Clearing");
        multiMap.clear();
    }
    //System.out.println(multiMap + " " + "final");
    //System.out.println(ObjectInfo1 + "Final");
    return ObjectInfo1;
}

From source file:co.mitro.analysis.StatsGenerator.java

/**
 * Generate statistics and return newly created objects that have not been committed.
 * @param outDir directory in which to write summary files. Subdirectories outDir/users 
 *        and outDir/orgs must exist. Supply null for no output.
 */
public static Snapshot generateStatistics(String outDir, Manager manager)
        throws SQLException, IOException, MitroServletException {
    final long runTimestampMs = System.currentTimeMillis();
    Snapshot output = new Snapshot();

    // TODO: don't do this in one gigantic transaction.
    Multimap<Integer, Link> countToFile = TreeMultimap.create(Ordering.natural().reverse(), Ordering.natural());
    // get all orgs.
    Map<Integer, GroupInfo> orgIdToOrg = Maps.newHashMap();
    for (DBGroup o : DBGroup.getAllOrganizations(manager)) {
        GroupInfo newGi = new GroupInfo();
        newGi.autoDelete = o.isAutoDelete();
        newGi.groupId = o.getId();
        newGi.isTopLevelOrg = true;
        newGi.name = o.getName();
        Set<String> users = Sets.newHashSet();
        for (DBGroup orgGroup : o.getAllOrgGroups(manager)) {
            users.add(orgGroup.getName());
        }
        newGi.users = Lists.newArrayList(users);
        orgIdToOrg.put(newGi.groupId, newGi);
    }
    int numPeople = 0;
    for (DBIdentity id : manager.identityDao.queryForAll()) {
        ++numPeople;
        try {
            logger.info(id.getName() + ": " + id.getGuidCookie());
            DBHistoricalUserState userState = getHistoricalUserState(manager, runTimestampMs, orgIdToOrg, id);
            output.userStateObjects.add(userState);

            String filename = id.getName() + ".html";
            renderIfOutputEnabled(outDir, "/users/" + filename, userStateTemplate, userState);
            countToFile.put(userState.numSecrets, new Link(id.getName(), filename, userState.numSecrets));
        } catch (MitroServletException e) {
            logger.error("UNKNOWN ERROR", e);
        }
    }
    renderIfOutputEnabled(outDir, "/users/index.html", indexTemplate, countToFile.values());

    countToFile.clear();
    int numOrgs = 0;
    // now do the orgs
    for (DBGroup org : DBGroup.getAllOrganizations(manager)) {
        ++numOrgs;
        // hack to make this work
        Set<Integer> admins = Sets.newHashSet();
        org.putDirectUsersIntoSet(admins, DBAcl.adminAccess());
        int userId = admins.iterator().next();
        DBIdentity dbi = manager.identityDao.queryForId(userId);
        MitroRequestContext context = new MitroRequestContext(dbi, null, manager, null);
        GetOrganizationStateResponse resp = GetOrganizationState.doOperation(context, org.getId());
        DBHistoricalOrgState orgState = new DBHistoricalOrgState(resp, org.getId(), runTimestampMs);
        output.orgStateObjects.add(orgState);

        String filename = org.getId() + ".html";
        renderIfOutputEnabled(outDir, "/orgs/" + filename, orgStateTemplate, orgState);
        countToFile.put(orgState.numMembers + orgState.numAdmins, new Link(org.getName() + org.getId(),
                org.getId() + ".html", orgState.numAdmins + orgState.numMembers));
    }
    renderIfOutputEnabled(outDir, "/orgs/index.html", indexTemplate, countToFile.values());
    renderIfOutputEnabled(outDir, "/index.html", indexTemplate,
            ImmutableList.of(new Link("organizations", "orgs/index.html", numOrgs),
                    new Link("users", "users/index.html", numPeople)));

    return output;
}

From source file:org.eclipse.xtext.ui.refactoring.impl.AbstractReferenceUpdater.java

protected void createClusteredReferenceUpdates(ElementRenameArguments elementRenameArguments,
        Multimap<URI, IReferenceDescription> resource2references, ResourceSet resourceSet,
        IRefactoringUpdateAcceptor updateAcceptor, StatusWrapper status, IProgressMonitor monitor) {
    SubMonitor progress = SubMonitor.convert(monitor, resource2references.keySet().size() + 1);
    if (loadTargetResources(resourceSet, elementRenameArguments, status, progress.newChild(1))) {
        if (getClusterSize() > 0) {
            Set<Resource> targetResources = newHashSet(resourceSet.getResources());
            Multimap<URI, IReferenceDescription> cluster = HashMultimap.create();
            SubMonitor clusterMonitor = progress.newChild(1);
            for (URI referringResourceURI : resource2references.keySet()) {
                cluster.putAll(referringResourceURI, resource2references.get(referringResourceURI));
                if (cluster.keySet().size() == getClusterSize()) {
                    unloadNonTargetResources(resourceSet, targetResources);
                    createReferenceUpdatesForCluster(elementRenameArguments, cluster, resourceSet,
                            updateAcceptor, status, clusterMonitor);
                    cluster.clear();
                }
            }
            if (!cluster.isEmpty()) {
                unloadNonTargetResources(resourceSet, targetResources);
                createReferenceUpdatesForCluster(elementRenameArguments, cluster, resourceSet, updateAcceptor,
                        status, clusterMonitor);
            }
        } else {
            createReferenceUpdatesForCluster(elementRenameArguments, resource2references, resourceSet,
                    updateAcceptor, status, progress.newChild(resource2references.keySet().size()));
        }
    }
}

From source file:org.computer.whunter.rpm.parser.RpmSpecParser.java

/** 
 * The values of fields and macros can themselves contain the values of other directives. Search through the 
 * properties and replace these values if they are present.
 *
 * @param properties the properties to modify by expanding any values
 */
private void expandReferences(Multimap<String, String> properties) {

    Map<Pattern, String> matcherPatterns = new HashMap<Pattern, String>();
    matcherPatterns.putAll(m_fieldReferenceMatcherPatterns);
    matcherPatterns.putAll(m_macroReferenceMatcherPatterns);

    Map<String, Pattern> replacePatterns = new HashMap<String, Pattern>();
    replacePatterns.putAll(m_fieldReferenceReplacePatterns);
    replacePatterns.putAll(m_macroReferenceReplacePatterns);

    Multimap<String, String> newProperties = ArrayListMultimap.create();
    for (Entry<String, String> property : properties.entries()) {
        String newValue = expandReferences(property.getValue().toString(), properties, matcherPatterns,
                replacePatterns);
        newProperties.put(property.getKey().toString(), newValue);
    }
    properties.clear();
    properties.putAll(newProperties);
}

From source file:io.bazel.rules.closure.webfiles.Webset.java

/**
 * Loads graph of web files from proto manifests.
 *
 * @param manifests set of web rule target proto files in reverse topological order
 * @return set of web files and relationships between them, which could be mutated, although
 *     adding a single key will most likely result in a full rehash
 */
public static Webset load(Map<Path, WebfileManifestInfo> manifests, WebpathInterner interner) {
    int webfileCapacity = 0;
    int unlinkCapacity = 16; // LinkedHashMultimap#DEFAULT_KEY_CAPACITY
    for (WebfileManifestInfo manifest : manifests.values()) {
        webfileCapacity += manifest.getWebfileCount();
        unlinkCapacity = Math.max(unlinkCapacity, manifest.getUnlinkCount());
    }
    Map<Webpath, Webfile> webfiles = Maps.newLinkedHashMapWithExpectedSize(webfileCapacity);
    Multimap<Webpath, Webpath> links = LinkedHashMultimap.create(webfileCapacity, 4);
    Multimap<Webpath, Webpath> unlinks = LinkedHashMultimap.create(unlinkCapacity, 4);
    for (Map.Entry<Path, WebfileManifestInfo> entry : manifests.entrySet()) {
        Path manifestPath = entry.getKey();
        Path zipPath = WebfilesUtils.getIncrementalZipPath(manifestPath);
        WebfileManifestInfo manifest = entry.getValue();
        String label = manifest.getLabel();
        for (WebfileInfo info : manifest.getWebfileList()) {
            Webpath webpath = interner.get(info.getWebpath());
            webfiles.put(webpath, Webfile.create(webpath, zipPath, label, info));
        }
        for (MultimapInfo mapping : manifest.getLinkList()) {
            Webpath from = interner.get(mapping.getKey());
            for (Webpath to : Iterables.transform(mapping.getValueList(), interner)) {
                // When compiling web_library rules, if the strict dependency checking invariant holds
                // true, we can choose to only load adjacent manifests, rather than transitive ones. The
                // adjacent manifests may contain links to transitive web files which will not be in the
                // webfiles map.
                if (webfiles.containsKey(to)) {
                    links.put(from, to);
                    checkArgument(!unlinks.containsEntry(from, to),
                            "Has a use case for resurrected links been discovered? %s -> %s", from, to);
                }
            }
        }
        for (MultimapInfo mapping : manifest.getUnlinkList()) {
            unlinks.putAll(interner.get(mapping.getKey()),
                    Collections2.transform(mapping.getValueList(), interner));
        }
    }
    for (Map.Entry<Webpath, Webpath> entry : unlinks.entries()) {
        links.remove(entry.getKey(), entry.getValue());
    }
    unlinks.clear();
    return new AutoValue_Webset(webfiles, links, interner);
}

From source file:com.facebook.presto.execution.TaskExecutorSimulator.java

public void run() throws Exception {
    Multimap<Integer, SimulationTask> tasks = Multimaps
            .synchronizedListMultimap(ArrayListMultimap.<Integer, SimulationTask>create());
    Set<ListenableFuture<?>> finishFutures = newConcurrentHashSet();
    AtomicBoolean done = new AtomicBoolean();

    long start = System.nanoTime();

    // large tasks
    for (int userId = 0; userId < 2; userId++) {
        ListenableFuture<?> future = createUser("large_" + userId, 100, taskExecutor, done, tasks);
        finishFutures.add(future);
    }

    // small tasks
    for (int userId = 0; userId < 4; userId++) {
        ListenableFuture<?> future = createUser("small_" + userId, 5, taskExecutor, done, tasks);
        finishFutures.add(future);
    }

    // tiny tasks
    for (int userId = 0; userId < 1; userId++) {
        ListenableFuture<?> future = createUser("tiny_" + userId, 1, taskExecutor, done, tasks);
        finishFutures.add(future);
    }

    // warm up
    for (int i = 0; i < 30; i++) {
        TimeUnit.MILLISECONDS.sleep(1000);
        System.out.println(taskExecutor);
    }
    tasks.clear();

    // run
    for (int i = 0; i < 60; i++) {
        TimeUnit.MILLISECONDS.sleep(1000);
        System.out.println(taskExecutor);
    }

    // capture finished tasks
    Map<Integer, Collection<SimulationTask>> middleTasks;
    synchronized (tasks) {
        middleTasks = new TreeMap<>(tasks.asMap());
    }

    // wait for finish
    done.set(true);
    Futures.allAsList(finishFutures).get(1, TimeUnit.MINUTES);

    Duration runtime = Duration.nanosSince(start).convertToMostSuccinctTimeUnit();
    synchronized (this) {
        System.out.println();
        System.out.println("Simulation finished in  " + runtime);
        System.out.println();

        for (Entry<Integer, Collection<SimulationTask>> entry : middleTasks.entrySet()) {
            Distribution durationDistribution = new Distribution();
            Distribution taskParallelismDistribution = new Distribution();

            for (SimulationTask task : entry.getValue()) {
                long taskStart = Long.MAX_VALUE;
                long taskEnd = 0;
                long totalCpuTime = 0;

                for (SimulationSplit split : task.getSplits()) {
                    taskStart = Math.min(taskStart, split.getStartNanos());
                    taskEnd = Math.max(taskEnd, split.getDoneNanos());
                    totalCpuTime += TimeUnit.MILLISECONDS.toNanos(split.getRequiredProcessMillis());
                }

                Duration taskDuration = new Duration(taskEnd - taskStart, NANOSECONDS)
                        .convertTo(TimeUnit.MILLISECONDS);
                durationDistribution.add(taskDuration.toMillis());

                double taskParallelism = 1.0 * totalCpuTime / (taskEnd - taskStart);
                taskParallelismDistribution.add((long) (taskParallelism * 100));
            }

            System.out.println("Splits " + entry.getKey() + ": Completed " + entry.getValue().size());

            Map<Double, Long> durationPercentiles = durationDistribution.getPercentiles();
            System.out.printf(
                    "   wall time ms :: p01 %4s :: p05 %4s :: p10 %4s :: p97 %4s :: p50 %4s :: p75 %4s :: p90 %4s :: p95 %4s :: p99 %4s\n",
                    durationPercentiles.get(0.01), durationPercentiles.get(0.05), durationPercentiles.get(0.10),
                    durationPercentiles.get(0.25), durationPercentiles.get(0.50), durationPercentiles.get(0.75),
                    durationPercentiles.get(0.90), durationPercentiles.get(0.95),
                    durationPercentiles.get(0.99));

            Map<Double, Long> parallelismPercentiles = taskParallelismDistribution.getPercentiles();
            System.out.printf(
                    "    parallelism :: p99 %4.2f :: p95 %4.2f :: p90 %4.2f :: p75 %4.2f :: p50 %4.2f :: p25 %4.2f :: p10 %4.2f :: p05 %4.2f :: p01 %4.2f\n",
                    parallelismPercentiles.get(0.99) / 100.0, parallelismPercentiles.get(0.95) / 100.0,
                    parallelismPercentiles.get(0.90) / 100.0, parallelismPercentiles.get(0.75) / 100.0,
                    parallelismPercentiles.get(0.50) / 100.0, parallelismPercentiles.get(0.25) / 100.0,
                    parallelismPercentiles.get(0.10) / 100.0, parallelismPercentiles.get(0.05) / 100.0,
                    parallelismPercentiles.get(0.01) / 100.0);
        }
    }
    Thread.sleep(10);
}

From source file:de.hzi.helmholtz.Compare.PathwayComparisonUsingModules.java

public Multimap<Integer, Multimap<Double, String>> pcompare(PathwayUsingModules firstPathway,
        PathwayUsingModules secondPathway) {

    Multimap<Double, String> forwardScores = ArrayListMultimap.create();
    Multimap<Integer, Multimap<Double, String>> forwardBestScores = ArrayListMultimap.create();

    int currentQueryGene = 0;
    Iterator<Module> sourceGeneIt = firstPathway.geneIterator(); // for reverse scoring, this becomes targetGeneIt
    while (sourceGeneIt.hasNext()) {
        Module queryGene = sourceGeneIt.next();
        // clear forward scores after one gene is done
        forwardScores.clear();

        Iterator<Module> targetGeneIt = secondPathway.geneIterator();
        int currentTargetGene = 0;
        while (targetGeneIt.hasNext()) {
            Module targetGene = targetGeneIt.next();
            ModuleSimilarity geneSimilarity = new ModuleSimilarity();
            if ((queryGene.size() <= targetGene.size())) {
                // Match one source gene against one target gene with the same index
                List<Module> targetGenes = new ArrayList<Module>();
                targetGenes.add(targetGene);
                double score = geneSimilarity.levenshteinSimilarity(queryGene, targetGenes, functionWeight,
                        statusWeight, substrateWeight);
                forwardScores.put(score, currentTargetGene + "");
            } else if (queryGene.size() > targetGene.size()) {
                // Merge multiple target genes and compare to one source gene
                // store scores for windows of all sizes upto maxWindowSize
                for (int currentWindowSize = 0; currentWindowSize < maxWindowSize; currentWindowSize++) {
                    if (currentTargetGene + currentWindowSize < secondPathway.size()) {
                        // construct list of target genes to compare, list size = currentWindowSize
                        List<Module> mergedGenes = new ArrayList<Module>();
                        List<Module> targetGenesList = secondPathway.getModules();
                        for (int i = currentTargetGene; i <= currentTargetGene + currentWindowSize; i++) {
                            mergedGenes.add(targetGenesList.get(i));
                        }
                        double score = geneSimilarity.levenshteinSimilarity(queryGene, mergedGenes,
                                functionWeight, statusWeight, substrateWeight);
                        if (score > 0) {
                            String combinedGenes = "";
                            for (int i = currentTargetGene; i <= currentTargetGene + currentWindowSize; i++) {
                                combinedGenes += i + "+";
                            }
                            combinedGenes = combinedGenes.substring(0, combinedGenes.length() - 1);
                            forwardScores.put(Math.abs(score), combinedGenes);
                        } else {
                            String combinedGenes = "";
                            for (int i = currentTargetGene + currentWindowSize; i >= currentTargetGene; i--) {
                                combinedGenes += i + "+";
                            }
                            combinedGenes = combinedGenes.substring(0, combinedGenes.length() - 1);
                            forwardScores.put(Math.abs(score), combinedGenes);
                        }
                    }
                }
            }
            currentTargetGene++;
        }
        Multimap<Double, String> forwardscore1 = ArrayListMultimap.create(forwardScores);
        TreeMultimap<Double, String> forwardscore = TreeMultimap.create(Ordering.natural().reverse(),
                Ordering.natural());
        forwardscore.putAll(forwardscore1);
        forwardBestScores.put(currentQueryGene, forwardscore);
        currentQueryGene++;
    }
    return forwardBestScores;
}

From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java

private void deleteIndexEntries(Connector connector, AccumuloTable table, long start) {
    LOG.info(format("Scanning index table %s to delete index entries", table.getIndexTableName()));
    BatchScanner scanner = null;
    BatchWriter indexWriter = null;
    try {
        // Create index writer and metrics writer, but we are never going to flush the metrics writer
        indexWriter = connector.createBatchWriter(table.getIndexTableName(), bwc);
        scanner = connector.createBatchScanner(table.getIndexTableName(), auths, 10);
        LOG.info(format("Created batch scanner against %s with auths %s", table.getIndexTableName(), auths));

        IteratorSetting timestampFilter = new IteratorSetting(21, "timestamp", TimestampFilter.class);
        TimestampFilter.setRange(timestampFilter, 0L, start);
        scanner.addScanIterator(timestampFilter);

        scanner.setRanges(connector.tableOperations().splitRangeByTablets(table.getIndexTableName(),
                new Range(), Integer.MAX_VALUE));

        // Scan the index table, gathering row IDs into batches
        long numTotalMutations = 0L;

        Map<ByteBuffer, RowStatus> rowIdStatuses = new HashMap<>();
        Multimap<ByteBuffer, Mutation> queryIndexEntries = MultimapBuilder.hashKeys().hashSetValues().build();
        Text text = new Text();
        for (Entry<Key, Value> entry : scanner) {
            ++numTotalMutations;

            ByteBuffer rowID = ByteBuffer.wrap(entry.getKey().getColumnQualifier(text).copyBytes());
            Mutation mutation = new Mutation(entry.getKey().getRow(text).copyBytes());
            mutation.putDelete(entry.getKey().getColumnFamily(text).copyBytes(),
                    entry.getKey().getColumnQualifier(text).copyBytes(),
                    entry.getKey().getColumnVisibilityParsed(), start);

            // Get status of this row ID
            switch (rowIdStatuses.getOrDefault(rowID, RowStatus.UNKNOWN)) {
            case ABSENT:
            case UNKNOWN:
                // Absent or unknown? Add it to the collection to check the status and/or delete
                queryIndexEntries.put(rowID, mutation);
                break;
            case PRESENT: // Present? No op
                break;
            }

            if (queryIndexEntries.size() == 100000) {
                flushDeleteEntries(connector, table, start, indexWriter,
                        ImmutableMultimap.copyOf(queryIndexEntries), rowIdStatuses);
                queryIndexEntries.clear();
            }
        }

        flushDeleteEntries(connector, table, start, indexWriter, ImmutableMultimap.copyOf(queryIndexEntries),
                rowIdStatuses);
        queryIndexEntries.clear();

        LOG.info(format(
                "Finished scanning index entries. There are %s distinct row IDs containing %s entries. %s rows present in the data table and %s absent",
                rowIdStatuses.size(), numTotalMutations,
                rowIdStatuses.entrySet().stream().filter(entry -> entry.getValue().equals(RowStatus.PRESENT))
                        .count(),
                rowIdStatuses.entrySet().stream().filter(entry -> entry.getValue().equals(RowStatus.ABSENT))
                        .count()));

        if (dryRun) {
            LOG.info(format("Would have deleted %s index entries", numDeletedIndexEntries));
        } else {
            LOG.info(format("Deleted %s index entries", numDeletedIndexEntries));
        }
    } catch (AccumuloException | AccumuloSecurityException e) {
        LOG.error("Accumulo exception", e);
    } catch (TableNotFoundException e) {
        LOG.error("Table not found, must have been deleted during process", e);
    } finally {
        if (indexWriter != null) {
            try {
                indexWriter.close();
            } catch (MutationsRejectedException e) {
                LOG.error("Server rejected mutations", e);
            }
        }

        if (scanner != null) {
            scanner.close();
        }
    }
}