Example usage for com.google.common.collect Multimap size

List of usage examples for com.google.common.collect Multimap size

Introduction

On this page you can find example usages of com.google.common.collect Multimap.size().

Prototype

int size();

Document

Returns the number of key-value pairs in this multimap.
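
Note that the count includes every key-value pair, including multiple values mapped to the same key, so it generally differs from keySet().size(). A minimal sketch of the distinction (the keys and values below are arbitrary placeholders):

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class MultimapSizeDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> multimap = ArrayListMultimap.create();
        multimap.put("a", 1);
        multimap.put("a", 2); // a second value under the same key still counts
        multimap.put("b", 3);

        // size() counts key-value pairs, not distinct keys
        System.out.println(multimap.size());          // 3
        System.out.println(multimap.keySet().size()); // 2
    }
}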

Usage

From source file:co.cask.cdap.data.tools.HBaseQueueDebugger.java

/**
 * Only works for {@link co.cask.cdap.data2.transaction.queue.hbase.ShardedHBaseQueueStrategy}.
 */
public QueueStatistics scanQueue(final QueueName queueName, @Nullable Long consumerGroupId) throws Exception {
    HBaseConsumerStateStore stateStore;
    try {
        stateStore = queueAdmin.getConsumerStateStore(queueName);
    } catch (IllegalStateException e) {
        throw new NotFoundException(queueName);
    }

    TransactionExecutor txExecutor = Transactions.createTransactionExecutor(txExecutorFactory, stateStore);
    Multimap<Long, QueueBarrier> barriers = txExecutor
            .execute(new TransactionExecutor.Function<HBaseConsumerStateStore, Multimap<Long, QueueBarrier>>() {
                @Override
                public Multimap<Long, QueueBarrier> apply(HBaseConsumerStateStore input) throws Exception {
                    return input.getAllBarriers();
                }
            }, stateStore);
    System.out.printf("Got %d barriers\n", barriers.size());

    QueueStatistics stats = new QueueStatistics();

    if (consumerGroupId != null) {
        barriers = Multimaps.filterKeys(barriers, Predicates.equalTo(consumerGroupId));
    }

    for (Map.Entry<Long, Collection<QueueBarrier>> entry : barriers.asMap().entrySet()) {
        long groupId = entry.getKey();
        Collection<QueueBarrier> groupBarriers = entry.getValue();

        System.out.printf("Scanning barriers for group %d\n", groupId);

        int currentSection = 1;
        PeekingIterator<QueueBarrier> barrierIterator = Iterators.peekingIterator(groupBarriers.iterator());
        while (barrierIterator.hasNext()) {
            QueueBarrier start = barrierIterator.next();
            QueueBarrier end = barrierIterator.hasNext() ? barrierIterator.peek() : null;

            System.out.printf("Scanning section %d/%d...\n", currentSection, groupBarriers.size());
            scanQueue(txExecutor, stateStore, queueName, start, end, stats);
            System.out.printf("Current results: %s\n", stats.getReport());
            currentSection++;
        }
        System.out.println("Scanning complete");
    }

    System.out.printf("Total results: %s\n", stats.getReport());
    return stats;
}
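
A minimal, self-contained sketch of the reporting pattern used above: size() gives the total number of key-value pairs, Multimaps.filterKeys narrows to one group, and asMap() exposes the per-key collections. The group ids and barrier names are placeholders, not taken from the CDAP code:

import com.google.common.base.Predicates;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;

import java.util.Collection;
import java.util.Map;

public class BarrierReportSketch {
    public static void main(String[] args) {
        // Placeholder data; QueueBarrier instances are replaced by plain strings here.
        Multimap<Long, String> barriers = ArrayListMultimap.create();
        barriers.put(1L, "barrier-a");
        barriers.put(1L, "barrier-b");
        barriers.put(2L, "barrier-c");

        // size() counts key-value pairs across all groups.
        System.out.printf("Got %d barriers%n", barriers.size()); // 3

        // Restrict to a single group, as the example above does with consumerGroupId.
        Multimap<Long, String> filtered = Multimaps.filterKeys(barriers, Predicates.equalTo(1L));

        for (Map.Entry<Long, Collection<String>> entry : filtered.asMap().entrySet()) {
            System.out.printf("group %d has %d sections%n", entry.getKey(), entry.getValue().size());
        }
    }
}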

From source file:com.torodb.backend.AbstractReadInterface.java

@Override
@SuppressFBWarnings(value = { "OBL_UNSATISFIED_OBLIGATION",
        "ODR_OPEN_DATABASE_RESOURCE" }, justification = "ResultSet is wrapped in a Cursor<Integer>. It's iterated and closed in caller code")
public Cursor<Integer> getCollectionDidsWithFieldsIn(DSLContext dsl, MetaDatabase metaDatabase,
        MetaCollection metaCol, MetaDocPart metaDocPart, Multimap<MetaField, KvValue<?>> valuesMultimap)
        throws SQLException {
    assert metaDatabase.getMetaCollectionByIdentifier(metaCol.getIdentifier()) != null;
    assert metaCol.getMetaDocPartByIdentifier(metaDocPart.getIdentifier()) != null;
    assert valuesMultimap.keySet().stream()
            .allMatch(metafield -> metaDocPart.getMetaFieldByIdentifier(metafield.getIdentifier()) != null);

    if (valuesMultimap.size() > 500) {
        @SuppressWarnings("checkstyle:LineLength")
        Stream<Entry<Long, List<Tuple2<Entry<MetaField, KvValue<?>>, Long>>>> valuesEntriesBatchStream = Seq
                .seq(valuesMultimap.entries().stream()).zipWithIndex().groupBy(t -> t.v2 / 500).entrySet()
                .stream();
        Stream<Stream<Entry<MetaField, KvValue<?>>>> valuesEntryBatchStreamOfStream = valuesEntriesBatchStream
                .map(e -> e.getValue().stream().map(se -> se.v1));
        Stream<Multimap<MetaField, KvValue<?>>> valuesMultimapBatchStream = valuesEntryBatchStreamOfStream
                .map(e -> toValuesMultimap(e));
        Stream<Cursor<Integer>> didCursorStream = valuesMultimapBatchStream
                .map(Unchecked.function(valuesMultimapBatch -> getCollectionDidsWithFieldsInBatch(dsl,
                        metaDatabase, metaCol, metaDocPart, valuesMultimapBatch)));
        Stream<Integer> didStream = didCursorStream.flatMap(cursor -> cursor.getRemaining().stream());

        return new IteratorCursor<>(didStream.iterator());
    }

    return getCollectionDidsWithFieldsInBatch(dsl, metaDatabase, metaCol, metaDocPart, valuesMultimap);
}
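
A simplified sketch of the same batching idea without the Seq-based stream machinery used above: when the multimap holds more than a threshold of key-value pairs, split entries() into fixed-size chunks with Iterables.partition and rebuild a small multimap per chunk. The threshold, types, and helper names below are illustrative only:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Multimap;

import java.util.List;
import java.util.Map.Entry;

public class MultimapBatchSketch {
    // Batch threshold chosen to mirror the 500 used above; purely illustrative.
    private static final int BATCH = 500;

    static <K, V> void processInBatches(Multimap<K, V> values) {
        if (values.size() <= BATCH) {
            processBatch(values);
            return;
        }
        // entries() is a view of all key-value pairs; partition it into chunks.
        for (List<Entry<K, V>> chunk : Iterables.partition(values.entries(), BATCH)) {
            Multimap<K, V> batch = HashMultimap.create();
            for (Entry<K, V> e : chunk) {
                batch.put(e.getKey(), e.getValue());
            }
            processBatch(batch);
        }
    }

    static <K, V> void processBatch(Multimap<K, V> batch) {
        // Stand-in for the per-batch query in the example above.
        System.out.println("processing " + batch.size() + " pairs");
    }

    public static void main(String[] args) {
        Multimap<String, Integer> values = HashMultimap.create();
        for (int i = 0; i < 1200; i++) {
            values.put("field-" + (i % 10), i);
        }
        processInBatches(values); // 1200 pairs -> batches of 500, 500 and 200
    }
}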

From source file:com.palantir.atlasdb.cleaner.Scrubber.java

void scrubImmediately(final TransactionManager txManager,
        final Multimap<String, Cell> tableNameToCell, final long scrubTimestamp, final long commitTimestamp) {
    if (log.isInfoEnabled()) {
        log.info("Scrubbing a total of " + tableNameToCell.size() + " cells immediately.");
    }

    // Note that if the background scrub thread is also running at the same time, it will try to scrub
    // the same cells as the current thread, since these cells were queued for scrubbing right before
    // the hard delete transaction committed. While this is unfortunate (because it means we will be
    // doing more work than necessary), the behavior is still correct.
    long nextImmutableTimestamp;
    while ((nextImmutableTimestamp = immutableTimestampSupplier.get()) < commitTimestamp) {
        try {
            if (log.isInfoEnabled()) {
                log.info(String.format(
                        "Sleeping because immutable timestamp %d has not advanced to at least commit timestamp %d",
                        nextImmutableTimestamp, commitTimestamp));
            }
            Thread.sleep(AtlasDbConstants.SCRUBBER_RETRY_DELAY_MILLIS);
        } catch (InterruptedException e) {
            log.error("Interrupted while waiting for immutableTimestamp to advance past commitTimestamp", e);
        }
    }

    List<Future<Void>> scrubFutures = Lists.newArrayList();
    for (List<Entry<String, Cell>> batch : Iterables.partition(tableNameToCell.entries(),
            batchSizeSupplier.get())) {
        final Multimap<String, Cell> batchMultimap = HashMultimap.create();
        for (Entry<String, Cell> e : batch) {
            batchMultimap.put(e.getKey(), e.getValue());
        }

        final Callable<Void> c = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                if (log.isInfoEnabled()) {
                    log.info("Scrubbing " + batchMultimap.size() + " cells immediately.");
                }

                // Here we don't need to check scrub timestamps because we guarantee that scrubImmediately is called
                // AFTER the transaction commits
                scrubCells(txManager, batchMultimap, scrubTimestamp, TransactionType.AGGRESSIVE_HARD_DELETE);

                Multimap<Cell, Long> cellToScrubTimestamp = HashMultimap.create();

                cellToScrubTimestamp = Multimaps.invertFrom(
                        Multimaps.index(batchMultimap.values(), Functions.constant(scrubTimestamp)),
                        cellToScrubTimestamp);

                scrubberStore.markCellsAsScrubbed(cellToScrubTimestamp, batchSizeSupplier.get());

                if (log.isInfoEnabled()) {
                    log.info("Completed scrub immediately.");
                }
                return null;
            }
        };
        if (!inScrubThread.get()) {
            scrubFutures.add(exec.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    inScrubThread.set(true);
                    c.call();
                    return null;
                }
            }));
        } else {
            try {
                c.call();
            } catch (Exception e) {
                throw Throwables.throwUncheckedException(e);
            }
        }
    }

    for (Future<Void> future : scrubFutures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw Throwables.throwUncheckedException(e);
        } catch (ExecutionException e) {
            throw Throwables.rewrapAndThrowUncheckedException(e);
        }
    }
}
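
The inversion step above is a common Guava idiom: Multimaps.index groups values under a computed key (here a constant scrub timestamp) and Multimaps.invertFrom copies the inverse mapping into a mutable multimap. A minimal sketch with made-up cell names standing in for the Atlas Cell objects:

import com.google.common.base.Functions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;

import java.util.List;

public class InvertFromSketch {
    public static void main(String[] args) {
        // Made-up cell names; the timestamp is arbitrary.
        List<String> cells = ImmutableList.of("cell-1", "cell-2");
        long scrubTimestamp = 42L;

        // index(): timestamp -> cells; invertFrom(): cell -> timestamps.
        Multimap<String, Long> cellToScrubTimestamp = Multimaps.invertFrom(
                Multimaps.index(cells, Functions.constant(scrubTimestamp)),
                HashMultimap.<String, Long>create());

        // One pair per cell, so size() equals the number of cells scrubbed.
        System.out.println(cellToScrubTimestamp.size()); // 2
    }
}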

From source file:com.palantir.atlasdb.sweep.SweepTaskRunner.java

public SweepResults run(String tableName, @Nullable byte[] startRow) {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkState(!tableName.startsWith(AtlasDbConstants.NAMESPACE_PREFIX),
            "The sweeper should not be run on tables passed through namespace mapping.");
    Preconditions.checkState(!AtlasDbConstants.hiddenTables.contains(tableName));

    // Earliest start timestamp of any currently open transaction, with two caveats:
    // (1) unreadableTimestamps are calculated via wall-clock time, and so may not be correct
    //     under pathological clock conditions
    // (2) immutableTimestamps do not account for transactions whose locks have timed out after
    //     checking their locks; such a transaction may have a start timestamp less than the
    //     immutableTimestamp, and it could still get successfully committed (its commit timestamp
    //     may or may not be less than the immutableTimestamp)
    // Note that this is fine, because we'll either
    // (1) force old readers to abort (if they read a garbage collection sentinel), or
    // (2) force old writers to retry (note that we must roll back any uncommitted transactions that
    //     we encounter)
    SweepStrategy sweepStrategy = sweepStrategyManager.get().get(tableName);
    if (sweepStrategy == null) {
        sweepStrategy = SweepStrategy.CONSERVATIVE;
    } else if (sweepStrategy == SweepStrategy.NOTHING) {
        return new SweepResults(null, 0, 0);
    }
    if (startRow == null) {
        startRow = new byte[0];
    }
    int batchSize = batchSizeSupplier.get();
    RangeRequest rangeRequest = RangeRequest.builder().startRowInclusive(startRow).batchHint(batchSize).build();

    long sweepTimestamp = getSweepTimestamp(tableName);
    ClosableIterator<RowResult<Value>> valueResults;
    if (sweepStrategy == SweepStrategy.CONSERVATIVE) {
        valueResults = ClosableIterators.wrap(ImmutableList.<RowResult<Value>>of().iterator());
    } else {
        valueResults = keyValueService.getRange(tableName, rangeRequest, sweepTimestamp);
    }

    ClosableIterator<RowResult<Set<Long>>> rowResults = keyValueService.getRangeOfTimestamps(tableName,
            rangeRequest, sweepTimestamp);

    try {
        List<RowResult<Set<Long>>> rowResultTimestamps = ImmutableList
                .copyOf(Iterators.limit(rowResults, batchSize));
        PeekingIterator<RowResult<Value>> peekingValues = Iterators.peekingIterator(valueResults);
        Set<Cell> sentinelsToAdd = Sets.newHashSet();
        Multimap<Cell, Long> rowTimestamps = getTimestampsFromRowResults(rowResultTimestamps, sweepStrategy);
        Multimap<Cell, Long> cellTsPairsToSweep = getCellTsPairsToSweep(rowTimestamps, peekingValues,
                sweepTimestamp, sweepStrategy, sentinelsToAdd);
        sweepCells(tableName, cellTsPairsToSweep, sentinelsToAdd);
        byte[] nextRow = rowResultTimestamps.size() < batchSize ? null
                : RangeRequests.getNextStartRow(false, Iterables.getLast(rowResultTimestamps).getRowName());
        return new SweepResults(nextRow, rowResultTimestamps.size(), cellTsPairsToSweep.size());
    } finally {
        rowResults.close();
        valueResults.close();
    }
}

From source file:com.zimbra.cs.db.DbBlobConsistency.java

public static void delete(DbConnection conn, Mailbox mbox, Multimap<Integer, Integer> idRevs)
        throws ServiceException {
    Set<Integer> mail_itemIds = new HashSet<Integer>();
    Multimap<Integer, Integer> rev_itemIds = HashMultimap.create();
    for (Integer itemId : idRevs.keySet()) {
        Collection<Integer> revs = idRevs.get(itemId);
        for (int rev : revs) {
            if (rev == 0) {
                mail_itemIds.add(itemId);
            } else {
                rev_itemIds.put(itemId, rev);
            }
        }
    }

    if (mail_itemIds.size() > 0) {
        PreparedStatement miDumpstmt = null;
        try {
            StringBuffer sql = new StringBuffer();
            sql.append("DELETE FROM ").append(DbMailItem.getMailItemTableName(mbox, true)).append(" WHERE ")
                    .append(DbMailItem.IN_THIS_MAILBOX_AND).append(DbUtil.whereIn("id", mail_itemIds.size()));

            miDumpstmt = conn.prepareStatement(sql.toString());
            int pos = 1;
            pos = DbMailItem.setMailboxId(miDumpstmt, mbox, pos);
            for (int itemId : mail_itemIds) {
                miDumpstmt.setInt(pos++, itemId);
            }
            miDumpstmt.execute();
        } catch (SQLException e) {
            throw ServiceException.FAILURE(
                    "deleting " + idRevs.size() + " item(s): " + DbMailItem.getIdListForLogging(idRevs.keys())
                            + " from " + DbMailItem.TABLE_MAIL_ITEM_DUMPSTER + " table",
                    e);
        } finally {
            DbPool.quietCloseStatement(miDumpstmt);
        }
    }

    if (rev_itemIds.size() > 0) {
        PreparedStatement revDumpstmt = null;
        try {
            StringBuffer sql = new StringBuffer();
            sql.append("DELETE FROM ").append(DbMailItem.getRevisionTableName(mbox, true)).append(" WHERE ")
                    .append(DbMailItem.IN_THIS_MAILBOX_AND).append(DbUtil
                            .whereIn(Db.getInstance().concat("item_id", "'-'", "version"), rev_itemIds.size()));

            revDumpstmt = conn.prepareStatement(sql.toString());
            int pos = 1;
            pos = DbMailItem.setMailboxId(revDumpstmt, mbox, pos);
            for (Integer itemId : rev_itemIds.keySet()) {
                Collection<Integer> revs = rev_itemIds.get(itemId);
                for (int rev : revs) {
                    revDumpstmt.setString(pos++, itemId + "-" + rev);
                }
            }
            revDumpstmt.execute();
        } catch (SQLException e) {
            throw ServiceException.FAILURE(
                    "deleting " + idRevs.size() + " item(s): " + DbMailItem.getIdListForLogging(idRevs.keys())
                            + " from " + DbMailItem.TABLE_REVISION_DUMPSTER + " table",
                    e);
        } finally {
            DbPool.quietCloseStatement(revDumpstmt);
        }
    }
}
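
A condensed sketch of the splitting logic above: item ids with revision 0 go into a plain Set, the rest stay in a second multimap, and size() then drives both the emptiness guards and the number of SQL placeholders. The ids and revisions below are invented for illustration:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.HashSet;
import java.util.Set;

public class SplitIdRevsSketch {
    public static void main(String[] args) {
        // Invented item ids and revisions.
        Multimap<Integer, Integer> idRevs = HashMultimap.create();
        idRevs.put(101, 0);
        idRevs.put(102, 3);
        idRevs.put(102, 4);

        Set<Integer> mailItemIds = new HashSet<>();
        Multimap<Integer, Integer> revItemIds = HashMultimap.create();
        for (Integer itemId : idRevs.keySet()) {
            for (int rev : idRevs.get(itemId)) {
                if (rev == 0) {
                    mailItemIds.add(itemId);
                } else {
                    revItemIds.put(itemId, rev);
                }
            }
        }

        // size() counts (item, revision) pairs, which is the number of
        // placeholders a whereIn(...) clause would need.
        System.out.println(mailItemIds.size()); // 1
        System.out.println(revItemIds.size());  // 2
    }
}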

From source file:org.jclouds.aws.ec2.compute.extensions.AWSEC2SecurityGroupExtension.java

@Override
public SecurityGroup addIpPermission(IpProtocol protocol, int startPort, int endPort,
        Multimap<String, String> tenantIdGroupNamePairs, Iterable<String> ipRanges, Iterable<String> groupIds,
        SecurityGroup group) {
    String region = AWSUtils.getRegionFromLocationOrNull(group.getLocation());
    String id = group.getProviderId();

    IpPermission.Builder builder = IpPermission.builder();

    builder.ipProtocol(protocol);
    builder.fromPort(startPort);
    builder.toPort(endPort);

    if (Iterables.size(ipRanges) > 0) {
        for (String cidr : ipRanges) {
            builder.cidrBlock(cidr);
        }
    }

    if (tenantIdGroupNamePairs.size() > 0) {
        for (String userId : tenantIdGroupNamePairs.keySet()) {
            for (String groupString : tenantIdGroupNamePairs.get(userId)) {
                String[] parts = AWSUtils.parseHandle(groupString);
                String groupId = parts[1];
                builder.tenantIdGroupNamePair(userId, groupId);
            }
        }
    }

    client.getSecurityGroupApi().get().authorizeSecurityGroupIngressInRegion(region, id, builder.build());

    return getSecurityGroupById(group.getId());
}
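
The size() > 0 guard followed by a nested keySet()/get() walk can also be written as a single pass over entries(); isEmpty() is an equivalent emptiness check. A small sketch with placeholder user ids and group handles (not real AWS identifiers):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Map.Entry;

public class TenantGroupPairsSketch {
    public static void main(String[] args) {
        // Placeholder handles; real values would come from the provider-specific handle format.
        Multimap<String, String> tenantIdGroupNamePairs = HashMultimap.create();
        tenantIdGroupNamePairs.put("user-1", "us-east-1/sg-111");
        tenantIdGroupNamePairs.put("user-1", "us-east-1/sg-222");

        if (!tenantIdGroupNamePairs.isEmpty()) { // same check as size() > 0
            for (Entry<String, String> pair : tenantIdGroupNamePairs.entries()) {
                System.out.println(pair.getKey() + " -> " + pair.getValue());
            }
        }
    }
}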

From source file:org.jclouds.aws.ec2.compute.extensions.AWSEC2SecurityGroupExtension.java

@Override
public SecurityGroup removeIpPermission(IpProtocol protocol, int startPort, int endPort,
        Multimap<String, String> tenantIdGroupNamePairs, Iterable<String> ipRanges, Iterable<String> groupIds,
        SecurityGroup group) {
    String region = AWSUtils.getRegionFromLocationOrNull(group.getLocation());
    String id = group.getProviderId();

    IpPermission.Builder builder = IpPermission.builder();

    builder.ipProtocol(protocol);
    builder.fromPort(startPort);
    builder.toPort(endPort);

    if (Iterables.size(ipRanges) > 0) {
        for (String cidr : ipRanges) {
            builder.cidrBlock(cidr);
        }
    }

    if (tenantIdGroupNamePairs.size() > 0) {
        for (String userId : tenantIdGroupNamePairs.keySet()) {
            for (String groupString : tenantIdGroupNamePairs.get(userId)) {
                String[] parts = AWSUtils.parseHandle(groupString);
                String groupId = parts[1];
                builder.tenantIdGroupNamePair(userId, groupId);
            }
        }
    }

    client.getSecurityGroupApi().get().revokeSecurityGroupIngressInRegion(region, id, builder.build());

    return getSecurityGroupById(group.getId());
}

From source file:com.zimbra.cs.store.file.BlobConsistencyChecker.java

/**
 * Reconciles blobs against the files in the given directory and adds any inconsistencies
 * to the current result set.
 */
private void check(short volumeId, String blobDirPath, Multimap<Integer, BlobInfo> blobsById)
        throws IOException {
    Multimap<Integer, BlobInfo> revisions = HashMultimap.create();
    File blobDir = new File(blobDirPath);
    File[] files = blobDir.listFiles();
    if (files == null) {
        files = new File[0];
    }
    log.info("Comparing %d items to %d files in %s.", blobsById.size(), files.length, blobDirPath);
    for (File file : files) {
        // Parse id and mod_content value from filename.
        Matcher matcher = PAT_BLOB_FILENAME.matcher(file.getName());
        int itemId = 0;
        int modContent = 0;
        if (matcher.matches()) {
            itemId = Integer.parseInt(matcher.group(1));
            modContent = Integer.parseInt(matcher.group(2));
        }

        BlobInfo blob = null;
        if (blobsById.containsKey(itemId)) {
            Iterator<BlobInfo> iterator = blobsById.get(itemId).iterator();
            while (iterator.hasNext()) {
                BlobInfo tempBlob = iterator.next();
                if (tempBlob.modContent == modContent) {
                    blob = tempBlob;
                    revisions.put(itemId, tempBlob);
                    iterator.remove();
                }
            }
        }

        if (blob == null) {
            BlobInfo unexpected = new BlobInfo();
            unexpected.volumeId = volumeId;
            unexpected.path = file.getAbsolutePath();
            unexpected.fileSize = file.length();
            results.unexpectedBlobs.put(itemId, unexpected);
        } else {
            blob.fileSize = file.length();
            blob.fileModContent = modContent;
            if (reportUsedBlobs) {
                results.usedBlobs.put(blob.itemId, blob);
            }
            if (checkSize) {
                blob.fileDataSize = getDataSize(file, blob.dbSize);
                if (blob.dbSize != blob.fileDataSize) {
                    results.incorrectSize.put(blob.itemId, blob);
                }
            }
        }
    }

    // Any remaining items have missing blobs.
    for (BlobInfo blob : blobsById.values()) {
        results.missingBlobs.put(blob.itemId, blob);
    }

    // Reclassify incorrect revisions for all items that support a single revision:
    // if there is exactly one entry with the same itemId in both missingBlobs and unexpectedBlobs,
    // and there are no entries with that itemId in revisions, it is categorised as an incorrect revision.
    Iterator<Integer> keyIterator = results.missingBlobs.keySet().iterator();
    while (keyIterator.hasNext()) {
        int itemId = keyIterator.next();
        List<BlobInfo> missingBlobs = new ArrayList<BlobInfo>(results.missingBlobs.get(itemId));
        List<BlobInfo> unexpectedBlobs = new ArrayList<BlobInfo>(results.unexpectedBlobs.get(itemId));
        if (missingBlobs.size() == 1 && unexpectedBlobs.size() == 1 && revisions.get(itemId).size() == 0) {
            BlobInfo incorrectRevision = new BlobInfo();
            BlobInfo missingBlob = missingBlobs.get(0);
            incorrectRevision.itemId = missingBlob.itemId;
            incorrectRevision.modContent = missingBlob.modContent;
            incorrectRevision.dbSize = missingBlob.dbSize;
            incorrectRevision.volumeId = missingBlob.volumeId;

            BlobInfo unexpectedBlob = unexpectedBlobs.get(0);
            incorrectRevision.path = unexpectedBlob.path;
            incorrectRevision.fileSize = unexpectedBlob.fileSize;
            incorrectRevision.fileModContent = unexpectedBlob.fileModContent;

            results.incorrectModContent.put(incorrectRevision.itemId, incorrectRevision);
            keyIterator.remove();
            results.unexpectedBlobs.removeAll(itemId);
        }
    }
}
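
One detail the incorrect-revision pass above relies on: Multimap.get() never returns null, so revisions.get(itemId).size() == 0 is a safe emptiness check even for ids that were never inserted, and removeAll(key) drops every value mapped to a key. A tiny sketch with made-up ids:

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

public class EmptyGetSketch {
    public static void main(String[] args) {
        // Made-up item ids and revision labels.
        Multimap<Integer, String> revisions = HashMultimap.create();
        revisions.put(7, "rev-1");

        System.out.println(revisions.get(7).size());  // 1
        System.out.println(revisions.get(99).size()); // 0, not a NullPointerException

        revisions.removeAll(7); // drops every value mapped to the key
        System.out.println(revisions.size()); // 0
    }
}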

From source file:com.palantir.atlasdb.sweep.SweepTaskRunnerImpl.java

@Override
public SweepResults run(String tableName, int batchSize, @Nullable byte[] startRow) {
    Preconditions.checkNotNull(tableName);
    Preconditions.checkState(!AtlasDbConstants.hiddenTables.contains(tableName));

    if (tableName.startsWith(AtlasDbConstants.NAMESPACE_PREFIX)) {
        // this happens sometimes; I think it's because some places in the code can
        // start this sweeper without doing the full normally ordered KVSModule startup.
        // I did check and sweep.stats did contain the FQ table name for all of the tables,
        // so it is at least broken in some way that still allows namespaced tables to eventually be swept.
        log.warn("The sweeper should not be run on tables passed through namespace mapping.");
        return SweepResults.EMPTY_SWEEP;
    }
    if (keyValueService.getMetadataForTable(tableName).length == 0) {
        log.warn("The sweeper tried to sweep table '{}', but the table does not exist. Skipping table.",
                tableName);
        return SweepResults.EMPTY_SWEEP;
    }

    // Earliest start timestamp of any currently open transaction, with two caveats:
    // (1) unreadableTimestamps are calculated via wall-clock time, and so may not be correct
    //     under pathological clock conditions
    // (2) immutableTimestamps do not account for transactions whose locks have timed out after
    //     checking their locks; such a transaction may have a start timestamp less than the
    //     immutableTimestamp, and it could still get successfully committed (its commit timestamp
    //     may or may not be less than the immutableTimestamp)
    // Note that this is fine, because we'll either
    // (1) force old readers to abort (if they read a garbage collection sentinel), or
    // (2) force old writers to retry (note that we must roll back any uncommitted transactions that
    //     we encounter)
    SweepStrategy sweepStrategy = sweepStrategyManager.get().get(tableName);
    if (sweepStrategy == null) {
        sweepStrategy = SweepStrategy.CONSERVATIVE;
    } else if (sweepStrategy == SweepStrategy.NOTHING) {
        return SweepResults.EMPTY_SWEEP;
    }
    if (startRow == null) {
        startRow = new byte[0];
    }
    RangeRequest rangeRequest = RangeRequest.builder().startRowInclusive(startRow).batchHint(batchSize).build();

    long sweepTimestamp = getSweepTimestamp(sweepStrategy);
    ClosableIterator<RowResult<Value>> valueResults;
    if (sweepStrategy == SweepStrategy.CONSERVATIVE) {
        valueResults = ClosableIterators.wrap(ImmutableList.<RowResult<Value>>of().iterator());
    } else {
        valueResults = keyValueService.getRange(tableName, rangeRequest, sweepTimestamp);
    }

    ClosableIterator<RowResult<Set<Long>>> rowResults = keyValueService.getRangeOfTimestamps(tableName,
            rangeRequest, sweepTimestamp);

    try {
        List<RowResult<Set<Long>>> rowResultTimestamps = ImmutableList
                .copyOf(Iterators.limit(rowResults, batchSize));
        PeekingIterator<RowResult<Value>> peekingValues = Iterators.peekingIterator(valueResults);
        Set<Cell> sentinelsToAdd = Sets.newHashSet();
        Multimap<Cell, Long> rowTimestamps = getTimestampsFromRowResults(rowResultTimestamps, sweepStrategy);
        Multimap<Cell, Long> cellTsPairsToSweep = getCellTsPairsToSweep(rowTimestamps, peekingValues,
                sweepTimestamp, sweepStrategy, sentinelsToAdd);
        sweepCells(tableName, cellTsPairsToSweep, sentinelsToAdd);
        byte[] nextRow = rowResultTimestamps.size() < batchSize ? null
                : RangeRequests.getNextStartRow(false, Iterables.getLast(rowResultTimestamps).getRowName());
        return new SweepResults(nextRow, rowResultTimestamps.size(), cellTsPairsToSweep.size());
    } finally {
        rowResults.close();
        valueResults.close();
    }
}

From source file:org.robotframework.ide.eclipse.main.plugin.project.build.libs.LibrariesBuilder.java

public void forceLibrariesRebuild(final Multimap<IProject, LibrarySpecification> groupedSpecifications,
        final SubMonitor monitor) {
    monitor.subTask("generating libdocs");
    final Multimap<IProject, GeneratorWithSource> groupedGenerators = LinkedHashMultimap.create();
    for (final IProject project : groupedSpecifications.keySet()) {
        for (final LibrarySpecification specification : groupedSpecifications.get(project)) {
            final GeneratorWithSource generatorWithSource = new GeneratorWithSource(
                    specification.getSourceFile(), provideGenerator(specification));
            groupedGenerators.put(project, generatorWithSource);
        }
    }

    monitor.setWorkRemaining(groupedGenerators.size());
    for (final IProject project : groupedGenerators.keySet()) {
        final RobotProject robotProject = RedPlugin.getModelManager().createProject(project);
        final RobotRuntimeEnvironment runtimeEnvironment = robotProject.getRuntimeEnvironment();

        for (final GeneratorWithSource generatorWithSource : groupedGenerators.get(project)) {
            monitor.subTask(generatorWithSource.generator.getMessage());
            try {
                if (project.exists()) {
                    generatorWithSource.generator.generateLibdocForcibly(runtimeEnvironment,
                            new RedEclipseProjectConfig(robotProject.getRobotProjectConfig())
                                    .createEnvironmentSearchPaths(project));
                }
            } catch (final RobotEnvironmentException e) {
                final IPath libspecFileLocation = generatorWithSource.sourceLibdocFile.getLocation();
                if (libspecFileLocation != null) {
                    libspecFileLocation.toFile().delete();
                }
                throw e;
            }
            monitor.worked(1);
        }
    }
    monitor.done();
}
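
The progress accounting above works because size() counts every (project, generator) pair, i.e. the sum of the per-project collection sizes. A minimal sketch with placeholder project and generator names:

import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.Multimap;

public class WorkUnitsSketch {
    public static void main(String[] args) {
        // Placeholder project and generator names.
        Multimap<String, String> groupedGenerators = LinkedHashMultimap.create();
        groupedGenerators.put("projectA", "libdoc-gen-1");
        groupedGenerators.put("projectA", "libdoc-gen-2");
        groupedGenerators.put("projectB", "libdoc-gen-3");

        int totalWork = groupedGenerators.size(); // 3 work units across 2 projects
        int done = 0;
        for (String project : groupedGenerators.keySet()) {
            for (String generator : groupedGenerators.get(project)) {
                done++;
                System.out.printf("(%d/%d) %s: %s%n", done, totalWork, project, generator);
            }
        }
    }
}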