Example usage for com.google.common.collect Iterables partition

Introduction

On this page you can find usage examples for com.google.common.collect Iterables partition.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
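
For example, partitioning an iterable containing [a, b, c, d, e] with a partition size of 3 yields [[a, b, c], [d, e]] -- two inner lists of three and two elements, in the original order. A minimal, self-contained sketch of that behavior (the class name PartitionDemo and the sample data are illustrative, not taken from the projects below):

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<String> letters = Arrays.asList("a", "b", "c", "d", "e");

        // Split into sublists of at most 3 elements; only the final sublist may be smaller.
        Iterable<List<String>> partitions = Iterables.partition(letters, 3);

        for (List<String> partition : partitions) {
            System.out.println(partition); // prints [a, b, c] then [d, e]
        }
    }
}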

Usage

From source file:edu.harvard.med.screensaver.io.libraries.WellsSdfDataExporter.java

private void writeSDFileSearchResults(WellSdfWriter writer, Collection<String> keys,
        EntityDataFetcher<Well, String> dataFetcher) {
    Iterable<List<String>> partitions = Iterables.partition(keys, MAX_FETCH_SIZE);
    for (Iterable<String> partition : partitions) {
        Map<String, Well> entities = dataFetcher.fetchData(Sets.newHashSet(partition));
        for (Well well : entities.values()) {
            if (well.getLibrary().getReagentType().equals(SmallMoleculeReagent.class)) {
                writer.write(well, getLibraryContentsVersion());
            }
        }
    }
    // allow garbage collection
    _libraryContentsVersionRef.setValue(null);
}

From source file:com.sk89q.squirrelid.resolver.ParallelProfileService.java

@Override
public void findAllByName(Iterable<String> names, final Predicate<Profile> consumer)
        throws IOException, InterruptedException {
    CompletionService<Object> completion = new ExecutorCompletionService<Object>(executorService);
    int count = 0;
    for (final List<String> partition : Iterables.partition(names, getEffectiveProfilesPerJob())) {
        count++;
        completion.submit(new Callable<Object>() {
            @Override
            public Object call() throws Exception {
                resolver.findAllByName(partition, consumer);
                return null;
            }
        });
    }

    Throwable throwable = null;
    for (int i = 0; i < count; i++) {
        try {
            completion.take().get();
        } catch (ExecutionException e) {
            throwable = e.getCause();
        }
    }

    if (throwable != null) {
        if (throwable instanceof IOException) {
            throw (IOException) throwable;
        } else {
            throw new RuntimeException("Error occurred during the operation", throwable);
        }
    }
}

From source file:com.netflix.spinnaker.cats.redis.cache.AbstractRedisCache.java

@Override
public Collection<CacheData> getAll(String type, Collection<String> identifiers, CacheFilter cacheFilter) {
    if (identifiers.isEmpty()) {
        return new ArrayList<>();
    }
    Collection<String> ids = new LinkedHashSet<>(identifiers);
    final List<String> knownRels;
    Set<String> allRelationships = scanMembers(allRelationshipsId(type));
    if (cacheFilter == null) {
        knownRels = new ArrayList<>(allRelationships);
    } else {
        knownRels = new ArrayList<>(cacheFilter.filter(CacheFilter.Type.RELATIONSHIP, allRelationships));
    }

    Collection<CacheData> result = new ArrayList<>(ids.size());

    for (List<String> idPart : Iterables.partition(ids, options.getMaxGetBatchSize())) {
        result.addAll(getItems(type, idPart, knownRels));
    }

    return result;
}

From source file:org.calrissian.mango.collect.CloseableIterables.java

/**
 * Divides a closeable iterable into unmodifiable sublists of the given size (the final
 * iterable may be smaller). For example, partitioning a closeable iterable containing
 * {@code [a, b, c, d, e]} with a partition size of 3 yields {@code
 * [[a, b, c], [d, e]]} -- an outer iterable containing two inner lists of
 * three and two elements, all in the original order.
 *
 * <p>Iterators returned by the returned iterable do not support the {@link
 * Iterator#remove()} method.
 */
public static <T> CloseableIterable<List<T>> partition(final CloseableIterable<T> iterable, final int size) {
    return wrap(Iterables.partition(iterable, size), iterable);
}

From source file:com.google.gerrit.server.query.AndSource.java

private Iterable<T> buffer(ResultSet<T> scanner) {
    return FluentIterable.from(Iterables.partition(scanner, 50)).transformAndConcat(this::transformBuffer);
}

From source file:org.opennms.newts.gsod.MergeSort.java

public void execute(String... args) throws IOException {

    CmdLineParser parser = createCmdLineParser();
    try {
        parser.parseArgument(args);
    } catch (CmdLineException e) {
        // handling of wrong arguments
        System.err.println(e.getMessage());
        parser.printUsage(System.err);
        return;
    }

    final MetricRegistry metrics = new MetricRegistry();
    ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).outputTo(System.err).convertRatesTo(SECONDS)
            .convertDurationsTo(MILLISECONDS).build();

    reporter.start(10, SECONDS);

    Meter linesMeter = metrics.meter("lines");
    Meter filesMeter = metrics.meter("files");
    Meter dirsMeter = metrics.meter("dirs");
    Meter batchMeter = metrics.meter("batches");
    Path root = m_source.toPath();

    if (m_targetDir == null) {
        m_targetDir = Files.createTempDir();
        System.err.println("Working Directory: " + m_targetDir);
    }

    LOG.debug("Scanning {} for GSOD data files...", root);

    FluentIterable<KeyedIterable<Path, Path>> dirs = FileIterable.groupFilesByDir(root);

    for (KeyedIterable<Path, Path> filesInDir : dirs) {
        Path subdir = root.relativize(filesInDir.getKey());
        String dirName = subdir.getFileName().toString();

        System.err.println("Sorted dir: " + subdir);
        FluentIterable<Iterable<String>> contentIterables = filesInDir.transform(this.<Path>meter(filesMeter))
                .transform(lines("YEARMODA"));
        FluentIterable<List<Iterable<String>>> batches = FluentIterable
                .from(Iterables.partition(contentIterables, m_mergeCount));
        FluentIterable<Iterable<GSODLine>> sortedBatches = batches.transform(lift2GsodLines())
                .transform(mergeSorter());

        Path sortedDir = m_targetDir.toPath().resolve(subdir);
        sortedDir.toFile().mkdirs();

        int count = 1;
        for (Iterable<GSODLine> batch : sortedBatches) {
            Path sortedFile = sortedDir.resolve(dirName + "-batch-" + (count++) + ".gz");
            System.err.println("Creating " + sortedFile);
            try (PrintStream out = open(sortedFile)) {
                out.println(HDR);
                for (GSODLine line : batch) {
                    out.println(line);
                    linesMeter.mark();
                }
            }
            batchMeter.mark();
        }

        dirsMeter.mark();

    }

}

From source file:org.geogig.osm.internal.history.HistoryDownloader.java

/**
 * @return an iterator over the changesets in the configured id range
 * @throws IOException
 * @throws InterruptedException
 */
public Iterator<Changeset> fetchChangesets() {

    Range<Long> range = Range.closed(initialChangeset, finalChangeset);
    ContiguousSet<Long> changesetIds = ContiguousSet.create(range, DiscreteDomain.longs());
    final int fetchSize = 100;
    Iterable<List<Long>> partitions = Iterables.partition(changesetIds, fetchSize);

    final Function<List<Long>, Iterable<Changeset>> asChangesets = (batchIds) -> {
        Iterable<Changeset> changesets = downloader.fetchChangesets(batchIds);
        return changesets;
    };

    Iterable<Iterable<Changeset>> changesets = Iterables.transform(partitions, asChangesets);
    Iterable<Changeset> concat = Iterables.concat(changesets);
    return concat.iterator();
}

From source file:com.opengamma.bbg.loader.hts.BloombergHistoricalTimeSeriesLoader.java

@Override
protected HistoricalTimeSeriesLoaderResult doBulkLoad(HistoricalTimeSeriesLoaderRequest request) {
    ArgumentChecker.notNull(request, "request");
    ArgumentChecker.notNull(request.getDataField(), "dataField");

    Set<ExternalId> externalIds = request.getExternalIds();
    LocalDate startDate = request.getStartDate();
    LocalDate endDate = request.getEndDate();
    String dataProvider = request.getDataProvider();
    String dataField = request.getDataField();
    dataProvider = BloombergDataUtils.resolveDataProvider(dataProvider);
    if (startDate == null) {
        startDate = DEFAULT_START_DATE;
    }
    if (endDate == null) {
        endDate = LocalDate.MAX;
    }

    // finds the time-series that need loading
    Map<ExternalId, UniqueId> resultMap = new HashMap<ExternalId, UniqueId>();
    Set<ExternalId> missingTimeseries = findTimeSeries(externalIds, dataProvider, dataField, resultMap);

    // batch in groups of 100 to avoid out-of-memory issues
    for (List<ExternalId> partition : Iterables.partition(missingTimeseries, 100)) {
        Set<ExternalId> subSet = Sets.newHashSet(partition);
        fetchTimeSeries(subSet, dataField, dataProvider, startDate, endDate, resultMap);
    }
    return new HistoricalTimeSeriesLoaderResult(resultMap);
}

From source file:com.netflix.spinnaker.cats.redis.cache.RedisCache.java

private void mergeItems(String type, Collection<CacheData> items) {
    if (items.isEmpty()) {
        return;
    }
    final Set<String> relationshipNames = new HashSet<>();
    final List<String> keysToSet = new LinkedList<>();
    final Set<String> idSet = new HashSet<>();

    final Map<String, Integer> ttlSecondsByKey = new HashMap<>();
    int skippedWrites = 0;

    final Map<String, byte[]> hashes = getHashes(type, items);

    final NavigableMap<byte[], byte[]> updatedHashes = new TreeMap<>(new ByteArrayComparator());

    for (CacheData item : items) {
        MergeOp op = buildMergeOp(type, item, hashes);
        relationshipNames.addAll(op.relNames);
        keysToSet.addAll(op.keysToSet);
        idSet.add(item.getId());
        updatedHashes.putAll(op.hashesToSet);
        skippedWrites += op.skippedWrites;

        if (item.getTtlSeconds() > 0) {
            for (String key : op.keysToSet) {
                ttlSecondsByKey.put(key, item.getTtlSeconds());
            }
        }
    }

    int saddOperations = 0;
    int msetOperations = 0;
    int hmsetOperations = 0;
    int pipelineOperations = 0;
    int expireOperations = 0;
    if (keysToSet.size() > 0) {
        try (Jedis jedis = source.getJedis()) {
            Pipeline pipeline = jedis.pipelined();
            for (List<String> idPart : Iterables.partition(idSet, options.getMaxSaddSize())) {
                final String[] ids = idPart.toArray(new String[idPart.size()]);
                pipeline.sadd(allOfTypeReindex(type), ids);
                saddOperations++;
                pipeline.sadd(allOfTypeId(type), ids);
                saddOperations++;
            }

            for (List<String> keys : Lists.partition(keysToSet, options.getMaxMsetSize())) {
                pipeline.mset(keys.toArray(new String[keys.size()]));
                msetOperations++;
            }

            if (!relationshipNames.isEmpty()) {
                for (List<String> relNamesPart : Iterables.partition(relationshipNames,
                        options.getMaxSaddSize())) {
                    pipeline.sadd(allRelationshipsId(type),
                            relNamesPart.toArray(new String[relNamesPart.size()]));
                    saddOperations++;
                }
            }

            if (!updatedHashes.isEmpty()) {
                for (List<byte[]> hashPart : Iterables.partition(updatedHashes.keySet(),
                        options.getMaxHmsetSize())) {
                    pipeline.hmset(hashesId(type), updatedHashes.subMap(hashPart.get(0), true,
                            hashPart.get(hashPart.size() - 1), true));
                    hmsetOperations++;
                }
            }
            pipeline.sync();
            pipelineOperations++;
        }
        try (Jedis jedis = source.getJedis()) {
            for (List<Map.Entry<String, Integer>> ttlPart : Iterables.partition(ttlSecondsByKey.entrySet(),
                    options.getMaxPipelineSize())) {
                Pipeline pipeline = jedis.pipelined();
                for (Map.Entry<String, Integer> ttlEntry : ttlPart) {
                    pipeline.expire(ttlEntry.getKey(), ttlEntry.getValue());
                }
                expireOperations += ttlPart.size();
                pipeline.sync();
                pipelineOperations++;
            }
        }
    }
    cacheMetrics.merge(prefix, type, items.size(), keysToSet.size() / 2, relationshipNames.size(),
            skippedWrites, updatedHashes.size(), saddOperations, msetOperations, hmsetOperations,
            pipelineOperations, expireOperations);
}

From source file:com.eucalyptus.cloudwatch.common.internal.domain.metricdata.MetricManager.java

private static void addManyMetrics(Multimap<Class, MetricEntity> metricMap) {
    for (Class c : metricMap.keySet()) {
        for (List<MetricEntity> dataBatchPartial : Iterables.partition(metricMap.get(c),
                METRIC_DATA_NUM_DB_OPERATIONS_PER_TRANSACTION)) {
            try (final TransactionResource db = Entities.transactionFor(c)) {
                int numOperations = 0;
                for (MetricEntity me : dataBatchPartial) {
                    numOperations++;
                    if (numOperations % METRIC_DATA_NUM_DB_OPERATIONS_UNTIL_SESSION_FLUSH == 0) {
                        Entities.flushSession(c);
                        Entities.clearSession(c);
                    }
                    Entities.persist(me);
                }
                db.commit();
            }
        }
    }
}