Example usage for com.google.common.collect Iterables partition

List of usage examples for com.google.common.collect Iterables partition

Introduction

On this page you can find example usage for com.google.common.collect Iterables partition.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final sublist may be smaller).
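
For example, partitioning a list [1, 2, 3, 4, 5, 6] with a size of 4 yields [1, 2, 3, 4] and [5, 6]. A minimal sketch of this behavior (the class name and sample data are illustrative):

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6);
        // Split the input into chunks of at most 4 elements;
        // the last chunk holds whatever remains.
        Iterable<List<Integer>> chunks = Iterables.partition(numbers, 4);
        for (List<Integer> chunk : chunks) {
            System.out.println(chunk); // prints [1, 2, 3, 4] then [5, 6]
        }
    }
}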

Usage

From source file:edu.umn.msi.tropix.persistence.dao.hibernate.TropixPersistenceTemplate.java

protected Iterable<List<String>> partition(final Iterable<String> ids) {
    return Iterables.partition(ids, 200);
}
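
This helper chunks an arbitrary collection of ids into fixed batches of 200, presumably so that queries built from each batch stay within database limits on parameter-list size.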

From source file:com.eucalyptus.entities.EntityCache.java

@SuppressWarnings({ "unchecked", "ConstantConditions" })
private void refresh() {
    final List<Pair<String, Integer>> currentKeys = loadVersionMap();
    // Evict cached entries whose (id, version) key is no longer current,
    // then reduce currentKeys to just the keys that still need loading.
    cache.keySet().retainAll(currentKeys);
    currentKeys.removeAll(cache.keySet());
    for (List<Pair<String, Integer>> keyBatch : Iterables.partition(currentKeys, batchSize)) {
        try (final TransactionResource db = Entities.transactionFor(example)) {
            List<E> entities = (List<E>) Entities.createCriteria(example.getClass())
                    .add(Example.create(example)).setReadOnly(true).setCacheable(false).setFetchSize(batchSize)
                    .add(Restrictions.in("id",
                            Lists.newArrayList(Iterables.transform(keyBatch, Pair.<String, Integer>left()))))
                    .list();
            for (final E entity : entities) {
                cache.put(Pair.pair(getId(entity), entity.getVersion()), transformFunction.apply(entity));
            }
        }
    }
}

From source file:com.google.gerrit.server.git.ScanningChangeCacheImpl.java

public static List<Change> scan(Repository repo, ReviewDb db) throws OrmException, IOException {
    Map<String, Ref> refs = repo.getRefDatabase().getRefs(RefNames.REFS_CHANGES);
    Set<Change.Id> ids = new LinkedHashSet<>();
    for (Ref r : refs.values()) {
        Change.Id id = Change.Id.fromRef(r.getName());
        if (id != null) {
            ids.add(id);
        }
    }
    List<Change> changes = new ArrayList<>(ids.size());
    // A batch size of N may overload get(Iterable), so use something smaller,
    // but still >1.
    for (List<Change.Id> batch : Iterables.partition(ids, 30)) {
        Iterables.addAll(changes, db.changes().get(batch));
    }
    return changes;
}

From source file:org.candlepin.model.CertificateSerialCurator.java

/**
 * Delete expired serials.
 *
 * @return the number of rows deleted.
 */
public int deleteExpiredSerials() {
    // Some databases don't like updating based on a field that is being updated,
    // so we must first fetch the expired ids and then delete them.
    @SuppressWarnings("unchecked")
    List<String> ids = this.currentSession().createCriteria(CertificateSerial.class)
            .add(Restrictions.le("expiration", Util.yesterday())).add(getRevokedCriteria())
            .setProjection(Projections.id()).addOrder(Order.asc("id")).list();

    if (ids.isEmpty()) {
        return 0;
    }

    String hql = "DELETE from CertificateSerial WHERE id IN (:expiredIds)";
    Query query = this.currentSession().createQuery(hql);

    int removed = 0;

    for (List<String> block : Iterables.partition(ids, AbstractHibernateCurator.IN_OPERATOR_BLOCK_SIZE)) {
        removed += query.setParameterList("expiredIds", block).executeUpdate();
    }

    return removed;
}
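
Deleting in blocks of IN_OPERATOR_BLOCK_SIZE keeps each IN (...) list within the cap that many databases place on the number of expressions in an IN clause.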

From source file:com.netflix.spinnaker.cats.redis.cache.AbstractRedisCache.java

@Override
public void evictAll(String type, Collection<String> identifiers) {
    if (identifiers.isEmpty()) {
        return;
    }
    final Collection<String> allRelationships = scanMembers(allRelationshipsId(type));
    for (List<String> items : Iterables.partition(new HashSet<>(identifiers), options.getMaxEvictBatchSize())) {
        evictItems(type, items, allRelationships);
    }
}
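
Wrapping the identifiers in a HashSet removes duplicates before partitioning, and options.getMaxEvictBatchSize() caps how many keys each evictItems call has to handle.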

From source file:io.crate.operation.ThreadPools.java

/**
 * Similar to {@link #runWithAvailableThreads(ThreadPoolExecutor, Collection)},
 * but this method returns a future that wraps the futures of each callable.
 *
 * @param executor           executor that is used to execute the callableList
 * @param poolSize           the corePoolSize of the given executor
 * @param callableCollection a collection of callables that should be executed
 * @param mergeFunction      function applied to merge the results of multiple callables
 *                           when they are executed together because the thread pool is exhausted
 * @param <T>                type of the final result
 * @return a future that will return a list of the results of the callableList
 * @throws RejectedExecutionException if a task cannot be scheduled for execution
 */
public static <T> ListenableFuture<List<T>> runWithAvailableThreads(ThreadPoolExecutor executor, int poolSize,
        Collection<Callable<T>> callableCollection, final Function<List<T>, T> mergeFunction)
        throws RejectedExecutionException {

    ListeningExecutorService listeningExecutorService = MoreExecutors.listeningDecorator(executor);

    List<ListenableFuture<T>> futures;
    int availableThreads = Math.max(poolSize - executor.getActiveCount(), 1);
    if (availableThreads < callableCollection.size()) {
        Iterable<List<Callable<T>>> partition = Iterables.partition(callableCollection,
                callableCollection.size() / availableThreads);

        futures = new ArrayList<>(availableThreads + 1);
        for (final List<Callable<T>> callableList : partition) {
            futures.add(listeningExecutorService.submit(new Callable<T>() {
                @Override
                public T call() throws Exception {
                    List<T> results = new ArrayList<T>(callableList.size());
                    for (Callable<T> tCallable : callableList) {
                        results.add(tCallable.call());
                    }
                    return mergeFunction.apply(results);
                }
            }));
        }
    } else {
        futures = new ArrayList<>(callableCollection.size());
        for (Callable<T> callable : callableCollection) {
            futures.add(listeningExecutorService.submit(callable));
        }
    }
    return Futures.allAsList(futures);
}
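
When fewer threads are idle than there are callables, the callables are partitioned so that each submitted task runs a whole batch sequentially and merges its results with mergeFunction; otherwise every callable is submitted as its own future.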

From source file:com.linkedin.bowser.core.expn.Expressions.java

public static MapExpn map(CommonTree treeNode, List<Expn> expressions) {
    if (expressions == null)
        return new MapExpn(treeNode, Collections.<Expn, Expn>emptyMap());

    if (expressions.size() % 2 != 0)
        throw new IllegalArgumentException("expected an even number of expressions: " + expressions.size());

    Map<Expn, Expn> expnMap = Maps.newHashMap();
    for (List<Expn> part : Iterables.partition(expressions, 2)) {
        expnMap.put(part.get(0), part.get(1));
    }

    return new MapExpn(treeNode, expnMap);
}
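
A partition size of 2 pairs up the alternating key/value expressions, which is why the method insists on receiving an even number of them.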

From source file:org.apache.beam.runners.core.GroupAlsoByWindowsViaOutputBufferDoFn.java

@Override
public void processElement(ProcessContext c) throws Exception {
    K key = c.element().getKey();
    // Used with Batch, we know that all the data is available for this key. We can't use the
    // timer manager from the context because it doesn't exist. So we create one and emulate the
    // watermark, knowing that we have all data and it is in timestamp order.
    InMemoryTimerInternals timerInternals = new InMemoryTimerInternals();
    timerInternals.advanceProcessingTime(Instant.now());
    timerInternals.advanceSynchronizedProcessingTime(Instant.now());
    StateInternals<K> stateInternals = stateInternalsFactory.stateInternalsForKey(key);

    ReduceFnRunner<K, InputT, OutputT, W> reduceFnRunner = new ReduceFnRunner<>(key, strategy,
            ExecutableTriggerStateMachine
                    .create(TriggerStateMachines.stateMachineForTrigger(strategy.getTrigger())),
            stateInternals, timerInternals,
            WindowingInternalsAdapters.outputWindowedValue(c.windowingInternals()),
            WindowingInternalsAdapters.sideInputReader(c.windowingInternals()), droppedDueToClosedWindow,
            reduceFn, c.getPipelineOptions());

    Iterable<List<WindowedValue<InputT>>> chunks = Iterables.partition(c.element().getValue(), 1000);
    for (Iterable<WindowedValue<InputT>> chunk : chunks) {
        // Process the chunk of elements.
        reduceFnRunner.processElements(chunk);

        // Then, since elements are sorted by their timestamp, advance the input watermark
        // to the first element.
        timerInternals.advanceInputWatermark(chunk.iterator().next().getTimestamp());
        // Advance the processing times.
        timerInternals.advanceProcessingTime(Instant.now());
        timerInternals.advanceSynchronizedProcessingTime(Instant.now());

        // Fire all the eligible timers.
        fireEligibleTimers(timerInternals, reduceFnRunner);

        // Leave the output watermark undefined. Since there's no late data in batch mode
        // there's really no need to track it as we do for streaming.
    }

    // Finish any pending windows by advancing the input watermark to infinity.
    timerInternals.advanceInputWatermark(BoundedWindow.TIMESTAMP_MAX_VALUE);

    // Finally, advance the processing time to infinity to fire any timers.
    timerInternals.advanceProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);
    timerInternals.advanceSynchronizedProcessingTime(BoundedWindow.TIMESTAMP_MAX_VALUE);

    fireEligibleTimers(timerInternals, reduceFnRunner);

    reduceFnRunner.persist();
}

From source file:org.geowebcache.s3.TemporaryS3Folder.java

public void delete() {
    checkState(isConfigured(), "client not configured.");
    if (temporaryPrefix == null) {
        return;
    }

    Iterable<S3ObjectSummary> objects = S3Objects.withPrefix(s3, bucket, temporaryPrefix);
    Iterable<List<S3ObjectSummary>> partition = Iterables.partition(objects, 1000);
    for (List<S3ObjectSummary> os : partition) {
        List<KeyVersion> keys = Lists.transform(os, new Function<S3ObjectSummary, KeyVersion>() {
            @Override
            public KeyVersion apply(S3ObjectSummary input) {
                KeyVersion k = new KeyVersion(input.getKey());
                return k;
            }
        });
        DeleteObjectsRequest deleteRequest = new DeleteObjectsRequest(bucket);
        deleteRequest.setKeys(keys);
        s3.deleteObjects(deleteRequest);
    }
}
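
The batch size of 1000 matches the maximum number of keys that a single S3 DeleteObjects request will accept, so each partition maps to exactly one bulk-delete call.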

From source file:com.opengamma.web.analytics.ViewportResultsJsonCsvWriter.java

public String getCsv(ViewportResults viewportResults) {
    GridColumnGroups columnGroups = viewportResults.getColumns();
    String[] header1 = new String[columnGroups.getGroups().size()];
    String[] header2 = new String[columnGroups.getColumnCount()];

    int index = 0;
    for (GridColumnGroup gridColumnGroup : columnGroups.getGroups()) {
        header1[index++] = gridColumnGroup.getName();
    }

    List<GridColumn> columns = columnGroups.getColumns();
    index = 0;
    for (GridColumn gridColumn : columns) {
        header2[index++] = gridColumn.getHeader();
    }

    StringWriter stringWriter = new StringWriter();
    @SuppressWarnings("resource")
    CSVWriter csvWriter = new CSVWriter(stringWriter);

    csvWriter.writeNext(header1);
    csvWriter.writeNext(header2);

    List<ResultsCell> viewportCells = viewportResults.getResults();
    Iterable<List<ResultsCell>> results = Iterables.partition(viewportCells, columnGroups.getColumnCount());
    for (List<ResultsCell> row : results) {
        String[] rowArray = new String[row.size()];
        int col = 0;
        for (ResultsCell cell : row) {
            Object cellValue = cell.getValue();
            if (cellValue instanceof RowTarget) {
                rowArray[col++] = ((RowTarget) cellValue).getName();
                continue;
            }

            ValueSpecification cellValueSpec = cell.getValueSpecification();
            Object formattedValue = _formatter.format(cellValue, cellValueSpec, cell.getFormat(),
                    cell.getInlineKey());
            if (formattedValue instanceof String) {
                rowArray[col++] = (String) formattedValue;
            } else {
                rowArray[col++] = formattedValue.toString();
            }
        }
        csvWriter.writeNext(rowArray);
    }
    return stringWriter.toString();
}
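
Because the viewport results arrive as one flat list of cells, partitioning by the column count reassembles them into rows before each row is written to the CSV.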