Example usage for com.google.common.collect Iterables partition

Introduction

On this page you can find example usage for com.google.common.collect Iterables.partition.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
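
For orientation, here is a minimal, self-contained sketch (the class name PartitionSketch is hypothetical): splitting a seven-element list into chunks of three yields two full chunks and a final chunk holding the single leftover element.

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PartitionSketch {
    public static void main(String[] args) {
        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7);

        // Chunks of size 3; the final chunk holds the leftover element.
        for (List<Integer> chunk : Iterables.partition(numbers, 3)) {
            System.out.println(chunk);
        }
        // Output:
        // [1, 2, 3]
        // [4, 5, 6]
        // [7]
    }
}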

Usage

From source file:com.palantir.common.collect.IterableView.java
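
This wrapper picks the cheaper strategy per backing type: when the delegate is a List it uses Lists.partition, which returns sublist views without copying, and otherwise falls back to Iterables.partition.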

public IterableView<List<T>> partition(final int size) {
    if (delegate() instanceof List) {
        /*
         * Use the more efficient Lists.partition which utilizes sublists
         * without allocating new lists for the returned partitions.
         */
        return of(Lists.partition(castAsList(), size));
    }

    return of(Iterables.partition(castAsIterable(), size));
}

From source file:com.palantir.atlasdb.cleaner.KeyValueServiceScrubberStore.java
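
Here partition bounds the size of each write: the cell-to-value map is flattened and written to the scrub table in batches of at most batchSize entries.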

@Override
public void queueCellsForScrubbing(Multimap<Cell, String> cellToTableNames, long scrubTimestamp,
        int batchSize) {
    Map<Cell, byte[]> values = Maps.newHashMap();
    for (Map.Entry<Cell, Collection<String>> entry : cellToTableNames.asMap().entrySet()) {
        Cell cell = entry.getKey();
        Collection<String> tableNames = entry.getValue();
        // Doing the join here is safe--queueCellsForScrubbing is only called once per transaction
        // so we'll have all the table names for a given scrubTimestamp
        String joined = StringUtils.join(tableNames, AtlasDbConstants.SCRUB_TABLE_SEPARATOR_CHAR);
        values.put(cell, PtBytes.toBytes(joined));
    }
    for (List<Entry<Cell, byte[]>> batch : Iterables.partition(values.entrySet(), batchSize)) {
        Map<Cell, byte[]> batchMap = Maps.newHashMap();
        for (Entry<Cell, byte[]> e : batch) {
            batchMap.put(e.getKey(), e.getValue());
        }
        keyValueService.put(AtlasDbConstants.SCRUB_TABLE, batchMap, scrubTimestamp);
    }
}

From source file:utils.teamcity.wallt.view.wall.WallView.java
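
This JavaFX wall view concatenates build and project tiles, computes how many tiles fit on one screen, and uses partition to split the tile stream into one grid pane per screen.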

private void updateLayout() {
    getChildren().clear();

    final Collection<TileViewModel> builds = _model.getDisplayedBuilds();
    final Collection<ProjectTileViewModel> projects = _model.getDisplayedProjects();

    final int totalTilesCount = builds.size() + projects.size();

    final int maxTilesByColumn = _model.getMaxTilesByColumnProperty().get();
    final int maxTilesByRow = _model.getMaxTilesByRowProperty().get();

    final int maxByScreens = max(1, maxTilesByColumn * maxTilesByRow);

    final int nbScreen = max(1,
            totalTilesCount / maxByScreens + ((totalTilesCount % maxByScreens > 0 ? 1 : 0)));

    int byScreen = max(1, totalTilesCount / nbScreen + ((totalTilesCount % nbScreen > 0 ? 1 : 0)));
    // Aim to fill whole columns of tiles on each screen rather than leaving gaps (i.e. every column used is fully populated)
    while (byScreen % maxTilesByColumn != 0)
        byScreen++;

    final int nbColums = max(1, byScreen / maxTilesByColumn + ((byScreen % maxTilesByColumn > 0 ? 1 : 0)));
    final int byColums = max(1, byScreen / nbColums + ((byScreen % nbColums > 0 ? 1 : 0)));

    final Iterable<List<Object>> screenPartition = Iterables.partition(Iterables.concat(builds, projects),
            byScreen);
    for (final List<Object> buildsInScreen : screenPartition) {
        final GridPane screenPane = buildScreenPane(buildsInScreen, nbColums, byColums);
        screenPane.setVisible(false);
        getChildren().add(screenPane);
    }

    displayNextScreen();
}

From source file:com.eucalyptus.cluster.callback.reporting.DefaultAbsoluteMetricConverter.java
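
This converter partitions volume and instance keys so that each database transaction handles at most a fixed number of entities, flushing and clearing the session periodically within each batch.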

protected static List<AbsoluteMetricQueueItem> dealWithAbsoluteMetrics(
        Iterable<AbsoluteMetricQueueItem> dataBatch) {
    List<AbsoluteMetricQueueItem> regularMetrics = new ArrayList<AbsoluteMetricQueueItem>();
    // We need to do some sorting to allow fewer db lookups.  There is also logic for different metric types, so they will be sorted now.

    // Some points do not actually go in.  If a data point represents an absolute value, the first one does not go in.
    // Also, some data points are added while we go through the list (derived metrics)

    // Deal with the absolute metrics
    // CPUUtilization
    // VolumeReadOps
    // VolumeWriteOps
    // VolumeConsumedReadWriteOps
    // VolumeReadBytes
    // VolumeWriteBytes
    // VolumeTotalReadTime
    // VolumeTotalWriteTime
    // VolumeTotalReadWriteTime (used to calculate VolumeIdleTime)
    // DiskReadOps
    // DiskWriteOps
    // DiskReadBytes
    // DiskWriteBytes
    // NetworkIn
    // NetworkOut

    Multimap<String, AbsoluteMetricQueueItem> instanceMetricMap = LinkedListMultimap.create();
    Multimap<String, AbsoluteMetricQueueItem> volumeMetricMap = LinkedListMultimap.create();
    for (final AbsoluteMetricQueueItem item : dataBatch) {
        String nameSpace = item.getNamespace();
        MetricDatum datum = item.getMetricDatum();
        if (AbsoluteMetricHelper.AWS_EBS_NAMESPACE.equals(nameSpace)) {
            String volumeId = null;
            if ((datum.getDimensions() != null) && (datum.getDimensions().getMember() != null)) {
                for (Dimension dimension : datum.getDimensions().getMember()) {
                    if (AbsoluteMetricHelper.VOLUME_ID_DIM_NAME.equals(dimension.getName())) {
                        volumeId = dimension.getValue();
                    }
                }
            }
            if (volumeId == null) {
                continue; // this data point doesn't count.
            } else {
                volumeMetricMap.put(volumeId, item);
            }
        } else if (AbsoluteMetricHelper.AWS_EC2_NAMESPACE.equals(nameSpace)) {
            String instanceId = null;
            if ((datum.getDimensions() != null) && (datum.getDimensions().getMember() != null)) {
                for (Dimension dimension : datum.getDimensions().getMember()) {
                    if (AbsoluteMetricHelper.INSTANCE_ID_DIM_NAME.equals(dimension.getName())) {
                        instanceId = dimension.getValue();
                    }
                }
            }
            if (instanceId == null) {
                continue; // this data point doesn't count.
            } else {
                instanceMetricMap.put(instanceId, item);
            }
        } else {
            // not really an absolute metric, just leave it alone
            regularMetrics.add(item);
        }
    }
    for (List<String> partialVolumeKeySet : Iterables.partition(volumeMetricMap.keySet(),
            AbsoluteMetricQueue.ABSOLUTE_METRIC_NUM_DB_OPERATIONS_PER_TRANSACTION)) {
        try (final TransactionResource db = Entities.transactionFor(AbsoluteMetricHistory.class)) {
            int numVolumes = 0;
            for (String volumeId : partialVolumeKeySet) {
                AbsoluteMetricCache cache = new AbsoluteMetricCache(db);
                cache.load(AbsoluteMetricHelper.AWS_EBS_NAMESPACE, AbsoluteMetricHelper.VOLUME_ID_DIM_NAME,
                        volumeId);
                for (AbsoluteMetricQueueItem item : volumeMetricMap.get(volumeId)) {
                    String accountId = item.getAccountId();
                    String nameSpace = item.getNamespace();
                    MetricDatum datum = item.getMetricDatum();
                    if (AbsoluteMetricHelper.EBS_ABSOLUTE_METRICS.containsKey(datum.getMetricName())) {
                        // we check whether the point below is a 'first' point, or perhaps a point in the past; in either case, reject it.
                        if (!adjustAbsoluteVolumeStatisticSet(cache, datum, datum.getMetricName(),
                                AbsoluteMetricHelper.EBS_ABSOLUTE_METRICS.get(datum.getMetricName()), volumeId))
                            continue;
                    }
                    // special cases
                    // 1) VolumeThroughputPercentage -- this is 100% for provisioned volumes, and we need to insert a
                    //                                  data point for every timestamp that a volume event occurs.
                    //                                  To make sure we don't duplicate the effort, we choose one event at random, VolumeReadOps,
                    //                                  and create this new metric arbitrarily
                    if (AbsoluteMetricHelper.VOLUME_READ_OPS_METRIC_NAME.equals(datum.getMetricName())) { // special case
                        regularMetrics.add(
                                AbsoluteMetricHelper.createVolumeThroughputMetric(accountId, nameSpace, datum));
                    }
                    // 2) VolumeIdleTime -- we piggyback off a metric we don't otherwise need, VolumeTotalReadWriteTime, and convert it to VolumeIdleTime
                    if (AbsoluteMetricHelper.VOLUME_TOTAL_READ_WRITE_TIME_METRIC_NAME
                            .equals(datum.getMetricName())) {
                        AbsoluteMetricHelper.convertVolumeTotalReadWriteTimeToVolumeIdleTime(datum);
                    }
                    // 3) VolumeQueueLength -- this one comes in essentially correct, but we don't have a time duration for it, so we piggy back off
                    //                         the absolute metric framework
                    if (AbsoluteMetricHelper.VOLUME_QUEUE_LENGTH_METRIC_NAME.equals(datum.getMetricName())) {
                        if (!adjustAbsoluteVolumeQueueLengthStatisticSet(cache, datum, volumeId))
                            continue;
                    }
                    // Once here, our item has been appropriately adjusted.  Add it
                    regularMetrics.add(item);
                }
                numVolumes++;
                if (numVolumes
                        % AbsoluteMetricQueue.ABSOLUTE_METRIC_NUM_DB_OPERATIONS_UNTIL_SESSION_FLUSH == 0) {
                    Entities.flushSession(AbsoluteMetricHistory.class);
                    Entities.clearSession(AbsoluteMetricHistory.class);
                }
            }
            db.commit();
        }
    }
    for (List<String> partialInstanceKeySet : Iterables.partition(instanceMetricMap.keySet(),
            AbsoluteMetricQueue.ABSOLUTE_METRIC_NUM_DB_OPERATIONS_PER_TRANSACTION)) {
        try (final TransactionResource db = Entities.transactionFor(AbsoluteMetricHistory.class)) {
            int numInstances = 0;
            for (String instanceId : partialInstanceKeySet) {
                AbsoluteMetricCache cache = new AbsoluteMetricCache(db);
                cache.load(AbsoluteMetricHelper.AWS_EC2_NAMESPACE, AbsoluteMetricHelper.INSTANCE_ID_DIM_NAME,
                        instanceId);
                for (AbsoluteMetricQueueItem item : instanceMetricMap.get(instanceId)) {
                    String accountId = item.getAccountId();
                    String nameSpace = item.getNamespace();
                    MetricDatum datum = item.getMetricDatum();
                    if (AbsoluteMetricHelper.EC2_ABSOLUTE_METRICS.containsKey(datum.getMetricName())) {
                        if (!adjustAbsoluteInstanceStatisticSet(cache, datum, datum.getMetricName(),
                                AbsoluteMetricHelper.EC2_ABSOLUTE_METRICS.get(datum.getMetricName()),
                                instanceId))
                            continue;
                    } else if (AbsoluteMetricHelper.CPU_UTILIZATION_MS_ABSOLUTE_METRIC_NAME
                            .equals(datum.getMetricName())) { // special case
                        // we check whether the point below is a 'first' point, or perhaps a point in the past; in either case, reject it.
                        if (!adjustAbsoluteInstanceCPUStatisticSet(cache, datum,
                                AbsoluteMetricHelper.CPU_UTILIZATION_MS_ABSOLUTE_METRIC_NAME,
                                AbsoluteMetricHelper.CPU_UTILIZATION_METRIC_NAME, instanceId))
                            continue;
                    }
                    // Once here, our item has been appropriately adjusted.  Add it
                    regularMetrics.add(item);
                }
                numInstances++;
                if (numInstances
                        % AbsoluteMetricQueue.ABSOLUTE_METRIC_NUM_DB_OPERATIONS_UNTIL_SESSION_FLUSH == 0) {
                    Entities.flushSession(AbsoluteMetricHistory.class);
                    Entities.clearSession(AbsoluteMetricHistory.class);
                }
            }
            db.commit();
        }
    }
    return regularMetrics;
}

From source file:com.sk89q.squirrelid.resolver.HttpRepositoryService.java
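
Profile lookups are split into chunks of MAX_NAMES_PER_REQUEST so that each query to the repository stays within the per-request limit.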

@Override
public ImmutableList<Profile> findAllByName(Iterable<String> names) throws IOException, InterruptedException {
    Builder<Profile> builder = ImmutableList.builder();
    for (List<String> partition : Iterables.partition(names, MAX_NAMES_PER_REQUEST)) {
        builder.addAll(query(partition));
    }
    return builder.build();
}

From source file:com.accumulobook.designs.graph.Graph.java
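
Neighbor lookups are fed to the BatchScanner 100 vertices at a time; the per-batch results are collected, concatenated, and deduplicated into a single set.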

/**
 * Finds the neighbors of the given neighbor vertices by scanning edges in batches.
 *
 * @param neighbors the starting set of vertex identifiers
 * @param batchScanner the scanner used to look up edges in batches of 100
 * @param edgeType the edge type to follow, or "ALL" to follow every type
 * @return the distinct set of second-degree neighbor identifiers
 */
public static Iterable<String> neighborsOfNeighbors(final Iterable<String> neighbors,
        final BatchScanner batchScanner, final String edgeType) {

    List<Iterable<String>> nextNeighbors = new ArrayList<>();

    // process given neighbors in batches of 100
    for (List<String> batch : Iterables.partition(neighbors, 100)) {
        batchScanner.setRanges(Lists.transform(batch, new Function<String, Range>() {
            @Override
            public Range apply(String f) {
                return Range.exact(f);
            }
        }));

        if (!edgeType.equals("ALL"))
            batchScanner.fetchColumnFamily(new Text(edgeType));

        // Materialize each batch eagerly: Iterables.transform is lazy, and the
        // scanner's ranges are replaced on the next loop iteration, so a lazy
        // view would later read the wrong batch.
        nextNeighbors.add(Lists.newArrayList(Iterables.transform(batchScanner, new Function<Entry<Key, Value>, String>() {
            @Override
            public String apply(Entry<Key, Value> f) {
                return f.getKey().getColumnQualifier().toString();
            }
        })));
    }

    return Sets.newHashSet(Iterables.concat(nextNeighbors));
}

From source file:de.softwareforge.kafka.LoadCommand.java
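
Each TPC-H table generator is consumed in partitions of 100 rows; every partition is serialized to JSON and sent to Kafka as a single batch of keyed messages.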

@Override
public void execute() throws Exception {
    Logging logging = Logging.initialize();
    logging.configure(new LoggingConfiguration());
    new LoggingMBean().setLevel("kafka", "ERROR");

    String tableNames = loaderOptions.tables;
    final Map<String, TpchTable<?>> allTables = ImmutableMap
            .copyOf(Maps.uniqueIndex(TpchTable.getTables(), new Function<TpchTable<?>, String>() {
                @Override
                public String apply(@Nonnull TpchTable<?> input) {
                    return input.getTableName();
                }
            }));

    List<String> tables;
    if (tableNames == null) {
        tables = ImmutableList.copyOf(allTables.keySet());
    } else {
        ImmutableList.Builder<String> builder = ImmutableList.builder();
        for (String tableName : Splitter.on(",").omitEmptyStrings().trimResults().split(tableNames)) {
            checkState(allTables.keySet().contains(tableName), "Table %s is unknown", tableName);
            builder.add(tableName);
        }
        tables = builder.build();
    }

    LOG.info("Processing tables: %s", tables);

    Properties props = new Properties();
    props.put("metadata.broker.list", loaderOptions.brokers);
    props.put("serializer.class", StringEncoder.class.getName());
    props.put("key.serializer.class", LongEncoder.class.getName());
    props.put("partitioner.class", LongPartitioner.class.getName());
    props.put("serializer.encoding", "UTF8");
    props.put("request.required.acks", "1");
    ProducerConfig producerConfig = new ProducerConfig(props);

    final ObjectMapper mapper = objectMapperProvider.get();
    mapper.enable(MapperFeature.AUTO_DETECT_GETTERS);

    final Producer<Long, String> producer = new Producer<>(producerConfig);

    ListeningExecutorService executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());

    ImmutableList.Builder<ListenableFuture<Long>> futureBuilder = ImmutableList.builder();

    for (final String table : tables) {
        ListenableFuture<Long> future = executor.submit(new Callable<Long>() {
            @Override
            public Long call() throws Exception {
                TpchTable<?> tpchTable = allTables.get(table);
                LOG.info("Loading table '%s' into topic '%s%s'...", table, loaderOptions.prefix, table);
                long count = 0;

                for (List<? extends TpchEntity> partition : Iterables.partition(
                        tpchTable.createGenerator(loaderOptions.tpchType.getScaleFactor(), 1, 1), 100)) {
                    ImmutableList.Builder<KeyedMessage<Long, String>> builder = ImmutableList.builder();
                    for (TpchEntity o : partition) {
                        builder.add(new KeyedMessage<>(loaderOptions.prefix + table, count++,
                                mapper.writeValueAsString(o)));
                    }
                    producer.send(builder.build());
                }
                LOG.info("Generated %d rows for table '%s'.", count, table);
                return count;
            }
        });
        futureBuilder.add(future);
    }

    Futures.allAsList(futureBuilder.build()).get();
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.DAYS);
    producer.close();
}

From source file:com.opengamma.bbg.loader.BloombergHistoricalTimeSeriesLoader.java
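
Missing time-series identifiers are fetched in partitions of 100, which, as the in-code comment notes, avoids out-of-memory issues on large requests.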

@Override
public Map<ExternalId, UniqueId> addTimeSeries(Set<ExternalId> externalIds, String dataProvider,
        String dataField, LocalDate startDate, LocalDate endDate) {
    ArgumentChecker.notEmpty(externalIds, "externalIds");
    ArgumentChecker.notNull(dataField, "dataField");
    dataProvider = BloombergDataUtils.resolveDataProvider(dataProvider);
    if (startDate == null) {
        startDate = DEFAULT_START_DATE;
    }
    if (endDate == null) {
        endDate = DateUtils.previousWeekDay();
    }

    // finds the time-series that need loading
    Map<ExternalId, UniqueId> result = new HashMap<ExternalId, UniqueId>();
    Set<ExternalId> missingTimeseries = findTimeSeries(externalIds, dataProvider, dataField, result);

    // batch in groups of 100 to avoid out-of-memory issues
    for (List<ExternalId> partition : Iterables.partition(missingTimeseries, 100)) {
        Set<ExternalId> subSet = Sets.newHashSet(partition);
        fetchTimeSeries(subSet, dataField, dataProvider, startDate, endDate, result);
    }
    return result;
}

From source file:com.palantir.atlasdb.cleaner.KeyValueServiceScrubberStore.java
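
The mirror of the queueing example above: scrub-timestamp entries are partitioned into batches of batchSize and deleted from the scrub table one batch at a time.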

@Override
public void markCellsAsScrubbed(Multimap<Cell, Long> cellToScrubTimestamp, int batchSize) {
    for (List<Entry<Cell, Long>> batch : Iterables.partition(cellToScrubTimestamp.entries(), batchSize)) {
        Multimap<Cell, Long> batchMultimap = HashMultimap.create();
        for (Entry<Cell, Long> e : batch) {
            batchMultimap.put(e.getKey(), e.getValue());
        }
        keyValueService.delete(AtlasDbConstants.SCRUB_TABLE, batchMultimap);
    }
}

From source file:com.google.gerrit.server.query.change.AndSource.java
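
Here partition buffers a result set 50 changes at a time so that ChangeData.ensureChangeLoaded can batch-load change data for each chunk as it is consumed.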

private Iterable<ChangeData> buffer(ChangeDataSource source, ResultSet<ChangeData> scanner) {
    final boolean loadChange = !source.hasChange();
    return FluentIterable.from(Iterables.partition(scanner, 50))
            .transformAndConcat(new Function<List<ChangeData>, List<ChangeData>>() {
                @Override
                public List<ChangeData> apply(List<ChangeData> buffer) {
                    if (loadChange) {
                        try {
                            ChangeData.ensureChangeLoaded(db, buffer);
                        } catch (OrmException e) {
                            throw new OrmRuntimeException(e);
                        }
                    }
                    return buffer;
                }
            });
}