Example usage for com.google.common.collect Iterables partition

Introduction

On this page you can find usage examples for com.google.common.collect.Iterables.partition, collected from open-source projects.

Prototype

public static <T> Iterable<List<T>> partition(final Iterable<T> iterable, final int size) 

Document

Divides an iterable into unmodifiable sublists of the given size (the final iterable may be smaller).
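
For example, a minimal sketch of the behavior (the class name PartitionDemo is illustrative, not taken from any of the projects below):

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.List;

public class PartitionDemo {
    public static void main(String[] args) {
        List<String> letters = Arrays.asList("a", "b", "c", "d", "e");
        // Partitioning five elements by 3 yields [a, b, c] followed by [d, e]:
        // the final sublist carries the remainder, and each sublist is unmodifiable.
        for (List<String> chunk : Iterables.partition(letters, 3)) {
            System.out.println(chunk);
        }
    }
}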

Usage

From source file:org.apache.metron.pcap.finalizer.PcapFinalizer.java
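
This finalizer batches the interim PCAP results into groups of recPerFile records and writes each batch to its own output file in parallel.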

@Override
public Pageable<Path> finalizeJob(Map<String, Object> config) throws JobException {
    Configuration hadoopConfig = PcapOptions.HADOOP_CONF.get(config, Configuration.class);
    int recPerFile = PcapOptions.NUM_RECORDS_PER_FILE.getOrDefault(config, Integer.class,
            NUM_RECORDS_PER_FILE_DEFAULT);
    Path interimResultPath = PcapOptions.INTERIM_RESULT_PATH.get(config, PcapOptions.STRING_TO_PATH,
            Path.class);
    FileSystem fs = PcapOptions.FILESYSTEM.get(config, FileSystem.class);
    int parallelism = getNumThreads(PcapOptions.FINALIZER_THREADPOOL_SIZE.get(config, String.class));
    LOG.info("Finalizer running with parallelism set to " + parallelism);

    SequenceFileIterable interimResults = null;
    try {
        interimResults = readInterimResults(interimResultPath, hadoopConfig, fs);
    } catch (IOException e) {
        throw new JobException("Unable to read interim job results while finalizing", e);
    }
    List<Path> outFiles = new ArrayList<>();
    try {
        Iterable<List<byte[]>> partitions = Iterables.partition(interimResults, recPerFile);
        Map<Path, List<byte[]>> toWrite = new HashMap<>();
        int part = 1;
        if (partitions.iterator().hasNext()) {
            for (List<byte[]> data : partitions) {
                Path outputPath = getOutputPath(config, part++);
                toWrite.put(outputPath, data);
            }
            outFiles = writeParallel(hadoopConfig, toWrite, parallelism);
        } else {
            LOG.info("No results returned.");
        }
    } catch (IOException e) {
        throw new JobException("Failed to finalize results", e);
    } finally {
        try {
            interimResults.cleanup();
        } catch (IOException e) {
            LOG.warn("Unable to cleanup files in HDFS", e);
        }
    }
    LOG.info("Done finalizing results");
    return new PcapPages(outFiles);
}

From source file:com.sk89q.squirrelid.resolver.ParallelProfileService.java
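
Here the input names are split into fixed-size jobs, each submitted to a CompletionService for concurrent resolution, and the per-job results are merged into a single immutable list.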

@Override
public ImmutableList<Profile> findAllByName(Iterable<String> names) throws IOException, InterruptedException {
    CompletionService<List<Profile>> completion = new ExecutorCompletionService<List<Profile>>(executorService);
    int count = 0;
    for (final List<String> partition : Iterables.partition(names, getEffectiveProfilesPerJob())) {
        count++;
        completion.submit(new Callable<List<Profile>>() {
            @Override
            public List<Profile> call() throws Exception {
                return resolver.findAllByName(partition);
            }
        });
    }

    Builder<Profile> builder = ImmutableList.builder();
    for (int i = 0; i < count; i++) {
        try {
            builder.addAll(completion.take().get());
        } catch (ExecutionException e) {
            if (e.getCause() instanceof IOException) {
                throw (IOException) e.getCause();
            } else {
                throw new RuntimeException("Error occurred during the operation", e);
            }
        }
    }
    return builder.build();
}

From source file:com.stratio.decision.functions.SaveToSolrActionExecutionFunction.java
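
Incoming messages are batched by maxBatchSize (or taken all at once if no positive batch size is configured); each batch is converted into SolrInputDocuments grouped by stream name and indexed, retrying on SolrException.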

@Override
public void process(Iterable<StratioStreamingMessage> messages) throws Exception {

    Integer partitionSize = maxBatchSize;

    if (partitionSize <= 0) {
        partitionSize = Iterables.size(messages);
    }

    Iterable<List<StratioStreamingMessage>> partitionIterables = Iterables.partition(messages, partitionSize);

    try {

        for (List<StratioStreamingMessage> messageList : partitionIterables) {

            Map<String, Collection<SolrInputDocument>> elementsToInsert = new HashMap<>();
            int count = 0;
            for (StratioStreamingMessage stratioStreamingMessage : messageList) {
                count += 1;
                SolrInputDocument document = new SolrInputDocument();
                document.addField("stratio_decision_id", System.nanoTime() + "-" + count);
                for (ColumnNameTypeValue column : stratioStreamingMessage.getColumns()) {
                    document.addField(column.getColumn(), column.getValue());
                }
                checkCore(stratioStreamingMessage);
                Collection<SolrInputDocument> collection = elementsToInsert
                        .get(stratioStreamingMessage.getStreamName());
                if (collection == null) {
                    collection = new HashSet<>();
                }
                collection.add(document);
                elementsToInsert.put(stratioStreamingMessage.getStreamName(), collection);
            }
            while (retryStrategy.shouldRetry()) {
                try {
                    for (Map.Entry<String, Collection<SolrInputDocument>> elem : elementsToInsert.entrySet()) {
                        getSolrclient(elem.getKey()).add(elem.getValue());
                    }
                    break;
                } catch (SolrException e) {
                    try {
                        log.error("Solr cloud status not yet properly initialized, retrying");
                        retryStrategy.errorOccured();
                    } catch (RuntimeException ex) {
                        log.error("Error while initializing Solr Cloud core ", ex.getMessage());
                    }
                }
            }
            flushClients();
        }
    } catch (Exception ex) {
        log.error("Error in Solr: " + ex.getMessage());
    }

}

From source file:com.google.api.ads.adwords.awreporting.server.appengine.exporter.ReportExporterAppEngine.java
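
The account IDs are partitioned into groups of 200, and one queue task is created per group to fan out the export work into sub-tasks.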

/**
 * Creates the tasks that export the Drive doc reports. One task is queued per
 * batch of up to 200 accounts, and each task creates further sub-tasks.
 */
public void exportReports(final String mccAccountId, final String dateStart, final String dateEnd,
        final Set<Long> accountIds, final Properties properties, final Long htmlTemplateId,
        final File outputDirectory, final Boolean sumAdExtensions)
        throws IOException, OAuthException, DocumentException {

    // Create the folder before creating the threads if it does not exist.
    Credential credential = RestServer.getAuthenticator().getOAuth2Credential(mccAccountId, false);

    GoogleDriveService.getGoogleDriveService(credential).getReportsFolder(mccAccountId).getWebContentLink();

    MccTaskCounter.increasePendingExportTasks(Long.valueOf(mccAccountId), accountIds.size());

    LOGGER.info("Generating PDF exporting tasks for " + accountIds.size() + " accounts");
    // Create a task for each 200 accounts that will create sub-tasks
    for (List<Long> partition : Iterables.partition(accountIds, 200)) {
        // Queues will wait 10 seconds to ensure that all creation tasks get queued.
        // Partition needs to be serializable
        QueueFactory.getDefaultQueue().add(TaskOptions.Builder
                .withPayload(new ExportTaskCreator(mccAccountId, Lists.newArrayList(partition), dateStart,
                        dateEnd, properties, htmlTemplateId, outputDirectory, sumAdExtensions))
                .countdownMillis(10 * 1000L));
    }
}

From source file:com.netflix.spinnaker.clouddriver.core.agent.CleanupPendingOnDemandCachesAgent.java
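
On-demand cache keys are chunked by the configured maximum Redis batch size so that both the pipelined existence checks and the later srem deletions stay within that limit.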

void run(Collection<Provider> providers) {
    providers.forEach(provider -> {
        String onDemandSetName = provider.getProviderName() + ":onDemand:members";
        List<String> onDemandKeys = scanMembers(onDemandSetName).stream().filter(s -> !s.equals("_ALL_"))
                .collect(Collectors.toList());

        Map<String, Response<Boolean>> existingOnDemandKeys = new HashMap<>();
        if (redisClientDelegate.supportsMultiKeyPipelines()) {
            redisClientDelegate.withMultiKeyPipeline(pipeline -> {
                for (List<String> partition : Iterables.partition(onDemandKeys,
                        redisCacheOptions.getMaxDelSize())) {
                    for (String id : partition) {
                        existingOnDemandKeys.put(id,
                                pipeline.exists(provider.getProviderName() + ":onDemand:attributes:" + id));
                    }
                }
                pipeline.sync();
            });
        } else {
            redisClientDelegate.withCommandsClient(client -> {
                onDemandKeys.stream()
                        .filter(k -> client.exists(provider.getProviderName() + ":onDemand:attributes:" + k))
                        .forEach(k -> existingOnDemandKeys.put(k, new StaticResponse(Boolean.TRUE)));
            });
        }

        List<String> onDemandKeysToRemove = new ArrayList<>();
        for (String onDemandKey : onDemandKeys) {
            if (!existingOnDemandKeys.containsKey(onDemandKey)
                    || !existingOnDemandKeys.get(onDemandKey).get()) {
                onDemandKeysToRemove.add(onDemandKey);
            }
        }

        if (!onDemandKeysToRemove.isEmpty()) {
            log.info("Removing {} from {}", onDemandKeysToRemove.size(), onDemandSetName);
            log.debug("Removing {} from {}", onDemandKeysToRemove, onDemandSetName);

            redisClientDelegate.withMultiKeyPipeline(pipeline -> {
                for (List<String> idPartition : Lists.partition(onDemandKeysToRemove,
                        redisCacheOptions.getMaxDelSize())) {
                    String[] ids = idPartition.toArray(new String[idPartition.size()]);
                    pipeline.srem(onDemandSetName, ids);
                }

                pipeline.sync();
            });
        }
    });
}

From source file:org.eclipse.scada.ca.ui.importer.wizard.ImportWizard.java
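
The merged diff entries are split into chunks of a user-configurable size, and each chunk is applied as a separate remote operation while a SubMonitor reports progress.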

protected void applyDiff(final IProgressMonitor parentMonitor) throws InterruptedException, ExecutionException {
    final SubMonitor monitor = SubMonitor.convert(parentMonitor, 100);
    monitor.setTaskName(Messages.ImportWizard_TaskName);

    final Collection<DiffEntry> result = this.mergeController.merge(wrap(monitor.newChild(10)));
    if (result.isEmpty()) {
        monitor.done();
        return;
    }

    final Iterable<List<DiffEntry>> splitted = Iterables.partition(result,
            Activator.getDefault().getPreferenceStore().getInt(PreferenceConstants.P_DEFAULT_CHUNK_SIZE));

    final SubMonitor sub = monitor.newChild(90);

    try {
        final int size = Iterables.size(splitted);
        sub.beginTask(Messages.ImportWizard_TaskName, size);

        int pos = 0;
        for (final Iterable<DiffEntry> i : splitted) {
            sub.subTask(String.format(Messages.ImportWizard_SubTaskName, pos, size));
            final List<DiffEntry> entries = new LinkedList<DiffEntry>();
            Iterables.addAll(entries, i);
            final NotifyFuture<Void> future = this.connection.getConnection().applyDiff(entries, null,
                    new DisplayCallbackHandler(getShell(), "Apply diff",
                            "Confirmation for applying diff is required"));
            future.get();

            pos++;
            sub.worked(1);
        }
    } finally {
        sub.done();
    }

}

From source file:li.klass.fhem.service.device.GenericDeviceService.java
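
For FHT devices, the sub-states are grouped into batches of up to eight, each batch is concatenated into a single set command, and the device state is updated afterwards.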

public void setSubStates(FhemDevice device, List<StateToSet> statesToSet, Context context) {
    if (device.getXmlListDevice().getType().equalsIgnoreCase("FHT") && statesToSet.size() > 1) {
        Iterable<List<StateToSet>> partitions = Iterables.partition(statesToSet, 8);
        ImmutableList<String> parts = from(partitions).transform(FHT_CONCAT).toList();
        for (String toSet : parts) {
            setState(device, toSet, context, false);
        }
        for (StateToSet toSet : statesToSet) {
            invokeDeviceUpdateFor(device, toSet.getKey(), toSet.getValue());
        }
    } else {
        for (StateToSet toSet : statesToSet) {
            setSubState(device, toSet.getKey(), toSet.getValue(), context);
        }
    }
}

From source file:org.openscada.ca.ui.importer.wizard.ImportWizard.java
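
This org.openscada variant is nearly identical to the org.eclipse.scada wizard above: diff entries are again applied in configurable chunks with progress reporting.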

protected void applyDiff(final IProgressMonitor parentMonitor) throws InterruptedException, ExecutionException {
    final SubMonitor monitor = SubMonitor.convert(parentMonitor, 100);
    monitor.setTaskName(Messages.ImportWizard_TaskName);

    final Collection<DiffEntry> result = this.mergeController.merge(monitor.newChild(10));
    if (result.isEmpty()) {
        monitor.done();
        return;
    }

    final Iterable<List<DiffEntry>> splitted = Iterables.partition(result,
            Activator.getDefault().getPreferenceStore().getInt(PreferenceConstants.P_DEFAULT_CHUNK_SIZE));

    final SubMonitor sub = monitor.newChild(90);

    try {
        final int size = Iterables.size(splitted);
        sub.beginTask(Messages.ImportWizard_TaskName, size);

        int pos = 0;
        for (final Iterable<DiffEntry> i : splitted) {
            sub.subTask(String.format(Messages.ImportWizard_SubTaskName, pos, size));
            final List<DiffEntry> entries = new LinkedList<DiffEntry>();
            Iterables.addAll(entries, i);
            final NotifyFuture<Void> future = this.connection.getConnection().applyDiff(entries, null,
                    new DisplayCallbackHandler(getShell(), "Apply diff",
                            "Confirmation for applying diff is required"));
            future.get();

            pos++;
            sub.worked(1);
        }
    } finally {
        sub.done();
    }

}

From source file:com.google.devtools.build.lib.query2.AbstractEdgeVisitor.java
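
Pending visits are grouped by package, flattened into a single list, and partitioned into fixed-size batches, with one visitation task created per batch.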

@Override
protected Iterable<Task> getVisitTasks(Collection<T> pendingVisits) {
    // Group pending visitation by the package of the new node, since we'll be targetfying the
    // node during the visitation.
    ListMultimap<PackageIdentifier, T> visitsByPackage = ArrayListMultimap.create();
    for (T visit : pendingVisits) {
        Label label = SkyQueryEnvironment.SKYKEY_TO_LABEL.apply(getNewNodeFromEdge(visit));
        if (label != null) {
            visitsByPackage.put(label.getPackageIdentifier(), visit);
        }
    }

    ImmutableList.Builder<Task> builder = ImmutableList.builder();

    // A couple notes here:
    // (i)  ArrayListMultimap#values returns the values grouped by key, which is exactly what we
    //      want.
    // (ii) ArrayListMultimap#values returns a Collection view, so we make a copy to avoid
    //      accidentally retaining the entire ArrayListMultimap object.
    for (Iterable<T> visitBatch : Iterables.partition(ImmutableList.copyOf(visitsByPackage.values()),
            ParallelSkyQueryUtils.VISIT_BATCH_SIZE)) {
        builder.add(new VisitTask(visitBatch));
    }

    return builder.build();
}

From source file:com.sk89q.squirrelid.resolver.HttpRepositoryService.java
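
Name lookups are chunked by MAX_NAMES_PER_REQUEST so each HTTP query stays within the repository's per-request limit, and every resolved profile is passed to the consumer.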

@Override
public void findAllByName(Iterable<String> names, Predicate<Profile> consumer)
        throws IOException, InterruptedException {
    for (List<String> partition : Iterables.partition(names, MAX_NAMES_PER_REQUEST)) {
        for (Profile profile : query(partition)) {
            consumer.apply(profile);
        }
    }
}