Example usage for java.lang Iterable spliterator

Introduction

On this page you can find example usages of java.lang.Iterable.spliterator(), collected from open source projects.

Prototype

default Spliterator<T> spliterator() 

Document

Creates a Spliterator over the elements described by this Iterable.
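
The usual way to consume this method is to bridge the Iterable into the Stream API with StreamSupport.stream, which is the pattern in nearly every example below. A minimal, self-contained sketch (the values are illustrative):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

public class SpliteratorExample {
    public static void main(String[] args) {
        Iterable<String> words = Arrays.asList("alpha", "beta", "gamma");

        // The second argument selects a sequential (false) or parallel (true) stream.
        List<Integer> lengths = StreamSupport.stream(words.spliterator(), false)
                .map(String::length)
                .collect(Collectors.toList());

        System.out.println(lengths); // [5, 4, 5]
    }
}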

Usage

From source file: am.ik.categolj2.domain.service.entry.EntryServiceImpl.java

void applyRelations(Iterable<Entry> entries) {
    List<Integer> entryIds = StreamSupport.stream(entries.spliterator(), false).map(Entry::getEntryId)
            .collect(Collectors.toList());
    if (!entryIds.isEmpty()) {
        // apply categories
        List<Category> categories = categoryRepository.findByEntryIds(entryIds);

        Multimap<Integer, Category> categoryMultimap = TreeMultimap.create();
        for (Category c : categories) {
            categoryMultimap.put(c.getEntry().getId(), c);
        }
        for (Entry entry : entries) {
            entry.setCategory(new ArrayList<>(categoryMultimap.get(entry.getId())));
        }
        // apply tags
        List<TagAndEntryId> tags = tagRepository.findByEntryIds(entryIds);
        Multimap<Integer, Tag> tagMultimap = HashMultimap.create();
        for (TagAndEntryId tag : tags) {
            tagMultimap.put(tag.getEntryId(), tag.getTag());
        }
        for (Entry entry : entries) {
            entry.setTags(new LinkedHashSet<>(tagMultimap.get(entry.getEntryId())));
        }
    }
}
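
Note that applyRelations traverses entries three times: once through spliterator() and twice with for-each loops. That only works when the Iterable can be re-iterated; a one-shot wrapper around an Iterator would be exhausted after the first pass. One defensive option, sketched here with a hypothetical helper, is to materialize the elements once up front:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

final class IterableSupport {
    // Hypothetical helper: copy the elements into a List so callers can
    // traverse them any number of times, even from a single-pass source.
    static <T> List<T> materialize(Iterable<T> source) {
        return StreamSupport.stream(source.spliterator(), false)
                .collect(Collectors.toList());
    }
}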

From source file: nu.yona.server.goals.service.ActivityCategoryService.java

private void assertNoDuplicateNames(Set<UUID> idsToSkip, Map<Locale, String> localizableName) {
    Iterable<ActivityCategory> allCategories = repository.findAll();
    List<ActivityCategory> categoriesToConsider = StreamSupport.stream(allCategories.spliterator(), false)
            .filter(c -> !idsToSkip.contains(c.getId())).collect(Collectors.toList());
    for (Entry<Locale, String> localeAndName : localizableName.entrySet()) {
        assertNoDuplicateNames(categoriesToConsider, localeAndName);
    }
}

From source file: edu.pitt.dbmi.ccd.anno.annotation.data.AnnotationDataResourceAssembler.java

/**
 * Convert AnnotationDatas to AnnotationDataResources
 *
 * @param annotations entities
 * @return List of resources
 */
@Override
public List<AnnotationDataResource> toResources(Iterable<? extends AnnotationData> annotations)
        throws IllegalArgumentException {
    // Assert annotations is not empty
    Assert.isTrue(annotations.iterator().hasNext());
    return StreamSupport.stream(annotations.spliterator(), false).map(this::toResource)
            .collect(Collectors.toList());
}

From source file: edu.pitt.dbmi.ccd.anno.annotation.AnnotationResourceAssembler.java

/**
 * Convert Annotations to AnnotationResources
 *
 * @param annotations entities
 * @return list of resources
 */
@Override
public List<AnnotationResource> toResources(Iterable<? extends Annotation> annotations)
        throws IllegalArgumentException {
    // Assert annotations is not empty
    Assert.isTrue(annotations.iterator().hasNext());
    return StreamSupport.stream(annotations.spliterator(), false).map(this::toResource)
            .collect(Collectors.toList());
}

From source file: com.uber.hoodie.io.compact.HoodieRealtimeTableCompactor.java

private List<WriteStatus> compact(HoodieCopyOnWriteTable hoodieCopyOnWriteTable,
        HoodieTableMetaClient metaClient, HoodieWriteConfig config, CompactionOperation operation,
        String commitTime) throws IOException {
    FileSystem fs = metaClient.getFs();
    Schema readerSchema = HoodieAvroUtils.addMetadataFields(new Schema.Parser().parse(config.getSchema()));

    log.info("Compacting base " + operation.getDataFilePath() + " with delta files "
            + operation.getDeltaFilePaths() + " for commit " + commitTime);
    // TODO - FIX THIS
    // Reads the entire avro file. Always only specific blocks should be read from the avro file
    // (failure recovery).
    // Load all the delta commits since the last compaction commit and get all the blocks to be
    // loaded and load it using CompositeAvroLogReader
    // Since a DeltaCommit is not defined yet, reading all the records. revisit this soon.
    String maxInstantTime = metaClient.getActiveTimeline()
            .getTimelineOfActions(Sets.newHashSet(HoodieTimeline.COMMIT_ACTION, HoodieTimeline.ROLLBACK_ACTION,
                    HoodieTimeline.DELTA_COMMIT_ACTION))
            .filterCompletedInstants().lastInstant().get().getTimestamp();
    log.info("MaxMemoryPerCompaction => " + config.getMaxMemoryPerCompaction());
    HoodieMergedLogRecordScanner scanner = new HoodieMergedLogRecordScanner(fs, metaClient.getBasePath(),
            operation.getDeltaFilePaths(), readerSchema, maxInstantTime, config.getMaxMemoryPerCompaction(),
            config.getCompactionLazyBlockReadEnabled(), config.getCompactionReverseLogReadEnabled(),
            config.getMaxDFSStreamBufferSize(), config.getSpillableMapBasePath());
    if (!scanner.iterator().hasNext()) {
        return Lists.<WriteStatus>newArrayList();
    }

    Optional<HoodieDataFile> oldDataFileOpt = hoodieCopyOnWriteTable.getROFileSystemView()
            .getLatestDataFilesOn(operation.getPartitionPath(), operation.getBaseInstantTime())
            .filter(df -> df.getFileId().equals(operation.getFileId())).findFirst();

    // Compacting is very similar to applying updates to existing file
    Iterator<List<WriteStatus>> result;
    // If the data file is present, there is already a base parquet file, so perform updates;
    // otherwise perform inserts into a new base parquet file.
    if (operation.getDataFilePath().isPresent()) {
        result = hoodieCopyOnWriteTable.handleUpdate(commitTime, operation.getFileId(), scanner.getRecords(),
                oldDataFileOpt);
    } else {
        result = hoodieCopyOnWriteTable.handleInsert(commitTime, operation.getPartitionPath(),
                operation.getFileId(), scanner.iterator());
    }
    Iterable<List<WriteStatus>> resultIterable = () -> result;
    return StreamSupport.stream(resultIterable.spliterator(), false).flatMap(Collection::stream).map(s -> {
        s.getStat().setTotalUpdatedRecordsCompacted(scanner.getNumMergedRecordsInLog());
        s.getStat().setTotalLogFilesCompacted(scanner.getTotalLogFiles());
        s.getStat().setTotalLogRecords(scanner.getTotalLogRecords());
        s.getStat().setPartitionPath(operation.getPartitionPath());
        s.getStat().setTotalLogSizeCompacted(
                operation.getMetrics().get(CompactionStrategy.TOTAL_LOG_FILE_SIZE).longValue());
        s.getStat().setTotalLogBlocks(scanner.getTotalLogBlocks());
        s.getStat().setTotalCorruptLogBlock(scanner.getTotalCorruptBlocks());
        s.getStat().setTotalRollbackBlocks(scanner.getTotalRollbacks());
        RuntimeStats runtimeStats = new RuntimeStats();
        runtimeStats.setTotalScanTime(scanner.getTotalTimeTakenToReadAndMergeBlocks());
        s.getStat().setRuntimeStats(runtimeStats);
        return s;
    }).collect(toList());
}
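
The line Iterable<List<WriteStatus>> resultIterable = () -> result; deserves a note: Iterable has a single abstract method, iterator(), so a lambda that returns an existing Iterator satisfies it, and the default spliterator() then makes that Iterator streamable. The wrapper is single-use, because a second traversal would see an exhausted Iterator. A minimal sketch of the idiom (values are illustrative):

import java.util.Arrays;
import java.util.Iterator;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

public class IteratorToStream {
    public static void main(String[] args) {
        Iterator<String> it = Arrays.asList("a", "b", "c").iterator();

        // Adapt the Iterator to Iterable with a lambda; Iterable's only
        // abstract method is iterator(). The wrapper is single-use: a second
        // call to spliterator() would hand back the already-exhausted iterator.
        Iterable<String> once = () -> it;

        String joined = StreamSupport.stream(once.spliterator(), false)
                .collect(Collectors.joining("-"));
        System.out.println(joined); // a-b-c
    }
}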

From source file: com._4dconcept.springframework.data.marklogic.repository.support.SimpleMarklogicRepository.java

@Override
@Transactional
public <S extends T> List<S> saveAll(Iterable<S> entities) {
    Assert.notNull(entities, "entities must not be null");
    return StreamSupport.stream(entities.spliterator(), false).map(this::save).collect(Collectors.toList());
}

From source file: com.netflix.spinnaker.orca.clouddriver.tasks.manifest.DeployManifestTask.java

@Nonnull
@Override
public TaskResult execute(@Nonnull Stage stage) {
    String credentials = getCredentials(stage);
    String cloudProvider = getCloudProvider(stage);

    List<Artifact> artifacts = artifactResolver.getArtifacts(stage);
    Map task = new HashMap(stage.getContext());
    String artifactSource = (String) task.get("source");
    if (StringUtils.isNotEmpty(artifactSource) && artifactSource.equals("artifact")) {
        Artifact manifestArtifact = artifactResolver.getBoundArtifactForId(stage,
                task.get("manifestArtifactId").toString());

        if (manifestArtifact == null) {
            throw new IllegalArgumentException(
                    "No artifact could be bound to '" + task.get("manifestArtifactId") + "'");
        }

        log.info("Using {} as the manifest to be deployed", manifestArtifact);

        manifestArtifact.setArtifactAccount((String) task.get("manifestArtifactAccount"));
        Object parsedManifests = retrySupport.retry(() -> {
            try {
                Response manifestText = oort.fetchArtifact(manifestArtifact);

                Iterable<Object> rawManifests = yamlParser.get().loadAll(manifestText.getBody().in());
                List<Map> manifests = StreamSupport.stream(rawManifests.spliterator(), false).map(m -> {
                    try {
                        return Collections.singletonList(objectMapper.convertValue(m, Map.class));
                    } catch (Exception e) {
                        return (List<Map>) objectMapper.convertValue(m, List.class);
                    }
                }).flatMap(Collection::stream).collect(Collectors.toList());

                Map<String, Object> manifestWrapper = new HashMap<>();
                manifestWrapper.put("manifests", manifests);

                manifestWrapper = contextParameterProcessor.process(manifestWrapper,
                        contextParameterProcessor.buildExecutionContext(stage, true), true);

                if (manifestWrapper.containsKey("expressionEvaluationSummary")) {
                    throw new IllegalStateException("Failure evaluating manifest expressions: "
                            + manifestWrapper.get("expressionEvaluationSummary"));
                }

                return manifestWrapper.get("manifests");
            } catch (Exception e) {
                log.warn("Failure fetching/parsing manifests from {}", manifestArtifact, e);
                // forces a retry
                throw new IllegalStateException(e);
            }
        }, 10, 200, true); // retry 10x, starting at .2s intervals
        task.put("manifests", parsedManifests);
        task.put("source", "text");
    }

    List<String> requiredArtifactIds = (List<String>) task.get("requiredArtifactIds");
    requiredArtifactIds = requiredArtifactIds == null ? new ArrayList<>() : requiredArtifactIds;
    List<Artifact> requiredArtifacts = requiredArtifactIds.stream()
            .map(id -> artifactResolver.getBoundArtifactForId(stage, id)).collect(Collectors.toList());

    log.info("Deploying {} artifacts within the provided manifest", requiredArtifacts);

    task.put("requiredArtifacts", requiredArtifacts);
    task.put("optionalArtifacts", artifacts);
    Map<String, Map> operation = new ImmutableMap.Builder<String, Map>().put(TASK_NAME, task).build();

    TaskId taskId = kato.requestOperations(cloudProvider, Collections.singletonList(operation)).toBlocking()
            .first();

    Map<String, Object> outputs = new ImmutableMap.Builder<String, Object>().put("kato.result.expected", true)
            .put("kato.last.task.id", taskId).put("deploy.account.name", credentials).build();

    return new TaskResult(ExecutionStatus.SUCCEEDED, outputs);
}
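
The manifest-parsing step above works because the YAML parser, presumably SnakeYAML here, returns a lazy Iterable<Object> from loadAll over the documents in a multi-document YAML stream, and spliterator() is what turns that into a Stream. A minimal sketch of just that step, assuming SnakeYAML is on the classpath (the input is illustrative):

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.yaml.snakeyaml.Yaml;

public class YamlDocuments {
    public static void main(String[] args) {
        String multiDoc = "name: first\n---\nname: second\n";

        // loadAll parses each '---'-separated document lazily.
        Iterable<Object> documents = new Yaml().loadAll(multiDoc);

        List<Object> parsed = StreamSupport.stream(documents.spliterator(), false)
                .collect(Collectors.toList());
        System.out.println(parsed.size()); // 2
    }
}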

From source file: org.bozzo.ipplan.domain.model.ui.ZoneResource.java

/**
 * @param id
 * @param infraId
 * @param ip
 * @param description
 * @param ranges
 */
@JsonCreator
public ZoneResource(@JsonProperty("id") Long id, @JsonProperty Integer infraId, @JsonProperty Long ip,
        @JsonProperty String description, @JsonProperty Iterable<Range> ranges) {
    super();
    this.id = id;
    this.infraId = infraId;
    this.ip = ip;
    this.description = description;
    if (ranges != null) {
        this.setRanges(StreamSupport.stream(ranges.spliterator(), true).map(new ToRangeResourceFunction()));
    }
}

From source file: fr.landel.utils.assertor.utils.AssertorMap.java

private static <M extends Map<K, V>, K, V, T> boolean contains(final M map, final Iterable<T> objects,
        final Predicate<T> predicate, final boolean all, final boolean not,
        final EnumAnalysisMode analysisMode) {

    long found = 0;

    if (EnumAnalysisMode.STANDARD.equals(analysisMode)) {
        for (T object : objects) {
            if (predicate.test(object)) {
                ++found;
            }
        }
    } else {
        found = StreamSupport.stream(objects.spliterator(), EnumAnalysisMode.PARALLEL.equals(analysisMode))
                .filter(predicate).count();
    }

    return HelperAssertor.isValid(all, not, found, IterableUtils.size(objects));
}
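
This is one of the few examples on this page that can pass true as the second argument to StreamSupport.stream (the ZoneResource constructor above is another). The flag requests a parallel stream, but the benefit depends on how well the spliterator splits: the default Iterable.spliterator() reports an unknown size and tends to split poorly, so parallelism pays off mainly for sources like collections that override it. A minimal sketch of the flag (values are illustrative):

import java.util.Arrays;
import java.util.stream.StreamSupport;

public class ParallelFlag {
    public static void main(String[] args) {
        Iterable<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5);

        // Sequential: elements are processed on the calling thread, in order.
        long sequential = StreamSupport.stream(numbers.spliterator(), false).count();

        // Parallel: the runtime may split work across threads, but only as
        // well as the underlying spliterator can split itself.
        long parallel = StreamSupport.stream(numbers.spliterator(), true).count();

        System.out.println(sequential + " " + parallel); // 5 5
    }
}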

From source file: com.adobe.acs.commons.data.Spreadsheet.java

/**
 * Parse the input file synchronously for easier unit test validation.
 * Populates the header row, header types, and data rows as a side effect.
 *
 * @param file the spreadsheet to read
 * @throws IOException if the file couldn't be read
 */
private void parseInputFile(InputStream file) throws IOException {

    XSSFWorkbook workbook = new XSSFWorkbook(file);

    final XSSFSheet sheet = workbook.getSheetAt(0);
    rowCount = sheet.getLastRowNum();
    final Iterator<Row> rows = sheet.rowIterator();

    Row firstRow = rows.next();
    headerRow = readRow(firstRow).stream().map(Variant::toString).map(this::convertHeaderName)
            .collect(Collectors.toList());
    headerTypes = readRow(firstRow).stream().map(Variant::toString)
            .collect(Collectors.toMap(this::convertHeaderName, this::detectTypeFromName, this::upgradeToArray));

    Iterable<Row> remainingRows = () -> rows;
    dataRows = StreamSupport.stream(remainingRows.spliterator(), false).map(this::buildRow)
            .filter(Optional::isPresent).map(Optional::get).collect(Collectors.toList());
}