Example usage for java.util.stream Collectors toMap

Introduction

On this page you can find usage examples for java.util.stream.Collectors.toMap, drawn from a range of open-source projects.

Prototype

public static <T, K, U> Collector<T, ?, Map<K, U>> toMap(Function<? super T, ? extends K> keyMapper,
        Function<? super T, ? extends U> valueMapper) 

Document

Returns a Collector that accumulates elements into a Map whose keys and values are the result of applying the provided mapping functions to the input elements.
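
A minimal, self-contained sketch of this overload (the Person record and the data are illustrative, not taken from the examples below):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ToMapDemo {
    record Person(String name, int age) {}

    public static void main(String[] args) {
        List<Person> people = List.of(new Person("Ada", 36), new Person("Alan", 41));
        // keyMapper extracts the name, valueMapper extracts the age.
        Map<String, Integer> ageByName = people.stream()
                .collect(Collectors.toMap(Person::name, Person::age));
        System.out.println(ageByName); // {Ada=36, Alan=41} (iteration order unspecified)
    }
}

Note that the returned Map type is deliberately unspecified: the reference implementation currently produces a HashMap, but code should not rely on that.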

Usage

From source file:io.fabric8.spring.cloud.kubernetes.archaius.ArchaiusConfigMapSourceConfiguration.java

private static Map<String, Object> asObjectMap(Map<String, String> source) {
    return source.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
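
This is a straightforward entry-by-entry copy, so key collisions cannot occur. In general, though, this two-argument overload throws IllegalStateException when two elements map to the same key; the three-argument overload takes a merge function for that case. A hedged sketch (the word list is illustrative):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public class ToMapMergeDemo {
    public static void main(String[] args) {
        List<String> words = List.of("apple", "avocado", "banana");
        // Key by first letter; on a collision, keep the first word seen.
        Map<Character, String> byInitial = words.stream()
                .collect(Collectors.toMap(w -> w.charAt(0), w -> w, (first, second) -> first));
        System.out.println(byInitial); // {a=apple, b=banana} (order unspecified)
    }
}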

From source file:com.dotcms.rest.api.v1.sites.ruleengine.rules.conditions.ConditionValueResource.java

@GET
@NoCache
@Path("/conditions/{conditionId}/conditionValues")
@Produces(MediaType.APPLICATION_JSON)
public Response list(@Context HttpServletRequest request, @PathParam("siteId") String siteId,
        @PathParam("conditionId") String conditionId) throws JSONException {

    siteId = checkNotEmpty(siteId, BadRequestException.class, "Site Id is required.");
    conditionId = checkNotEmpty(conditionId, BadRequestException.class, "Condition Id is required.");
    User user = getUser(request);
    getHost(siteId, user);
    Condition condition = getCondition(conditionId, user);
    List<RestConditionValue> restConditionValues = getValuesInternal(user, condition);
    java.util.Map<String, RestConditionValue> hash = restConditionValues.stream()
            .collect(Collectors.toMap(restConditionValue -> restConditionValue.id, Function.identity()));

    return Response.ok(hash).build();
}

From source file:com.thinkbiganalytics.nifi.feedmgr.TemplateInstanceCreator.java

public TemplateInstanceCreator(LegacyNifiRestClient restClient, String templateId,
        Map<String, Object> staticConfigPropertyMap, boolean createReusableFlow,
        ReusableTemplateCreationCallback creationCallback) {
    this.restClient = restClient;
    this.templateId = templateId;
    this.createReusableFlow = createReusableFlow;
    this.staticConfigPropertyMap = staticConfigPropertyMap;
    if (staticConfigPropertyMap != null) {
        //transform the object map to the String map
        staticConfigPropertyStringMap = staticConfigPropertyMap.entrySet().stream().collect(Collectors
                .toMap(Map.Entry::getKey, e -> e.getValue() != null ? e.getValue().toString() : null));
    }
    if (staticConfigPropertyStringMap == null) {
        staticConfigPropertyStringMap = new HashMap<>();
    }
    this.creationCallback = creationCallback;
}
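
One caveat with examples like this: Collectors.toMap cannot store null values. In the reference implementation its accumulator delegates to Map.merge, which throws NullPointerException for a null value, so the value mapper above returning null for a property would make this constructor fail at runtime. A null-tolerant sketch under the same requirements (the class and method names are placeholders):

import java.util.HashMap;
import java.util.Map;

public class NullTolerantCopy {
    // HashMap.put accepts null values, unlike the toMap accumulator.
    public static Map<String, String> toStringMap(Map<String, Object> source) {
        return source.entrySet().stream().collect(HashMap::new,
                (m, e) -> m.put(e.getKey(), e.getValue() != null ? e.getValue().toString() : null),
                HashMap::putAll);
    }
}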

From source file:org.hawkular.rest.json.RelationshipJacksonDeserializer.java

@Override
public Relationship deserialize(JsonParser jp, DeserializationContext deserializationContext)
        throws IOException {
    JsonNode node = jp.getCodec().readTree(jp);
    String id = node.get(FIELD_ID) != null ? node.get(FIELD_ID).asText() : null;

    // other fields are not compulsory, e.g. when deleting the relationship {id: foo} is just fine
    String name = "";
    if (node.get(FIELD_NAME) != null) {
        name = node.get(FIELD_NAME).asText();
    }
    CanonicalPath source = null, target = null;
    if (node.get(FIELD_SOURCE) != null && !node.get(FIELD_SOURCE).asText().isEmpty()) {
        String sourcePath = node.get(FIELD_SOURCE).asText();
        source = CanonicalPath.fromString(sourcePath);
    }
    if (node.get(FIELD_TARGET) != null && !node.get(FIELD_TARGET).asText().isEmpty()) {
        String targetPath = node.get(FIELD_TARGET).asText();
        target = CanonicalPath.fromString(targetPath);
    }

    JsonNode properties = node.get(FIELD_PROPERTIES);
    Map<String, Object> relProperties = null;
    if (properties != null) {
        try {
            Stream<Map.Entry<String, JsonNode>> stream = StreamSupport.stream(
                    Spliterators.spliteratorUnknownSize(properties.fields(), Spliterator.ORDERED), false);

            relProperties = stream.collect(Collectors.toMap(Map.Entry::getKey,
                    ((Function<Map.Entry<String, JsonNode>, JsonNode>) Map.Entry::getValue)
                            .andThen(x -> (Object) x.asText())));
        } catch (Exception e) {
            throw new IllegalArgumentException("Error during relationship deserialization,"
                    + " unable to recognize properties: " + properties);
        }
    }

    return new Relationship(id, name, source, target, relProperties);
}
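
The explicit Function cast in the value mapper above exists only so andThen can be called on the method reference. A single lambda expresses the same collector more directly (a sketch reusing the types from this method; the Object cast may even be unnecessary given the assignment target):

relProperties = stream.collect(Collectors.toMap(Map.Entry::getKey,
        e -> (Object) e.getValue().asText()));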

From source file:com.linecorp.armeria.server.docs.DocService.java

/**
 * Creates a new instance, prepopulating debug forms with the provided {@code sampleRequests}.
 * {@code sampleRequests} should be a list of Thrift argument objects for methods that should be
 * prepopulated (e.g., a populated hello_args object for the hello method on HelloService).
 */
public DocService(Iterable<? extends TBase<?, ?>> sampleRequests) {
    super(ofExact("/specification.json", HttpFileService.forVfs(new DocServiceVfs())),
            ofCatchAll(HttpFileService.forClassPath(DocService.class.getClassLoader(),
                    "com/linecorp/armeria/server/docs")));
    requireNonNull(sampleRequests, "sampleRequests");
    this.sampleRequests = StreamSupport.stream(sampleRequests.spliterator(), false)
            .collect(Collectors.toMap(Object::getClass, Function.identity()));
}
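
Here toMap builds a lookup table keyed by runtime class, presumably so each sample request can later be retrieved by its Thrift argument type; passing two samples of the same class would trip toMap's duplicate-key check.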

From source file:org.obiba.mica.study.service.StudyPackageImportServiceImpl.java

@Override
public void importZip(InputStream inputStream, boolean publish) throws IOException {
    final StudyPackage studyPackage = new StudyPackage(inputStream);
    if (studyPackage.study != null) {
        Map<String, ByteSource> dict = studyPackage.attachments.entrySet().stream()
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        Optional.ofNullable(studyPackage.study.getLogo()).ifPresent(a -> saveAttachmentTempFile(dict, a));
        Set<String> attachmentIds = Sets.newHashSet();

        studyPackage.studyAttachments.forEach(a -> {
            if (attachmentIds.contains(a.getId())) {
                String origId = a.getId();
                a.setId(new ObjectId().toString());
                saveAttachmentTempFile(dict, a, origId);
            } else {
                saveAttachmentTempFile(dict, a);
                attachmentIds.add(a.getId());
            }
        });

        importStudy(studyPackage.study, studyPackage.studyAttachments, publish);

        for (Network net : studyPackage.networks) {
            importNetwork(net, publish, studyPackage);
        }

        studyPackage.datasets.forEach(ds -> importDataset(ds, publish));
    }
}
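
When no per-entry transformation is involved, as with the attachments copy above, the entry-stream collect is equivalent to simply passing the source map to a constructor (for example, new HashMap<>(studyPackage.attachments)); the stream form pays off once mapping or filtering enters the pipeline, as in the CompilerServiceImpl example further down.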

From source file:com.uber.hoodie.hadoop.realtime.HoodieRealtimeInputFormat.java

@Override
public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {

    Stream<FileSplit> fileSplits = Arrays.stream(super.getSplits(job, numSplits)).map(is -> (FileSplit) is);

    // obtain all unique parent folders for splits
    Map<Path, List<FileSplit>> partitionsToParquetSplits = fileSplits
            .collect(Collectors.groupingBy(split -> split.getPath().getParent()));
    // TODO(vc): Should we handle also non-hoodie splits here?
    Map<String, HoodieTableMetaClient> metaClientMap = new HashMap<>();
    Map<Path, HoodieTableMetaClient> partitionsToMetaClient = partitionsToParquetSplits.keySet().stream()
            .collect(Collectors.toMap(Function.identity(), p -> {
                // find if we have a metaclient already for this partition.
                Optional<String> matchingBasePath = metaClientMap.keySet().stream()
                        .filter(basePath -> p.toString().startsWith(basePath)).findFirst();
                if (matchingBasePath.isPresent()) {
                    return metaClientMap.get(matchingBasePath.get());
                }

                try {
                    HoodieTableMetaClient metaClient = getTableMetaClient(p.getFileSystem(conf), p);
                    metaClientMap.put(metaClient.getBasePath(), metaClient);
                    return metaClient;
                } catch (IOException e) {
                    throw new HoodieIOException("Error creating hoodie meta client against : " + p, e);
                }
            }));

    // for all unique split parents, obtain all delta files based on delta commit timeline, grouped on file id
    List<HoodieRealtimeFileSplit> rtSplits = new ArrayList<>();
    partitionsToParquetSplits.keySet().stream().forEach(partitionPath -> {
        // for each partition path obtain the data & log file groupings, then map back to inputsplits
        HoodieTableMetaClient metaClient = partitionsToMetaClient.get(partitionPath);
        HoodieTableFileSystemView fsView = new HoodieTableFileSystemView(metaClient,
                metaClient.getActiveTimeline());
        String relPartitionPath = FSUtils.getRelativePartitionPath(new Path(metaClient.getBasePath()),
                partitionPath);

        try {
            Stream<FileSlice> latestFileSlices = fsView.getLatestFileSlices(relPartitionPath);

            // subgroup splits again by file id & match with log files.
            Map<String, List<FileSplit>> groupedInputSplits = partitionsToParquetSplits.get(partitionPath)
                    .stream()
                    .collect(Collectors.groupingBy(split -> FSUtils.getFileId(split.getPath().getName())));
            latestFileSlices.forEach(fileSlice -> {
                List<FileSplit> dataFileSplits = groupedInputSplits.get(fileSlice.getFileId());
                dataFileSplits.forEach(split -> {
                    try {
                        List<String> logFilePaths = fileSlice.getLogFiles()
                                .map(logFile -> logFile.getPath().toString()).collect(Collectors.toList());
                        // Get the maxCommit from the last delta or compaction or commit - when bootstrapped from COW table
                        String maxCommitTime = metaClient.getActiveTimeline()
                                .getTimelineOfActions(Sets.newHashSet(HoodieTimeline.COMMIT_ACTION,
                                        HoodieTimeline.COMPACTION_ACTION, HoodieTimeline.DELTA_COMMIT_ACTION))
                                .filterCompletedInstants().lastInstant().get().getTimestamp();
                        rtSplits.add(new HoodieRealtimeFileSplit(split, logFilePaths, maxCommitTime));
                    } catch (IOException e) {
                        throw new HoodieIOException("Error creating hoodie real time split ", e);
                    }
                });
            });
        } catch (Exception e) {
            throw new HoodieException("Error obtaining data file/log file grouping: " + partitionPath, e);
        }
    });
    LOG.info("Returning a total splits of " + rtSplits.size());
    return rtSplits.toArray(new InputSplit[rtSplits.size()]);
}
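
Note that Function.identity() serves as the key mapper here rather than the value mapper: each unique partition path becomes a key, mapped to a HoodieTableMetaClient that may be shared across partitions under the same base path.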

From source file:com.github.xdcrafts.flower.spring.impl.AbstractActionFactoryBean.java

/**
 * Resolves middleware assigned to an action by its name.
 */
protected List<Middleware> getMiddleware(String name) {
    final List<Middleware> middleware = this.applicationContext
            .getBeansOfType(MiddlewareDefinition.class, true, false).values().stream()
            .flatMap(d -> d.getDefinition().entrySet().stream())
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)).get(name);
    return middleware == null ? Collections.emptyList() : middleware;
}
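
Flattening several MiddlewareDefinition beans into one toMap call assumes middleware names are unique across definitions; a duplicate name would throw IllegalStateException. If duplicates should instead be combined, the three-argument overload can merge them. A hedged sketch (the merge-by-concatenation semantics are an assumption, and the class name is a placeholder):

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class MergeByKey {
    // Merge entries that share a key by concatenating their value lists.
    public static <K, V> Map<K, List<V>> mergeAll(Stream<Map.Entry<K, List<V>>> entries) {
        return entries.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
                (a, b) -> Stream.concat(a.stream(), b.stream()).collect(Collectors.toList())));
    }
}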

From source file:org.lecture.service.CompilerServiceImpl.java

private void cleanEmptySources(SourceContainer container) {
    container
            .setSources(container.getSources().entrySet().stream().filter(entry -> !entry.getValue().equals(""))
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
}

From source file:io.mapzone.arena.csw.catalog.CswMetadataDCMI.java

@Override
public Map<String, String> getConnectionParams() {
    return record().URI.stream()
            // io.mapzone.controller.um.repository.ProjectCatalogSynchronizer
            .filter(uri -> ArenaConfigMBean.CONNECTION_PARAM_NAME.equals(uri.description))
            .collect(Collectors.toMap(uri -> uri.name, uri -> uri.value));
}