Example usage for java.util.stream Collectors.toMap

List of usage examples for java.util.stream Collectors.toMap

Introduction

On this page you can find example usages of java.util.stream Collectors.toMap, collected from real-world source files.

Prototype

public static <T, K, U> Collector<T, ?, Map<K, U>> toMap(Function<? super T, ? extends K> keyMapper,
        Function<? super T, ? extends U> valueMapper) 

Document

Returns a Collector that accumulates elements into a Map whose keys and values are the result of applying the provided mapping functions to the input elements.
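
Before the full examples, here is a minimal, self-contained sketch (the class name, word list, and variable names are illustrative, not taken from the examples below). Note that this two-argument overload throws an IllegalStateException if two elements map to the same key; the three-argument overload, shown second, accepts a merge function to resolve such collisions.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

public class ToMapExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("apple", "banana", "cherry");

        // Key: the word itself (Function.identity()); value: its length.
        Map<String, Integer> lengthByWord = words.stream()
                .collect(Collectors.toMap(Function.identity(), String::length));
        System.out.println(lengthByWord); // {banana=6, apple=5, cherry=6} (order not guaranteed)

        // "banana" and "cherry" share the key 6, which would make the two-argument
        // overload throw IllegalStateException; the three-argument overload
        // resolves the collision with a merge function.
        Map<Integer, String> wordByLength = words.stream()
                .collect(Collectors.toMap(String::length, Function.identity(),
                        (first, second) -> first)); // keep the first word per length
        System.out.println(wordByLength); // {5=apple, 6=banana} (order not guaranteed)
    }
}

The type of the returned Map is unspecified; if a particular implementation such as LinkedHashMap is required, use the four-argument overload, which additionally takes a map supplier.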

Usage

From source file:com.dotcms.rest.api.v1.sites.ruleengine.rules.conditions.ConditionGroupResource.java

@GET
@NoCache
@Path("/rules/{ruleId}/conditionGroups")
@Produces(MediaType.APPLICATION_JSON)
public Response list(@Context HttpServletRequest request, @PathParam("siteId") String siteId,
        @PathParam("ruleId") String ruleId) throws JSONException {

    siteId = checkNotEmpty(siteId, BadRequestException.class, "Site Id is required.");
    ruleId = checkNotEmpty(ruleId, BadRequestException.class, "Rule Id is required.");
    User user = getUser(request);
    getHost(siteId, user);
    Rule rule = getRule(ruleId, user);
    List<RestConditionGroup> restConditionGroups = getGroupsInternal(user, rule);
    java.util.Map<String, RestConditionGroup> hash = restConditionGroups.stream()
            .collect(Collectors.toMap(restGroup -> restGroup.id, Function.identity()));

    return Response.ok(hash).build();
}
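
Here toMap builds a lookup table of condition groups keyed by id; Function.identity() passes each RestConditionGroup through unchanged as the map value.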

From source file:com.sastix.cms.server.services.content.impl.ZipHandlerServiceImpl.java

@Override
public ResourceDTO handleZip(Resource zipResource) {

    final Path zipPath;
    try {
        zipPath = hashedDirectoryService.getDataByURI(zipResource.getUri(), zipResource.getResourceTenantId());
    } catch (IOException | URISyntaxException e) {
        throw new ResourceAccessError(e.getMessage());
    }

    FileSystem zipfs = null;
    try {
        // (ClassLoader) cast avoids overload ambiguity on newer JDKs
        zipfs = FileSystems.newFileSystem(zipPath, (ClassLoader) null);
        final Path root = zipfs.getPath("/");
        final int maxDepth = 1;

        // Search for specific files.
        final Map<String, Path> map = Files
                .find(root, maxDepth, (path_,
                        attr) -> path_.getFileName() != null && (path_.toString().endsWith(METADATA_JSON_FILE)
                                || path_.toString().endsWith(METADATA_XML_FILE)))
                .collect(Collectors.toMap(p -> p.toAbsolutePath().toString().substring(1), p -> p));

        final String zipType;
        final String startPage;

        // Check if it is a CMS file
        if (map.containsKey(METADATA_JSON_FILE)) {
            zipType = METADATA_JSON_FILE;
            LOG.info("Found CMS Metadata File " + map.get(zipType).toString());
            startPage = findStartPage(map.get(zipType));
            // Check if it is a SCORM file
        } else if (map.containsKey(METADATA_XML_FILE)) {
            zipType = METADATA_XML_FILE;
            LOG.info("Found SCORM Metadata File " + map.get(zipType).toString());
            startPage = findScormStartPage(map.get(zipType));

        } else {
            throw new ResourceAccessError("Zip " + zipResource.getName() + " is not supported. "
                    + METADATA_JSON_FILE + " and " + METADATA_XML_FILE + " are missing");
        }

        LOG.trace(startPage);

        final List<ResourceDTO> resourceDTOs = new LinkedList<>();

        /* Path inside ZIP File */
        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {

            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {

                final String parentContext = zipResource.getUri().split("-")[0] + "-"
                        + zipResource.getResourceTenantId();
                final CreateResourceDTO createResourceDTO = new CreateResourceDTO();
                createResourceDTO.setResourceMediaType(tika.detect(file.toString()));
                createResourceDTO.setResourceAuthor(zipResource.getAuthor());
                createResourceDTO.setResourceExternalURI(file.toUri().toString());
                createResourceDTO.setResourceName(file.toString().substring(1));
                createResourceDTO.setResourceTenantId(zipResource.getResourceTenantId());

                final Resource resource = resourceService.insertChildResource(createResourceDTO, parentContext,
                        zipResource);

                distributedCacheService.cacheIt(resource.getUri(), resource.getResourceTenantId());

                if (file.toString().substring(1).equals(startPage)) {
                    resourceDTOs.add(0, crs.convertToDTO(resource));
                } else {
                    resourceDTOs.add(crs.convertToDTO(resource));
                }

                return FileVisitResult.CONTINUE;
            }

            @Override
            public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                return FileVisitResult.CONTINUE;
            }
        });

        final ResourceDTO parentResourceDto = resourceDTOs.remove(0);
        parentResourceDto.setResourcesList(resourceDTOs);
        return parentResourceDto;

    } catch (IOException e) {
        throw new ResourceAccessError("Error while analyzing " + zipResource.toString());
    } finally {
        if (zipfs != null && zipfs.isOpen()) {
            try {
                LOG.info("Closing FileSystem");
                zipfs.close();
            } catch (IOException e) {
                LOG.error(e.getMessage());
                e.printStackTrace();
                throw new ResourceAccessError("Error while analyzing " + zipResource.toString());
            }
        }
    }
}
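
In this example the keys of the collected map are the zip entries' paths with the leading slash stripped (substring(1)), so they can be checked directly against the METADATA_JSON_FILE and METADATA_XML_FILE names.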

From source file:fi.helsinki.opintoni.service.usefullink.UsefulLinkTransactionalService.java

public List<UsefulLinkDto> updateOrder(Long userId, OrderUsefulLinksDto orderUsefulLinksDto, Locale locale) {
    List<UsefulLink> usefulLinks = usefulLinkRepository.findByUserIdOrderByOrderIndexAsc(userId);

    Map<Long, UsefulLink> usefulLinkMap = usefulLinks.stream().collect(Collectors.toMap(u -> u.id, u -> u));

    IntStream.range(0, orderUsefulLinksDto.usefulLinkIds.size()).forEach(i -> {
        Long usefulLinkId = orderUsefulLinksDto.usefulLinkIds.get(i);
        usefulLinkMap.get(usefulLinkId).orderIndex = i;
    });

    return usefulLinks.stream().sorted(UsefulLink::compareTo)
            .map(usefulLink -> usefulLinkConverter.toDto(usefulLink, locale)).collect(Collectors.toList());
}
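
The id-to-entity map gives constant-time lookup while the new order indexes are applied; the links are then sorted and converted to DTOs.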

From source file:com.devicehive.service.DeviceCommandServiceTest.java

@Test
@DirtiesContext(methodMode = DirtiesContext.MethodMode.BEFORE_METHOD)
public void testFindCommandsByGuid() throws Exception {
    final List<String> guids = IntStream.range(0, 5).mapToObj(i -> UUID.randomUUID().toString())
            .collect(Collectors.toList());
    final Date timestampSt = timestampService.getDate();
    final Date timestampEnd = timestampService.getDate();
    final String parameters = "{\"param1\":\"value1\",\"param2\":\"value2\"}";

    final Set<String> guidsForSearch = new HashSet<>(Arrays.asList(guids.get(0), guids.get(2), guids.get(3)));

    final Map<String, DeviceCommand> commandMap = guidsForSearch.stream()
            .collect(Collectors.toMap(Function.identity(), guid -> {
                DeviceCommand command = new DeviceCommand();
                command.setId(System.nanoTime());
                command.setDeviceGuid(guid);
                command.setCommand(RandomStringUtils.randomAlphabetic(10));
                command.setTimestamp(timestampService.getDate());
                command.setParameters(new JsonStringWrapper(parameters));
                command.setStatus(DEFAULT_STATUS);
                return command;
            }));

    when(requestHandler.handle(any(Request.class))).then(invocation -> {
        Request request = invocation.getArgumentAt(0, Request.class);
        String guid = request.getBody().cast(CommandSearchRequest.class).getGuid();
        CommandSearchResponse response = new CommandSearchResponse();
        response.setCommands(Collections.singletonList(commandMap.get(guid)));
        return Response.newBuilder().withBody(response).buildSuccess();
    });

    deviceCommandService.find(guidsForSearch, Collections.emptySet(), timestampSt, timestampEnd, DEFAULT_STATUS)
            .thenAccept(commands -> {
                assertEquals(3, commands.size());
                assertEquals(new HashSet<>(commandMap.values()), new HashSet<>(commands));
            }).get(15, TimeUnit.SECONDS);

    verify(requestHandler, times(3)).handle(argument.capture());
}
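
This test uses Function.identity() as the key mapper, so the map is keyed by the guids themselves while the value mapper constructs a DeviceCommand for each guid.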

From source file:edu.washington.gs.skyline.model.quantification.QuantificationTest.java

public void testRatioToHeavy() throws Exception {
    List<InputRecord> allInputRecords = readInputRecords("NoNormalizationInput.csv");
    allInputRecords = filterRecords(new NormalizationMethod.RatioToLabel("heavy"), allInputRecords);
    Map<RecordKey, Double> expected = readExpectedRows("RatioToHeavy.csv");
    for (Map.Entry<RecordKey, Double> entry : expected.entrySet()) {
        List<InputRecord> records = allInputRecords.stream()
                .filter(record -> record.getRecordKey().equals(entry.getKey())).collect(Collectors.toList());
        TransitionAreas numerator = TransitionAreas
                .fromMap(records.stream().filter(record -> "light".equals(record.getIsotopeLabelType()))
                        .collect(Collectors.toMap(InputRecord::getTransitionKey, InputRecord::getArea)));
        TransitionAreas denominator = TransitionAreas
                .fromMap(records.stream().filter(record -> "heavy".equals(record.getIsotopeLabelType()))
                        .collect(Collectors.toMap(InputRecord::getTransitionKey, InputRecord::getArea)));

        Double actualArea = numerator.ratioTo(denominator);
        assertCloseEnough(entry.getValue(), actualArea);
    }
}
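
Both collectors here use method references as the mapping functions, keying each area map by InputRecord::getTransitionKey with InputRecord::getArea as the value.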

From source file:com.thinkbiganalytics.feedmgr.service.feed.FeedHiveTableService.java

/**
 * Updates the column descriptions in the Hive metastore for the specified feed.
 *
 * @param feed the feed to update
 * @throws DataAccessException if there is any problem
 */
public void updateColumnDescriptions(@Nonnull final FeedMetadata feed) {
    final List<Field> feedFields = Optional.ofNullable(feed.getTable()).map(TableSetup::getTableSchema)
            .map(TableSchema::getFields).orElse(null);
    if (feedFields != null && !feedFields.isEmpty()) {
        final TableSchema hiveSchema = hiveService.getTableSchema(feed.getSystemCategoryName(),
                feed.getSystemFeedName());
        if (hiveSchema != null) {
            final Map<String, Field> hiveFieldMap = hiveSchema.getFields().stream()
                    .collect(Collectors.toMap(field -> field.getName().toLowerCase(), Function.identity()));
            feedFields.stream().filter(feedField -> {
                final Field hiveField = hiveFieldMap.get(feedField.getName().toLowerCase());
                return hiveField != null
                        && (StringUtils.isNotEmpty(feedField.getDescription())
                                || StringUtils.isNotEmpty(hiveField.getDescription()))
                        && !Objects.equals(feedField.getDescription(), hiveField.getDescription());
            }).forEach(feedField -> changeColumn(feed, feedField.getName(), feedField));
        }
    }
}
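
The Hive fields are keyed by their lowercased names, which makes the subsequent lookup of feed fields against Hive columns case-insensitive.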

From source file:com.thinkbiganalytics.feedmgr.util.ImportUtil.java

public static void addToImportOptionsSensitiveProperties(ImportOptions importOptions,
        List<NifiProperty> sensitiveProperties, ImportComponent component) {
    ImportComponentOption option = importOptions.findImportComponentOption(component);
    if (option.getProperties().isEmpty()) {
        option.setProperties(sensitiveProperties.stream().map(p -> new ImportProperty(p.getProcessorName(),
                p.getProcessorId(), p.getKey(), "", p.getProcessorType())).collect(Collectors.toList()));
    } else {
        //only add in those that are unique
        Map<String, ImportProperty> propertyMap = option.getProperties().stream()
                .collect(Collectors.toMap(p -> p.getProcessorNameTypeKey(), p -> p));
        sensitiveProperties.stream()
                .filter(nifiProperty -> !propertyMap.containsKey(nifiProperty.getProcessorNameTypeKey()))
                .forEach(p -> {
                    option.getProperties().add(new ImportProperty(p.getProcessorName(), p.getProcessorId(),
                            p.getKey(), "", p.getProcessorType()));
                });
    }
}
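
Here the collected map acts as a deduplication index: sensitive properties whose processor name/type key is already present are filtered out before the rest are added.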

From source file:com.yfiton.core.Yfiton.java

private Map<String, Map<String, String>> loadPreferences(HierarchicalINIConfiguration configuration,
        Notifier notifier) {
    Set<String> sections = configuration.getSections();

    return sections.stream().filter(isEqual(null).negate().and(section -> notifier.getKey().equals(section)))
            .collect(Collectors.toMap(Function.identity(),
                    section -> configuration.getSection(section).getRootNode().getChildren().stream().collect(
                            Collectors.toMap(ConfigurationNode::getName, node -> (String) node.getValue()))));
}
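
This example nests one toMap inside another, producing a Map<String, Map<String, String>> of preference values keyed first by section name and then by configuration node name.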

From source file:fi.helsinki.opintoni.service.UserNotificationService.java

private Map<String, CourseDto> getCoursesByRealisationId(Set<CourseDto> courseDtos) {
    return courseDtos.stream().collect(Collectors.toMap(dto -> dto.realisationId, dto -> dto));
}
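
As in the UsefulLink example above, the DTOs are keyed by a public field (realisationId) with the element itself as the value.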

From source file:com.coveo.spillway.storage.RedisStorage.java

@Override
public Map<LimitKey, Integer> addAndGet(Collection<AddAndGetRequest> requests) {
    Pipeline pipeline = jedis.pipelined();

    Map<LimitKey, Response<Long>> responses = new LinkedHashMap<>();
    for (AddAndGetRequest request : requests) {
        pipeline.multi();
        LimitKey limitKey = LimitKey.fromRequest(request);
        String redisKey = Stream
                .of(keyPrefix, limitKey.getResource(), limitKey.getLimitName(), limitKey.getProperty(),
                        limitKey.getBucket().toString())
                .map(RedisStorage::clean).collect(Collectors.joining(KEY_SEPARATOR));

        responses.put(limitKey, pipeline.incrBy(redisKey, request.getCost()));
        // We set the expire to twice the expiration period. The expiration is there to ensure that we don't fill the Redis cluster with
        // useless keys. The actual expiration mechanism is handled by the bucketing mechanism.
        pipeline.expire(redisKey, (int) request.getExpiration().getSeconds() * 2);
        pipeline.exec();
    }

    pipeline.sync();
    return responses.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, kvp -> kvp.getValue().get().intValue()));
}
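
The pipelined Redis responses are only resolved after pipeline.sync(); the final toMap then keys each counter value by its LimitKey using the Map.Entry::getKey method reference.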