Example usage for java.util Set stream

List of usage examples for java.util Set stream

Introduction

This page lists usage examples for java.util.Set.stream().

Prototype

default Stream<E> stream() 

Document

Returns a sequential Stream with this collection as its source.
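
Below is a minimal, self-contained sketch of the prototype above: it calls stream() on a Set, transforms each element, and collects the result back into a Set, mirroring the map/collect pattern that appears throughout the examples in the Usage section. The class name and values are illustrative only, and Set.of requires Java 9 or later.

import java.util.Set;
import java.util.stream.Collectors;

public class SetStreamExample {
    public static void main(String[] args) {
        // A Set is a Collection, so it inherits the default stream() method.
        Set<String> names = Set.of("First", "Second", "Third");

        // Map each element to its upper-case form and collect back into a Set.
        Set<String> upperCased = names.stream()
                .map(String::toUpperCase)
                .collect(Collectors.toSet());

        // Prints the three upper-cased names in an unspecified order.
        System.out.println(upperCased);
    }
}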

Usage

From source file:nu.yona.server.device.service.DeviceServiceTestConfiguration.java

@Test
public void getDeviceAnonymizedId_byId_tryGetNonExistingId_exception() {
    expectedException.expect(DeviceServiceException.class);
    expectedException.expect(hasMessageId("error.device.not.found.id"));

    // Add devices
    String deviceName1 = "First";
    OperatingSystem operatingSystem1 = OperatingSystem.ANDROID;
    addDeviceToRichard(0, deviceName1, operatingSystem1);

    String deviceName2 = "Second";
    OperatingSystem operatingSystem2 = OperatingSystem.IOS;
    addDeviceToRichard(1, deviceName2, operatingSystem2);

    UserDevice notAddedDevice = createDevice(2, "NotAddedDevice", OperatingSystem.IOS, SOME_APP_VERSION,
            SUPPORTED_APP_VERSION_CODE);

    // Verify two devices are present
    Set<UserDevice> devices = richard.getDevices();
    assertThat(devices.stream().map(UserDevice::getName).collect(Collectors.toSet()),
            containsInAnyOrder(deviceName1, deviceName2));

    // Try to get the anonymized ID for a non-existent device ID
    service.getDeviceAnonymizedId(createRichardUserDto(), notAddedDevice.getId());
}

From source file:com.thinkbiganalytics.feedmgr.rest.controller.TemplatesRestController.java

@GET
@Path("/registered/{templateId}/actions/allowed")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation("Gets the list of actions permitted for the given username and/or groups.")
@ApiResponses({ @ApiResponse(code = 200, message = "Returns the actions.", response = ActionGroup.class),
        @ApiResponse(code = 404, message = "A template with the given ID does not exist.", response = RestResponseStatus.class) })
public Response getAllowedActions(@PathParam("templateId") String templateIdStr,
        @QueryParam("user") Set<String> userNames, @QueryParam("group") Set<String> groupNames) {
    log.debug("Get allowed actions for template: {}", templateIdStr);

    Set<? extends Principal> users = Arrays.stream(this.securityTransform.asUserPrincipals(userNames))
            .collect(Collectors.toSet());
    Set<? extends Principal> groups = Arrays.stream(this.securityTransform.asGroupPrincipals(groupNames))
            .collect(Collectors.toSet());

    return this.securityService
            .getAllowedTemplateActions(templateIdStr,
                    Stream.concat(users.stream(), groups.stream()).collect(Collectors.toSet()))
            .map(g -> Response.ok(g).build()).orElseThrow(() -> new WebApplicationException(
                    "A template with the given ID does not exist: " + templateIdStr, Status.NOT_FOUND));
}

From source file:com.netflix.spinnaker.fiat.permissions.RedisPermissionsRepository.java

@Override
public Map<String, UserPermission> getAllByRoles(List<String> anyRoles) {
    if (anyRoles == null) {
        return getAllById();
    } else if (anyRoles.isEmpty()) {
        val unrestricted = get(UNRESTRICTED);
        if (unrestricted.isPresent()) {
            val map = new HashMap<String, UserPermission>();
            map.put(UNRESTRICTED, unrestricted.get());
            return map;
        }
        return new HashMap<>();
    }

    try (Jedis jedis = jedisSource.getJedis()) {
        Pipeline p = jedis.pipelined();
        List<Response<Set<String>>> responses = anyRoles.stream().map(role -> p.smembers(roleKey(role)))
                .collect(Collectors.toList());
        p.sync();

        Set<String> dedupedUsernames = responses.stream().flatMap(response -> response.get().stream())
                .collect(Collectors.toSet());
        dedupedUsernames.add(UNRESTRICTED);

        Table<String, ResourceType, Response<Map<String, String>>> responseTable = getAllFromRedis(
                dedupedUsernames);
        if (responseTable == null) {
            return new HashMap<>(0);
        }

        RawUserPermission rawUnrestricted = new RawUserPermission(responseTable.row(UNRESTRICTED));
        UserPermission unrestrictedUser = getUserPermission(UNRESTRICTED, rawUnrestricted);

        return dedupedUsernames.stream().map(userId -> {
            RawUserPermission rawUser = new RawUserPermission(responseTable.row(userId));
            return getUserPermission(userId, rawUser);
        }).collect(Collectors.toMap(UserPermission::getId, permission -> permission.merge(unrestrictedUser)));
    }
}

From source file:nu.yona.server.device.service.DeviceServiceTestConfiguration.java

private void removeDuplicateDefaultDevicesFirstOrSecond(int indexToRetain) {
    // Add devices
    String deviceName1 = "First";
    OperatingSystem operatingSystem1 = OperatingSystem.ANDROID;
    LocalDateTime startTime = TimeUtil.utcNow();
    UserDevice device1 = addDeviceToRichard(0, deviceName1, operatingSystem1);
    ActivityData activity1 = new ActivityData("WhatsApp", 10, 2);
    ActivityData activity2 = new ActivityData("WhatsApp", 5, 1);
    Set<Activity> device1Activities = addActivities(device1, startTime, activity1, activity2);

    String deviceName2 = "Second";
    OperatingSystem operatingSystem2 = OperatingSystem.IOS;
    ActivityData activity3 = new ActivityData("WhatsApp", 9, 2);
    ActivityData activity4 = new ActivityData("WhatsApp", 3, 1);
    UserDevice device2 = addDeviceToRichard(0, deviceName2, operatingSystem2);
    Set<Activity> device2Activities = addActivities(device2, startTime, activity3, activity4);

    List<UserDevice> createdDevices = Arrays.asList(device1, device2);

    // Verify two devices are present
    Set<UserDevice> devices = richard.getDevices();
    assertThat(devices.stream().map(UserDevice::getName).collect(Collectors.toSet()),
            containsInAnyOrder(deviceName1, deviceName2));

    service.removeDuplicateDefaultDevices(createRichardUserDto(), createdDevices.get(indexToRetain).getId());

    // Assert success
    assertThat(devices.stream().map(UserDevice::getName).collect(Collectors.toSet()),
            containsInAnyOrder(createdDevices.get(indexToRetain).getName()));
    device1Activities.forEach(a -> assertThat(a.getDeviceAnonymized().get(),
            sameInstance(createdDevices.get(indexToRetain).getDeviceAnonymized())));
    device2Activities.forEach(a -> assertThat(a.getDeviceAnonymized().get(),
            sameInstance(createdDevices.get(indexToRetain).getDeviceAnonymized())));
}

From source file:io.github.jeddict.jpa.spec.sync.JavaClassSyncHandler.java

private void syncImplementedTypes(List<ClassOrInterfaceType> implementedTypes,
        Map<String, ImportDeclaration> imports) {
    Set<ReferenceClass> allInterfaces = new LinkedHashSet<>(javaClass.getRootElement().getInterfaces());
    allInterfaces.addAll(javaClass.getInterfaces());

    for (ClassOrInterfaceType implementedType : implementedTypes) {
        String implementedExprName = implementedType.getNameAsString();
        String implementedName;
        if (isFQN(implementedExprName)) {
            implementedName = unqualify(implementedExprName);
        } else {
            implementedName = implementedExprName;
        }

        String value = implementedType.toString();
        if (!allInterfaces.stream().filter(inter -> inter.isEnable())
                .filter(inter -> inter.getName().contains(implementedName)).findAny().isPresent()) {
            javaClass.addRuntimeInterface(new ReferenceClass(value));
            syncImportSnippet(value, imports);
        }
    }
}

From source file:org.hawkular.alerts.api.model.dampening.Dampening.java

public void perform(Match match, Set<ConditionEval> conditionEvalSet) {
    if (null == match) {
        throw new IllegalArgumentException("Match can not be null");
    }
    if (null == conditionEvalSet || isEmpty(conditionEvalSet)) {
        throw new IllegalArgumentException("ConditionEval Set can not be null or empty");
    }

    // The currentEvals map holds the most recent eval for each condition in the condition set.
    conditionEvalSet.stream()
            .forEach(conditionEval -> currentEvals.put(conditionEval.getConditionSetIndex(), conditionEval));

    // The conditionEvals for the same trigger will all have the same condition set size, so just use the first
    int conditionSetSize = conditionEvalSet.iterator().next().getConditionSetSize();
    boolean trueEval = false;
    switch (match) {
    case ALL:
        // Don't perform a dampening eval until we have a conditionEval for each member of the ConditionSet.
        if (currentEvals.size() < conditionSetSize) {
            return;
        }
        // Otherwise, all condition evals must be true for the condition set eval to be true
        trueEval = true;
        for (ConditionEval ce : currentEvals.values()) {
            if (!ce.isMatch()) {
                trueEval = false;
                break;
            }
        }
        break;
    case ANY:
        // we only need one true condition eval for the condition set eval to be true
        trueEval = false;
        for (ConditionEval ce : currentEvals.values()) {
            if (ce.isMatch()) {
                trueEval = true;
                break;
            }
        }
        break;
    default:
        throw new IllegalArgumentException("Unexpected Match type: " + match.name());
    }

    // If we had previously started our time and now have exceeded our time limit then we must start over
    long now = System.currentTimeMillis();
    if (type == Type.RELAXED_TIME && trueEvalsStartTime != 0L) {
        if ((now - trueEvalsStartTime) > evalTimeSetting) {
            reset();
        }
    }

    numEvals += 1;
    if (trueEval) {
        numTrueEvals += 1;
        addSatisfyingEvals(new HashSet<>(currentEvals.values()));

        switch (type) {
        case STRICT:
        case RELAXED_COUNT:
            if (numTrueEvals == evalTrueSetting) {
                satisfied = true;
            }
            break;

        case RELAXED_TIME:
            if (trueEvalsStartTime == 0L) {
                trueEvalsStartTime = now;
            }
            if ((numTrueEvals == evalTrueSetting) && ((now - trueEvalsStartTime) < evalTimeSetting)) {
                satisfied = true;
            }
            break;
        case STRICT_TIME:
        case STRICT_TIMEOUT:
            if (trueEvalsStartTime == 0L) {
                trueEvalsStartTime = now;

            } else if ((now - trueEvalsStartTime) >= evalTimeSetting) {
                satisfied = true;
            }
            break;
        }
    } else {
        switch (type) {
        case STRICT:
        case STRICT_TIME:
        case STRICT_TIMEOUT:
            reset();
            break;
        case RELAXED_COUNT:
            int numNeeded = evalTrueSetting - numTrueEvals;
            int chancesLeft = evalTotalSetting - numEvals;
            if (numNeeded > chancesLeft) {
                reset();
            }
            break;
        case RELAXED_TIME:
            break;
        }
    }
}

From source file:io.pravega.test.integration.MultiReadersEndToEndTest.java

private void runTestUsingMock(final Set<String> streamNames, final int numParallelReaders,
        final int numSegments) throws Exception {
    int servicePort = TestUtils.getAvailableListenPort();
    ServiceBuilder serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = serviceBuilder.createStreamSegmentService();
    @Cleanup
    PravegaConnectionListener server = new PravegaConnectionListener(false, servicePort, store);
    server.startListening();
    @Cleanup
    MockStreamManager streamManager = new MockStreamManager("scope", "localhost", servicePort);
    MockClientFactory clientFactory = streamManager.getClientFactory();
    streamManager.createScope("scope");
    streamNames.stream().forEach(stream -> {
        streamManager.createStream("scope", stream, StreamConfiguration.builder().scope("scope")
                .streamName(stream).scalingPolicy(ScalingPolicy.fixed(numSegments)).build());
        EventStreamWriter<Integer> eventWriter = clientFactory.createEventWriter(stream,
                new IntegerSerializer(), EventWriterConfig.builder().build());
        for (Integer i = 0; i < NUM_TEST_EVENTS; i++) {
            eventWriter.writeEvent(String.valueOf(i), i);
        }
        eventWriter.flush();
        log.info("Wrote {} events", NUM_TEST_EVENTS);
    });

    final String readerGroupName = "testReaderGroup";
    streamManager.createReaderGroup(readerGroupName, ReaderGroupConfig.builder().startingTime(0).build(),
            streamNames);

    Collection<Integer> read = readAllEvents(numParallelReaders, clientFactory, readerGroupName, numSegments);

    Assert.assertEquals(NUM_TEST_EVENTS * streamNames.size(), read.size());
    // Check unique events.
    Assert.assertEquals(NUM_TEST_EVENTS, new TreeSet<>(read).size());
    streamManager.deleteReaderGroup(readerGroupName);
}

From source file:com.autonomy.aci.client.transport.impl.AciHttpClientImpl.java

/**
 * Creates an {@code HttpPost} and adds the ACI parameters to the request body.
 * @param serverDetails The details of the ACI server the request will be sent to
 * @param parameters    The parameters to send with the ACI action.
 * @return An {@code HttpPost} that is ready to execute the ACI action.
 * @throws UnsupportedEncodingException Will be thrown if <tt>serverDetails.getCharsetName()</tt> returns a
 *                                      charset that is not supported by the JVM
 * @throws URISyntaxException           If there was a problem constructing the request URI from the
 *                                      <tt>serverDetails</tt> and <tt>parameters</tt>
 */
private HttpUriRequest createPostMethod(final AciServerDetails serverDetails,
        final Set<? extends ActionParameter<?>> parameters)
        throws URISyntaxException, UnsupportedEncodingException {
    LOGGER.trace("createPostMethod() called...");

    // Create the URI to use...
    final URI uri = new URIBuilder()
            .setScheme(serverDetails.getProtocol().toString().toLowerCase(Locale.ENGLISH))
            .setHost(serverDetails.getHost()).setPort(serverDetails.getPort()).setPath("/").build();

    // Create the method...
    final HttpPost method = new HttpPost(uri);

    final Charset charset = Charset.forName(serverDetails.getCharsetName());

    final boolean requiresMultipart = parameters.stream().anyMatch(ActionParameter::requiresPostRequest);

    if (requiresMultipart) {
        final MultipartEntityBuilder multipartEntityBuilder = MultipartEntityBuilder.create();
        multipartEntityBuilder.setCharset(charset);

        parameters.forEach(parameter -> parameter.addToEntity(multipartEntityBuilder, charset));

        // Convert the parameters into an entity...
        method.setEntity(multipartEntityBuilder.build());
    } else {
        method.setEntity(new StringEntity(convertParameters(parameters, serverDetails.getCharsetName()),
                serverDetails.getCharsetName()));
    }

    // Return the method...
    return method;
}

From source file:com.netflix.spinnaker.clouddriver.kubernetes.v2.caching.view.provider.KubernetesV2ClusterProvider.java

private Set<KubernetesV2Cluster> translateClustersWithRelationships(Collection<CacheData> clusterData) {
    // TODO(lwander) possible optimization: store lb relationships in cluster object to cut down on number of loads here.
    List<CacheData> serverGroupData = kindMap.translateSpinnakerKind(SERVER_GROUPS).stream()
            .map(kind -> cacheUtils.loadRelationshipsFromCache(clusterData, kind.toString()))
            .flatMap(Collection::stream).collect(Collectors.toList());

    List<CacheData> loadBalancerData = kindMap.translateSpinnakerKind(LOAD_BALANCERS).stream()
            .map(kind -> cacheUtils.loadRelationshipsFromCache(serverGroupData, kind.toString()))
            .flatMap(Collection::stream).collect(Collectors.toList());

    List<CacheData> instanceData = kindMap.translateSpinnakerKind(INSTANCES).stream()
            .map(kind -> cacheUtils.loadRelationshipsFromCache(serverGroupData, kind.toString()))
            .flatMap(Collection::stream).collect(Collectors.toList());

    Map<String, List<CacheData>> clusterToServerGroups = new HashMap<>();
    for (CacheData serverGroupDatum : serverGroupData) {
        Collection<String> clusterKeys = serverGroupDatum.getRelationships().get(CLUSTERS.toString());
        if (clusterKeys == null || clusterKeys.size() != 1) {
            log.warn("Malformed cache, server group stored without cluster");
            continue;
        }

        String clusterKey = clusterKeys.iterator().next();
        List<CacheData> storedData = clusterToServerGroups.getOrDefault(clusterKey, new ArrayList<>());
        storedData.add(serverGroupDatum);
        clusterToServerGroups.put(clusterKey, storedData);
    }

    Map<String, List<CacheData>> serverGroupToLoadBalancers = cacheUtils.mapByRelationship(loadBalancerData,
            SERVER_GROUPS);
    Map<String, List<CacheData>> serverGroupToInstances = cacheUtils.mapByRelationship(instanceData,
            SERVER_GROUPS);
    Map<String, List<CacheData>> loadBalancerToServerGroups = cacheUtils.mapByRelationship(serverGroupData,
            LOAD_BALANCERS);

    Set<KubernetesV2Cluster> result = new HashSet<>();
    for (CacheData clusterDatum : clusterData) {
        List<CacheData> clusterServerGroups = clusterToServerGroups.getOrDefault(clusterDatum.getId(),
                new ArrayList<>());
        List<CacheData> clusterLoadBalancers = clusterServerGroups.stream().map(CacheData::getId)
                .map(id -> serverGroupToLoadBalancers.getOrDefault(id, new ArrayList<>()))
                .flatMap(Collection::stream).collect(Collectors.toList());

        result.add(translateCluster(clusterDatum, clusterServerGroups, clusterLoadBalancers,
                serverGroupToInstances, loadBalancerToServerGroups, serverGroupToLoadBalancers));
    }

    return result.stream().filter(Objects::nonNull).collect(Collectors.toSet());
}