Example usage for java.util Optional ifPresent

List of usage examples for java.util Optional ifPresent

Introduction

On this page you can find examples of how java.util.Optional.ifPresent is used in real-world source files.

Prototype

public void ifPresent(Consumer<? super T> action) 

Document

If a value is present, performs the given action with the value, otherwise does nothing.
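
Before the project snippets below, here is a minimal, self-contained sketch of that behavior; the class name and variable names are illustrative only and are not taken from any of the listed projects:

import java.util.Optional;

public class IfPresentDemo {
    public static void main(String[] args) {
        Optional<String> maybeName = Optional.of("Ada");

        // A value is present, so the Consumer runs and prints "Hello, Ada".
        maybeName.ifPresent(name -> System.out.println("Hello, " + name));

        // The Optional is empty, so ifPresent does nothing and prints nothing.
        Optional.<String>empty().ifPresent(name -> System.out.println("never printed"));
    }
}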

Usage

From source file:tech.beshu.ror.integration.JwtAuthTests.java

private int test(Optional<String> token, Optional<String> headerName, boolean withBearer) throws Exception {
    RestClient rc = container.getClient();
    HttpGet req = new HttpGet(rc.from("/_cat/indices"));
    token.ifPresent(t -> req.addHeader(headerName.orElse("Authorization"), (withBearer ? "Bearer " : "") + t));
    HttpResponse resp = rc.execute(req);
    return resp.getStatusLine().getStatusCode();
}

From source file:org.pdfsam.split.SplitModule.java

@Override
protected Builder<? extends AbstractSplitByPageParameters> getBuilder(Consumer<String> onError) {
    Optional<SinglePdfSourceMultipleOutputParametersBuilder<? extends AbstractSplitByPageParameters>> builder = Optional
            .ofNullable(splitOptions.getBuilder(onError));
    builder.ifPresent(b -> {
        selectionPane.apply(b, onError);
        destinationDirectoryField.apply(b, onError);
        destinationPane.apply(b, onError);
        prefix.apply(b, onError);
    });
    return builder.orElse(null);
}

From source file:natalia.dymnikova.cluster.scheduler.impl.SetFlowFactory.java

public SetFlow makeFlow(final String flowName, final Optional<String> parentFlowName,
        final List<StageContainer> resolvedStages) {
    final GetAddressesStrategy strategy = strategyFactory.getAddressStrategy();
    final SetFlow.Builder flow = SetFlow.newBuilder().setFlowName(flowName);
    parentFlowName.ifPresent(flow::setParentFlowName);

    final List<Optional<Address>> addresses = strategy.getNodes(createTree(resolvedStages.get(0)));

    flow.setStage(makeStages(resolvedStages, addresses, 0));
    return flow.build();
}

From source file:com.netflix.spinnaker.halyard.deploy.spinnaker.v1.service.local.git.LocalGitDeckService.java

@Override
public List<Profile> getProfiles(DeploymentConfiguration deploymentConfiguration,
        SpinnakerRuntimeSettings endpoints) {
    List<Profile> result = new ArrayList<>();
    Profile deckProfile = deckProfileFactory.getProfile(deckSettingsPath, deckPath, deploymentConfiguration,
            endpoints);

    String deploymentName = deploymentConfiguration.getName();
    Path userProfilePath = halconfigDirectoryStructure.getUserProfilePath(deploymentName);
    Optional<Profile> settingsLocalProfile = this.customProfile(deploymentConfiguration, endpoints,
            Paths.get(userProfilePath.toString(), deckSettingsLocalPath), deckSettingsLocalPath);
    settingsLocalProfile.ifPresent(p -> deckProfile.appendContents(p.getContents()));

    result.add(deckProfile);
    return result;
}

From source file:com.epam.ta.reportportal.core.launch.impl.FinishLaunchHandler.java

@Override
public OperationCompletionRS finishLaunch(String launchId, FinishExecutionRQ finishLaunchRQ, String projectName,
        String username) {

    Launch launch = launchRepository.findOne(launchId);
    validate(launchId, launch, finishLaunchRQ);
    Project project = validateRoles(launch, username, projectName);

    launch.setEndTime(finishLaunchRQ.getEndTime());
    Optional<Status> status = fromValue(finishLaunchRQ.getStatus());
    status.ifPresent(providedStatus -> {
        /* Validate provided status */
        expect(providedStatus, not(Preconditions.statusIn(IN_PROGRESS, SKIPPED))).verify(
                INCORRECT_FINISH_STATUS,
                formattedSupplier("Cannot finish launch '{}' with status '{}'", launchId, providedStatus));
        /* Validate actual launch status */
        if (PASSED.equals(providedStatus)) {
            /* Validate actual launch status */
            expect(launch.getStatus(), Preconditions.statusIn(IN_PROGRESS, PASSED)).verify(
                    INCORRECT_FINISH_STATUS,
                    formattedSupplier("Cannot finish launch '{}' with current status '{}' as 'PASSED'",
                            launchId, launch.getStatus()));
            /*
             * Calculate status from launch statistics and validate it
             */
            Status fromStatistics = StatisticsHelper.getStatusFromStatistics(launch.getStatistics());
            expect(fromStatistics, Preconditions.statusIn(IN_PROGRESS, PASSED)).verify(INCORRECT_FINISH_STATUS,
                    formattedSupplier(
                            "Cannot finish launch '{}' with calculated automatically status '{}' as 'PASSED'",
                            launchId, fromStatistics));
        }
    });
    launch.setStatus(status.orElse(StatisticsHelper.getStatusFromStatistics(launch.getStatistics())));
    try {
        launchRepository.save(launch);
    } catch (Exception exp) {
        throw new ReportPortalException("Error while Launch updating.", exp);
    }
    eventPublisher.publishEvent(new LaunchFinishedEvent(launch, project));
    return new OperationCompletionRS("Launch with ID = '" + launchId + "' successfully finished.");
}

From source file:com.netflix.spinnaker.orca.clouddriver.utils.TrafficGuard.java

public void verifyInstanceTermination(String serverGroupNameFromStage, List<String> instanceIds, String account,
        Location location, String cloudProvider, String operationDescriptor) {
    Map<String, List<String>> instancesPerServerGroup = new HashMap<>();
    for (String instanceId : instanceIds) {
        String serverGroupName = serverGroupNameFromStage;
        if (serverGroupName == null) {
            Optional<String> resolvedServerGroupName = resolveServerGroupNameForInstance(instanceId, account,
                    location.getValue(), cloudProvider);
            serverGroupName = resolvedServerGroupName.orElse(null);
        }

        if (serverGroupName != null) {
            instancesPerServerGroup.computeIfAbsent(serverGroupName, serverGroup -> new ArrayList<>())
                    .add(instanceId);
        }
    }

    instancesPerServerGroup.entrySet().forEach(entry -> {
        String serverGroupName = entry.getKey();
        Names names = Names.parseName(serverGroupName);
        if (hasDisableLock(names.getCluster(), account, location)) {
            Optional<TargetServerGroup> targetServerGroup = oortHelper.getTargetServerGroup(account,
                    serverGroupName, location.getValue(), cloudProvider);

            targetServerGroup.ifPresent(serverGroup -> {
                Optional<Map> thisInstance = serverGroup.getInstances().stream()
                        .filter(i -> "Up".equals(i.get("healthState"))).findFirst();
                if (thisInstance.isPresent() && "Up".equals(thisInstance.get().get("healthState"))) {
                    long otherActiveInstances = serverGroup.getInstances().stream().filter(
                            i -> "Up".equals(i.get("healthState")) && !entry.getValue().contains(i.get("name")))
                            .count();
                    if (otherActiveInstances == 0) {
                        verifyOtherServerGroupsAreTakingTraffic(serverGroupName, location, account,
                                cloudProvider, operationDescriptor);
                    }
                }
            });
        }
    });
}

From source file:com.ikanow.aleph2.remote.es_tests.SimpleReadableCrudTest.java

public void runTest() throws IOException {
    System.out.println("running test");

    final String bucket_str = Resources.toString(
            Resources.getResource("com/ikanow/aleph2/remote/es_tests/test_es_databucket.json"), Charsets.UTF_8);

    final DataBucketBean bucket = BeanTemplateUtils.from(bucket_str, DataBucketBean.class).get();

    final Optional<IReadOnlyCrudService<JsonNode>> maybe_read_crud = _service_context
            .getService(IDocumentService.class, Optional.empty()).flatMap(ds -> ds.getDataService())
            .flatMap(ds -> ds.getReadableCrudService(JsonNode.class, Arrays.asList(bucket), Optional.empty()));

    maybe_read_crud.ifPresent(
            read_crud -> System.out.println("*************** " + read_crud.countObjects().join().intValue()));

    QueryComponent<JsonNode> query = CrudUtils.allOf()
            .withAny("email_key.raw",
                    Arrays.asList("dionne05@gmail.com", "dionne05@gmail.com", "dionnewalker2016@gmail.com",
                            "diontaethompson10@gmil.com", "dionteb41@gmail.com", "diquangantt@ymail.com"))
            .limit(Integer.MAX_VALUE);

    System.out.println(query.toString());

    final Cursor<JsonNode> q = maybe_read_crud.get().getObjectsBySpec(query).join();

    q.iterator().forEachRemaining(j -> System.out.println("??? " + j.toString()));
}

From source file:com.ikanow.aleph2.graph.titan.utils.TitanGraphBuildingUtils.java

/** (3/3) Merges user generated edges/vertices with the ones already in the system 
 * @param tx
 * @param config
 * @param security_service
 * @param logger
 * @param maybe_merger
 * @param mergeable
 */
public static void buildGraph_handleMerge(final TitanTransaction tx, final GraphSchemaBean config,
        final Tuple2<String, ISecurityService> security_service, final Optional<IBucketLogger> logger,
        final MutableStatsBean mutable_stats, final Collection<ObjectNode> mutable_new_vertex_keys,
        final Optional<Tuple2<IEnrichmentBatchModule, GraphMergeEnrichmentContext>> maybe_merger,
        final DataBucketBean bucket,
        final Stream<Tuple4<ObjectNode, List<ObjectNode>, List<ObjectNode>, List<Vertex>>> mergeable) {
    final org.apache.tinkerpop.shaded.jackson.databind.ObjectMapper titan_mapper = tx.io(IoCore.graphson())
            .mapper().create().createMapper();
    final Multimap<JsonNode, Edge> mutable_existing_edge_endpoint_store = LinkedHashMultimap.create(); //(lazy simple way of handling 1.3/2)
    final Map<ObjectNode, Vertex> mutable_per_merge_cached_vertices = new HashMap<>();

    mergeable.forEach(t4 -> {

        //TODO (ALEPH-15): handling properties: add new properties and:
        // remove any properties of any vertex/edge over which the user does not have read permission .. and then re-combine later

        final ObjectNode key = t4._1();
        final List<ObjectNode> vertices = t4._2();
        final List<ObjectNode> edges = t4._3();
        final List<Vertex> existing_vertices = t4._4();
        mutable_stats.vertex_matches_found += existing_vertices.size();

        // 1) First step is to sort out the _vertices_, here's the cases:

        // 1.1) If there's no matching vertices then create a new vertex and get the id (via a merge if finalize is set)
        //      (overwrite the _id then map to a Vertex)
        // 1.2) If there are >0 matching vertices (and only one incoming vertex) then we run a merge in which the user "has to do" the following:
        // 1.2.a) pick the winning vertex (or emit the current one to create a "duplicate node"?)
        // 1.2.a.1) (Allow user to delete the others if he has permission, by the usual emit "id" only - but don't automatically do it because it gets complicated what to do with the other _bs)
        // 1.2.b) copy any properties from the original objects into the winner and remove any so-desired properties

        final long prev_created = mutable_stats.vertices_created; //(nasty hack, see below)

        final Optional<Vertex> maybe_vertex_winner = invokeUserMergeCode(tx, config, security_service, logger,
                maybe_merger, titan_mapper, mutable_stats, Vertex.class, bucket.full_name(), key, vertices,
                existing_vertices, Collections.emptyMap()).stream().findFirst();

        maybe_vertex_winner.ifPresent(vertex_winner -> {
            mutable_per_merge_cached_vertices.put(key, vertex_winner);

            //(slightly nasty hack, use stats to see if a vertex was created vs updated...)
            if (mutable_stats.vertices_created > prev_created) {
                mutable_new_vertex_keys.add(key);
            }

            // 1.3) Tidy up (mutate) the edges            

            // 1.3.1) Make a store of all the existing edges (won't worry about in/out, it will sort itself out)

            Stream.of(Optionals.streamOf(vertex_winner.edges(Direction.IN), false),
                    Optionals.streamOf(vertex_winner.edges(Direction.OUT), false),
                    Optionals.streamOf(vertex_winner.edges(Direction.BOTH), false)).flatMap(__ -> __)
                    .forEach(e -> {
                        mutable_existing_edge_endpoint_store.put(key, e);
                    });

            // 1.3.2) Handle incoming edges:

            final Map<ObjectNode, List<ObjectNode>> grouped_edges = finalEdgeGrouping(key, vertex_winner,
                    edges);

            // 2) By here we have a list of vertices and we've mutated the edges to fill in the _inV and _outV
            // 2.1) Now get the potentially matching edges from each of the selected vertices:
            // 2.1.1) If there's no matching edges (and only one incoming edge) then create a new edge (via a merge if finalize is set)
            // 2.1.2) If there are >0 matching edges then run a merge against the edges, pick the current one

            // OK now for any resolved edges (ie grouped_edges), match up with the mutable store (which will be correctly populated by construction):

            grouped_edges.entrySet().stream().forEach(kv -> {

                final Function<String, Map<Object, Edge>> getEdges = in_or_out -> Optionals
                        .ofNullable(mutable_existing_edge_endpoint_store.get(kv.getKey().get(in_or_out)))
                        .stream().filter(e -> labelMatches(kv.getKey(), e))
                        .filter(e -> isAllowed(bucket.full_name(), security_service, e)) // (check authorized)
                        .collect(Collectors.toMap(e -> e.id(), e -> e));
                final Map<Object, Edge> in_existing = getEdges.apply(GraphAnnotationBean.inV);
                final Map<Object, Edge> out_existing = getEdges.apply(GraphAnnotationBean.outV);

                final List<Edge> existing_edges = BucketUtils.isTestBucket(bucket) ? Collections.emptyList()
                        : Stream.of(
                                Maps.difference(in_existing, out_existing).entriesInCommon().values().stream(),
                                in_existing.values().stream().filter(e -> e.inVertex() == e.outVertex()) // (handle the case where an edge starts/ends at the same node)
                ).flatMap(__ -> __).collect(Collectors.toList());

                mutable_stats.edge_matches_found += existing_edges.size();

                invokeUserMergeCode(tx, config, security_service, logger, maybe_merger, titan_mapper,
                        mutable_stats, Edge.class, bucket.full_name(), kv.getKey(), kv.getValue(),
                        existing_edges, mutable_per_merge_cached_vertices);
            });
        });

    });

    //TRACE
    //System.err.println(new Date() + ": VERTICES FOUND = " + mutable_existing_vertex_store);
}

From source file:org.smigo.message.MessageHandler.java

public void addNewYearNewsMessage(Optional<AuthenticatedUser> optionalUser, int year, List<Plant> plants,
        Locale locale) {
    final List<Integer> validYears = Arrays.asList(Year.now().getValue(), Year.now().plusYears(1).getValue());
    if (!optionalUser.isPresent() || !validYears.contains(year) || plants.size() < 10) {
        optionalUser.ifPresent(u -> log.info("Not adding new year news message for:" + u.getUsername()));
        return;
    }
    final AuthenticatedUser user = optionalUser.get();
    final MessageAdd message = new MessageAdd();
    final String text = messageSource.getMessage("msg.useraddedyearmessage",
            new Object[] { user.getUsername(), year }, locale);
    message.setText(text);
    message.setSubmitterUserId(1);
    message.setLocale(locale.toLanguageTag());
    messageDao.addMessage(message);
}

From source file:ee.ria.xroad.opmonitordaemon.HealthDataRequestHandler.java

@SuppressWarnings("unchecked")
private ServiceEventsType buildServiceEvents(ServiceId service) {
    ServiceEventsType serviceEvents = OBJECT_FACTORY.createServiceEventsType();

    serviceEvents.setService(service);

    Optional<Gauge<Long>> lastSuccessfulRequestTimestamp = Optional
            .ofNullable(findGauge(healthMetricRegistry, getLastRequestTimestampGaugeName(service, true)));
    lastSuccessfulRequestTimestamp
            .ifPresent(g -> serviceEvents.setLastSuccessfulRequestTimestamp(g.getValue()));

    Optional<Gauge<Long>> lastUnsuccessfulRequestTimestamp = Optional
            .ofNullable(findGauge(healthMetricRegistry, getLastRequestTimestampGaugeName(service, false)));
    lastUnsuccessfulRequestTimestamp
            .ifPresent(g -> serviceEvents.setLastUnsuccessfulRequestTimestamp(g.getValue()));

    serviceEvents.setLastPeriodStatistics(buildLastPeriodStats(service));

    return serviceEvents;
}