Example usage for java.util Optional map

List of usage examples for java.util Optional map

Introduction

On this page you can find example usage for java.util Optional map.

Prototype

public <U> Optional<U> map(Function<? super T, ? extends U> mapper) 

Document

If a value is present, returns an Optional describing (as if by ofNullable) the result of applying the given mapping function to the value, otherwise returns an empty Optional.
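
As a minimal, self-contained sketch of this contract (not drawn from any of the projects below), the following snippet shows that the mapper runs only when a value is present, and that a mapper returning null produces an empty Optional:

import java.util.Optional;

public class OptionalMapDemo {
    public static void main(String[] args) {
        // Value present: the mapper is applied and its result is wrapped
        Optional<Integer> length = Optional.of("hello").map(String::length);
        System.out.println(length); // Optional[5]

        // Value absent: the mapper is never invoked
        Optional<Integer> none = Optional.<String>empty().map(String::length);
        System.out.println(none); // Optional.empty

        // A mapper that returns null yields an empty Optional (as if by ofNullable)
        Optional<String> nullResult = Optional.of("hello").map(s -> (String) null);
        System.out.println(nullResult); // Optional.empty
    }
}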

Usage

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

/** Utility to get the list (currently 0/1) of enrichment modules that define deduplication handling
 * @param context the enrichment module context
 * @param cfg the enrichment control metadata for the deduplication job
 * @return the enrichment modules that handle deduplication (empty if none are configured)
 */
final protected static Collection<IEnrichmentBatchModule> getEnrichmentModules(
        final IEnrichmentModuleContext context, final EnrichmentControlMetadataBean cfg) {

    final Optional<String> entry_point = Optional.ofNullable(cfg.entry_point()).map(Optional::of)
            .orElseGet(() -> {
                // Get the shared library bean:

                return BucketUtils
                        .getBatchEntryPoint(
                                context.getServiceContext().getCoreManagementDbService().readOnlyVersion()
                                        .getSharedLibraryStore()
                                        .getObjectBySpec(CrudUtils.anyOf(SharedLibraryBean.class)
                                                .when(SharedLibraryBean::_id, cfg.module_name_or_id())
                                                .when(SharedLibraryBean::path_name, cfg.module_name_or_id()))
                                        .join()
                                        .map(bean -> (Map<String, SharedLibraryBean>) ImmutableMap
                                                .of(cfg.module_name_or_id(), bean))
                                        .orElse(Collections.<String, SharedLibraryBean>emptyMap()),
                                cfg);
            });

    return entry_point
            .map(Lambdas.wrap_u(ep -> (IEnrichmentBatchModule) Class
                    .forName(ep, true, Thread.currentThread().getContextClassLoader()).newInstance()))
            .map(i -> Arrays.asList(i)).orElse(Collections.emptyList());
}
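
The Optional.ofNullable(cfg.entry_point()).map(Optional::of).orElseGet(...) chain at the top of this example is a common pre-Java 9 substitute for Optional.or: mapping a present value back into an Optional lets orElseGet supply an alternative Optional rather than a plain value. A minimal sketch of the idiom, with illustrative names that are not from the Aleph2 code:

import java.util.Optional;

public class EntryPointLookup {

    // Hypothetical fallback lookup, for illustration only
    static Optional<String> lookupFromSharedLibrary() {
        return Optional.of("com.example.SharedEntryPoint");
    }

    static Optional<String> resolveEntryPoint(String configuredEntryPoint) {
        // map(Optional::of) turns a present value into Optional<Optional<String>>,
        // so orElseGet can substitute a whole alternative Optional
        return Optional.ofNullable(configuredEntryPoint)
                .map(Optional::of)
                .orElseGet(EntryPointLookup::lookupFromSharedLibrary);
    }

    public static void main(String[] args) {
        System.out.println(resolveEntryPoint("com.example.ConfiguredEntryPoint")); // configured value wins
        System.out.println(resolveEntryPoint(null));                               // fallback is used
    }
}

On Java 9 and later the same fallback can be written directly as Optional.ofNullable(configuredEntryPoint).or(EntryPointLookup::lookupFromSharedLibrary).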

From source file:com.spotify.heroic.suggest.elasticsearch.ElasticsearchSuggestModule.java

@JsonCreator
public ElasticsearchSuggestModule(@JsonProperty("id") Optional<String> id,
        @JsonProperty("groups") Optional<Groups> groups,
        @JsonProperty("connection") Optional<ConnectionModule> connection,
        @JsonProperty("writesPerSecond") Optional<Double> writesPerSecond,
        @JsonProperty("rateLimitSlowStartSeconds") Optional<Long> rateLimitSlowStartSeconds,
        @JsonProperty("writeCacheDurationMinutes") Optional<Long> writeCacheDurationMinutes,
        @JsonProperty("templateName") Optional<String> templateName,
        @JsonProperty("backendType") Optional<String> backendType,
        @JsonProperty("configure") Optional<Boolean> configure) {
    this.id = id;
    this.groups = groups.orElseGet(Groups::empty).or(DEFAULT_GROUP);
    this.connection = connection.orElseGet(ConnectionModule::buildDefault);
    this.writesPerSecond = writesPerSecond.orElse(DEFAULT_WRITES_PER_SECOND);
    this.rateLimitSlowStartSeconds = rateLimitSlowStartSeconds.orElse(DEFAULT_RATE_LIMIT_SLOW_START_SECONDS);
    this.writeCacheDurationMinutes = writeCacheDurationMinutes.orElse(DEFAULT_WRITES_CACHE_DURATION_MINUTES);
    this.templateName = templateName.orElse(DEFAULT_TEMPLATE_NAME);
    this.backendType = backendType.orElse(DEFAULT_BACKEND_TYPE);
    this.type = backendType.map(this::lookupBackendType).orElse(defaultSetup);
    this.configure = configure.orElse(DEFAULT_CONFIGURE);
}
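
The backendType.map(this::lookupBackendType).orElse(defaultSetup) line shows the usual pattern for optional configuration values: transform the raw setting only if it was supplied, then fall back to a default. A stripped-down sketch under assumed names (BackendType, lookupBackendType and the values are illustrative, not Heroic's):

import java.util.Optional;

public class BackendConfig {

    enum BackendType { KV, SUGGEST }

    // Hypothetical mapping from a configuration string to a typed value
    static BackendType lookupBackendType(String name) {
        return BackendType.valueOf(name.toUpperCase());
    }

    static BackendType resolveBackend(Optional<String> backendType, BackendType defaultSetup) {
        // map transforms the configured string only if one was supplied;
        // orElse falls back to the default when the Optional is empty
        return backendType.map(BackendConfig::lookupBackendType).orElse(defaultSetup);
    }

    public static void main(String[] args) {
        System.out.println(resolveBackend(Optional.of("kv"), BackendType.SUGGEST)); // KV
        System.out.println(resolveBackend(Optional.empty(), BackendType.SUGGEST));  // SUGGEST
    }
}

Note that orElse evaluates its argument eagerly; for defaults that are expensive to construct, orElseGet with a supplier is usually preferable.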

From source file:alfio.manager.EventManager.java

private Stream<MapSqlParameterSource> generateTicketsForCategory(TicketCategory tc, Event event,
        Date creationDate, int existing) {
    Optional<TicketCategory> filteredTC = Optional.of(tc).filter(TicketCategory::isBounded);
    int missingTickets = filteredTC.map(c -> Math.abs(c.getMaxTickets() - existing))
            .orElseGet(() -> eventRepository.countExistingTickets(event.getId()) - existing);
    return generateStreamForTicketCreation(missingTickets)
            .map(ps -> buildTicketParams(event.getId(), creationDate, filteredTC, tc.getSrcPriceCts(), ps));
}
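
Here filter narrows the Optional to bounded ticket categories, so map only computes the bounded shortfall while orElseGet lazily falls back to counting existing tickets for the unbounded case. A simplified sketch of the same shape, using an illustrative Category type rather than the alf.io classes:

import java.util.Optional;

public class TicketCount {

    // Illustrative stand-in for TicketCategory, not the alf.io class
    record Category(boolean bounded, int maxTickets) {}

    static int missingTickets(Category tc, int existing, int eventCapacity) {
        Optional<Category> bounded = Optional.of(tc).filter(Category::bounded);
        // map runs only when the category survived the filter;
        // orElseGet computes the unbounded fallback lazily
        return bounded.map(c -> Math.abs(c.maxTickets() - existing))
                .orElseGet(() -> eventCapacity - existing);
    }

    public static void main(String[] args) {
        System.out.println(missingTickets(new Category(true, 100), 40, 500)); // 60
        System.out.println(missingTickets(new Category(false, 0), 40, 500));  // 460
    }
}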

From source file:com.yahoo.bard.webservice.web.endpoints.JobsServlet.java

/**
 * Check whether the PreResponse contains an error. If it does, return an Observable wrapping the error;
 * otherwise return an Observable wrapping the PreResponse as is.
 *
 * @param preResponse  The PreResponse to be inspected
 * @param uriInfo  uriInfo object to get uriBuilder
 * @param paginationParameters  user's requested pagination parameters
 *
 * @return An Observable wrapping the PreResponse or an Observable wrapping a ResponseException
 */
protected Observable<PreResponse> handlePreResponseWithError(PreResponse preResponse, UriInfo uriInfo,
        Optional<PaginationParameters> paginationParameters) {
    ResponseContext responseContext = preResponse.getResponseContext();

    if (responseContext.containsKey(ResponseContextKeys.STATUS.getName())) {
        ResponseException responseException = new ResponseException(
                (Integer) responseContext.get(ResponseContextKeys.STATUS.getName()),
                (String) responseContext.get(ResponseContextKeys.ERROR_MESSAGE.getName()),
                (String) responseContext.get(ResponseContextKeys.ERROR_MESSAGE.getName()), null);
        return Observable.error(responseException);
    }

    return paginationParameters
            .map(pageParams -> new AllPagesPagination<>(preResponse.getResultSet(), pageParams))
            .map(page -> new PreResponse(
                    new ResultSet(page.getPageOfData(), preResponse.getResultSet().getSchema()),
                    addPaginationInfoToResponseContext(responseContext, uriInfo, page)))
            .map(Observable::just).orElse(Observable.just(preResponse));
}
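
The three chained map calls build the paginated PreResponse step by step; if paginationParameters is empty, every stage is skipped and orElse returns the original response unchanged. Stripped of the Bard and RxJava types, the shape of the chain looks roughly like this (names are illustrative):

import java.util.List;
import java.util.Optional;

public class PaginationChain {

    record Page(List<String> rows) {}

    static List<String> paginate(List<String> allRows, Optional<Integer> pageSize) {
        // Each map stage runs only when pagination parameters were supplied;
        // otherwise the whole chain falls through to orElse
        return pageSize
                .map(size -> allRows.subList(0, Math.min(size, allRows.size()))) // build the page
                .map(Page::new)                                                  // wrap it, as the servlet wraps a PreResponse
                .map(Page::rows)                                                 // unwrap for the final response
                .orElse(allRows);
    }

    public static void main(String[] args) {
        List<String> rows = List.of("a", "b", "c", "d");
        System.out.println(paginate(rows, Optional.of(2)));   // [a, b]
        System.out.println(paginate(rows, Optional.empty())); // [a, b, c, d]
    }
}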

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public void onObjectBatch(final Stream<Tuple2<Long, IBatchRecord>> batch, final Optional<Integer> batch_size,
        final Optional<JsonNode> grouping_key) {
    if (_deduplication_is_disabled.get()) {
        // no deduplication, generally shouldn't be here...
        // ...but if we are, do the best we can
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), Optional.empty()));
        return;
    }

    // Create big query

    final Tuple3<QueryComponent<JsonNode>, List<Tuple2<JsonNode, Tuple2<Long, IBatchRecord>>>, Either<String, List<String>>> fieldinfo_dedupquery_keyfields = getDedupQuery(
            batch, _dedup_fields.get(), _db_mapper.get());

    // Get duplicate results

    final Tuple2<List<String>, Boolean> fields_include = getIncludeFields(_policy.get(), _dedup_fields.get(),
            _timestamp_field.get());

    final CompletableFuture<Iterator<JsonNode>> dedup_res = fieldinfo_dedupquery_keyfields._2().isEmpty()
            ? CompletableFuture.completedFuture(Collections.<JsonNode>emptyList().iterator())
            : _dedup_context.get().getObjectsBySpec(fieldinfo_dedupquery_keyfields._1(), fields_include._1(),
                    fields_include._2()).thenApply(cursor -> cursor.iterator());

    // Wait for it to finish

    // (create a handy results structure while we wait)
    final LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = fieldinfo_dedupquery_keyfields
            ._2().stream()
            .collect(Collector.of(
                    () -> new LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>>(),
                    (acc, t2) -> {
                        // (ie only the first element is added, duplicate elements are removed)
                        final Tuple3<Long, IBatchRecord, ObjectNode> t3 = Tuples._3T(t2._2()._1(), t2._2()._2(),
                                _mapper.createObjectNode());
                        acc.compute(t2._1(), (k, v) -> {
                            final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_list = (null == v)
                                    ? new LinkedList<>()
                                    : v;
                            new_list.add(t3);
                            return new_list;
                        });
                    }, (map1, map2) -> {
                        map1.putAll(map2);
                        return map1;
                    }));

    //TODO (ALEPH-20): add timestamps to annotation
    //TODO (ALEPH-20): support different timestamp fields for the different buckets
    //TODO (ALEPH-20): really need to support >1 current enrichment job 
    //                 ^^(Really really longer term you should be able to decide what objects you want and what you don't  <- NOTE: don't remember what i meant here)

    final Iterator<JsonNode> cursor = dedup_res.join();

    // Handle the results

    final Stream<JsonNode> records_to_delete = Lambdas.get(() -> {
        if (isCustom(_doc_schema.get().deduplication_policy())
                || _doc_schema.get().delete_unhandled_duplicates()) {
            return Optionals.streamOf(cursor, true)
                    .collect(Collectors.groupingBy(
                            ret_obj -> getKeyFieldsAgain(ret_obj, fieldinfo_dedupquery_keyfields._3())))
                    .entrySet().stream().<JsonNode>flatMap(kv -> {

                        final Optional<JsonNode> maybe_key = kv.getKey();
                        final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                                .map(key -> mutable_obj_map.get(key));

                        // Stats:
                        _mutable_stats.duplicate_keys++;
                        _mutable_stats.duplicates_existing += kv.getValue().size();
                        _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                        //DEBUG
                        //System.out.println("?? " + kv.getValue().size() + " vs " + maybe_key + " vs " + matching_records.map(x -> Integer.toString(x.size())).orElse("(no match)"));

                        return matching_records
                                .<Stream<JsonNode>>map(records -> handleDuplicateRecord(_doc_schema.get(),
                                        _custom_handler.optional().map(
                                                handler -> Tuples._2T(handler, this._custom_context.get())),
                                        _timestamp_field.get(), records, kv.getValue(), maybe_key.get(),
                                        mutable_obj_map))
                                .orElse(Stream.empty());
                    });
        } else {
            Optionals.streamOf(cursor, true).forEach(ret_obj -> {
                final Optional<JsonNode> maybe_key = getKeyFieldsAgain(ret_obj,
                        fieldinfo_dedupquery_keyfields._3());
                final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                        .map(key -> mutable_obj_map.get(key));

                //DEBUG
                //System.out.println("?? " + ret_obj + " vs " + maybe_key + " vs " + matching_record.map(x -> x._2().getJson().toString()).orElse("(no match)"));

                // Stats:
                _mutable_stats.duplicate_keys++;
                _mutable_stats.duplicates_existing++;
                _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                matching_records.ifPresent(records -> handleDuplicateRecord(_doc_schema.get(),
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        _timestamp_field.get(), records, Arrays.asList(ret_obj), maybe_key.get(),
                        mutable_obj_map));
            });
            return Stream.<JsonNode>empty();
        }
    });

    final List<Object> ids = records_to_delete.map(j -> jsonToObject(j)).filter(j -> null != j)
            .collect(Collectors.toList());

    if (!ids.isEmpty()) { // fire a bulk deletion request
        mutable_uncompleted_deletes.add(
                _dedup_context.get().deleteObjectsBySpec(CrudUtils.allOf().withAny(AnnotationBean._ID, ids)));

        _mutable_stats.deleted += ids.size();

        //(quickly see if we can reduce the number of outstanding requests)
        final Iterator<CompletableFuture<Long>> it = mutable_uncompleted_deletes.iterator();
        while (it.hasNext()) {
            final CompletableFuture<Long> cf = it.next();
            if (cf.isDone()) {
                it.remove();
            } else
                break; // i.e. stop as soon as we hit one that isn't complete
        }
    }

    _mutable_stats.nonduplicate_keys += mutable_obj_map.size();

    if (Optional.ofNullable(_doc_schema.get().custom_finalize_all_objects()).orElse(false)) {
        mutable_obj_map.entrySet().stream()
                .forEach(kv -> handleCustomDeduplication(
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        kv.getValue(), Collections.emptyList(), kv.getKey()));
    } else { // Just emit the last element of each grouped object set
        mutable_obj_map.values().stream().map(t -> t.peekLast())
                .forEach(t -> _context.get().emitImmutableObject(t._1(), t._2().getJson(), Optional.of(t._3()),
                        Optional.empty(), Optional.empty()));
    }
}
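
Several lookups in this method rely on maybe_key.map(key -> mutable_obj_map.get(key)): because map wraps its result as if by ofNullable, a Map.get that returns null simply yields an empty Optional, which the later orElse(0) and ifPresent calls handle without any explicit null checks. A tiny sketch of that behavior:

import java.util.Map;
import java.util.Optional;

public class NullSafeLookup {
    public static void main(String[] args) {
        Map<String, Integer> counts = Map.of("a", 3);

        Optional<String> presentKey = Optional.of("a");
        Optional<String> missingKey = Optional.of("b");

        // Map.get may return null; Optional.map wraps that null into an empty Optional
        // (as if by ofNullable), so no null check is needed downstream
        int a = presentKey.map(counts::get).orElse(0);
        int b = missingKey.map(counts::get).orElse(0);

        System.out.println(a); // 3
        System.out.println(b); // 0
    }
}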

From source file:io.divolte.server.recordmapping.DslRecordMapping.java

public <T> void map(final String fieldName, final T literal) {
    if (!COMPATIBLE_PRIMITIVES.containsKey(literal.getClass())) {
        throw new SchemaMappingException(
                "Type error. Cannot map literal %s of type %s. Only primitive types are allowed.",
                literal.toString(), literal.getClass());
    }

    final Field field = schema.getField(fieldName);
    if (field == null) {
        throw new SchemaMappingException("Field %s does not exist in Avro schema; error in mapping %s onto %s",
                fieldName, literal, fieldName);
    }

    final Optional<Schema> targetSchema = unpackNullableUnion(field.schema());
    if (!targetSchema.map((s) -> s.getType() == COMPATIBLE_PRIMITIVES.get(literal.getClass())).orElse(false)) {
        throw new SchemaMappingException(
                "Type mismatch. Cannot map literal %s of type %s onto a field of type %s (type of value and schema of field do not match).",
                literal.toString(), literal.getClass(), field.schema());
    }

    stack.getLast().add((h, e, c, r) -> {
        r.set(field, literal);
        return MappingAction.MappingResult.CONTINUE;
    });
}
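
The targetSchema.map(...).orElse(false) test is a compact way to evaluate a predicate only when a value is present and to treat absence as failure. A minimal sketch of the same pattern, with an illustrative type check standing in for the Avro schema comparison:

import java.util.Optional;

public class TypeCheck {
    public static void main(String[] args) {
        Optional<Class<?>> resolved = Optional.of(String.class);
        Optional<Class<?>> unresolved = Optional.empty();

        // map evaluates the compatibility check only when a type was resolved;
        // orElse(false) treats "nothing resolved" as "not compatible"
        boolean compatible = resolved.map(t -> t == String.class).orElse(false);
        boolean unknown = unresolved.map(t -> t == String.class).orElse(false);

        System.out.println(compatible); // true
        System.out.println(unknown);    // false
    }
}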

From source file:alfio.manager.EventManager.java

private void insertAdditionalField(Event event, AdditionalField f, int order) {
    String serializedRestrictedValues = toSerializedRestrictedValues(f);
    Optional<EventModification.AdditionalService> linkedAdditionalService = Optional
            .ofNullable(f.getLinkedAdditionalService());
    Integer additionalServiceId = linkedAdditionalService
            .map(as -> Optional.ofNullable(as.getId()).orElseGet(() -> findAdditionalService(event, as)))
            .orElse(-1);
    Context context = linkedAdditionalService.isPresent() ? Context.ADDITIONAL_SERVICE : Context.ATTENDEE;
    int configurationId = ticketFieldRepository
            .insertConfiguration(event.getId(), f.getName(), order, f.getType(), serializedRestrictedValues,
                    f.getMaxLength(), f.getMinLength(), f.isRequired(), context, additionalServiceId)
            .getKey();
    f.getDescription().forEach((locale, value) -> ticketFieldRepository.insertDescription(configurationId,
            locale, Json.GSON.toJson(value)));
}
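
This example nests two Optionals: the outer map runs only when a linked additional service exists, and inside it another Optional supplies a fallback when the service has no id yet, with orElse(-1) as the final sentinel. A simplified sketch under assumed names (Service and findServiceId are illustrative, not the alf.io types):

import java.util.Optional;

public class LinkedServiceId {

    // Illustrative stand-in for the linked additional service
    record Service(Integer id) {}

    // Hypothetical lookup used when the service has no id yet
    static int findServiceId(Service s) {
        return 42;
    }

    static int resolveId(Service linkedService) {
        return Optional.ofNullable(linkedService)
                // inner Optional: use the stored id if present, otherwise look it up
                .map(s -> Optional.ofNullable(s.id()).orElseGet(() -> findServiceId(s)))
                // outer Optional: no linked service at all maps to the sentinel -1
                .orElse(-1);
    }

    public static void main(String[] args) {
        System.out.println(resolveId(new Service(7)));    // 7
        System.out.println(resolveId(new Service(null))); // 42
        System.out.println(resolveId(null));              // -1
    }
}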

From source file:org.ow2.proactive.connector.iaas.cloud.provider.azure.AzureProvider.java

private VirtualMachine.DefinitionStages.WithLinuxCreateManaged configureLinuxVirtualMachine(Azure azureService,
        String instanceTag, Region region, ResourceGroup resourceGroup, InstanceCredentials instanceCredentials,
        VirtualMachineCustomImage image, Creatable<NetworkInterface> creatableNetworkInterface) {
    // Retrieve optional credentials
    Optional<String> optionalUsername = Optional.ofNullable(instanceCredentials)
            .map(InstanceCredentials::getUsername);
    Optional<String> optionalPassword = Optional.ofNullable(instanceCredentials)
            .map(InstanceCredentials::getPassword);
    Optional<String> optionalPublicKey = Optional.ofNullable(instanceCredentials)
            .map(InstanceCredentials::getPublicKey);

    // Prepare the VM without credentials
    VirtualMachine.DefinitionStages.WithLinuxRootPasswordOrPublicKeyManaged creatableVMWithoutCredentials = azureService
            .virtualMachines().define(instanceTag).withRegion(region).withExistingResourceGroup(resourceGroup)
            .withNewPrimaryNetworkInterface(creatableNetworkInterface).withLinuxCustomImage(image.id())
            .withRootUsername(optionalUsername.orElse(DEFAULT_USERNAME));

    // Set the credentials (whether password or SSH key)
    return optionalPublicKey.map(creatableVMWithoutCredentials::withSsh).orElseGet(
            () -> creatableVMWithoutCredentials.withRootPassword(optionalPassword.orElse(DEFAULT_PASSWORD)));
}
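
The final line uses map plus orElseGet to choose between two construction paths: the SSH variant when a public key is present, otherwise a lazily built password fallback. A toy sketch of that branching, with plain strings standing in for the Azure builder types:

import java.util.Optional;

public class CredentialChoice {
    public static void main(String[] args) {
        Optional<String> publicKey = Optional.empty();
        Optional<String> password = Optional.of("s3cret");

        // map builds the SSH variant only when a public key was given;
        // orElseGet lazily falls back to the password variant
        String vmDefinition = publicKey
                .map(key -> "vm-with-ssh-key:" + key)
                .orElseGet(() -> "vm-with-password:" + password.orElse("default-password"));

        System.out.println(vmDefinition); // vm-with-password:s3cret
    }
}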

From source file:com.thinkbiganalytics.feedmgr.rest.controller.DatasourceController.java

/**
 * Gets the table names from the specified data source.
 *
 * @param idStr  the data source id
 * @param schema the schema name, or {@code null} for all schemas
 * @return the list of table names
 */
@GET
@Path("{id}/tables")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation(value = "Gets the table names from the data source.", notes = "Connects to the database specified by the data source.")
@ApiResponses({
        @ApiResponse(code = 200, message = "Returns the table names.", response = String.class, responseContainer = "List"),
        @ApiResponse(code = 403, message = "Access denied.", response = RestResponseStatus.class),
        @ApiResponse(code = 404, message = "A JDBC data source with that id does not exist.", response = RestResponseStatus.class),
        @ApiResponse(code = 500, message = "NiFi or the database are unavailable.", response = RestResponseStatus.class) })
public Response getTableNames(@PathParam("id") final String idStr, @QueryParam("schema") final String schema,
        @QueryParam("tableName") final String tableName) {
    // Verify user has access to data source
    final Optional<com.thinkbiganalytics.metadata.api.datasource.Datasource.ID> id = metadata.read(() -> {
        accessController.checkPermission(AccessController.SERVICES,
                FeedServicesAccessControl.ACCESS_DATASOURCES);

        final com.thinkbiganalytics.metadata.api.datasource.Datasource datasource = datasetProvider
                .getDatasource(datasetProvider.resolve(idStr));
        return Optional.ofNullable(datasource)
                .map(com.thinkbiganalytics.metadata.api.datasource.Datasource::getId);
    });

    // Retrieve table names using system user
    return metadata.read(() -> {
        final List<String> tables = id.map(datasetProvider::getDatasource)
                .map(ds -> datasourceTransform.toDatasource(ds, DatasourceModelTransform.Level.ADMIN))
                .filter(JdbcDatasource.class::isInstance).map(JdbcDatasource.class::cast)
                .map(datasource -> dbcpConnectionPoolTableInfo.getTableNamesForDatasource(datasource, schema,
                        tableName))
                .orElseThrow(
                        () -> new NotFoundException("No JDBC datasource exists with the given ID: " + idStr));
        return Response.ok(tables).build();
    }, MetadataAccess.SERVICE);
}
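
The table lookup chains filter, a cast via map, a further map, and finally orElseThrow, so a missing or non-JDBC data source surfaces as an exception rather than a null. A reduced sketch of the same chain, using illustrative placeholder types instead of the Kylo and JAX-RS classes (IllegalArgumentException stands in for NotFoundException):

import java.util.Optional;

public class DatasourceLookup {

    static class Datasource {}
    static class JdbcDatasource extends Datasource {}

    static String tableNamesOrThrow(Optional<Datasource> datasource) {
        return datasource
                .filter(JdbcDatasource.class::isInstance)  // only JDBC sources qualify
                .map(JdbcDatasource.class::cast)           // safe cast inside the Optional
                .map(jdbc -> "tables for " + jdbc.getClass().getSimpleName()) // stand-in for the real lookup
                .orElseThrow(() -> new IllegalArgumentException("No JDBC datasource exists with the given ID"));
    }

    public static void main(String[] args) {
        System.out.println(tableNamesOrThrow(Optional.of(new JdbcDatasource())));
        // tableNamesOrThrow(Optional.of(new Datasource())) would throw IllegalArgumentException
    }
}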

From source file:com.thinkbiganalytics.feedmgr.rest.controller.DatasourceController.java

/**
 * Gets the schema of the specified table using the specified data source.
 *
 * @param idStr     the data source id
 * @param tableName the table name
 * @param schema    the schema name, or {@code null} to search all schemas
 * @return the table and field details
 */
@GET
@Path("{id}/tables/{tableName}")
@Produces(MediaType.APPLICATION_JSON)
@ApiOperation(value = "Gets the schema of the specified table.", notes = "Connects to the database specified by the data source.")
@ApiResponses({ @ApiResponse(code = 200, message = "Returns the table schema.", response = TableSchema.class),
        @ApiResponse(code = 403, message = "Access denied.", response = RestResponseStatus.class),
        @ApiResponse(code = 404, message = "A JDBC data source with that id does not exist.", response = RestResponseStatus.class),
        @ApiResponse(code = 500, message = "NiFi or the database are unavailable.", response = RestResponseStatus.class) })
public Response describeTable(@PathParam("id") final String idStr,
        @PathParam("tableName") final String tableName, @QueryParam("schema") final String schema) {
    // Verify user has access to data source
    final Optional<com.thinkbiganalytics.metadata.api.datasource.Datasource.ID> id = metadata.read(() -> {
        accessController.checkPermission(AccessController.SERVICES,
                FeedServicesAccessControl.ACCESS_DATASOURCES);

        final com.thinkbiganalytics.metadata.api.datasource.Datasource datasource = datasetProvider
                .getDatasource(datasetProvider.resolve(idStr));
        return Optional.ofNullable(datasource)
                .map(com.thinkbiganalytics.metadata.api.datasource.Datasource::getId);
    });

    // Retrieve table description using system user
    return metadata.read(() -> {
        final TableSchema tableSchema = id.map(datasetProvider::getDatasource)
                .map(ds -> datasourceTransform.toDatasource(ds, DatasourceModelTransform.Level.ADMIN))
                .filter(JdbcDatasource.class::isInstance).map(JdbcDatasource.class::cast)
                .map(datasource -> dbcpConnectionPoolTableInfo.describeTableForDatasource(datasource, schema,
                        tableName))
                .orElseThrow(
                        () -> new NotFoundException("No JDBC datasource exists with the given ID: " + idStr));
        return Response.ok(tableSchema).build();
    }, MetadataAccess.SERVICE);
}