Example usage for java.util.Optional.map

List of usage examples for java.util.Optional.map

Introduction

This page collects usage examples for java.util.Optional.map drawn from real-world projects.

Prototype

public <U> Optional<U> map(Function<? super T, ? extends U> mapper) 

Document

If a value is present, returns an Optional describing (as if by ofNullable) the result of applying the given mapping function to the value; otherwise returns an empty Optional.
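
A minimal, self-contained illustration of that contract (the class name and values are ours, not taken from the projects below):

import java.util.Optional;

public class OptionalMapDemo {
    public static void main(String[] args) {
        // Present value: the mapper runs and its result is re-wrapped.
        Optional<Integer> length = Optional.of("alice").map(String::length);
        System.out.println(length); // Optional[5]

        // Empty value: the mapper is skipped entirely.
        System.out.println(Optional.<String>empty().map(String::length)); // Optional.empty

        // A mapper that returns null yields an empty Optional (as if by ofNullable).
        System.out.println(Optional.of("alice").map(s -> (String) null)); // Optional.empty
    }
}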

Usage

From source file: com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

/** Utility function for adding a set of objects to a single index
 * @param rw_context - either the index/type context, or just (index,type) for retries 
 * @param new_object - either the object to insert/save, or (id, string source) (must be the object (left) if the index/type context (i.e. left) is used for "rw_context")
 * @param replace_if_present - replace the existing object (else error)
 * @param bulk - whether being called as part of a bulk operation
 * @return
 */
private IndexRequestBuilder singleObjectIndexRequest(
        final Either<ReadWriteContext, Tuple2<String, String>> rw_context,
        final Either<O, Tuple2<String, String>> new_object, final boolean replace_if_present,
        final boolean bulk) {
    final Either<JsonNode, Tuple2<String, String>> json_object = new_object.left().map(left -> {
        return ((JsonNode.class.isAssignableFrom(_state.clazz)) ? (JsonNode) left
                : BeanTemplateUtils.toJson(left));
    });

    final Optional<String> maybe_preferred_index = rw_context.<Optional<String>>either(
            left -> Optional.of(left.indexContext().getWritableIndex(Optional.of(json_object.left().value()))),
            right -> Optional.empty());

    // Get and remove some built-in fields if present
    final Optional<String> maybe_id = json_object.<Optional<String>>either(
            json -> Optional.ofNullable(((ObjectNode) json).remove(JsonUtils._ID)).map(j -> j.asText()),
            json_str -> Optional.empty());
    final Optional<String> maybe_type = json_object.<Optional<String>>either(json -> Optional
            .ofNullable(((ObjectNode) json).remove(ElasticsearchUtils._TYPE)).map(j -> j.asText()),
            json_str -> Optional.empty());

    // For security reasons this needs to be a substring of the primary segment
    final Optional<String> maybe_index = json_object.<Optional<String>>either(json -> Optional
            .ofNullable(((ObjectNode) json).remove(ElasticsearchUtils._INDEX)).map(j -> j.asText()),
            json_str -> Optional.empty()).filter(index -> {
                final String preferred_index = maybe_preferred_index.get(); // (exists by construction)
                final int id_index = preferred_index.lastIndexOf("__");

                if (id_index > 0) {
                    final String reqd_base = preferred_index.substring(0, id_index + 14); // 2 for __ + 12 for UUID
                    return index.startsWith(reqd_base);
                } else
                    return false;
            });

    return Optional
            .of(rw_context
                    .<IndexRequestBuilder>either(
                            left -> _state.client.prepareIndex(
                                    maybe_index.orElseGet(() -> maybe_preferred_index.get()), //(exists by construction)
                                    maybe_type.orElseGet(() -> left.typeContext().getWriteType())),
                            right -> _state.client.prepareIndex(right._1(), right._2()))
                    .setOpType(replace_if_present ? OpType.INDEX : OpType.CREATE)
                    .setConsistencyLevel(WriteConsistencyLevel.ONE)
                    .setRefresh(!bulk && CreationPolicy.OPTIMIZED != _state.creation_policy)
                    .setSource(json_object.<String>either(left -> left.toString(), right -> right._2())))
            .map(i -> json_object.<IndexRequestBuilder>either(left -> maybe_id.map(id -> i.setId(id)).orElse(i),
                    right -> i.setId(right._1())))
            //DEBUG
            //.map(irb -> { System.out.println("REQUEST INDICES = " + Arrays.toString(irb.request().indices())); return irb; })
            .get();
}
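
The maybe_id, maybe_type and maybe_index lookups above all reduce to one idiom: remove a field from a JSON object and convert it only if it was actually present. A stripped-down sketch of that idiom using Jackson (the field name is illustrative):

import java.util.Optional;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class RemoveFieldSketch {
    public static void main(String[] args) {
        ObjectNode json = new ObjectMapper().createObjectNode().put("_id", "abc123");

        // ObjectNode.remove returns the removed node, or null if the field was
        // absent, so ofNullable + map yields the text form only when it existed.
        Optional<String> maybeId = Optional.ofNullable(json.remove("_id")).map(j -> j.asText());

        System.out.println(maybeId); // Optional[abc123]
    }
}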

From source file: com.ikanow.aleph2.core.shared.services.MultiDataService.java

/** User c'tor - standard case
 * @param bucket
 * @param context
 * @param maybe_get_storage_type
 * @param maybe_get_buffer_name
 */
protected MultiDataService(final DataBucketBean bucket, final IServiceContext context,
        final Optional<Function<IGenericDataService, Optional<String>>> maybe_get_storage_type,
        final Optional<Function<IGenericDataService, Optional<String>>> maybe_get_buffer_name) {
    // Insert or overwrite mode:
    _doc_write_mode = getWriteMode(bucket);

    _services = DataServiceUtils.selectDataServices(bucket.data_schema(), context);

    _services.asMap().entrySet().stream().forEach(kv -> {
        final Set<String> vals = kv.getValue().stream().collect(Collectors.toSet());
        // (the order doesn't really matter here, so just to "look" sensible:)
        if (vals.contains(DataSchemaBean.SearchIndexSchemaBean.name)) {
            Tuple2<IDataWriteService<JsonNode>, IDataWriteService.IBatchSubservice<JsonNode>> t2 = getWriters(
                    bucket, kv.getKey(), maybe_get_storage_type, maybe_get_buffer_name);
            _crud_index_service = t2._1();
            _batch_index_service = t2._2();
            storeWriters(t2, vals);
        } else if (vals.contains(DataSchemaBean.DocumentSchemaBean.name)) {
            Tuple2<IDataWriteService<JsonNode>, IDataWriteService.IBatchSubservice<JsonNode>> t2 = getWriters(
                    bucket, kv.getKey(), maybe_get_storage_type, maybe_get_buffer_name);
            _crud_doc_service = t2._1();
            _batch_doc_service = t2._2();
            storeWriters(t2, vals);
        } else if (vals.contains(DataSchemaBean.DataWarehouseSchemaBean.name)) {
            Tuple2<IDataWriteService<JsonNode>, IDataWriteService.IBatchSubservice<JsonNode>> t2 = getWriters(
                    bucket, kv.getKey(), maybe_get_storage_type, maybe_get_buffer_name);
            _crud_data_warehouse_service = t2._1();
            _batch_data_warehouse_service = t2._2();
            storeWriters(t2, vals);
        } else if (vals.contains(DataSchemaBean.GraphSchemaBean.name)) {
            Tuple2<IDataWriteService<JsonNode>, IDataWriteService.IBatchSubservice<JsonNode>> t2 = getWriters(
                    bucket, kv.getKey(), maybe_get_storage_type, maybe_get_buffer_name);
            _crud_graph_service = t2._1();
            _batch_graph_service = t2._2();
            storeWriters(t2, vals);
        } else if (vals.contains(DataSchemaBean.ColumnarSchemaBean.name)) {
            Tuple2<IDataWriteService<JsonNode>, IDataWriteService.IBatchSubservice<JsonNode>> t2 = getWriters(
                    bucket, kv.getKey(), maybe_get_storage_type, maybe_get_buffer_name);
            _crud_columnar_service = t2._1();
            _batch_columnar_service = t2._2();
            storeWriters(t2, vals);
        } else if (vals.contains(DataSchemaBean.TemporalSchemaBean.name)) {
            Tuple2<IDataWriteService<JsonNode>, IDataWriteService.IBatchSubservice<JsonNode>> t2 = getWriters(
                    bucket, kv.getKey(), maybe_get_storage_type, maybe_get_buffer_name);
            _crud_temporal_service = t2._1();
            _batch_temporal_service = t2._2();
            storeWriters(t2, vals);
        }
        if (vals.contains(DataSchemaBean.StorageSchemaBean.name)) { // (note storage is a bit different, fix the "processed mode")
            Tuple2<IDataWriteService<JsonNode>, IDataWriteService.IBatchSubservice<JsonNode>> t2 = getWriters(
                    bucket, kv.getKey(), maybe_get_storage_type.map(Optional::of).orElseGet(() -> {
                        return Optional
                                .of(__ -> Optional.of(IStorageService.StorageStage.processed.toString()));
                    }), maybe_get_buffer_name);
            _crud_storage_service = t2._1();
            _batch_storage_service = t2._2();
            storeWriters(t2, vals);
        }
    });
}
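
The storage-schema branch above uses map(Optional::of) followed by orElseGet to substitute a default function when none was supplied, without ever unwrapping the Optional. A standalone sketch of that defaulting idiom (the names and the "processed" value are illustrative):

import java.util.Optional;
import java.util.function.Function;

public class DefaultingSketch {
    public static void main(String[] args) {
        Optional<Function<String, Optional<String>>> maybeGetStage = Optional.empty();

        // map re-wraps a present value as Optional.of(value); when empty,
        // orElseGet supplies the fallback, so the result is always present.
        Optional<Function<String, Optional<String>>> withDefault = maybeGetStage
                .map(Optional::of)
                .orElseGet(() -> Optional.of(ignored -> Optional.of("processed")));

        System.out.println(withDefault.get().apply("any")); // Optional[processed]
    }
}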

From source file: com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - update mode
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields like multi-node, you can only change if the bucket status is set to suspended, to make
    // the control logic easy
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}
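
The old_data_service_info handling above is the same shape throughout: map over an Optional, then finish with an orElse default. Because map wraps the mapper's result as if by ofNullable, a null from Map.get simply collapses the chain to empty. A minimal stand-alone version (names illustrative, with a plain Map in place of the Guava Multimap):

import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

public class ChainedMapSketch {
    public static void main(String[] args) {
        Optional<Map<String, Set<String>>> oldServices = Optional.of(
                Collections.singletonMap("search_index_service",
                        Collections.singleton("document_service")));

        // Map.get returns null for a missing key; map wraps that null as
        // Optional.empty(), so orElse supplies the default at the end.
        Set<String> forKey = oldServices
                .map(m -> m.get("no_such_service"))
                .orElse(Collections.emptySet());

        System.out.println(forKey); // []
    }
}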

From source file: alfio.controller.EventController.java

@RequestMapping(value = "/event/{eventName}", method = { RequestMethod.GET, RequestMethod.HEAD })
public String showEvent(@PathVariable("eventName") String eventName, Model model, HttpServletRequest request,
        Locale locale) {

    return eventRepository.findOptionalByShortName(eventName)
            .filter(e -> e.getStatus() != Event.Status.DISABLED).map(event -> {
                Optional<String> maybeSpecialCode = SessionUtil.retrieveSpecialPriceCode(request);
                Optional<SpecialPrice> specialCode = maybeSpecialCode
                        .flatMap((trimmedCode) -> specialPriceRepository.getByCode(trimmedCode));

                Optional<PromoCodeDiscount> promoCodeDiscount = SessionUtil
                        .retrievePromotionCodeDiscount(request).flatMap((code) -> promoCodeRepository
                                .findPromoCodeInEventOrOrganization(event.getId(), code));

                final ZonedDateTime now = ZonedDateTime.now(event.getZoneId());
                //hide access restricted ticket categories
                List<TicketCategory> ticketCategories = ticketCategoryRepository
                        .findAllTicketCategories(event.getId());
                Map<Integer, String> categoriesDescription = ticketCategoryDescriptionRepository
                        .descriptionsByTicketCategory(ticketCategories.stream().map(TicketCategory::getId)
                                .collect(Collectors.toList()), locale.getLanguage());

                List<SaleableTicketCategory> saleableTicketCategories = ticketCategories.stream()
                        .filter((c) -> !c.isAccessRestricted() || (specialCode
                                .filter(sc -> sc.getTicketCategoryId() == c.getId()).isPresent()))
                        .map((m) -> new SaleableTicketCategory(m,
                                categoriesDescription.getOrDefault(m.getId(), ""), now, event,
                                ticketReservationManager.countAvailableTickets(event, m),
                                configurationManager.getIntConfigValue(
                                        Configuration.from(event.getOrganizationId(), event.getId(), m.getId(),
                                                ConfigurationKeys.MAX_AMOUNT_OF_TICKETS_BY_RESERVATION),
                                        5),
                                promoCodeDiscount.filter(promoCode -> shouldApplyDiscount(promoCode, m))
                                        .orElse(null)))
                        .collect(Collectors.toList());
                //

                final int orgId = event.getOrganizationId();
                final int eventId = event.getId();
                Map<ConfigurationKeys, Optional<String>> geoInfoConfiguration = configurationManager
                        .getStringConfigValueFrom(
                                Configuration.from(orgId, eventId, ConfigurationKeys.MAPS_PROVIDER),
                                Configuration.from(orgId, eventId, ConfigurationKeys.MAPS_CLIENT_API_KEY),
                                Configuration.from(orgId, eventId, ConfigurationKeys.MAPS_HERE_APP_ID),
                                Configuration.from(orgId, eventId, ConfigurationKeys.MAPS_HERE_APP_CODE));

                LocationDescriptor ld = LocationDescriptor.fromGeoData(event.getLatLong(),
                        TimeZone.getTimeZone(event.getTimeZone()), geoInfoConfiguration);

                final boolean hasAccessPromotions = ticketCategoryRepository
                        .countAccessRestrictedRepositoryByEventId(event.getId()) > 0
                        || promoCodeRepository.countByEventAndOrganizationId(event.getId(),
                                event.getOrganizationId()) > 0;

                String eventDescription = eventDescriptionRepository
                        .findDescriptionByEventIdTypeAndLocale(event.getId(),
                                EventDescription.EventDescriptionType.DESCRIPTION, locale.getLanguage())
                        .orElse("");

                final EventDescriptor eventDescriptor = new EventDescriptor(event, eventDescription);
                List<SaleableTicketCategory> expiredCategories = saleableTicketCategories.stream()
                        .filter(SaleableTicketCategory::getExpired).collect(Collectors.toList());
                List<SaleableTicketCategory> validCategories = saleableTicketCategories.stream()
                        .filter(tc -> !tc.getExpired()).collect(Collectors.toList());
                List<SaleableAdditionalService> additionalServices = additionalServiceRepository
                        .loadAllForEvent(event.getId()).stream().map((as) -> getSaleableAdditionalService(event,
                                locale, as, promoCodeDiscount.orElse(null)))
                        .collect(Collectors.toList());
                Predicate<SaleableTicketCategory> waitingQueueTargetCategory = tc -> !tc.getExpired()
                        && !tc.isBounded();
                boolean validPaymentConfigured = isEventHasValidPaymentConfigurations(event,
                        configurationManager);

                List<SaleableAdditionalService> notExpiredServices = additionalServices.stream()
                        .filter(SaleableAdditionalService::isNotExpired).collect(Collectors.toList());

                List<SaleableAdditionalService> supplements = adjustIndex(0,
                        notExpiredServices.stream()
                                .filter(a -> a.getType() == AdditionalService.AdditionalServiceType.SUPPLEMENT)
                                .collect(Collectors.toList()));
                List<SaleableAdditionalService> donations = adjustIndex(supplements.size(),
                        notExpiredServices.stream()
                                .filter(a -> a.getType() == AdditionalService.AdditionalServiceType.DONATION)
                                .collect(Collectors.toList()));

                model.addAttribute("event", eventDescriptor)//
                        .addAttribute("organization", organizationRepository.getById(event.getOrganizationId()))
                        .addAttribute("ticketCategories", validCategories)//
                        .addAttribute("expiredCategories", expiredCategories)//
                        .addAttribute("containsExpiredCategories", !expiredCategories.isEmpty())//
                        .addAttribute("showNoCategoriesWarning", validCategories.isEmpty())
                        .addAttribute("hasAccessPromotions", hasAccessPromotions)
                        .addAttribute("promoCode", specialCode.map(SpecialPrice::getCode).orElse(null))
                        .addAttribute("locationDescriptor", ld)
                        .addAttribute("pageTitle", "show-event.header.title")
                        .addAttribute("hasPromoCodeDiscount", promoCodeDiscount.isPresent())
                        .addAttribute("promoCodeDiscount", promoCodeDiscount.orElse(null))
                        .addAttribute("displayWaitingQueueForm",
                                EventUtil.displayWaitingQueueForm(event, saleableTicketCategories,
                                        configurationManager, eventStatisticsManager.noSeatsAvailable()))
                        .addAttribute("displayCategorySelectionForWaitingQueue",
                                saleableTicketCategories.stream().filter(waitingQueueTargetCategory)
                                        .count() > 1)
                        .addAttribute("unboundedCategories",
                                saleableTicketCategories.stream().filter(waitingQueueTargetCategory)
                                        .collect(Collectors.toList()))
                        .addAttribute("preSales", EventUtil.isPreSales(event, saleableTicketCategories))
                        .addAttribute("userLanguage", locale.getLanguage())
                        .addAttribute("showAdditionalServices", !notExpiredServices.isEmpty())
                        .addAttribute("showAdditionalServicesDonations", !donations.isEmpty())
                        .addAttribute("showAdditionalServicesSupplements", !supplements.isEmpty())
                        .addAttribute("enabledAdditionalServicesDonations", donations)
                        .addAttribute("enabledAdditionalServicesSupplements", supplements)
                        .addAttribute("forwardButtonDisabled",
                                (saleableTicketCategories.stream()
                                        .noneMatch(SaleableTicketCategory::getSaleable))
                                        || !validPaymentConfigured)
                        .addAttribute("useFirstAndLastName", event.mustUseFirstAndLastName())
                        .addAttribute("validPaymentMethodAvailable", validPaymentConfigured)
                        .addAttribute("validityStart", event.getBegin())
                        .addAttribute("validityEnd", event.getEnd());

                model.asMap().putIfAbsent("hasErrors", false);//
                return "/event/show-event";
            }).orElse(REDIRECT + "/");
}
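
Several of the model attributes above share one idiom: map through a getter, then orElse(null) to hand the view layer the nullable convention it expects. In miniature (the SpecialPrice stand-in is ours, not the alf.io class):

import java.util.Optional;

public class ModelAttributeSketch {
    static class SpecialPrice {
        private final String code;
        SpecialPrice(String code) { this.code = code; }
        String getCode() { return code; }
    }

    public static void main(String[] args) {
        Optional<SpecialPrice> specialCode = Optional.of(new SpecialPrice("EARLYBIRD"));

        // The getter runs only when a value is present; orElse(null) converts
        // back to the plain nullable reference the template model expects.
        String promoCode = specialCode.map(SpecialPrice::getCode).orElse(null);

        System.out.println(promoCode); // EARLYBIRD
    }
}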

From source file: com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchIndexUtils.java

/** Creates a mapping for the bucket - columnar elements
 *  ALSO INCLUDES THE PER-FIELD CONFIGURATION FROM THE SEARCH_INDEX_SCHEMA AND TEMPORAL_SCHEMA
 * @param bucket
 * @return
 * @throws IOException 
 */
public static XContentBuilder getColumnarMapping(final DataBucketBean bucket,
        Optional<XContentBuilder> to_embed,
        final LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> field_lookups,
        final JsonNode enabled_not_analyzed, final JsonNode enabled_analyzed,
        final JsonNode default_not_analyzed, final JsonNode default_analyzed,
        final Optional<JsonNode> doc_schema, final SearchIndexSchemaDefaultBean search_index_schema_override,
        final ObjectMapper mapper, final String index_type) {
    try {
        final XContentBuilder start = to_embed.orElse(XContentFactory.jsonBuilder().startObject());
        final boolean columnar_enabled = Optional.ofNullable(bucket.data_schema())
                .map(DataSchemaBean::columnar_schema).filter(s -> Optional.ofNullable(s.enabled()).orElse(true))
                .isPresent();

        final Map<Either<String, Tuple2<String, String>>, String> type_override = Optionals
                .of(() -> bucket.data_schema().search_index_schema().type_override()).map(m -> buildTypeMap(m))
                .orElse(Collections.emptyMap());

        // If no columnar settings are specified then go with a sensible default
        final Optional<DataSchemaBean.ColumnarSchemaBean> maybe_user_columnar_schema = Optionals
                .of(() -> bucket.data_schema().columnar_schema());
        final DataSchemaBean.ColumnarSchemaBean columnar_schema = maybe_user_columnar_schema
                .filter(__ -> columnar_enabled).filter(schema -> (null == schema.field_include_list()) && // ie the entire thing is empty
                        (null == schema.field_exclude_list()) && (null == schema.field_include_pattern_list())
                        && (null == schema.field_type_include_list())
                        && (null == schema.field_exclude_pattern_list())
                        && (null == schema.field_type_exclude_list()))
                .map(schema -> BeanTemplateUtils.clone(schema)
                        .with(DataSchemaBean.ColumnarSchemaBean::field_type_include_list,
                                Arrays.asList("string", "number", "date"))
                        .done())
                .orElseGet(() -> maybe_user_columnar_schema.orElse(null)) // (NOTE: can only be null if columnar_enabled is false)
        ;

        final LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> column_lookups_pretypes = Stream
                .of(columnar_enabled
                        ? createFieldIncludeLookups(
                                Optionals.ofNullable(columnar_schema.field_include_list()).stream(),
                                fn -> getKey(fn), field_lookups, enabled_not_analyzed, enabled_analyzed, true,
                                search_index_schema_override, type_override, mapper, index_type)
                        : Stream.<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>>empty(),
                        columnar_enabled
                                ? createFieldExcludeLookups(
                                        Optionals.ofNullable(columnar_schema.field_exclude_list()).stream(),
                                        fn -> getKey(fn), field_lookups, search_index_schema_override,
                                        type_override, mapper, index_type)
                                : Stream.<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>>empty(),
                        columnar_enabled
                                ? createFieldIncludeLookups(
                                        Optionals.ofNullable(columnar_schema.field_include_pattern_list())
                                                .stream(),
                                        fn -> Either.right(Tuples._2T(fn, "*")), field_lookups,
                                        enabled_not_analyzed, enabled_analyzed, true,
                                        search_index_schema_override, type_override, mapper, index_type)
                                : Stream.<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>>empty(),
                        columnar_enabled
                                ? createFieldIncludeLookups(
                                        Optionals.ofNullable(columnar_schema.field_type_include_list())
                                                .stream(),
                                        fn -> Either.right(Tuples._2T("*", fn)), field_lookups,
                                        enabled_not_analyzed, enabled_analyzed, true,
                                        search_index_schema_override, type_override, mapper, index_type)
                                : Stream.<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>>empty(),
                        columnar_enabled
                                ? createFieldExcludeLookups(
                                        Optionals.ofNullable(columnar_schema.field_exclude_pattern_list())
                                                .stream(),
                                        fn -> Either.right(Tuples._2T(fn, "*")), field_lookups,
                                        search_index_schema_override, type_override, mapper, index_type)
                                : Stream.<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>>empty(),
                        columnar_enabled
                                ? createFieldExcludeLookups(
                                        Optionals.ofNullable(columnar_schema.field_type_exclude_list())
                                                .stream(),
                                        fn -> Either.right(Tuples._2T("*", fn)), field_lookups,
                                        search_index_schema_override, type_override, mapper, index_type)
                                : Stream.<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>>empty(),

                        // Finally add the default columnar lookups to the unmentioned strings (ensures that *_* is at the end)

                        field_lookups.entrySet().stream()
                                .flatMap(kv -> createFieldIncludeLookups(Stream.of(kv.getKey().toString()),
                                        __ -> kv.getKey(), field_lookups, default_not_analyzed,
                                        default_analyzed, false, search_index_schema_override, type_override,
                                        mapper, index_type)))
                .flatMap(x -> x).collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2(), (v1, v2) -> v1, // (ie ignore duplicates)
                        () -> new LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode>()));

        // Also any types that didn't map onto one of the fields or tokens:
        final LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> column_lookups_types = type_override
                .entrySet().stream()
                // (filter - convert name/* to name/type and check if I've already created such an entry using the type map)
                .filter(kv -> !column_lookups_pretypes
                        .containsKey(kv.getKey().either(s -> s, t2 -> Tuples._2T(t2._1(), kv.getValue()))))
                .flatMap(kv -> createFieldIncludeLookups(Stream.of(kv.getKey().toString()),
                        __ -> kv.getKey().<Either<String, Tuple2<String, String>>>either(s -> Either.left(s),
                                t2 -> Either.right(Tuples._2T(t2._1(), kv.getValue()))),
                        field_lookups, default_not_analyzed, default_analyzed, false,
                        search_index_schema_override, type_override, mapper, index_type))
                .collect(Collectors.toMap(t2 -> t2._1(), t2 -> t2._2(), (v1, v2) -> v1,
                        () -> new LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode>()));

        final LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> column_lookups = Stream
                .concat(column_lookups_pretypes.entrySet().stream(), column_lookups_types.entrySet().stream())
                .sorted((a, b) -> Integer.compare(sortKey(a.getKey()), sortKey(b.getKey())))
                .collect(Collectors.toMap(t2 -> t2.getKey(), t2 -> t2.getValue(), (v1, v2) -> v1,
                        () -> new LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode>()));

        final XContentBuilder properties = column_lookups.entrySet().stream()
                // properties not dynamic_templates
                .filter(kv -> kv.getKey().isLeft())
                // overwrite with version built using columns if it exists
                .map(kv -> Tuples._2T(kv.getKey(),
                        column_lookups.getOrDefault(kv.getKey(), kv.getValue())))
                .reduce(Optional.of(start.startObject("properties")) // add doc_schema if it exists
                        .map(props -> doc_schema
                                .map(ds -> Optionals.streamOf(ds.fields(), false)
                                        .reduce(props,
                                                Lambdas.wrap_u((acc, kv) -> acc.rawField(kv.getKey(),
                                                        kv.getValue().toString().getBytes())),
                                                (acc1, acc2) -> acc1 // shouldn't be possible
                                )).orElse(props)).get(),
                        Lambdas.wrap_u((acc, t2) -> acc.rawField(t2._1().left().value(),
                                t2._2().toString().getBytes())), // (left by construction) 
                        (acc1, acc2) -> acc1) // (not actually possible)
                .endObject();

        final XContentBuilder templates = column_lookups.entrySet().stream()
                // properties not dynamic_templates
                .filter(kv -> kv.getKey().isRight())
                // overwrite with version built using columns if it exists
                .map(kv -> Tuples._2T(kv.getKey(), column_lookups.getOrDefault(kv.getKey(), kv.getValue())))
                .reduce(properties.startArray("dynamic_templates"),
                        Lambdas.wrap_u((acc, t2) -> acc.startObject()
                                .rawField(getFieldNameFromMatchPair(t2._1().right().value()),
                                        t2._2().toString().getBytes()) // (right by construction)
                                .endObject()),
                        (acc1, acc2) -> acc1) // (not actually possible)
                .endArray();

        return templates;
    } catch (IOException e) {
        //Handle in-practice-impossible "IOException"
        return null;
    }
}
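
The columnar_enabled flag at the top of this method is map used for null-safe navigation through nested schema beans, ending in a boolean isPresent(). A self-contained sketch with stand-in bean types (ours, not the Aleph2 beans):

import java.util.Optional;

public class NullSafeNavigationSketch {
    static class ColumnarSchema { Boolean enabled() { return null; } } // unset => use default
    static class DataSchema { ColumnarSchema columnar() { return new ColumnarSchema(); } }

    public static void main(String[] args) {
        DataSchema schema = new DataSchema(); // may be null in real bean trees

        // Each map step short-circuits on null; the filter applies an
        // "enabled defaults to true" rule before the final presence test.
        boolean columnarEnabled = Optional.ofNullable(schema)
                .map(DataSchema::columnar)
                .filter(s -> Optional.ofNullable(s.enabled()).orElse(true))
                .isPresent();

        System.out.println(columnarEnabled); // true
    }
}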

From source file: nl.knaw.huygens.alexandria.service.TinkerPopService.java

@Override
public Optional<AlexandriaAnnotation> readAnnotation(UUID uuid, Integer revision) {
    Optional<AnnotationVF> versionedAnnotation = storage.readVF(AnnotationVF.class, uuid, revision);
    if (versionedAnnotation.isPresent()) {
        return versionedAnnotation.map(this::deframeAnnotation);
    } else {
        Optional<AnnotationVF> currentAnnotation = storage.readVF(AnnotationVF.class, uuid);
        if (currentAnnotation.isPresent() && currentAnnotation.get().getRevision().equals(revision)) {
            return currentAnnotation.map(this::deframeAnnotation);
        } else {
            return Optional.empty();
        }
    }
}
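
Both branches delegate the frame-to-domain conversion to map with a method reference, so deframing runs only when the lookup succeeded. In miniature (stand-in types, not the Alexandria classes):

import java.util.Optional;

public class DeframeSketch {
    static class AnnotationVF { String uuid() { return "123e4567"; } }
    static class AlexandriaAnnotation {
        final String id;
        AlexandriaAnnotation(String id) { this.id = id; }
    }

    AlexandriaAnnotation deframe(AnnotationVF vf) {
        return new AlexandriaAnnotation(vf.uuid());
    }

    public static void main(String[] args) {
        DeframeSketch service = new DeframeSketch();
        Optional<AnnotationVF> frame = Optional.of(new AnnotationVF());

        // An instance method reference works like this::deframeAnnotation above:
        // the conversion runs only on a present value, empty passes through.
        Optional<AlexandriaAnnotation> annotation = frame.map(service::deframe);

        System.out.println(annotation.map(a -> a.id).orElse("(absent)")); // 123e4567
    }
}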

From source file: objective.taskboard.controller.FollowUpController.java

private String templateDate(Optional<LocalDate> date, ZoneId timezone) {
    return date.map(d -> ZonedDateTime.of(d, LocalTime.MIDNIGHT, timezone)).orElse(ZonedDateTime.now(timezone))
            .format(formatter);
}
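
One subtlety in this pattern: orElse evaluates its argument eagerly, so ZonedDateTime.now(timezone) is computed even when the date is present (harmless here, but easy to forget when the fallback is expensive). A self-contained variant using orElseGet to defer the fallback (the formatter is our choice for the sketch):

import java.time.LocalDate;
import java.time.LocalTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Optional;

public class TemplateDateSketch {
    static String templateDate(Optional<LocalDate> date, ZoneId timezone) {
        return date.map(d -> ZonedDateTime.of(d, LocalTime.MIDNIGHT, timezone))
                .orElseGet(() -> ZonedDateTime.now(timezone)) // deferred, unlike orElse
                .format(DateTimeFormatter.ISO_LOCAL_DATE);
    }

    public static void main(String[] args) {
        System.out.println(templateDate(Optional.of(LocalDate.of(2024, 1, 15)), ZoneId.of("UTC")));
        System.out.println(templateDate(Optional.empty(), ZoneId.of("UTC")));
    }
}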

From source file: objective.taskboard.data.Issue.java

public String getReleaseId() {
    if (releaseId != null)
        return releaseId;

    Optional<Issue> pc = getParentCard();
    return pc.map(issue -> issue.getReleaseId()).orElse(null);
}

From source file: org.apache.bookkeeper.util.collections.SynchronizedHashMultiMap.java

public synchronized Optional<V> removeAny(K k) {
    Set<Pair<K, V>> set = map.getOrDefault(k.hashCode(), Collections.emptySet());
    Optional<Pair<K, V>> pair = set.stream().filter(p -> p.getLeft().equals(k)).findAny();
    pair.ifPresent(p -> set.remove(p));
    return pair.map(p -> p.getRight());
}

From source file: org.apache.hadoop.hbase.client.RawAsyncHBaseAdmin.java

private CompletableFuture<Boolean> isTableAvailable(TableName tableName, Optional<byte[][]> splitKeys) {
    if (TableName.isMetaTableName(tableName)) {
        return connection.registry.getMetaRegionLocation().thenApply(locs -> Stream
                .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null));
    }
    CompletableFuture<Boolean> future = new CompletableFuture<>();
    addListener(isTableEnabled(tableName), (enabled, error) -> {
        if (error != null) {
            if (error instanceof TableNotFoundException) {
                future.complete(false);
            } else {
                future.completeExceptionally(error);
            }
            return;
        }
        if (!enabled) {
            future.complete(false);
        } else {
            addListener(AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, Optional.of(tableName)),
                    (locations, error1) -> {
                        if (error1 != null) {
                            future.completeExceptionally(error1);
                            return;
                        }
                        List<HRegionLocation> notDeployedRegions = locations.stream()
                                .filter(loc -> loc.getServerName() == null).collect(Collectors.toList());
                        if (notDeployedRegions.size() > 0) {
                            if (LOG.isDebugEnabled()) {
                                LOG.debug("Table " + tableName + " has " + notDeployedRegions.size()
                                        + " regions");
                            }
                            future.complete(false);
                            return;
                        }

                        Optional<Boolean> available = splitKeys
                                .map(keys -> compareRegionsWithSplitKeys(locations, keys));
                        future.complete(available.orElse(true));
                    });
        }
    });
    return future;
}
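
The availability check ends with map-then-orElse(true): when split keys were supplied the comparison decides, otherwise the table counts as available. A stand-alone sketch (compareRegions is a hypothetical stand-in for compareRegionsWithSplitKeys):

import java.util.Optional;

public class OptionalArgumentSketch {
    // Hypothetical stand-in: n split keys divide a table into n + 1 regions.
    static boolean compareRegions(int regionCount, byte[][] splitKeys) {
        return regionCount == splitKeys.length + 1;
    }

    public static void main(String[] args) {
        int regionCount = 3;
        Optional<byte[][]> splitKeys = Optional.of(new byte[][] { { 1 }, { 2 } });

        // With keys present the comparison runs; with Optional.empty() the
        // map step is skipped and orElse treats the table as available.
        boolean available = splitKeys
                .map(keys -> compareRegions(regionCount, keys))
                .orElse(true);

        System.out.println(available); // true
    }
}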