Example usage for java.util Optional orElse

Introduction

This page lists usage examples for java.util Optional orElse.

Prototype

public T orElse(T other) 

Document

If a value is present, returns the value; otherwise returns other.
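
Before the project examples, here is a minimal, self-contained sketch of the behavior described above (the class OrElseDemo and its variable names are ours, for illustration only). One caveat worth knowing: the argument to orElse is evaluated eagerly even when a value is present, so orElseGet(Supplier) is usually the better choice when the fallback is expensive to compute.

import java.util.Optional;

public class OrElseDemo {
    public static void main(String[] args) {
        // A present value: orElse returns the wrapped value and ignores the fallback
        Optional<String> present = Optional.of("value");
        System.out.println(present.orElse("fallback")); // prints "value"

        // An empty Optional: orElse returns the supplied default
        Optional<String> empty = Optional.empty();
        System.out.println(empty.orElse("fallback")); // prints "fallback"

        // The fallback expression runs even when it is not used;
        // orElseGet defers it behind a Supplier instead
        System.out.println(present.orElseGet(() -> "computed fallback")); // prints "value"
    }
}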

Usage

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

public ElasticsearchCrudService(final Class<O> bean_clazz, final ElasticsearchContext es_context,
        final Optional<Boolean> id_ranges_ok, final CreationPolicy creation_policy,
        final Optional<String> auth_fieldname, final Optional<AuthorizationBean> auth,
        final Optional<ProjectBean> project,
        final Optional<DataSchemaBean.WriteSettings> batch_write_settings) {
    _state = new State(bean_clazz, es_context, id_ranges_ok.orElse(false), creation_policy, auth_fieldname,
            auth, project);
    _object_mapper = BeanTemplateUtils.configureMapper(Optional.empty());
    _batch_write_settings = batch_write_settings;
}

From source file:com.orange.ngsi2.server.Ngsi2BaseController.java

/**
 * Endpoint get /v2/types
 * @param limit an optional limit (0 for none)
 * @param offset an optional offset (0 for none)
 * @param options an optional list of options separated by comma. Possible value for option: count.
 *        values option is not supported.
 *        If count is present then the total number of entities is returned in the response as a HTTP header named `X-Total-Count`.
 * @return the entity type json object and http status 200 (ok)
 * @throws Exception
 */
@RequestMapping(method = RequestMethod.GET, value = { "/types" })
final public ResponseEntity<List<EntityType>> retrieveEntityTypesEndpoint(@RequestParam Optional<Integer> limit,
        @RequestParam Optional<Integer> offset, @RequestParam Optional<Set<String>> options) throws Exception {

    boolean count = false;
    if (options.isPresent()) {
        //TODO: to support values as options
        if (options.get().contains("values")) {
            throw new UnsupportedOptionException("values");
        }
        count = options.get().contains("count");
    }
    Paginated<EntityType> entityTypes = retrieveEntityTypes(limit.orElse(0), offset.orElse(0), count);
    if (count) {
        return new ResponseEntity<>(entityTypes.getItems(), xTotalCountHeader(entityTypes.getTotal()),
                HttpStatus.OK);
    }
    return new ResponseEntity<>(entityTypes.getItems(), HttpStatus.OK);
}

From source file:com.ikanow.aleph2.shared.crud.mongodb.services.MongoDbCrudService.java

@Override
public CompletableFuture<Optional<O>> updateAndReturnObjectBySpec(QueryComponent<O> unique_spec,
        final Optional<Boolean> upsert, final UpdateComponent<O> update, final Optional<Boolean> before_updated,
        final List<String> field_list, final boolean include) {
    try {
        final Tuple2<DBObject, DBObject> query_and_meta = MongoDbUtils.convertToMongoQuery(unique_spec);
        final DBObject update_object = MongoDbUtils.createUpdateObject(update);

        final BasicDBObject fields = getFields(field_list, include);

        // ($unset: null removes the object, only possible via the UpdateComponent.deleteObject call) 
        final boolean do_remove = update_object.containsField("$unset")
                && (null == update_object.get("$unset"));

        final O ret_val = do_remove
                ? _state.coll.findAndModify(query_and_meta._1(), fields,
                        (DBObject) query_and_meta._2().get("$sort"), do_remove, (DBObject) null, false, false)
                : _state.coll.findAndModify(query_and_meta._1(), fields,
                        (DBObject) query_and_meta._2().get("$sort"), false, update_object,
                        !before_updated.orElse(false), upsert.orElse(false));

        return CompletableFuture.completedFuture(Optional.ofNullable(ret_val));
    } catch (Exception e) {
        return FutureUtils.<Optional<O>>returnError(e);
    }
}

From source file:com.ethercamp.harmony.service.BlockchainInfoService.java

@Override
public void onApplicationEvent(ApplicationEvent event) {
    if (event instanceof EmbeddedServletContainerInitializedEvent) {
        serverPort = ((EmbeddedServletContainerInitializedEvent) event).getEmbeddedServletContainer().getPort();

        final boolean isPrivateNetwork = env.getProperty("networkProfile", "").equalsIgnoreCase("private");
        final boolean isClassicNetwork = env.getProperty("networkProfile", "").equalsIgnoreCase("classic");

        // find out network name
        final Optional<String> blockHash = Optional.ofNullable(blockchain.getBlockByNumber(0L))
                .map(block -> Hex.toHexString(block.getHash()));
        final Pair<String, Optional<String>> networkInfo;
        if (isPrivateNetwork) {
            networkInfo = Pair.of("Private Miner Network", Optional.empty());
        } else if (isClassicNetwork) {
            networkInfo = Pair.of("Classic ETC", Optional.empty());
        } else {
            networkInfo = blockHash
                    .flatMap(hash -> Optional.ofNullable(BlockchainConsts.getNetworkInfo(env, hash)))
                    .orElse(Pair.of("Unknown network", Optional.empty()));
        }

        final boolean isContractsFeatureEnabled = env.getProperty("feature.contract.enabled", "false")
                .equalsIgnoreCase("true");
        if (!isContractsFeatureEnabled) {
            VM.setVmHook(null);
            log.info("Disabled VM hook due to contracts feature disabled");
        }

        initialInfo.set(new InitialInfoDTO(config.projectVersion() + "-" + config.projectVersionModifier(),
                "Hash: " + BuildInfo.buildHash + ",   Created: " + BuildInfo.buildTime,
                env.getProperty("app.version"), networkInfo.getFirst(), networkInfo.getSecond().orElse(null),
                blockHash.orElse(null), System.currentTimeMillis(), Hex.toHexString(config.nodeId()),
                serverPort, isPrivateNetwork, env.getProperty("portCheckerUrl"), config.bindIp(),
                isContractsFeatureEnabled));

        final String ANSI_RESET = "\u001B[0m";
        final String ANSI_BLUE = "\u001B[34m";
        System.out.println("EthereumJ database dir location: " + systemProperties.databaseDir());
        System.out.println("EthereumJ keystore dir location: " + keystore.getKeyStoreLocation());
        System.out.println(ANSI_BLUE + "Server started at http://localhost:" + serverPort + ANSI_RESET);

        if (!config.getConfig().hasPath("logs.keepStdOut")
                || !config.getConfig().getBoolean("logs.keepStdOut")) {
            createLogAppenderForMessaging();
        }
    }
}

From source file:com.ikanow.aleph2.search_service.elasticsearch.services.TestElasticsearchIndexService.java

/** Builds the alias name (NOTE: always the default, not the primary buffer name - that's the whole point)
 * @param bucket
 * @param time_suffix
 * @return
 */
public List<String> getAliasedBuffers(final DataBucketBean bucket, final Optional<String> time_suffix) {
    return _crud_factory.getClient().admin().indices().prepareStats().clear()
            .setIndices("r__" + ElasticsearchIndexUtils.getBaseIndexName(bucket, Optional.empty())
                    + time_suffix.orElse("*"))
            .setStore(true).get().getIndices().keySet().stream().sorted().collect(Collectors.toList());
}

From source file:org.onosproject.store.ecmap.EventuallyConsistentMapImpl.java

private MapValue<V> removeInternal(K key, Optional<V> value, Optional<MapValue<V>> tombstone) {
    checkState(!destroyed, destroyedMessage);
    checkNotNull(key, ERROR_NULL_KEY);
    checkNotNull(value, ERROR_NULL_VALUE);
    tombstone.ifPresent(v -> checkState(v.isTombstone()));

    counter.incrementCount();
    AtomicBoolean updated = new AtomicBoolean(false);
    AtomicReference<MapValue<V>> previousValue = new AtomicReference<>();
    items.compute(key, (k, existing) -> {
        boolean valueMatches = true;
        if (value.isPresent() && existing != null && existing.isAlive()) {
            valueMatches = Objects.equals(value.get(), existing.get());
        }
        if (existing == null) {
            log.trace("ECMap Remove: Existing value for key {} is already null", k);
        }
        if (valueMatches) {
            if (existing == null) {
                updated.set(tombstone.isPresent());
            } else {
                updated.set(!tombstone.isPresent() || tombstone.get().isNewerThan(existing));
            }
        }
        if (updated.get()) {
            previousValue.set(existing);
            return tombstone.orElse(null);
        } else {
            return existing;
        }
    });
    if (updated.get()) {
        if (persistent) {
            if (tombstone.isPresent()) {
                persistentStore.update(key, tombstone.get());
            } else {
                persistentStore.remove(key);
            }
        }
    }
    return previousValue.get();
}

From source file:com.ikanow.aleph2.shared.crud.elasticsearch.services.ElasticsearchCrudService.java

@Override
public CompletableFuture<Long> deleteObjectsBySpec(final QueryComponent<O> spec) {
    try {
        Tuple2<FilterBuilder, UnaryOperator<SearchRequestBuilder>> query = ElasticsearchUtils
                .convertToElasticsearchFilter(spec, _state.id_ranges_ok);

        final Optional<Long> maybe_size = Optional.ofNullable(spec.getLimit()).filter(x -> x > 0);
        // (don't scroll if a limit is set and we're sorting - note sorting is ignored otherwise)
        final boolean scroll = !(maybe_size.isPresent() && !Optionals.ofNullable(spec.getOrderBy()).isEmpty());
        final long max_size = maybe_size.orElse((long) Integer.MAX_VALUE).intValue();

        final SearchRequestBuilder srb = Optional.of(_state.client.prepareSearch()
                .setIndices(_state.es_context.indexContext().getReadableIndexArray(Optional.empty()))
                .setTypes(_state.es_context.typeContext().getReadableTypeArray())
                .setQuery(QueryBuilders.constantScoreQuery(query._1())).setSize(1000).setFetchSource(false)
                .setNoFields()).map(
                        s -> (!scroll && (null != spec.getOrderBy()))
                                ? spec.getOrderBy().stream().reduce(s,
                                        (ss, sort) -> ss.addSort(sort._1(),
                                                sort._2() > 0 ? SortOrder.ASC : SortOrder.DESC),
                                        (s1, s2) -> s1)
                                : s)
                .map(s -> scroll ? s.setSearchType(SearchType.SCAN).setScroll(new TimeValue(60000)) : s).get();

        return ElasticsearchFutureUtils.wrap(srb.execute(), sr -> {
            long mutable_count = 0L;
            final int batch_size = 50;
            PingPongList<CompletableFuture<?>> mutable_future_batches = new PingPongList<>(batch_size);

            if (scroll && ((sr.getHits().totalHits() > 0) && (0 == sr.getHits().getHits().length))) {
                //(odd workaround, if number of hits < scroll size, then the reply contains no hits, need to scroll an extra time to get it)
                sr = _state.client.prepareSearchScroll(sr.getScrollId()).setScroll(new TimeValue(60000))
                        .execute().actionGet();
            }
            while ((sr.getHits().getHits().length > 0) && (mutable_count < max_size)) {
                BulkRequestBuilder bulk_request = _state.client.prepareBulk();
                for (SearchHit sh : sr.getHits().getHits()) {
                    bulk_request.add(_state.client.prepareDelete().setIndex(sh.index()).setId(sh.id())
                            .setType(sh.type()));

                    mutable_count++; // (for now we'll just report on the _ids we found)
                    if (mutable_count >= max_size)
                        break;
                }
                // We're full, so wait for the first half of the data to complete
                if (mutable_future_batches
                        .add(ElasticsearchFutureUtils.wrap(bulk_request.execute(), __ -> null))) {
                    try {
                        CompletableFuture.allOf(mutable_future_batches.getAboutToBeOverwrittenList().stream()
                                .toArray(CompletableFuture[]::new)).join();
                    } catch (Exception e) {
                    } // just carry on if fails, probably more important to keep trying to delete

                    mutable_future_batches.getAboutToBeOverwrittenList().clear();
                }
                if (scroll && (mutable_count < max_size))
                    sr = _state.client.prepareSearchScroll(sr.getScrollId()).setScroll(new TimeValue(60000))
                            .execute().actionGet();
                else
                    break;
            }
            if (scroll)
                _state.client.prepareClearScroll().addScrollId(sr.getScrollId()).execute(); // fire-and-forget release of the scroll context

            //(wait for any remaining batches - this one we'll allow to error out since we've completed all our operations)
            CompletableFuture
                    .allOf(mutable_future_batches.getCompleteStream().toArray(CompletableFuture[]::new)).join();

            return mutable_count; //(just return an estimate)
        }, (err, future) -> {
            if ((err instanceof IndexMissingException) || (err instanceof SearchPhaseExecutionException)) //(this one can come up as on a read on a newly created index)
            {
                // just treat this like an "object not found"
                future.complete(0L);
            } else {
                future.completeExceptionally(err);
            }
        });
    } catch (Exception e) {
        return FutureUtils.returnError(e);
    }
}

From source file:com.devicehive.service.NetworkService.java

@Transactional(propagation = Propagation.NOT_SUPPORTED)
public NetworkWithUsersAndDevicesVO getWithDevicesAndDeviceClasses(@NotNull Long networkId,
        @NotNull HiveAuthentication hiveAuthentication) {
    HivePrincipal principal = (HivePrincipal) hiveAuthentication.getPrincipal();

    Set<Long> permittedNetworks = principal.getNetworkIds();
    Set<String> permittedDevices = principal.getDeviceGuids();

    Optional<NetworkWithUsersAndDevicesVO> result = of(principal).flatMap(pr -> {
        if (pr.getUser() != null)
            return of(pr.getUser());
        else
            return empty();
    }).flatMap(user -> {
        Long idForFiltering = user.isAdmin() ? null : user.getId();
        List<NetworkWithUsersAndDevicesVO> found = networkDao.getNetworksByIdsAndUsers(idForFiltering,
                Collections.singleton(networkId), permittedNetworks);
        return found.stream().findFirst();
    }).map(network -> {
        //fixme - important, restore functionality once permission evaluator is switched to jwt
        /*if (principal.getKey() != null) {
            Set<AccessKeyPermissionVO> permissions = principal.getKey().getPermissions();
            Set<AccessKeyPermissionVO> filtered = CheckPermissionsHelper
                    .filterPermissions(principal.getKey(), permissions, AccessKeyAction.GET_DEVICE,
                            details.getClientInetAddress(), details.getOrigin());
            if (filtered.isEmpty()) {
                network.setDevices(Collections.emptySet());
            }
        }*/
        if (permittedDevices != null && !permittedDevices.isEmpty()) {
            Set<DeviceVO> allowed = network.getDevices().stream()
                    .filter(device -> permittedDevices.contains(device.getGuid())).collect(Collectors.toSet());
            network.setDevices(allowed);
        }
        return network;
    });

    return result.orElse(null);
}
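
The result.orElse(null) on the last line is a common way to bridge an Optional-based pipeline back to an API that signals "not found" with null. A minimal sketch of the same idiom (all names here are hypothetical, not from the project above):

import java.util.Optional;

public class OrElseNullBridge {

    // Hypothetical lookup that models absence with Optional
    static Optional<String> findName(long id) {
        return id == 1L ? Optional.of("alice") : Optional.empty();
    }

    // Legacy-style wrapper whose callers expect null when nothing matches
    static String findNameOrNull(long id) {
        return findName(id).orElse(null);
    }

    public static void main(String[] args) {
        System.out.println(findNameOrNull(1L)); // prints "alice"
        System.out.println(findNameOrNull(2L)); // prints "null"
    }
}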

From source file:es.upv.grycap.coreutils.fiber.net.UrlBuilder.java

/**
 * Creates a new URL relative to the base URL provided in the constructor of this class. The new relative URL
 * includes the path, query parameters and the internal reference of the {@link UrlBuilder#baseUrl base URL} 
 * provided with this class. An additional fragment, as well as additional query parameters can be optionally 
 * added to the new URL. In addition to the parameters passed to the method as an argument, the supplied 
 * fragment can also include parameters that will be added to the created URL. The created URL is normalized 
 * and unencoded before returning it to the caller. The current implementation has the following limitations:
 * <ul>
 * <li>Arrays are not supported: <tt>q=foo&amp;q=bar</tt> will produce an error.</li>
 * <li>Internal references are only supported in the base URL. Any additional reference provided with the 
 * fragment will be silently ignored: the fragment <tt>/rd#ref</tt> will be appended to the base URL as
 * <tt>/rd</tt>, ignoring the internal reference.</li>
 * </ul>
 * @param fragment - optional URL fragment (may include parameters, but not references) that will be added 
 *                   to the base URL
 * @param params - optional query parameters that will be added to the base URL
 * @return A relative URL created from the base URL provided in the constructor of this class and adding the
 *         fragment and parameters passed as arguments to this method.
 */
public String buildRelativeUrl(final @Nullable String fragment, final @Nullable Map<String, String> params) {
    String url = null;
    final Optional<String> fragment2 = ofNullable(trimToNull(fragment));
    try {
        final Optional<URL> fragmentUrl = ofNullable(
                fragment2.isPresent() ? new URL("http://example.com/" + fragment2.get()) : null);
        final URIBuilder uriBuilder = new URIBuilder();
        // add path
        uriBuilder.setPath(new StringBuilder(ofNullable(trimToNull(baseUrl.getPath())).orElse("/"))
                .append(fragmentUrl.isPresent() ? "/" + stripEnd(fragmentUrl.get().getPath(), "/") : "")
                .toString().replaceAll("[/]{2,}", "/"));
        // add query parameters
        if (isNotBlank(baseUrl.getQuery())) {
            uriBuilder.setParameters(URLEncodedUtils.parse(baseUrl.getQuery(), defaultCharset()));
        }
        if (fragmentUrl.isPresent() && isNotBlank(fragmentUrl.get().getQuery())) {
            URLEncodedUtils.parse(fragmentUrl.get().getQuery(), defaultCharset()).stream().forEach(p -> {
                uriBuilder.addParameter(p.getName(), p.getValue());
            });
        }
        ofNullable(params).orElse(emptyMap()).entrySet().stream().forEach(p -> {
            uriBuilder.addParameter(p.getKey(), p.getValue());
        });
        // add internal reference
        uriBuilder.setFragment(baseUrl.getRef());
        // build relative URL
        url = uriBuilder.build().normalize().toString();
    } catch (MalformedURLException | URISyntaxException e) {
        throw new IllegalStateException(
                new StringBuilder("Failed to create relative URL from provided parameters: fragment=")
                        .append(fragment2.orElse("null")).append(", params=")
                        .append(params != null ? params.toString() : "null").toString(),
                e);
    }
    return url;
}
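
Note the ofNullable(params).orElse(emptyMap()) line above: it turns a possibly-null argument into an empty collection so the stream pipeline never has to branch on null. Here is the same defaulting idiom in isolation (a minimal sketch; countParams is a hypothetical helper):

import java.util.Collections;
import java.util.Map;
import java.util.Optional;

public class DefaultingDemo {

    // Treat a null map as empty instead of null-checking at every call site
    static int countParams(Map<String, String> params) {
        return Optional.ofNullable(params).orElse(Collections.emptyMap()).size();
    }

    public static void main(String[] args) {
        System.out.println(countParams(null));                                  // prints 0
        System.out.println(countParams(Collections.singletonMap("q", "foo"))); // prints 1
    }
}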

From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchIndexUtils.java

/** Builds a lookup table of settings 
 * @param mapping - the mapping to use
 * @param type - if the index has a specific type, look that up and fall back to _default_; otherwise just _default_
 * @return
 */
public static LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> parseDefaultMapping(
        final JsonNode mapping, final Optional<String> type,
        final Optional<DataSchemaBean.SearchIndexSchemaBean> maybe_search_index_schema,
        final Optional<DataSchemaBean.DocumentSchemaBean> maybe_document_schema,
        final SearchIndexSchemaDefaultBean search_index_schema_override, final ObjectMapper mapper) {
    //(see similar code in createComplexStringLookups)
    final boolean tokenize_by_default = maybe_search_index_schema.map(schema -> schema.tokenize_by_default())
            .orElse(true);
    final boolean dual_tokenize_by_default = Optional
            .ofNullable(search_index_schema_override.dual_tokenize_by_default()).orElse(false);

    final JsonNode default_string_mapping = ((ObjectNode) (ElasticsearchIndexUtils.getMapping(
            Tuples._2T(tokenize_by_default, dual_tokenize_by_default), search_index_schema_override, mapper,
            true))).put(TYPE_MATCH_NAME, "string").put(PATH_MATCH_NAME, "*");

    // (this is always not tokenized but inherits dual tokenization)
    final ObjectNode not_analyzed_field = ((ObjectNode) (ElasticsearchIndexUtils.getMapping(
            Tuples._2T(false, dual_tokenize_by_default), search_index_schema_override, mapper, true)))
                    .put(TYPE_MATCH_NAME, "string");

    final LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> ret = Optional
            .ofNullable(mapping.get("mappings")).map(m -> {
                if (!m.isObject())
                    throw new RuntimeException("mappings must be object");
                return m;
            })
            .map(m -> Optional.ofNullable(m.get(type.orElse("_default_")))
                    .map(mm -> !mm.isNull() ? mm : m.get("_default_")).orElse(m.get("_default_")))
            .filter(m -> !m.isNull()).map(i -> {
                if (!i.isObject())
                    throw new RuntimeException(type + " must be object");
                return i;
            }).map(i -> {
                // OK so I have a list of dynamic_templates, and a list of properties - and then a set of string defaults to apply
                // 1) want to leave the properties alone
                // 2) then the tokenization overrides from createComplexStringLookups
                // 3) then the existing templates
                final Map<Either<String, Tuple2<String, String>>, JsonNode> override_props = createComplexStringLookups(
                        maybe_search_index_schema, search_index_schema_override, mapper);

                // ensure string doc fields aren't analyzed
                final Map<Either<String, Tuple2<String, String>>, String> type_override = maybe_search_index_schema
                        .map(s -> s.type_override()).map(m -> buildTypeMap(m)).orElse(Collections.emptyMap());

                final Map<Either<String, Tuple2<String, String>>, JsonNode> doc_props = maybe_document_schema
                        .map(ds -> ds.deduplication_fields())
                        .<Map<Either<String, Tuple2<String, String>>, JsonNode>>map(fields -> {
                            return fields.stream().filter(f -> !override_props.containsKey(Either.left(f)))
                                    .filter(f -> !override_props.containsKey(Either.right(Tuples._2T(f, "*"))))
                                    .filter(f -> !type_override.containsKey(Either.left(f)))
                                    .filter(f -> !type_override.containsKey(Either.right(Tuples._2T(f, "*"))))
                                    .<Tuple2<Either<String, Tuple2<String, String>>, JsonNode>>map(
                                            f -> Tuples._2T(Either.right(Tuples._2T(f, "string")),
                                                    not_analyzed_field.deepCopy().put(PATH_MATCH_NAME, f)))
                                    .collect(Collectors.toMap(
                                            (Tuple2<Either<String, Tuple2<String, String>>, JsonNode> t2) -> t2
                                                    ._1(),
                                            t2 -> t2._2()));
                        }).orElse(Collections.emptyMap());

                final LinkedHashMap<Either<String, Tuple2<String, String>>, JsonNode> props = new LinkedHashMap<>();
                props.putAll(doc_props); // (put these first - though actually i'm fixing the order with an explicit sort in the columnar section)
                props.putAll(override_props);

                // extra mappings and extra templates
                Optionals.of(() -> search_index_schema_override.extra_field_mappings())
                        .map(o -> mapper.convertValue(o, JsonNode.class)).ifPresent(j -> {
                            props.putAll(getTemplates(j, default_string_mapping, props.keySet()));
                            props.putAll(getProperties(j));
                        });

                // full mappings at the end
                props.putAll(getTemplates(i, default_string_mapping, props.keySet()));
                props.putAll(getProperties(i));

                return props;
            }).orElse(new LinkedHashMap<>());

    return ret;
}
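
A recurring idiom in the example above is map(...).orElse(default): project a value out of the Optional, then fall back when anything along the chain is absent. Because Optional.map wraps its result with ofNullable, a null field also falls through to the default. A minimal sketch with hypothetical bean names:

import java.util.Optional;

public class MapOrElseDemo {

    // Hypothetical schema bean with a nullable Boolean flag
    static class Schema {
        Boolean tokenizeByDefault; // may be null
    }

    static boolean tokenize(Optional<Schema> maybeSchema) {
        // An absent schema and a null flag both fall back to true
        return maybeSchema.map(s -> s.tokenizeByDefault).orElse(true);
    }

    public static void main(String[] args) {
        System.out.println(tokenize(Optional.empty()));    // prints "true"

        Schema schema = new Schema();
        System.out.println(tokenize(Optional.of(schema))); // prints "true" (null flag)

        schema.tokenizeByDefault = false;
        System.out.println(tokenize(Optional.of(schema))); // prints "false"
    }
}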