Example usage for java.util Optional flatMap

List of usage examples for java.util Optional flatMap

Introduction

On this page you can find usage examples for java.util Optional flatMap.

Prototype

public <U> Optional<U> flatMap(Function<? super T, ? extends Optional<? extends U>> mapper) 

Document

If a value is present, returns the result of applying the given Optional-bearing mapping function to the value; otherwise returns an empty Optional.
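
As a quick illustration of that behaviour, here is a minimal, self-contained sketch (the findNickname helper is hypothetical, introduced only for this example). The point is that the mapping function itself returns an Optional, and flatMap flattens the result instead of producing the nested Optional<Optional<U>> that map would give:

import java.util.Optional;

public class FlatMapDemo {

    // Hypothetical lookup that may or may not find a value.
    static Optional<String> findNickname(String user) {
        return "alice".equals(user) ? Optional.of("ally") : Optional.empty();
    }

    public static void main(String[] args) {
        // map would yield Optional<Optional<String>>; flatMap flattens it.
        Optional<String> nickname = Optional.of("alice").flatMap(FlatMapDemo::findNickname);
        System.out.println(nickname.orElse("<none>"));                                              // ally
        System.out.println(Optional.of("bob").flatMap(FlatMapDemo::findNickname).orElse("<none>")); // <none>
    }
}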

Usage

From source file:com.github.horrorho.inflatabledonkey.file.FileStreamWriter.java

public static boolean copy(InputStream in, OutputStream out, Optional<XFileKey> keyCipher,
        Optional<byte[]> signature, Optional<IOFunction<InputStream, InputStream>> decompress)
        throws IOException {

    Digest digest = signature.flatMap(FileSignature::type).orElse(FileSignature.ONE).newDigest();

    DigestInputStream dis = new DigestInputStream(in, digest);

    InputStream fis = decryptStream(dis, keyCipher);

    if (decompress.isPresent()) {
        logger.info("-- copy() - decompressing");
        fis = decompress.get().apply(fis);
    }

    IOUtils.copyLarge(fis, out, new byte[BUFFER_SIZE]);
    out.flush();

    return testSignature(dis.getDigest(), signature);
}
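
The flatMap call above resolves the optional signature to a digest type and falls back to a default when the signature is absent or unrecognised. Distilled to its essentials, the pattern looks roughly like the sketch below (DigestKind and typeOf are hypothetical stand-ins for the project's FileSignature machinery):

import java.util.Optional;

class DigestDefaultSketch {

    enum DigestKind { SHA1, SHA256 }

    // Hypothetical stand-in for FileSignature::type: it may fail to recognise
    // the signature bytes, so it returns an Optional rather than a bare value.
    static Optional<DigestKind> typeOf(byte[] signature) {
        return signature.length > 0 && signature[0] == 0x02 ? Optional.of(DigestKind.SHA256) : Optional.empty();
    }

    static DigestKind chooseDigest(Optional<byte[]> signature) {
        // flatMap keeps the result flat; orElse supplies the default kind.
        return signature.flatMap(DigestDefaultSketch::typeOf).orElse(DigestKind.SHA1);
    }
}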

From source file:de.mas.wiiu.jnus.utils.FSTUtils.java

public static Optional<FSTEntry> getFSTEntryByFullPath(FSTEntry root, String givenFullPath) {
    String fullPath = givenFullPath.replace(File.separator, "/");
    if (!fullPath.startsWith("/")) {
        fullPath = "/" + fullPath;
    }

    String dirPath = FilenameUtils.getFullPathNoEndSeparator(fullPath);
    Optional<FSTEntry> pathOpt = Optional.of(root);
    if (!dirPath.equals("/")) {
        pathOpt = getFileEntryDir(root, dirPath);
    }

    String path = fullPath;

    return pathOpt.flatMap(e -> e.getChildren().stream().filter(c -> c.getFullPath().equals(path)).findAny());
}
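
Here flatMap bridges from an Optional parent directory to an Optional child found by a stream search: if the directory lookup failed, the empty Optional simply propagates. The same shape in isolation (Node is a hypothetical stand-in for FSTEntry, using a Java 16+ record for brevity):

import java.util.List;
import java.util.Optional;

class OptionalStreamSearchSketch {

    record Node(String fullPath, List<Node> children) { }

    // Search the children only if the parent was found; otherwise stay empty.
    static Optional<Node> findChild(Optional<Node> dir, String fullPath) {
        return dir.flatMap(d -> d.children().stream()
                .filter(c -> c.fullPath().equals(fullPath))
                .findAny());
    }
}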

From source file:no.digipost.api.useragreements.client.response.ResponseUtils.java

public static InputStream getResponseEntityContent(HttpResponse response) {
    Optional<CloseableHttpResponse> closeableResponse = Optional.of(response)
            .filter(r -> r instanceof CloseableHttpResponse).map(r -> (CloseableHttpResponse) r);
    StatusLine statusLine = response.getStatusLine();
    HttpEntity entity = response.getEntity();
    if (entity == null) {
        closeableResponse.flatMap(r -> close(r)).map(RuntimeIOException::from).ifPresent(e -> {
            throw e;
        });
        return null;
    }

    try {
        return entity.getContent();
    } catch (UnsupportedOperationException | IOException e) {
        UnexpectedResponseException mainException = new UnexpectedResponseException(statusLine,
                ErrorCode.GENERAL_ERROR, e.getMessage(), e);
        closeableResponse.flatMap(r -> close(r)).ifPresent(mainException::addSuppressed);
        throw mainException;
    }
}

From source file:com.ikanow.aleph2.analytics.spark.utils.SparkTechnologyUtils.java

/** Builds a multimap of named SQL inputs
 * @param context the analytic context retrieved from the 
 * @return A multimap of DataFrames keyed by input name (with the name built as resource_name:data_service if not present)
 */
@SuppressWarnings("unchecked")
public static Multimap<String, DataFrame> buildBatchSparkSqlInputs(final IAnalyticsContext context,
        final Optional<ProcessingTestSpecBean> maybe_test_spec, final SQLContext spark_sql_context,
        final Set<String> exclude_names) {
    final AnalyticThreadJobBean job = context.getJob().get();

    final Multimap<String, DataFrame> mutable_builder = HashMultimap.create();

    transformInputBean(Optionals.ofNullable(job.inputs()).stream(), maybe_test_spec)
            .filter(input -> !exclude_names.contains(input.name())).forEach(Lambdas.wrap_consumer_u(input -> {
                Optional<SparkSqlAccessContext> maybe_input_format_info = context
                        .getServiceInput(SparkSqlAccessContext.class, Optional.empty(), job, input);
                maybe_input_format_info.flatMap(input_format_info -> input_format_info.getAccessConfig())
                        .map(info_objects -> (Function<SQLContext, DataFrame>) info_objects.get(input.name()))
                        .ifPresent(rdd_getter -> {
                            mutable_builder.put(input.name(), rdd_getter.apply(spark_sql_context));
                        });
            }));

    return mutable_builder;
}

From source file:com.ikanow.aleph2.search_service.elasticsearch.utils.ElasticsearchHiveUtils.java

/** Handles the prefix and suffix of the full hive schema
 *  https://www.elastic.co/guide/en/elasticsearch/hadoop/current/hive.html
 * @param table_name - if empty then "main_table"
 * @param bucket
 * @param schema
 * @param partial_hive_schema
 * @return
 */
public static Validation<String, String> generateFullHiveSchema(final Optional<String> table_name,
        final DataBucketBean bucket, final DataSchemaBean.DataWarehouseSchemaBean schema,
        Optional<Client> maybe_client, ElasticsearchIndexServiceConfigBean config) {
    // (ignore views for the moment)

    final String prefix = ErrorUtils.get("CREATE EXTERNAL TABLE {0} ", getTableName(bucket, schema));

    final DataSchemaBean.DataWarehouseSchemaBean.Table table = table_name.flatMap(t -> Optionals
            .ofNullable(schema.views()).stream().filter(v -> t.equals(v.database_name())).findFirst())
            .orElse(schema.main_table());

    final JsonNode user_schema = _mapper.convertValue(table.table_format(), JsonNode.class);

    final Validation<String, String> partial_table = generatePartialHiveSchema(prefix, user_schema, true);

    // (for the main table, just going to be the full alias - for views will need to be cleverer)
    final String index = Optionals
            .of(() -> bucket.data_schema().search_index_schema().technology_override_schema()
                    .get(SearchIndexSchemaDefaultBean.index_name_override_).toString())
            .orElseGet(() -> "r__" + BucketUtils.getUniqueSignature(bucket.full_name(), Optional.empty()));

    final Optional<ElasticsearchHiveOverrideBean> maybe_override = Optionals
            .of(() -> schema.technology_override_schema())
            .map(m -> BeanTemplateUtils.from(m, ElasticsearchHiveOverrideBean.class).get());

    // OK all this horrible code is intended to sort out the list of types to apply in the hive query
    final Optional<ElasticsearchHiveOverrideBean.TableOverride> table_override = maybe_override
            .map(cfg -> cfg.table_overrides().get(table_name.orElse(MAIN_TABLE_NAME)));
    final Optional<Set<String>> user_type_overrides = table_override.map(t -> t.types())
            .filter(l -> !l.isEmpty()).map(l -> new TreeSet<String>(l));
    final Set<String> mutable_type_set = user_type_overrides.orElseGet(() -> {
        return new TreeSet<String>(
                maybe_client.map(client -> ElasticsearchIndexUtils.getTypesForIndex(client, index).values())
                        .orElse(Collections.emptySet()));
    });

    final ElasticsearchIndexServiceConfigBean schema_config = ElasticsearchIndexConfigUtils
            .buildConfigBeanFromSchema(bucket, config, _mapper);
    final CollidePolicy collide_policy = Optionals
            .of(() -> schema_config.search_technology_override().collide_policy())
            .orElse(CollidePolicy.new_type);

    Optionals.of(() -> schema_config.search_technology_override().type_name_or_prefix()).map(Optional::of)
            .orElseGet(() -> Optional.of((collide_policy == CollidePolicy.new_type)
                    ? ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext.DEFAULT_PREFIX
                    : ElasticsearchIndexServiceConfigBean.DEFAULT_FIXED_TYPE_NAME))
            .ifPresent(type_or_prefix -> {
                if (!user_type_overrides.isPresent()) { // leave alone if manually specified
                    if (collide_policy == CollidePolicy.new_type) { // add a few types
                        //TODO (ALEPH-17): need to make this get auto populated as new types are added, see the ALEPH-17 comment in ElasticsearchIndexService
                        if (mutable_type_set.size() < 10) {
                            IntStream.rangeClosed(1, 10).boxed().map(i -> type_or_prefix + i.toString())
                                    .forEach(type -> mutable_type_set.add(type));
                        }
                    } else { // OK in this case just make sure the default type is represented
                        mutable_type_set.add(type_or_prefix);
                    }
                }
            });

    final String suffix = Optional.of(" STORED BY 'org.elasticsearch.hadoop.hive.EsStorageHandler' ")
            .map(s -> s + ErrorUtils.get(
                    "TBLPROPERTIES(''es.index.auto.create'' = ''false'', ''es.resource'' = ''{0}/{1}''", index,
                    mutable_type_set.stream().collect(Collectors.joining(","))))
            .map(s -> table_override.map(t -> t.name_mappings()).filter(m -> !m.isEmpty())
                    .map(m -> s + ", 'es.mapping.names' = '"
                            + m.entrySet().stream().map(kv -> kv.getKey() + ":" + kv.getValue())
                                    .collect(Collectors.joining(","))
                            + "'")
                    .orElse(s))
            .map(s -> table_override
                    .flatMap(t -> Optional.ofNullable(t.url_query()).map(ss -> "?" + ss).map(Optional::of)
                            .orElseGet(() -> Optional.ofNullable(t.json_query())
                                    .map(jq -> _mapper.convertValue(jq, JsonNode.class).toString())))
                    .map(ss -> s + ", 'es.query' = '" + ss + "'").orElse(s))
            .map(s -> s + ") ").get();

    return partial_table.map(s -> s + suffix);
}

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

/** Compares the old and new records' timestamps; if either is missing, assumes the old record is left in place
 *  (so that records without a timestamp don't hammer the DB)
 * @param timestamp_field
 * @param new_record
 * @param old_record
 * @return
 */
protected static boolean newRecordUpdatesOld(String timestamp_field, final JsonNode new_record,
        final JsonNode old_record) {
    final Optional<JsonNode> old_timestamp = JsonUtils.getProperty(timestamp_field, old_record);
    final Optional<JsonNode> new_timestamp = JsonUtils.getProperty(timestamp_field, new_record);
    final Optional<Tuple2<Long, Long>> maybe_old_new = old_timestamp
            .flatMap(old_ts -> getTimestampFromJsonNode(old_ts))
            .flatMap(old_ts -> new_timestamp.flatMap(new_ts -> getTimestampFromJsonNode(new_ts))
                    .map(new_ts -> Tuples._2T(old_ts, new_ts)));

    return maybe_old_new.filter(old_new -> old_new._2() > old_new._1()).isPresent();
}
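
The nested flatMap/map above is the standard way to combine two Optionals: the result is present only when both values are. Reduced to plain longs (and a simple long[] pair instead of the project's Tuple2), the shape is roughly:

import java.util.Optional;

class CombineTwoOptionalsSketch {

    // Present only if both timestamps are present; empty otherwise.
    static Optional<long[]> zip(Optional<Long> oldTs, Optional<Long> newTs) {
        return oldTs.flatMap(o -> newTs.map(n -> new long[] { o, n }));
    }

    static boolean newerThanOld(Optional<Long> oldTs, Optional<Long> newTs) {
        return zip(oldTs, newTs).filter(pair -> pair[1] > pair[0]).isPresent();
    }
}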

From source file:org.zalando.riptide.ContentTypeSelector.java

private Supplier<Optional<Binding<MediaType>>> bestMatch(final Optional<MediaType> attribute,
        final Map<Optional<MediaType>, Binding<MediaType>> bindings) {
    return () -> attribute.flatMap(a -> bindings.values().stream().filter(b -> b.getAttribute().isPresent())
            .sorted(BY_SPECIFICITY).filter(b -> b.getAttribute().get().includes(a)).findFirst());
}

From source file:it.tidalwave.bluemarine2.metadata.impl.audio.musicbrainz.MusicBrainzAudioMedatataImporter.java

/*******************************************************************************************************************
 *
 *
 *
 ******************************************************************************************************************/
@Nonnull
private static Optional<Integer> emptyIfOne(final @Nonnull Optional<Integer> number) {
    return number.flatMap(n -> (n == 1) ? Optional.empty() : Optional.of(n));
}
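
In this example flatMap is used purely to discard a specific value. Assuming nothing else about the surrounding class, the same behaviour can be expressed with filter, which avoids building a new Optional by hand (a sketch, not the project's code):

import java.util.Optional;

class EmptyIfOneSketch {

    // Equivalent filter-based form of emptyIfOne.
    static Optional<Integer> emptyIfOne(final Optional<Integer> number) {
        return number.filter(n -> n != 1);
    }
}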

From source file:com.siemens.sw360.fossology.handler.FossologyFileHandler.java

private void updateReleaseClearingState(Release release, Optional<FossologyStatus> fossologyStatus) {
    Optional<ClearingState> newClearingState = fossologyStatus.flatMap(this::mapFossologyStatusToClearingState);
    if (newClearingState.isPresent() && newClearingState.get().compareTo(release.getClearingState()) > 0) {
        release.setClearingState(newClearingState.get());
    }
}

From source file:com.ikanow.aleph2.core.shared.services.ReadOnlyMultiCrudService.java

/** Returns a multi bucket crud wrapper 
 *  DOESN'T CURRENTLY SUPPORT LIMITS OR SORTBY PROPERLY
 * @param buckets - a list of bucket paths
 * @param maybe_extra_query_builder - for each bucket lets the user specify an additional query to be applied to all queries
 * @return
 */
public static <O> Optional<ReadOnlyMultiCrudService<O>> from(final Class<O> clazz, final List<String> buckets,
        final Optional<String> owner_id, final IGenericDataService data_service,
        final IManagementCrudService<DataBucketBean> bucket_store, final IServiceContext service_context,
        final Optional<Function<DataBucketBean, Optional<QueryComponent<O>>>> maybe_extra_query_builder) {

    final DataBucketBean dummy_bucket = BeanTemplateUtils.build(DataBucketBean.class)
            .with(DataBucketBean::owner_id, owner_id.orElse(null))
            .with(DataBucketBean::multi_bucket_children, buckets).done().get();

    final List<ICrudService<O>> services = MultiBucketUtils
            .expandMultiBuckets(Arrays.asList(dummy_bucket), bucket_store, service_context).values().stream()
            .map(b -> Tuples._2T(b,
                    data_service.getReadableCrudService(clazz, Arrays.asList(b), Optional.empty())
                            .<ICrudService<O>>flatMap(ds -> ds.getCrudService())))
            .filter(bucket_crud -> bucket_crud._2().isPresent())
            .map(bucket_crud -> Tuples._2T(bucket_crud._1(), bucket_crud._2().get())) // safe because of the filter above
            .map(bucket_crud -> maybe_extra_query_builder.flatMap(qb -> qb.apply(bucket_crud._1()))
                    .map(extra_query -> CrudServiceUtils.intercept(clazz, bucket_crud._2(),
                            Optional.of(extra_query), Optional.empty(), Collections.emptyMap(),
                            Optional.empty()))
                    .orElse(bucket_crud._2()))
            .collect(Collectors.toList());

    return services.isEmpty() ? Optional.empty() : Optional.of(new ReadOnlyMultiCrudService<O>(services));
}