Example usage for java.util Optional ifPresent

Introduction

This page lists example usages of java.util.Optional.ifPresent, collected from the source files shown below.

Prototype

public void ifPresent(Consumer<? super T> action) 

Document

If a value is present, performs the given action with the value, otherwise does nothing.

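To make the behaviour concrete before the longer examples below, here is a minimal, self-contained sketch (the class name and values are illustrative only): the consumer runs when a value is present and is skipped, without any exception, when the Optional is empty.

import java.util.Optional;

public class IfPresentDemo {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("hello");
        Optional<String> empty = Optional.empty();

        // A value is present, so the consumer runs and prints "Length: 5"
        present.ifPresent(value -> System.out.println("Length: " + value.length()));

        // Empty Optional: the consumer is never invoked and nothing is printed
        empty.ifPresent(value -> System.out.println("never reached"));
    }
}
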
Usage

From source file:co.runrightfast.core.hazelcast.impl.TypesafeHazelcastConfig.java

private Config createHazelcastConfig(@NonNull final String name,
        @NonNull final com.typesafe.config.Config typeSafeConfig,
        @NonNull final Optional<Set<SerializerConfig>> serializerConfigs) {
    checkArgument(StringUtils.isNotBlank(name));
    final Config _hazelcastConfig = new Config();
    _hazelcastConfig.setInstanceName(name);

    _hazelcastConfig.setGroupConfig(ConfigUtils.getConfig(typeSafeConfig, "group-config").map(this::groupConfig)
            .orElseThrow(new ConfigurationExceptionSupplier("group-config is required")));

    _hazelcastConfig.setNetworkConfig(ConfigUtils.getConfig(typeSafeConfig, "network-config")
            .map(c -> this.networkConfig(c, typeSafeConfig))
            .orElseThrow(new ConfigurationExceptionSupplier("network-config is required")));

    ConfigUtils.getConfigList(typeSafeConfig, "map-configs").ifPresent(mapConfigs -> {
        mapConfigs.stream().map(this::mapConfig).forEach(_hazelcastConfig::addMapConfig);
    });

    ConfigUtils.getConfigList(typeSafeConfig, "multi-map-configs").ifPresent(mapConfigs -> {
        mapConfigs.stream().map(this::multiMapConfig).forEach(_hazelcastConfig::addMultiMapConfig);
    });

    ConfigUtils.getConfigList(typeSafeConfig, "queue-configs").ifPresent(queueConfigs -> {
        queueConfigs.stream().map(this::queueConfig).forEach(_hazelcastConfig::addQueueConfig);
    });

    ConfigUtils.getConfigList(typeSafeConfig, "topic-configs").ifPresent(queueConfigs -> {
        queueConfigs.stream().map(this::topicConfig).forEach(_hazelcastConfig::addTopicConfig);
    });

    ConfigUtils.getConfigList(typeSafeConfig, "list-configs").ifPresent(listConfigs -> {
        listConfigs.stream().map(this::listConfig).forEach(_hazelcastConfig::addListConfig);
    });

    ConfigUtils.getConfigList(typeSafeConfig, "set-configs").ifPresent(listConfigs -> {
        listConfigs.stream().map(this::getSetConfig).forEach(_hazelcastConfig::addSetConfig);
    });

    ConfigUtils.getConfigList(typeSafeConfig, "semaphore-configs").ifPresent(semaphoreConfigs -> {
        semaphoreConfigs.stream().map(this::semaphoreConfig).forEach(_hazelcastConfig::addSemaphoreConfig);
    });

    ConfigUtils.getConfigList(typeSafeConfig, "executor-configs").ifPresent(executorConfigs -> {
        executorConfigs.stream().map(this::executorConfig).forEach(_hazelcastConfig::addExecutorConfig);
    });

    _hazelcastConfig.setSerializationConfig(new SerializationConfig());
    serializerConfigs.ifPresent(configs -> configs.stream().forEach(serializerConfig -> _hazelcastConfig
            .getSerializationConfig().addSerializerConfig(serializerConfig)));

    ConfigUtils.getConfigList(typeSafeConfig, "partition-group-config").ifPresent(partitionGroupConfig -> {
        partitionGroupConfig.stream().map(this::partitionConfig)
                .forEach(_hazelcastConfig::setPartitionGroupConfig);
    });

    // Application manages the lifecycle and registers a shutdown hook - we want to ensure this is the last service that is stopped
    _hazelcastConfig.setProperty("hazelcast.shutdownhook.enabled", "false");
    // mapping hazelcast.jmx.enabled to hazelcast.jmx because, with the Typesafe config, hazelcast.jmx is an object and cannot be set to a boolean
    ConfigUtils.getBoolean(typeSafeConfig, "properties", "hazelcast", "jmx", "enabled").ifPresent(
            jmxEnabled -> _hazelcastConfig.setProperty("hazelcast.jmx", Boolean.toString(jmxEnabled)));

    ConfigUtils.getConfig(typeSafeConfig, "properties").ifPresent(properties -> {
        _hazelcastConfig.setProperties(ConfigUtils.toProperties(properties));
    });

    ConfigUtils.getConfig(typeSafeConfig, "member-attribute-config").map(this::memberAttributeConfig)
            .ifPresent(_hazelcastConfig::setMemberAttributeConfig);

    applyAdditionalConfiguration(_hazelcastConfig);
    return _hazelcastConfig;
}
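
The example above applies the same ifPresent pattern to each optional configuration section: look up an optional list of sub-configs, map each entry to a Hazelcast config object, and register it on the Config instance. A minimal, self-contained sketch of that shape follows; the helper name and the String stand-ins are hypothetical and are not part of the ConfigUtils or Hazelcast APIs used above.

import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.function.Function;

public class OptionalListRegistration {

    // Generic form of the repeated block: when the optional list is present,
    // map each element and hand it to a registration callback; otherwise do nothing.
    static <C, T> void registerAll(final Optional<List<C>> maybeConfigs,
            final Function<C, T> mapper, final Consumer<T> register) {
        maybeConfigs.ifPresent(configs -> configs.stream().map(mapper).forEach(register));
    }

    public static void main(String[] args) {
        // Present case: each entry is mapped and "registered" (printed here; in the
        // example above the callback would be _hazelcastConfig::addMapConfig and friends)
        registerAll(Optional.of(Arrays.asList("map-a", "map-b")), String::toUpperCase,
                System.out::println);

        // Empty case: ifPresent does nothing, so no registration happens
        registerAll(Optional.<List<String>>empty(), Function.<String>identity(),
                System.out::println);
    }
}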

From source file:com.hortonworks.streamline.streams.catalog.service.StreamCatalogService.java

private Topology doImportTopology(Topology newTopology, TopologyData topologyData) throws Exception {
    List<TopologySource> topologySources = topologyData.getSources();
    Map<Long, Long> oldToNewComponentIds = new HashMap<>();
    Map<Long, Long> oldToNewRuleIds = new HashMap<>();
    Map<Long, Long> oldToNewWindowIds = new HashMap<>();
    Map<Long, Long> oldToNewBranchRuleIds = new HashMap<>();
    Map<Long, Long> oldToNewStreamIds = new HashMap<>();

    // import source streams
    for (TopologySource topologySource : topologySources) {
        topologySource.setOutputStreamIds(
                importOutputStreams(newTopology.getId(), oldToNewStreamIds, topologySource.getOutputStreams()));
        topologySource.setOutputStreams(null);
    }

    // import processor streams
    for (TopologyProcessor topologyProcessor : topologyData.getProcessors()) {
        topologyProcessor.setOutputStreamIds(importOutputStreams(newTopology.getId(), oldToNewStreamIds,
                topologyProcessor.getOutputStreams()));
        topologyProcessor.setOutputStreams(null);
    }

    // import rules
    for (TopologyRule rule : topologyData.getRules()) {
        Long currentId = rule.getId();
        rule.setId(null);
        TopologyRule addedRule = addRule(newTopology.getId(), rule);
        oldToNewRuleIds.put(currentId, addedRule.getId());
    }

    // import windowed rules
    for (TopologyWindow window : topologyData.getWindows()) {
        Long currentId = window.getId();
        window.setId(null);
        TopologyWindow addedWindow = addWindow(newTopology.getId(), window);
        oldToNewWindowIds.put(currentId, addedWindow.getId());
    }

    // import branch rules
    for (TopologyBranchRule branchRule : topologyData.getBranchRules()) {
        Long currentId = branchRule.getId();
        branchRule.setId(null);
        TopologyBranchRule addedBranchRule = addBranchRule(newTopology.getId(), branchRule);
        oldToNewBranchRuleIds.put(currentId, addedBranchRule.getId());
    }

    // import sources
    for (TopologySource topologySource : topologySources) {
        Long oldComponentId = topologySource.getId();
        topologySource.setId(null);
        topologySource.setTopologyId(newTopology.getId());
        TopologyComponentBundle bundle = getCurrentTopologyComponentBundle(
                TopologyComponentBundle.TopologyComponentType.SOURCE,
                topologyData.getBundleIdToType().get(topologySource.getTopologyComponentBundleId().toString()));
        topologySource.setTopologyComponentBundleId(bundle.getId());
        addTopologySource(newTopology.getId(), topologySource);
        oldToNewComponentIds.put(oldComponentId, topologySource.getId());
    }

    // import processors
    for (TopologyProcessor topologyProcessor : topologyData.getProcessors()) {
        Long oldComponentId = topologyProcessor.getId();
        topologyProcessor.setId(null);
        topologyProcessor.setTopologyId(newTopology.getId());
        TopologyComponentBundle bundle;
        String subType = topologyData.getBundleIdToType()
                .get(topologyProcessor.getTopologyComponentBundleId().toString());
        if (TopologyLayoutConstants.JSON_KEY_CUSTOM_PROCESSOR_SUB_TYPE.equals(subType)) {
            QueryParam queryParam = new QueryParam(CustomProcessorInfo.NAME,
                    topologyProcessor.getConfig().get(CustomProcessorInfo.NAME));
            Collection<TopologyComponentBundle> result = listCustomProcessorBundlesWithFilter(
                    Collections.singletonList(queryParam));
            if (result == null || result.size() != 1) {
                throw new IllegalStateException(
                        "Not able to find topology component bundle for custom processor :"
                                + topologyProcessor.getConfig().get(CustomProcessorInfo.NAME));
            }
            bundle = result.iterator().next();
        } else {
            bundle = getCurrentTopologyComponentBundle(TopologyComponentBundle.TopologyComponentType.PROCESSOR,
                    subType);
        }
        topologyProcessor.setTopologyComponentBundleId(bundle.getId());
        Optional<Object> ruleListObj = topologyProcessor.getConfig()
                .getAnyOptional(RulesProcessor.CONFIG_KEY_RULES);
        ruleListObj.ifPresent(ruleList -> {
            List<Long> ruleIds = new ObjectMapper().convertValue(ruleList, new TypeReference<List<Long>>() {
            });
            List<Long> updatedRuleIds = new ArrayList<>();
            if (ComponentTypes.RULE.equals(bundle.getSubType())
                    || ComponentTypes.PROJECTION.equals(bundle.getSubType())) {
                ruleIds.forEach(ruleId -> updatedRuleIds.add(oldToNewRuleIds.get(ruleId)));
            } else if (bundle.getSubType().equals(ComponentTypes.BRANCH)) {
                ruleIds.forEach(ruleId -> updatedRuleIds.add(oldToNewBranchRuleIds.get(ruleId)));
            } else if (bundle.getSubType().equals(ComponentTypes.WINDOW)) {
                ruleIds.forEach(ruleId -> updatedRuleIds.add(oldToNewWindowIds.get(ruleId)));
            }
            topologyProcessor.getConfig().setAny(RulesProcessor.CONFIG_KEY_RULES, updatedRuleIds);
        });
        addTopologyProcessor(newTopology.getId(), topologyProcessor);
        oldToNewComponentIds.put(oldComponentId, topologyProcessor.getId());
    }

    // import sinks
    for (TopologySink topologySink : topologyData.getSinks()) {
        topologySink.setTopologyId(newTopology.getId());
        Long currentId = topologySink.getId();
        topologySink.setId(null);
        TopologyComponentBundle bundle = getCurrentTopologyComponentBundle(
                TopologyComponentBundle.TopologyComponentType.SINK,
                topologyData.getBundleIdToType().get(topologySink.getTopologyComponentBundleId().toString()));
        topologySink.setTopologyComponentBundleId(bundle.getId());
        if (bundle.getSubType().equals(NOTIFICATION)) {
            updateNotifierJarFileName(topologySink);
        }
        addTopologySink(newTopology.getId(), topologySink);
        oldToNewComponentIds.put(currentId, topologySink.getId());
    }

    // import edges
    for (TopologyEdge topologyEdge : topologyData.getEdges()) {
        List<StreamGrouping> streamGroupings = topologyEdge.getStreamGroupings();
        for (StreamGrouping streamGrouping : streamGroupings) {
            Long newStreamId = oldToNewStreamIds.get(streamGrouping.getStreamId());
            streamGrouping.setStreamId(newStreamId);
        }
        topologyEdge.setId(null);
        topologyEdge.setTopologyId(newTopology.getId());
        topologyEdge.setFromId(oldToNewComponentIds.get(topologyEdge.getFromId()));
        topologyEdge.setToId(oldToNewComponentIds.get(topologyEdge.getToId()));
        addTopologyEdge(newTopology.getId(), topologyEdge);
    }

    // import topology editor metadata
    TopologyEditorMetadata topologyEditorMetadata = topologyData.getTopologyEditorMetadata();
    topologyEditorMetadata.setTopologyId(newTopology.getId());
    if (topologyEditorMetadata.getData() != null) {
        TopologyUIData topologyUIData = new ObjectMapper().readValue(topologyEditorMetadata.getData(),
                TopologyUIData.class);
        topologyUIData.getSources().forEach(c -> c.setId(oldToNewComponentIds.get(c.getId())));
        topologyUIData.getProcessors().forEach(c -> c.setId(oldToNewComponentIds.get(c.getId())));
        topologyUIData.getSinks().forEach(c -> c.setId(oldToNewComponentIds.get(c.getId())));
        topologyEditorMetadata.setData(new ObjectMapper().writeValueAsString(topologyUIData));
    } else {
        topologyEditorMetadata.setData(StringUtils.EMPTY);
    }
    addTopologyEditorMetadata(newTopology.getId(), topologyData.getTopologyEditorMetadata());
    return newTopology;
}

From source file:com.ikanow.aleph2.management_db.services.DataBucketCrudService.java

/** Worker function for storeObject
 * @param new_object - the bucket to create
 * @param old_bucket - the version of the bucket being overwritten, if this is an update
 * @param validation_info - validation info to be presented to the user
 * @param replace_if_present - update mode
 * @return - the user return value
 * @throws Exception
 */
public ManagementFuture<Supplier<Object>> storeValidatedObject(final DataBucketBean new_object,
        final Optional<DataBucketBean> old_bucket, final Collection<BasicMessageBean> validation_info,
        boolean replace_if_present) throws Exception {
    final MethodNamingHelper<DataBucketStatusBean> helper = BeanTemplateUtils.from(DataBucketStatusBean.class);

    // Error if a bucket status doesn't exist - must create a bucket status before creating the bucket
    // (note the above validation ensures the bucket has an _id)
    // (obviously need to block here until we're sure..)

    final CompletableFuture<Optional<DataBucketStatusBean>> corresponding_status = _underlying_data_bucket_status_db
            .get().getObjectById(new_object._id(),
                    Arrays.asList(helper.field(DataBucketStatusBean::_id),
                            helper.field(DataBucketStatusBean::node_affinity),
                            helper.field(DataBucketStatusBean::confirmed_master_enrichment_type),
                            helper.field(DataBucketStatusBean::confirmed_suspended),
                            helper.field(DataBucketStatusBean::confirmed_multi_node_enabled),
                            helper.field(DataBucketStatusBean::suspended),
                            helper.field(DataBucketStatusBean::quarantined_until)),
                    true);

    if (!corresponding_status.get().isPresent()) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException(
                        ErrorUtils.get(ManagementDbErrorUtils.BUCKET_CANNOT_BE_CREATED_WITHOUT_BUCKET_STATUS,
                                new_object.full_name()))),
                CompletableFuture.completedFuture(Collections.emptyList()));
    }

    // Some fields, like multi-node, can only be changed if the bucket status is set to suspended, to keep
    // the control logic simple
    old_bucket.ifPresent(ob -> {
        validation_info.addAll(checkForInactiveOnlyUpdates(new_object, ob, corresponding_status.join().get()));
        // (corresponding_status present and completed because of above check) 
    });
    if (!validation_info.isEmpty() && validation_info.stream().anyMatch(m -> !m.success())) {
        return FutureUtils.createManagementFuture(
                FutureUtils.returnError(new RuntimeException("Bucket not valid, see management channels")),
                CompletableFuture.completedFuture(validation_info));
    }
    // Made it this far, try to set the next_poll_time in the status object
    if (null != new_object.poll_frequency()) {
        //get the next poll time
        final Date next_poll_time = TimeUtils
                .getForwardSchedule(new_object.poll_frequency(), Optional.of(new Date())).success();
        //update the status
        _underlying_data_bucket_status_db.get().updateObjectById(new_object._id(), CrudUtils
                .update(DataBucketStatusBean.class).set(DataBucketStatusBean::next_poll_date, next_poll_time));
    }

    // Create the directories

    try {
        createFilePaths(new_object, _storage_service.get());
        //if logging is enabled, create the logging filepath also
        if (Optionals.of(() -> new_object.management_schema().logging_schema().enabled()).orElse(false)) {
            createFilePaths(BucketUtils.convertDataBucketBeanToLogging(new_object), _storage_service.get());
        }
    } catch (Exception e) { // Error creating directory, haven't created object yet so just back out now

        return FutureUtils.createManagementFuture(FutureUtils.returnError(e));
    }
    // OK if the bucket is validated we can store it (and create a status object)

    final CompletableFuture<Supplier<Object>> ret_val = _underlying_data_bucket_db.get().storeObject(new_object,
            replace_if_present);
    final boolean is_suspended = DataBucketStatusCrudService
            .bucketIsSuspended(corresponding_status.get().get());

    // Register the bucket update with any applicable data services      

    final Multimap<IDataServiceProvider, String> data_service_info = DataServiceUtils
            .selectDataServices(new_object.data_schema(), _service_context);
    final Optional<Multimap<IDataServiceProvider, String>> old_data_service_info = old_bucket
            .map(old -> DataServiceUtils.selectDataServices(old.data_schema(), _service_context));

    final List<CompletableFuture<Collection<BasicMessageBean>>> ds_update_results = data_service_info.asMap()
            .entrySet().stream()
            .map(kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                    kv.getValue().stream().collect(Collectors.toSet()),
                    old_data_service_info.map(old_map -> old_map.get(kv.getKey()))
                            .map(old_servs -> old_servs.stream().collect(Collectors.toSet()))
                            .orElse(Collections.emptySet())))
            .collect(Collectors.toList());

    // Process old data services that are no longer in use
    final List<CompletableFuture<Collection<BasicMessageBean>>> old_ds_update_results = old_data_service_info
            .map(old_ds_info -> {
                return old_ds_info.asMap().entrySet().stream()
                        .filter(kv -> !data_service_info.containsKey(kv.getKey()))
                        .<CompletableFuture<Collection<BasicMessageBean>>>map(
                                kv -> kv.getKey().onPublishOrUpdate(new_object, old_bucket, is_suspended,
                                        Collections.emptySet(),
                                        kv.getValue().stream().collect(Collectors.toSet())))
                        .collect(Collectors.toList());
            }).orElse(Collections.emptyList());

    //(combine)
    @SuppressWarnings("unchecked")
    CompletableFuture<Collection<BasicMessageBean>> all_service_registration_complete[] = Stream
            .concat(ds_update_results.stream(), old_ds_update_results.stream())
            .toArray(CompletableFuture[]::new);

    // Get the status and then decide whether to broadcast out the new/update message

    final CompletableFuture<Collection<BasicMessageBean>> mgmt_results = CompletableFuture
            .allOf(all_service_registration_complete)
            .thenCombine(
                    old_bucket.isPresent()
                            ? requestUpdatedBucket(new_object, old_bucket.get(),
                                    corresponding_status.get().get(), _actor_context,
                                    _underlying_data_bucket_status_db.get(), _bucket_action_retry_store.get())
                            : requestNewBucket(new_object, is_suspended,
                                    _underlying_data_bucket_status_db.get(), _actor_context),
                    (__, harvest_results) -> {
                        return (Collection<BasicMessageBean>) Stream
                                .concat(Arrays.stream(all_service_registration_complete)
                                        .flatMap(s -> s.join().stream()), harvest_results.stream())
                                .collect(Collectors.toList());
                    })
            .exceptionally(t -> Arrays.asList(ErrorUtils.buildErrorMessage(this.getClass().getSimpleName(),
                    "storeValidatedObject", ErrorUtils.get("{0}", t))));

    // Update the status depending on the results of the management channels

    return FutureUtils.createManagementFuture(ret_val,
            MgmtCrudUtils
                    .handleUpdatingStatus(new_object, corresponding_status.get().get(), is_suspended,
                            mgmt_results, _underlying_data_bucket_status_db.get())
                    .thenApply(msgs -> Stream.concat(msgs.stream(), validation_info.stream())
                            .collect(Collectors.toList())));
}

From source file:com.ikanow.aleph2.analytics.services.DeduplicationService.java

@Override
public void onObjectBatch(final Stream<Tuple2<Long, IBatchRecord>> batch, final Optional<Integer> batch_size,
        final Optional<JsonNode> grouping_key) {
    if (_deduplication_is_disabled.get()) {
        // no deduplication, generally shouldn't be here...
        //.. but if we are, do the best we can
        batch.forEach(t2 -> _context.get().emitImmutableObject(t2._1(), t2._2().getJson(), Optional.empty(),
                Optional.empty(), Optional.empty()));
        return;
    }

    // Create big query

    final Tuple3<QueryComponent<JsonNode>, List<Tuple2<JsonNode, Tuple2<Long, IBatchRecord>>>, Either<String, List<String>>> fieldinfo_dedupquery_keyfields = getDedupQuery(
            batch, _dedup_fields.get(), _db_mapper.get());

    // Get duplicate results

    final Tuple2<List<String>, Boolean> fields_include = getIncludeFields(_policy.get(), _dedup_fields.get(),
            _timestamp_field.get());

    final CompletableFuture<Iterator<JsonNode>> dedup_res = fieldinfo_dedupquery_keyfields._2().isEmpty()
            ? CompletableFuture.completedFuture(Collections.<JsonNode>emptyList().iterator())
            : _dedup_context.get().getObjectsBySpec(fieldinfo_dedupquery_keyfields._1(), fields_include._1(),
                    fields_include._2()).thenApply(cursor -> cursor.iterator());

    // Wait for it to finish

    //(create handy results structure if so)
    final LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> mutable_obj_map = fieldinfo_dedupquery_keyfields
            ._2().stream()
            .collect(Collector.of(
                    () -> new LinkedHashMap<JsonNode, LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>>(),
                    (acc, t2) -> {
                        // (ie only the first element is added, duplicate elements are removed)
                        final Tuple3<Long, IBatchRecord, ObjectNode> t3 = Tuples._3T(t2._2()._1(), t2._2()._2(),
                                _mapper.createObjectNode());
                        acc.compute(t2._1(), (k, v) -> {
                            final LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>> new_list = (null == v)
                                    ? new LinkedList<>()
                                    : v;
                            new_list.add(t3);
                            return new_list;
                        });
                    }, (map1, map2) -> {
                        map1.putAll(map2);
                        return map1;
                    }));

    //TODO (ALEPH-20): add timestamps to annotation
    //TODO (ALEPH-20): support different timestamp fields for the different buckets
    //TODO (ALEPH-20): really need to support >1 current enrichment job 
    //                 ^^(Really really longer term you should be able to decide what objects you want and what you don't  <- NOTE: don't remember what i meant here)

    final Iterator<JsonNode> cursor = dedup_res.join();

    // Handle the results

    final Stream<JsonNode> records_to_delete = Lambdas.get(() -> {
        if (isCustom(_doc_schema.get().deduplication_policy())
                || _doc_schema.get().delete_unhandled_duplicates()) {
            return Optionals.streamOf(cursor, true)
                    .collect(Collectors.groupingBy(
                            ret_obj -> getKeyFieldsAgain(ret_obj, fieldinfo_dedupquery_keyfields._3())))
                    .entrySet().stream().<JsonNode>flatMap(kv -> {

                        final Optional<JsonNode> maybe_key = kv.getKey();
                        final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                                .map(key -> mutable_obj_map.get(key));

                        // Stats:
                        _mutable_stats.duplicate_keys++;
                        _mutable_stats.duplicates_existing += kv.getValue().size();
                        _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                        //DEBUG
                        //System.out.println("?? " + kv.getValue().size() + " vs " + maybe_key + " vs " + matching_records.map(x -> Integer.toString(x.size())).orElse("(no match)"));

                        return matching_records
                                .<Stream<JsonNode>>map(records -> handleDuplicateRecord(_doc_schema.get(),
                                        _custom_handler.optional().map(
                                                handler -> Tuples._2T(handler, this._custom_context.get())),
                                        _timestamp_field.get(), records, kv.getValue(), maybe_key.get(),
                                        mutable_obj_map))
                                .orElse(Stream.empty());
                    });
        } else {
            Optionals.streamOf(cursor, true).forEach(ret_obj -> {
                final Optional<JsonNode> maybe_key = getKeyFieldsAgain(ret_obj,
                        fieldinfo_dedupquery_keyfields._3());
                final Optional<LinkedList<Tuple3<Long, IBatchRecord, ObjectNode>>> matching_records = maybe_key
                        .map(key -> mutable_obj_map.get(key));

                //DEBUG
                //System.out.println("?? " + ret_obj + " vs " + maybe_key + " vs " + matching_record.map(x -> x._2().getJson().toString()).orElse("(no match)"));

                // Stats:
                _mutable_stats.duplicate_keys++;
                _mutable_stats.duplicates_existing++;
                _mutable_stats.duplicates_incoming += matching_records.map(l -> l.size()).orElse(0);

                matching_records.ifPresent(records -> handleDuplicateRecord(_doc_schema.get(),
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        _timestamp_field.get(), records, Arrays.asList(ret_obj), maybe_key.get(),
                        mutable_obj_map));
            });
            return Stream.<JsonNode>empty();
        }
    });

    final List<Object> ids = records_to_delete.map(j -> jsonToObject(j)).filter(j -> null != j)
            .collect(Collectors.toList());

    if (!ids.isEmpty()) { // fire a bulk deletion request
        mutable_uncompleted_deletes.add(
                _dedup_context.get().deleteObjectsBySpec(CrudUtils.allOf().withAny(AnnotationBean._ID, ids)));

        _mutable_stats.deleted += ids.size();

        //(quickly see if we can reduce the number of outstanding requests)
        final Iterator<CompletableFuture<Long>> it = mutable_uncompleted_deletes.iterator();
        while (it.hasNext()) {
            final CompletableFuture<Long> cf = it.next();
            if (cf.isDone()) {
                it.remove();
            } else
                break; // ie stop as soon as we hit one that isn't complete
        }
    }

    _mutable_stats.nonduplicate_keys += mutable_obj_map.size();

    if (Optional.ofNullable(_doc_schema.get().custom_finalize_all_objects()).orElse(false)) {
        mutable_obj_map.entrySet().stream()
                .forEach(kv -> handleCustomDeduplication(
                        _custom_handler.optional()
                                .map(handler -> Tuples._2T(handler, this._custom_context.get())),
                        kv.getValue(), Collections.emptyList(), kv.getKey()));
    } else { // Just emit the last element of each grouped object set
        mutable_obj_map.values().stream().map(t -> t.peekLast())
                .forEach(t -> _context.get().emitImmutableObject(t._1(), t._2().getJson(), Optional.of(t._3()),
                        Optional.empty(), Optional.empty()));
    }
}

From source file:com.ikanow.aleph2.search_service.elasticsearch.services.TestElasticsearchIndexService.java

public void test_endToEnd_autoTime(boolean test_not_create_mode, Optional<String> primary_name)
        throws IOException, InterruptedException, ExecutionException {
    final Calendar time_setter = GregorianCalendar.getInstance();
    time_setter.set(2015, 1, 1, 13, 0, 0);
    final String bucket_str = Resources.toString(
            Resources.getResource(
                    "com/ikanow/aleph2/search_service/elasticsearch/services/test_end_2_end_bucket.json"),
            Charsets.UTF_8);
    final DataBucketBean bucket = BeanTemplateUtils.build(bucket_str, DataBucketBean.class)
            .with("_id", "test_end_2_end")
            .with("full_name", "/test/end-end/auto-time" + primary_name.map(s -> "/" + s).orElse(""))
            .with("modified", time_setter.getTime()).done().get();

    final String template_name = ElasticsearchIndexUtils.getBaseIndexName(bucket, primary_name);

    // Check starting from clean

    {
        try {
            _crud_factory.getClient().admin().indices().prepareDeleteTemplate(template_name).execute()
                    .actionGet();
        } catch (Exception e) {
        } // (This is fine, just means it doesn't exist)      
        try {
            _crud_factory.getClient().admin().indices().prepareDelete(template_name + "*").execute()
                    .actionGet();
        } catch (Exception e) {
        } // (This is fine, just means it doesn't exist)      

        final GetIndexTemplatesRequest gt = new GetIndexTemplatesRequest().names(template_name);
        final GetIndexTemplatesResponse gtr = _crud_factory.getClient().admin().indices().getTemplates(gt)
                .actionGet();
        assertTrue("No templates to start with", gtr.getIndexTemplates().isEmpty());
    }

    // If the primary buffer is specified then create it and switch to it
    primary_name.ifPresent(primary -> {
        _index_service.getDataService()
                .flatMap(s -> s.getWritableDataService(JsonNode.class, bucket, Optional.empty(), primary_name))
                .flatMap(IDataWriteService::getCrudService).get();

        _index_service.getDataService().get().switchCrudServiceToPrimaryBuffer(bucket, primary_name,
                Optional.empty(), Optional.empty());
    });

    // (note: pass Optional.empty() in regardless of primary; it should return the non-default primary either way)
    final ICrudService<JsonNode> index_service_crud = _index_service.getDataService()
            .flatMap(s -> s.getWritableDataService(JsonNode.class, bucket, Optional.empty(), Optional.empty()))
            .flatMap(IDataWriteService::getCrudService).get();

    // Check template added:

    {
        final GetIndexTemplatesRequest gt2 = new GetIndexTemplatesRequest().names(template_name);
        final GetIndexTemplatesResponse gtr2 = _crud_factory.getClient().admin().indices().getTemplates(gt2)
                .actionGet();
        assertTrue(
                "Cache should contain the template: " + _index_service._bucket_template_cache.asMap().keySet(),
                _index_service._bucket_template_cache.asMap()
                        .containsKey(bucket._id() + primary_name.map(s -> ":" + s).orElse("") + ":true"));
        assertEquals(1, gtr2.getIndexTemplates().size());
    }

    // Get batch sub-service

    @SuppressWarnings("unchecked")
    final Optional<ICrudService.IBatchSubservice<JsonNode>> batch_service = index_service_crud
            .getUnderlyingPlatformDriver(ICrudService.IBatchSubservice.class, Optional.empty())
            .map(t -> (IBatchSubservice<JsonNode>) t);

    {
        assertTrue("Batch service must exist", batch_service.isPresent());
    }

    // Get information about the crud service

    final ElasticsearchContext es_context = (ElasticsearchContext) index_service_crud
            .getUnderlyingPlatformDriver(ElasticsearchContext.class, Optional.empty()).get();

    {
        assertTrue("Read write index", es_context instanceof ElasticsearchContext.ReadWriteContext);
        assertTrue("Temporal index", es_context
                .indexContext() instanceof ElasticsearchContext.IndexContext.ReadWriteIndexContext.TimedRwIndexContext);
        assertTrue("Auto type", es_context
                .typeContext() instanceof ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext);

        // Check that the context contains the fixed type fields

        final ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext context = (ElasticsearchContext.TypeContext.ReadWriteTypeContext.AutoRwTypeContext) es_context
                .typeContext();

        assertEquals(Arrays.asList("@timestamp"),
                context.fixed_type_fields().stream().collect(Collectors.toList()));
    }

    // Write some docs out

    Arrays.asList(1, 2, 3, 4, 5).stream().map(i -> {
        time_setter.set(2015, i, 1, 13, 0, 0);
        return time_setter.getTime();
    }).map(d -> (ObjectNode) _mapper.createObjectNode().put("@timestamp", d.getTime())).forEach(o -> {
        ObjectNode o1 = o.deepCopy();
        o1.set("val1", _mapper.createObjectNode().put("val2", "test"));
        ObjectNode o2 = o.deepCopy();
        o2.put("val1", "test");
        batch_service.get().storeObject(o1, false);
        batch_service.get().storeObject(o2, false);
    });

    for (int i = 0; i < 30; ++i) {
        Thread.sleep(1000L);
        if (index_service_crud.countObjects().get() >= 10) {
            System.out.println("Test end 2 end: (Got all the records)");
            break;
        }
    }
    Thread.sleep(2100L); // sleep another 2s (plus a bit) for the aliases

    // Check an alias per time slice gets created also
    Arrays.asList("_2015.02.01", "_2015.03.01", "_2015.04.01", "_2015.05.01", "_2015.06.01").stream()
            .forEach(time_suffix -> {
                final List<String> aliases = getAliasedBuffers(bucket, Optional.of(time_suffix));
                assertEquals(Arrays.asList(template_name + time_suffix), aliases);
            });

    // Check the top level alias is created
    final List<String> aliases = this.getMainAliasedBuffers(bucket);
    assertEquals(Arrays.asList("_2015.02.01", "_2015.03.01", "_2015.04.01", "_2015.05.01", "_2015.06.01")
            .stream().map(x -> template_name + x).collect(Collectors.toList()), aliases);

    final GetMappingsResponse gmr = es_context.client().admin().indices()
            .prepareGetMappings(template_name + "*").execute().actionGet();

    // Should have 5 different indexes, each with 2 types + _default_

    assertEquals(5, gmr.getMappings().keys().size());
    final Set<String> expected_keys = Arrays.asList(1, 2, 3, 4, 5).stream()
            .map(i -> template_name + "_2015.0" + (i + 1) + ".01").collect(Collectors.toSet());
    final Set<String> expected_types = Arrays.asList("_default_", "type_1", "type_2").stream()
            .collect(Collectors.toSet());

    if (test_not_create_mode)
        StreamSupport.stream(gmr.getMappings().spliterator(), false).forEach(x -> {
            assertTrue(
                    "Is one of the expected keys: " + x.key + " vs  "
                            + expected_keys.stream().collect(Collectors.joining(":")),
                    expected_keys.contains(x.key));
            //DEBUG
            //System.out.println(" ? " + x.key);
            StreamSupport.stream(x.value.spliterator(), false).forEach(Lambdas.wrap_consumer_u(y -> {
                //DEBUG
                //System.out.println("?? " + y.key + " --- " + y.value.sourceAsMap().toString());
                // Size 3: _default_, type1 and type2
                assertTrue("Is expected type: " + y.key, expected_types.contains(y.key));
            }));
            // Size 3: _default_, type_1, type_2 
            assertEquals("Should have 3 indexes: " + x.value.toString(), 3, x.value.size());
        });

    //TEST DELETION:
    if (test_not_create_mode)
        test_handleDeleteOrPurge(bucket, primary_name, true);
}

From source file:com.ikanow.aleph2.graph.titan.utils.TestTitanGraphBuildingUtils.java

public List<ObjectNode> test_buildGraph_getUserGeneratedAssets_run() {

    // (this is a pretty simple method, so we also use it to test the decomposing service)

    final GraphSchemaBean graph_schema = BeanTemplateUtils.build(GraphSchemaBean.class)
            .with(GraphSchemaBean::deduplication_fields,
                    Arrays.asList(GraphAnnotationBean.name, GraphAnnotationBean.type))
            .done().get();

    final IEnrichmentModuleContext delegate_context = Mockito.mock(IEnrichmentModuleContext.class);
    Mockito.when(delegate_context.getNextUnusedId()).thenReturn(0L);
    final Optional<Tuple2<IEnrichmentBatchModule, GraphDecompEnrichmentContext>> maybe_decomposer = Optional
            .of(Tuples._2T((IEnrichmentBatchModule) new SimpleGraphDecompService(),
                    new GraphDecompEnrichmentContext(delegate_context, graph_schema)));

    final EnrichmentControlMetadataBean control = BeanTemplateUtils.build(EnrichmentControlMetadataBean.class)
            .with(EnrichmentControlMetadataBean::config, BeanTemplateUtils.toMap(BeanTemplateUtils
                    .build(SimpleDecompConfigBean.class)
                    .with(SimpleDecompConfigBean::elements, Arrays.asList(
                            BeanTemplateUtils.build(SimpleDecompElementBean.class)
                                    .with(SimpleDecompElementBean::edge_name, "test_edge_1")
                                    .with(SimpleDecompElementBean::from_fields,
                                            Arrays.asList("int_ip1", "int_ip2"))
                                    .with(SimpleDecompElementBean::from_type, "ip")
                                    .with(SimpleDecompElementBean::to_fields, Arrays.asList("host1", "host2"))
                                    .with(SimpleDecompElementBean::to_type, "host").done().get(),
                            BeanTemplateUtils.build(SimpleDecompElementBean.class)
                                    .with(SimpleDecompElementBean::edge_name, "test_edge_2")
                                    .with(SimpleDecompElementBean::from_fields, Arrays.asList("missing"))
                                    .with(SimpleDecompElementBean::from_type, "ip")
                                    .with(SimpleDecompElementBean::to_fields, Arrays.asList("host1", "host2"))
                                    .with(SimpleDecompElementBean::to_type, "host").done().get()))
                    .done().get()))
            .done().get();

    final Stream<Tuple2<Long, IBatchRecord>> batch = Stream
            .<ObjectNode>of(
                    _mapper.createObjectNode().put("int_ip1", "ipA").put("int_ip2", "ipB").put("host1", "dX")
                            .put("host2", "dY"),
                    _mapper.createObjectNode().put("int_ip1", "ipA").put("host1", "dZ").put("host2", "dY"))
            .map(o -> Tuples._2T(0L, new BatchRecordUtils.JsonBatchRecord(o)));

    maybe_decomposer.ifPresent(t2 -> t2._1().onStageInitialize(t2._2(), null, control, null, null));

    final List<ObjectNode> ret_val = TitanGraphBuildingUtils.buildGraph_getUserGeneratedAssets(batch,
            Optional.empty(), Optional.empty(), maybe_decomposer);

    // test_buildGraph_collectUserGeneratedAssets needs
    // Nodes
    // IPA, IPB, DX, DY, DZ
    // Links
    // IPA: IPA->DX, IPA->DY, IPA->DZ
    // IPB: IPB->DX, IPB->DY
    // DX: IPA, IPB
    // DY: IPA, IPB
    // DZ: IPA

    assertEquals(11, ret_val.size()); // (get more vertices because they are generated multiple times...)
    assertEquals(5, ret_val.stream().filter(v -> GraphAnnotationBean.ElementType.vertex.toString()
            .equals(v.get(GraphAnnotationBean.type).asText())).count());
    assertEquals(6, ret_val.stream().filter(v -> GraphAnnotationBean.ElementType.edge.toString()
            .equals(v.get(GraphAnnotationBean.type).asText())).count());

    //coverage!
    maybe_decomposer.ifPresent(t2 -> t2._1().onStageComplete(true));

    return ret_val;
}

From source file:net.sourceforge.pmd.util.fxdesigner.SourceEditorController.java

/**
 * Refreshes the AST and returns the new compilation unit if the parse didn't fail.
 */
public Optional<Node> refreshAST() {
    String source = getText();

    if (StringUtils.isBlank(source)) {
        astTreeView.setRoot(null);
        return Optional.empty();
    }

    Optional<Node> current;

    try {
        current = astManager.updateIfChanged(source, auxclasspathClassLoader.getValue());
    } catch (ParseAbortedException e) {
        astTitleLabel.setText("Abstract syntax tree (error)");
        return Optional.empty();
    }

    current.ifPresent(this::setUpToDateCompilationUnit);
    return current;
}

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorage.java

@Override
public List<DetectedInconsistency> localConsistencyCheck(Optional<RateLimiter> rateLimiter) throws IOException {
    long checkStart = MathUtils.nowInNano();
    LOG.info("Starting localConsistencyCheck");
    long checkedLedgers = 0;
    long checkedPages = 0;
    final MutableLong checkedEntries = new MutableLong(0);
    final MutableLong pageRetries = new MutableLong(0);
    NavigableMap<Long, Boolean> bkActiveLedgersSnapshot = activeLedgers.snapshot();
    final List<DetectedInconsistency> errors = new ArrayList<>();
    for (Long ledger : bkActiveLedgersSnapshot.keySet()) {
        try (LedgerCache.PageEntriesIterable pages = ledgerCache.listEntries(ledger)) {
            for (LedgerCache.PageEntries page : pages) {
                @Cleanup
                LedgerEntryPage lep = page.getLEP();
                MutableBoolean retry = new MutableBoolean(false);
                do {
                    retry.setValue(false);
                    int version = lep.getVersion();

                    MutableBoolean success = new MutableBoolean(true);
                    long start = MathUtils.nowInNano();
                    lep.getEntries((entry, offset) -> {
                        rateLimiter.ifPresent(RateLimiter::acquire);

                        try {
                            entryLogger.checkEntry(ledger, entry, offset);
                            checkedEntries.increment();
                        } catch (EntryLogger.EntryLookupException e) {
                            if (version != lep.getVersion()) {
                                pageRetries.increment();
                                if (lep.isDeleted()) {
                                    LOG.debug("localConsistencyCheck: ledger {} deleted", ledger);
                                } else {
                                    LOG.debug("localConsistencyCheck: concurrent modification, retrying");
                                    retry.setValue(true);
                                    retryCounter.inc();
                                }
                                return false;
                            } else {
                                errors.add(new DetectedInconsistency(ledger, entry, e));
                                LOG.error("Got error: ", e);
                            }
                            success.setValue(false);
                        }
                        return true;
                    });

                    if (success.booleanValue()) {
                        pageScanStats.registerSuccessfulEvent(MathUtils.elapsedNanos(start),
                                TimeUnit.NANOSECONDS);
                    } else {
                        pageScanStats.registerFailedEvent(MathUtils.elapsedNanos(start), TimeUnit.NANOSECONDS);
                    }
                } while (retry.booleanValue());
                checkedPages++;
            }
        } catch (NoLedgerException | FileInfo.FileInfoDeletedException e) {
            if (activeLedgers.containsKey(ledger)) {
                LOG.error("Cannot find ledger {}, should exist, exception is ", ledger, e);
                errors.add(new DetectedInconsistency(ledger, -1, e));
            } else {
                LOG.debug("ledger {} deleted since snapshot taken", ledger);
            }
        } catch (Exception e) {
            throw new IOException("Got other exception in localConsistencyCheck", e);
        }
        checkedLedgers++;
    }
    LOG.info(
            "Finished localConsistencyCheck, took {}s to scan {} ledgers, {} pages, "
                    + "{} entries with {} retries, {} errors",
            TimeUnit.NANOSECONDS.toSeconds(MathUtils.elapsedNanos(checkStart)), checkedLedgers, checkedPages,
            checkedEntries.longValue(), pageRetries.longValue(), errors.size());

    return errors;
}

From source file:org.apache.bookkeeper.util.collections.SynchronizedHashMultiMap.java

public synchronized Optional<V> removeAny(K k) {
    Set<Pair<K, V>> set = map.getOrDefault(k.hashCode(), Collections.emptySet());
    Optional<Pair<K, V>> pair = set.stream().filter(p -> p.getLeft().equals(k)).findAny();
    pair.ifPresent(p -> set.remove(p));
    return pair.map(p -> p.getRight());
}

From source file:org.apache.hadoop.hbase.client.AsyncRegionLocatorHelper.java

static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception,
        Function<HRegionLocation, HRegionLocation> cachedLocationSupplier, Consumer<HRegionLocation> addToCache,
        Consumer<HRegionLocation> removeFromCache, Optional<MetricsConnection> metrics) {
    HRegionLocation oldLoc = cachedLocationSupplier.apply(loc);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Try updating {} , the old value is {}, error={}", loc, oldLoc,
                exception != null ? exception.toString() : "none");
    }
    if (!canUpdateOnError(loc, oldLoc)) {
        return;
    }
    Throwable cause = findException(exception);
    if (LOG.isDebugEnabled()) {
        LOG.debug("The actual exception when updating {} is {}", loc,
                cause != null ? cause.toString() : "none");
    }
    if (cause == null || !isMetaClearingException(cause)) {
        LOG.debug("Will not update {} because the exception is null or not the one we care about", loc);
        return;
    }
    if (cause instanceof RegionMovedException) {
        RegionMovedException rme = (RegionMovedException) cause;
        HRegionLocation newLoc = new HRegionLocation(loc.getRegion(), rme.getServerName(),
                rme.getLocationSeqNum());
        LOG.debug("Try updating {} with the new location {} constructed by {}", loc, newLoc, rme.toString());
        addToCache.accept(newLoc);
    } else {
        LOG.debug("Try removing {} from cache", loc);
        metrics.ifPresent(m -> m.incrCacheDroppingExceptions(exception));
        removeFromCache.accept(loc);
    }
}