List of usage examples for java.util.Optional.orElse
public T orElse(T other)
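Returns the contained value if present, otherwise returns other. A minimal self-contained sketch of the contract (class name is illustrative):

import java.util.Optional;

public class OrElseDemo {
    public static void main(String[] args) {
        Optional<String> present = Optional.of("value");
        Optional<String> empty = Optional.empty();
        System.out.println(present.orElse("default")); // prints "value"
        System.out.println(empty.orElse("default"));   // prints "default"
    }
}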
From source file:com.oneops.antenna.senders.slack.SlackService.java
/**
 * Post messages in <b>parallel</b> to all configured slack channels.
 *
 * @param msg the msg
 * @param subscriber the subscriber
 * @return the accumulated boolean result of all the parallel operations.
 */
@Override
public boolean postMessage(NotificationMessage msg, BasicSubscriber subscriber) {
    SlackSubscriber sub = (SlackSubscriber) subscriber;
    String text = getText(msg, sub.getFormats());
    Attachment attach = getAttachment(msg, sub.isFieldsOn());
    // The final result would be the reduced value of all parallel post message operations.
    Optional<Boolean> result = sub.getChannels().parallelStream().map((c) -> {
        try {
            String token = slackCfg.getTeamTokenMap().get(c.getTeam());
            if (isEmpty(token)) {
                logger.error("Slack token is not configured for " + c + ", NsPath: " + msg.getNsPath());
                errCount.mark();
                return false;
            }
            SlackResponse res = slackClient.postMessage(token, c.getName(), text, attach).execute().body();
            if (res.isOk()) {
                msgCount.mark();
            } else {
                logger.error("Slack msg post failed for " + c + ", NsPath: " + msg.getNsPath()
                        + ", Error: " + res.getError());
                errCount.mark();
                return false;
            }
        } catch (Exception ex) {
            // Throws exception if there is some major issue with msg/transport. Needs to log exception here.
            logger.error("Slack msg post failed for " + c + ", NsPath: " + msg.getNsPath(), ex);
            errCount.mark();
            return false;
        }
        return true;
    }).reduce((a, b) -> a & b);
    return result.orElse(false);
}
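The pattern to note above is Stream#reduce feeding orElse: reducing an empty stream yields an empty Optional, so orElse(false) covers the no-channels case. A minimal sketch under that assumption:

// An empty stream reduces to an empty Optional; orElse(false) supplies
// the default outcome (here: treat "no channels" as failure).
List<Boolean> outcomes = List.of(); // e.g. no channels were configured
boolean allOk = outcomes.stream().reduce((a, b) -> a && b).orElse(false); // -> false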
From source file:it.polimi.diceH2020.SPACE4CloudWS.core.CoarseGrainedOptimizer.java
private List<Triple<Integer, Optional<Double>, Boolean>> alterUntilBreakPoint(SolutionPerJob solPerJob,
        Function<Integer, Integer> updateFunction, Function<Double, Double> fromResult,
        Predicate<Double> feasibilityCheck, Predicate<Double> stoppingCondition,
        BiPredicate<Double, Double> incrementCheck, Predicate<Integer> vmCheck) {
    List<Triple<Integer, Optional<Double>, Boolean>> lst = new ArrayList<>();
    Optional<Double> previous = Optional.empty();
    boolean shouldKeepGoing = true;
    while (shouldKeepGoing) {
        Pair<Optional<Double>, Long> simulatorResult = dataProcessor.simulateClass(solPerJob);
        Optional<Double> maybeResult = simulatorResult.getLeft();
        Optional<Double> interestingMetric = maybeResult.map(fromResult);
        Integer nVM = solPerJob.getNumberVM();
        lst.add(new ImmutableTriple<>(nVM, maybeResult,
                interestingMetric.filter(feasibilityCheck).isPresent()));
        boolean terminationCriterion = !checkState();
        logger.trace("terminationCriterion is " + terminationCriterion + " after checkState()");
        terminationCriterion |= vmCheck.test(nVM);
        logger.trace("terminationCriterion is " + terminationCriterion + " after vmCheck.test()");
        terminationCriterion |= interestingMetric.filter(stoppingCondition).isPresent();
        logger.trace("terminationCriterion is " + terminationCriterion + " after filter");
        if (previous.isPresent() && interestingMetric.isPresent()
                && (dataService.getScenario().getTechnology() != Technology.STORM
                        || interestingMetric.get() == 0.0)) {
            terminationCriterion |= incrementCheck.test(previous.get(), interestingMetric.get());
        }
        shouldKeepGoing = !terminationCriterion;
        previous = interestingMetric;
        if (dataService.getScenario().getTechnology() == Technology.STORM) {
            logger.trace(interestingMetric.orElse(Double.NaN) + " vs. " + solPerJob.getJob().getU());
        } else {
            logger.trace(interestingMetric.orElse(Double.NaN) + " vs. " + solPerJob.getJob().getD());
        }
        if (shouldKeepGoing) {
            String message = String.format("class %s -> num VM: %d, simulator result: %f, metric: %f",
                    solPerJob.getId(), nVM, maybeResult.orElse(Double.NaN),
                    interestingMetric.orElse(Double.NaN));
            logger.info(message);
            solPerJob.updateNumberVM(updateFunction.apply(nVM));
        }
    }
    return lst;
}
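Here orElse(Double.NaN) provides a printable sentinel for trace logging when the metric is absent. A minimal sketch:

// orElse(Double.NaN) keeps the log statement a single expression even
// when the simulator returned no value.
Optional<Double> metric = Optional.empty();
System.out.println(metric.orElse(Double.NaN) + " vs. " + 42.0); // prints "NaN vs. 42.0"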
From source file:com.ikanow.aleph2.search_service.elasticsearch.services.ElasticsearchIndexService.java
@Override
public Tuple2<String, List<BasicMessageBean>> validateSchema(final SearchIndexSchemaBean schema,
        final DataBucketBean bucket) {
    final LinkedList<BasicMessageBean> errors = new LinkedList<BasicMessageBean>(); // (Warning mutable code)
    try {
        Map<String, DataSchemaBean.ColumnarSchemaBean> tokenization_overrides = Optionals
                .of(() -> schema.tokenization_override()).orElse(Collections.emptyMap());
        final HashSet<String> unsupported_tokenization_overrides = new HashSet<String>(
                tokenization_overrides.keySet());
        unsupported_tokenization_overrides
                .removeAll(Arrays.asList(ElasticsearchIndexUtils.DEFAULT_TOKENIZATION_TYPE,
                        ElasticsearchIndexUtils.NO_TOKENIZATION_TYPE));
        if (!unsupported_tokenization_overrides.isEmpty()) {
            errors.add(ErrorUtils.buildErrorMessage(bucket.full_name(), "validateSchema",
                    SearchIndexErrorUtils.NOT_YET_SUPPORTED,
                    "tokenization_overrides: " + unsupported_tokenization_overrides.toString()));
        }
        Map<String, DataSchemaBean.ColumnarSchemaBean> type_overrides = Optionals
                .of(() -> schema.type_override()).orElse(Collections.emptyMap());
        type_overrides.keySet().stream().filter(type -> !_supported_types.contains(type))
                .forEach(type -> errors.add(ErrorUtils.buildErrorMessage(bucket.full_name(), "validateSchema",
                        SearchIndexErrorUtils.NOT_YET_SUPPORTED, "type: " + type)));
        // If the user is trying to override the index name then they have to be admin:
        final Optional<String> manual_index_name = Optionals
                .<String>of(() -> ((String) bucket.data_schema().search_index_schema()
                        .technology_override_schema().get(SearchIndexSchemaDefaultBean.index_name_override_)));
        if (manual_index_name.isPresent()) { // (then must be admin)
            if (!_service_context.getSecurityService().hasUserRole(bucket.owner_id(),
                    ISecurityService.ROLE_ADMIN)) {
                errors.add(ErrorUtils.buildErrorMessage(bucket.full_name(), "validateSchema",
                        SearchIndexErrorUtils.NON_ADMIN_BUCKET_NAME_OVERRIDE));
            }
        }
        final String index_name = ElasticsearchIndexUtils.getBaseIndexName(bucket, Optional.empty());
        boolean error = false; // (Warning mutable code)
        final boolean is_verbose = is_verbose(schema);
        final ElasticsearchIndexServiceConfigBean schema_config = ElasticsearchIndexConfigUtils
                .buildConfigBeanFromSchema(bucket, _config, _mapper);
        // 1) Check the schema:
        try {
            final Optional<String> type = Optional.ofNullable(schema_config.search_technology_override())
                    .map(t -> t.type_name_or_prefix());
            final String index_type = CollidePolicy.new_type == Optional
                    .ofNullable(schema_config.search_technology_override()).map(t -> t.collide_policy())
                    .orElse(CollidePolicy.new_type)
                            ? "_default_"
                            : type.orElse(ElasticsearchIndexServiceConfigBean.DEFAULT_FIXED_TYPE_NAME);
            final XContentBuilder mapping = ElasticsearchIndexUtils.createIndexMapping(bucket,
                    Optional.empty(), true, schema_config, _mapper, index_type);
            if (is_verbose) {
                errors.add(ErrorUtils.buildSuccessMessage(bucket.full_name(), "validateSchema",
                        mapping.bytes().toUtf8()));
            }
        } catch (Throwable e) {
            errors.add(ErrorUtils.buildErrorMessage(bucket.full_name(), "validateSchema",
                    ErrorUtils.getLongForm("{0}", e)));
            error = true;
        }
        // 2) Sanity check the max size
        final Optional<Long> index_max_size = Optional
                .ofNullable(schema_config.search_technology_override().target_index_size_mb());
        if (index_max_size.isPresent()) {
            final long max = index_max_size.get();
            if ((max > 0) && (max < 25)) {
                errors.add(ErrorUtils.buildErrorMessage(bucket.full_name(), "validateSchema",
                        SearchIndexErrorUtils.INVALID_MAX_INDEX_SIZE, max));
                error = true;
            } else if (is_verbose) {
                errors.add(ErrorUtils.buildSuccessMessage(bucket.full_name(), "validateSchema",
                        "Max index size = {0} MB", max));
            }
        }
        return Tuples._2T(error ? "" : index_name, errors);
    } catch (Exception e) {
        // Very early error has occurred, just report that:
        return Tuples._2T("", Arrays.asList(ErrorUtils.buildErrorMessage(bucket.full_name(), "validateSchema",
                ErrorUtils.getLongForm("{0}", e))));
    }
}
From source file:org.apache.nifi.schemaregistry.hortonworks.HortonworksSchemaRegistry.java
private RecordSchema retrieveSchemaByName(final SchemaIdentifier schemaIdentifier)
        throws org.apache.nifi.schema.access.SchemaNotFoundException, IOException {
    final SchemaRegistryClient client = getClient();
    final SchemaVersionInfo versionInfo;
    final Long schemaId;

    final Optional<String> schemaName = schemaIdentifier.getName();
    if (!schemaName.isPresent()) {
        throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                "Cannot retrieve schema because Schema Name is not present");
    }

    final Optional<String> schemaBranchName = schemaIdentifier.getBranch();
    final OptionalInt schemaVersion = schemaIdentifier.getVersion();
    try {
        final SchemaMetadataInfo metadataInfo = client.getSchemaMetadataInfo(schemaName.get());
        if (metadataInfo == null) {
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                    "Could not find schema with name '" + schemaName + "'");
        }
        schemaId = metadataInfo.getId();
        if (schemaId == null) {
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(
                    "Could not find schema with name '" + schemaName + "'");
        }
        // possible scenarios are name only, name + branch, or name + version
        if (schemaVersion.isPresent()) {
            final SchemaVersionKey schemaVersionKey = new SchemaVersionKey(schemaName.get(),
                    schemaVersion.getAsInt());
            versionInfo = getSchemaVersionInfo(client, schemaVersionKey);
        } else {
            versionInfo = getLatestSchemaVersionInfo(client, schemaName.get(), schemaBranchName.orElse(null));
        }
        if (versionInfo == null || versionInfo.getVersion() == null) {
            final String message = createErrorMessage("Could not find schema", schemaName, schemaBranchName,
                    schemaVersion);
            throw new org.apache.nifi.schema.access.SchemaNotFoundException(message);
        }
    } catch (final Exception e) {
        final String message = createErrorMessage("Failed to retrieve schema", schemaName, schemaBranchName,
                schemaVersion);
        handleException(message, e);
        return null;
    }

    final String schemaText = versionInfo.getSchemaText();
    final SchemaIdentifier resultSchemaIdentifier = SchemaIdentifier.builder().id(schemaId)
            .name(schemaName.get()).branch(schemaBranchName.orElse(null)).version(versionInfo.getVersion())
            .build();
    final Tuple<SchemaIdentifier, String> tuple = new Tuple<>(resultSchemaIdentifier, schemaText);
    return schemaNameToSchemaMap.computeIfAbsent(tuple, t -> {
        final Schema schema = new Schema.Parser().parse(schemaText);
        return AvroTypeUtil.createSchema(schema, schemaText, resultSchemaIdentifier);
    });
}
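Note schemaBranchName.orElse(null): orElse(null) unwraps an Optional at the boundary of an API that still models "absent" as null. A minimal sketch:

// The downstream call treats a null branch as "use the default branch",
// so the Optional is unwrapped with orElse(null) at the call boundary.
Optional<String> branch = Optional.empty(); // identifier carried no branch
String nullableBranch = branch.orElse(null);
System.out.println(nullableBranch); // prints "null"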
From source file:com.orange.ngsi2.server.Ngsi2BaseController.java
/**
 * Discover registrations matching entities and their attributes
 * @param bulkQueryRequest defines the list of entities, attributes and scopes to match registrations
 * @param offset an optional offset (0 for none)
 * @param limit an optional limit (0 for none)
 * @param options an optional list of options separated by comma. Possible value for option: count.
 *        If count is present then the total number of registrations is returned in the response as a
 *        HTTP header named `X-Total-Count`.
 * @return a paginated list of registrations
 */
@RequestMapping(method = RequestMethod.POST, value = { "/op/discover" }, consumes = MediaType.APPLICATION_JSON_VALUE)
final public ResponseEntity<List<Registration>> bulkDiscoverEndpoint(
        @RequestBody BulkQueryRequest bulkQueryRequest, @RequestParam Optional<Integer> limit,
        @RequestParam Optional<Integer> offset, @RequestParam Optional<Set<String>> options) {
    validateSyntax(bulkQueryRequest);
    boolean count = false;
    if (options.isPresent()) {
        Set<String> optionsSet = options.get();
        count = optionsSet.contains("count");
    }
    Paginated<Registration> paginatedRegistration = bulkDiscover(bulkQueryRequest, limit.orElse(0),
            offset.orElse(0), count);
    if (count) {
        return new ResponseEntity<>(paginatedRegistration.getItems(),
                xTotalCountHeader(paginatedRegistration.getTotal()), HttpStatus.OK);
    } else {
        return new ResponseEntity<>(paginatedRegistration.getItems(), HttpStatus.OK);
    }
}
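limit.orElse(0) and offset.orElse(0) collapse absent query parameters to the documented "0 for none" convention before the service call. A minimal sketch:

// Optional request parameters are defaulted once, so the service layer
// only ever sees concrete values (0 meaning "no limit"/"no offset").
int effectiveLimit = Optional.<Integer>empty().orElse(0); // request sent no limit
int effectiveOffset = Optional.of(20).orElse(0);          // request sent offset=20
System.out.println(effectiveLimit + ", " + effectiveOffset); // prints "0, 20"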
From source file:org.silverpeas.core.webapi.calendar.CalendarWebManager.java
/**
 * Deletes occurrences of an event from the given occurrence.<br>
 * This method also handles the common behavior the UI must have whichever way an event is
 * deleted (from a controller, a WEB service...)
 * @param occurrence the occurrence to delete.
 * @param deleteMethodType indicates the method of the occurrence deletion.
 * @param zoneId the zoneId into which dates are displayed (optional).
 */
CalendarEvent deleteOccurrence(CalendarEventOccurrence occurrence,
        OccurrenceEventActionMethodType deleteMethodType, final ZoneId zoneId) {
    if (!occurrence.getCalendarEvent().canBeDeletedBy(User.getCurrentRequester())) {
        throw new WebApplicationException(Response.Status.FORBIDDEN);
    }
    OccurrenceEventActionMethodType methodType = deleteMethodType == null ? ALL : deleteMethodType;
    final EventOperationResult result;
    switch (methodType) {
    case FROM:
        result = occurrence.deleteSinceMe();
        break;
    case UNIQUE:
        result = occurrence.delete();
        break;
    default:
        result = occurrence.getCalendarEvent().delete();
        break;
    }
    Optional<CalendarEvent> updatedEvent = result.updated();
    if (!updatedEvent.isPresent() || !updatedEvent.get().isRecurrent()) {
        successMessage("calendar.message.event.deleted", occurrence.getTitle());
    } else {
        final String bundleKey;
        final Temporal endDate;
        if (methodType == UNIQUE) {
            bundleKey = "calendar.message.event.occurrence.deleted.unique";
            endDate = occurrence.getOriginalStartDate();
        } else {
            bundleKey = "calendar.message.event.occurrence.deleted.from";
            //noinspection OptionalGetWithoutIsPresent
            endDate = updatedEvent.get().getRecurrence().getRecurrenceEndDate().get();
        }
        successMessage(bundleKey, occurrence.getTitle(),
                getMessager().formatDate(getDateWithOffset(occurrence.asCalendarComponent(), endDate, zoneId)));
    }
    return updatedEvent.orElse(null);
}
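The return value shows a common shape: an operation result exposes the possibly-updated entity as an Optional, and each caller chooses its own default. A minimal sketch with hypothetical values:

// Different call sites can pick different defaults from the same Optional.
Optional<String> updated = Optional.empty();     // e.g. the whole event was deleted
String forLegacyCaller = updated.orElse(null);   // nullable return for older callers
String forDisplay = updated.orElse("(deleted)"); // sentinel for UI code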
From source file:com.ikanow.aleph2.analytics.storm.services.MockAnalyticsContext.java
@Override
public String getAnalyticsContextSignature(final Optional<DataBucketBean> bucket,
        final Optional<Set<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>>> services) {
    if (_state_name == State.IN_TECHNOLOGY) {
        // Returns a config object containing:
        // - set up for any of the services described
        // - all the rest of the configuration
        // - the bucket bean ID
        final Config full_config = ModuleUtils.getStaticConfig()
                .withoutPath(DistributedServicesPropertyBean.APPLICATION_NAME)
                .withoutPath("MongoDbManagementDbService.v1_enabled"); // (special workaround for V1 sync service)
        final Optional<Config> service_config = PropertiesUtils.getSubConfig(full_config, "service");
        final ImmutableSet<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>> complete_services_set = ImmutableSet
                .<Tuple2<Class<? extends IUnderlyingService>, Optional<String>>>builder()
                .addAll(services.orElse(Collections.emptySet()))
                .add(Tuples._2T(ICoreDistributedServices.class, Optional.empty()))
                .add(Tuples._2T(IManagementDbService.class, Optional.empty()))
                .add(Tuples._2T(ISearchIndexService.class, Optional.empty()))
                .add(Tuples._2T(ISecurityService.class, Optional.empty()))
                .add(Tuples._2T(IStorageService.class, Optional.empty()))
                .add(Tuples._2T(IManagementDbService.class, IManagementDbService.CORE_MANAGEMENT_DB)).build();
        if (_mutable_state.service_manifest_override.isSet()) {
            if (!complete_services_set.equals(_mutable_state.service_manifest_override.get())) {
                throw new RuntimeException(ErrorUtils.SERVICE_RESTRICTIONS);
            }
        } else {
            _mutable_state.service_manifest_override.set(complete_services_set);
        }
        final Config config_no_services = full_config.withoutPath("service");
        // Ugh need to add: core deps, core + underlying management db to this list
        final Config service_subset = complete_services_set.stream() // DON'T MAKE PARALLEL SEE BELOW
                .map(clazz_name -> {
                    final String config_path = clazz_name._2()
                            .orElse(clazz_name._1().getSimpleName().substring(1));
                    return service_config.get().hasPath(config_path)
                            ? Tuples._2T(config_path, service_config.get().getConfig(config_path))
                            : null;
                }).filter(cfg -> null != cfg).reduce(ConfigFactory.empty(),
                        (acc, k_v) -> acc.withValue(k_v._1(), k_v._2().root()),
                        (acc1, acc2) -> acc1 // (This will never be called as long as the above stream is not parallel)
                );
        final Config config_subset_services = config_no_services.withValue("service", service_subset.root());
        final Config last_call = Lambdas
                .get(() -> _mutable_state.library_configs.isSet()
                        ? config_subset_services.withValue(__MY_MODULE_LIBRARY_ID,
                                ConfigValueFactory.fromAnyRef(BeanTemplateUtils
                                        .toJson(new LibraryContainerBean(_mutable_state.library_configs.get()
                                                .entrySet().stream()
                                                .filter(kv -> kv.getValue().path_name().equals(kv.getKey()))
                                                .map(kv -> kv.getValue()).collect(Collectors.toList())))
                                        .toString()))
                        : config_subset_services)
                .withValue(__MY_BUCKET_ID, ConfigValueFactory.fromAnyRef(
                        BeanTemplateUtils.toJson(bucket.orElseGet(() -> _mutable_state.bucket.get())).toString()))
                .withValue(__MY_TECH_LIBRARY_ID, ConfigValueFactory.fromAnyRef(
                        BeanTemplateUtils.toJson(_mutable_state.technology_config.get()).toString()));
        final String ret1 = last_call.root().render(ConfigRenderOptions.concise());
        _mutable_state.signature_override.set(ret1);
        final String ret = this.getClass().getName() + ":" + ret1;
        this.overrideSavedContext(); // (FOR TESTING ONLY - ie BECAUSE THIS IS MOCK ANALYTIC CONTEXT)
        return ret;
    } else {
        throw new RuntimeException(ErrorUtils.TECHNOLOGY_NOT_MODULE);
    }
}
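This example uses both defaulting flavors: services.orElse(Collections.emptySet()) for a cheap constant, and bucket.orElseGet(() -> ...) to defer a lookup that is only needed when the Optional is empty. A minimal sketch of the distinction:

// orElse suits constant defaults; orElseGet takes a Supplier and only
// evaluates it when the Optional is actually empty.
Set<String> requested = Optional.<Set<String>>empty().orElse(Collections.emptySet());
String bucketName = Optional.<String>empty().orElseGet(() -> "default-bucket"); // supplier runs here
System.out.println(requested + ", " + bucketName); // prints "[], default-bucket"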
From source file:com.epam.ta.reportportal.core.launch.impl.FinishLaunchHandler.java
@Override
public OperationCompletionRS finishLaunch(String launchId, FinishExecutionRQ finishLaunchRQ,
        String projectName, String username) {
    Launch launch = launchRepository.findOne(launchId);
    validate(launchId, launch, finishLaunchRQ);
    Project project = validateRoles(launch, username, projectName);

    launch.setEndTime(finishLaunchRQ.getEndTime());
    Optional<Status> status = fromValue(finishLaunchRQ.getStatus());
    status.ifPresent(providedStatus -> {
        /* Validate provided status */
        expect(providedStatus, not(Preconditions.statusIn(IN_PROGRESS, SKIPPED))).verify(
                INCORRECT_FINISH_STATUS,
                formattedSupplier("Cannot finish launch '{}' with status '{}'", launchId, providedStatus));
        /* Validate actual launch status */
        if (PASSED.equals(providedStatus)) {
            expect(launch.getStatus(), Preconditions.statusIn(IN_PROGRESS, PASSED)).verify(
                    INCORRECT_FINISH_STATUS,
                    formattedSupplier("Cannot finish launch '{}' with current status '{}' as 'PASSED'",
                            launchId, launch.getStatus()));
            /*
             * Calculate status from launch statistics and validate it
             */
            Status fromStatistics = StatisticsHelper.getStatusFromStatistics(launch.getStatistics());
            expect(fromStatistics, Preconditions.statusIn(IN_PROGRESS, PASSED)).verify(INCORRECT_FINISH_STATUS,
                    formattedSupplier(
                            "Cannot finish launch '{}' with calculated automatically status '{}' as 'PASSED'",
                            launchId, fromStatistics));
        }
    });
    launch.setStatus(status.orElse(StatisticsHelper.getStatusFromStatistics(launch.getStatistics())));
    try {
        launchRepository.save(launch);
    } catch (Exception exp) {
        throw new ReportPortalException("Error while Launch updating.", exp);
    }
    eventPublisher.publishEvent(new LaunchFinishedEvent(launch, project));
    return new OperationCompletionRS("Launch with ID = '" + launchId + "' successfully finished.");
}
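One caveat worth noting: the argument to orElse is evaluated eagerly, so getStatusFromStatistics(...) above runs even when a status was provided; orElseGet would defer it. A minimal self-contained sketch (class and method names are illustrative):

import java.util.Optional;

public class OrElseCaveat {
    static String fallback() {
        System.out.println("fallback computed");
        return "PASSED";
    }

    public static void main(String[] args) {
        Optional<String> provided = Optional.of("FAILED");
        provided.orElse(fallback());                // "fallback computed" prints even though a value exists
        provided.orElseGet(OrElseCaveat::fallback); // nothing prints; the Supplier is never invoked
    }
}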