diff --git a/spp_programs/__manifest__.py b/spp_programs/__manifest__.py
index c7eefaa1..f84a56ae 100644
--- a/spp_programs/__manifest__.py
+++ b/spp_programs/__manifest__.py
@@ -4,7 +4,7 @@
"name": "OpenSPP Programs",
"summary": "Manage programs, cycles, beneficiary enrollment, entitlements (cash and in-kind), payments, and fund tracking for social protection.",
"category": "OpenSPP/Core",
- "version": "19.0.2.0.6",
+ "version": "19.0.2.0.7",
"sequence": 1,
"author": "OpenSPP.org",
"website": "https://github.com/OpenSPP/OpenSPP2",
diff --git a/spp_programs/models/managers/program_manager.py b/spp_programs/models/managers/program_manager.py
index eccb41a2..0162f708 100644
--- a/spp_programs/models/managers/program_manager.py
+++ b/spp_programs/models/managers/program_manager.py
@@ -3,7 +3,7 @@
from datetime import datetime, timedelta
from odoo import _, api, fields, models
-from odoo.exceptions import UserError
+from odoo.exceptions import UserError, ValidationError
from odoo.addons.job_worker.delay import group
@@ -219,12 +219,59 @@ def _enroll_eligible_registrants(self, states, offset=0, limit=None, do_count=Fa
_logger.debug("members filtered: %s", members)
not_enrolled = members.filtered(lambda m: m.state not in ("enrolled", "duplicated", "exited"))
_logger.debug("not_enrolled: %s", not_enrolled)
- not_enrolled.write(
+
+ # Run pre-enrollment hooks (e.g., scoring eligibility checks).
+ # Members that fail the hook are moved to not_eligible.
+ hook_failed = self.env["spp.program.membership"]
+ for member in not_enrolled:
+ try:
+ program._pre_enrollment_hook(member.partner_id)
+ except (ValidationError, UserError) as e:
+ _logger.info(
+ "Pre-enrollment hook rejected registrant %s: %s",
+ member.partner_id.id,
+ str(e),
+ )
+ hook_failed |= member
+
+ # Re-check already-enrolled members against the current
+ # eligibility rules. "Verify Eligibility" implies a fresh check;
+ # an enrolled registrant whose data became invalid (e.g. a
+ # required indicator now resolves to a sentinel string) must be
+ # demoted, not silently kept enrolled. We work from `member_before`
+ # (pre-eligibility-manager-filter) so an enrolled member that the
+ # default manager would silently skip is still re-checked here.
+ # See OP#838.
+ already_enrolled = member_before.filtered(lambda m: m.state == "enrolled")
+ re_verify_failed = self.env["spp.program.membership"]
+ for member in already_enrolled:
+ try:
+ program._pre_enrollment_hook(member.partner_id)
+ except (ValidationError, UserError) as e:
+ _logger.info(
+ "Re-verify rejected enrolled registrant %s: %s",
+ member.partner_id.id,
+ str(e),
+ )
+ re_verify_failed |= member
+
+ enrollable = not_enrolled - hook_failed
+ if hook_failed:
+ hook_failed.write({"state": "not_eligible"})
+
+ if re_verify_failed:
+ re_verify_failed.write({"state": "not_eligible"})
+
+ enrollable.write(
{
"state": "enrolled",
"enrollment_date": fields.Datetime.now(),
}
)
+
+ # Run post-enrollment hooks (e.g., auto-score on enrollment)
+ for member in enrollable:
+ program._post_enrollment_hook(member.partner_id)
# dis-enroll the one not eligible anymore:
enrolled_members_ids = members.ids
members_to_remove = member_before.filtered(
@@ -242,4 +289,4 @@ def _enroll_eligible_registrants(self, states, offset=0, limit=None, do_count=Fa
program._compute_eligible_beneficiary_count()
program._compute_beneficiary_count()
- return len(not_enrolled)
+ return len(enrollable)
diff --git a/spp_scoring/__manifest__.py b/spp_scoring/__manifest__.py
index 5d805028..af4da941 100644
--- a/spp_scoring/__manifest__.py
+++ b/spp_scoring/__manifest__.py
@@ -1,7 +1,7 @@
{
"name": "OpenSPP Scoring",
"category": "OpenSPP/Targeting",
- "version": "19.0.2.0.0",
+ "version": "19.0.2.0.3",
"sequence": 1,
"author": "OpenSPP.org",
"website": "https://github.com/OpenSPP/OpenSPP2",
@@ -33,7 +33,9 @@
"views/scoring_model_views.xml",
"views/scoring_indicator_views.xml",
"views/scoring_threshold_views.xml",
+ "views/scoring_invalid_value_views.xml",
"views/scoring_result_views.xml",
+ "views/res_partner_views.xml",
# Wizards (must be before menus so actions are available)
"wizard/batch_scoring_wizard_views.xml",
# Menus (last, references actions from other files)
diff --git a/spp_scoring/models/__init__.py b/spp_scoring/models/__init__.py
index dff27cc5..1f66b445 100644
--- a/spp_scoring/models/__init__.py
+++ b/spp_scoring/models/__init__.py
@@ -2,9 +2,11 @@
from . import scoring_indicator
from . import scoring_value_mapping
from . import scoring_threshold
+from . import scoring_invalid_value
from . import scoring_result
from . import scoring_result_detail
from . import scoring_engine
from . import scoring_batch_job
from . import scoring_indicator_provider
from . import scoring_data_integration
+from . import res_partner
diff --git a/spp_scoring/models/res_partner.py b/spp_scoring/models/res_partner.py
new file mode 100644
index 00000000..439f6f39
--- /dev/null
+++ b/spp_scoring/models/res_partner.py
@@ -0,0 +1,61 @@
+# Part of OpenSPP. See LICENSE file for full copyright and licensing details.
+"""Extends res.partner with scoring result count and actions."""
+
+from odoo import api, fields, models
+
+
+class ResPartner(models.Model):
+ """Add scoring smart button to registrant form."""
+
+ _inherit = "res.partner"
+
+ scoring_result_ids = fields.One2many(
+ "spp.scoring.result",
+ "registrant_id",
+ string="Scoring Results",
+ )
+ scoring_result_count = fields.Integer(
+ string="# Scores",
+ compute="_compute_scoring_result_count",
+ )
+
+ @api.depends("scoring_result_ids")
+ def _compute_scoring_result_count(self):
+ for partner in self:
+ partner.scoring_result_count = len(partner.scoring_result_ids)
+
+ def action_view_scoring_results(self):
+ """Open scoring results for this registrant."""
+ self.ensure_one()
+ return {
+ "name": self.name,
+ "type": "ir.actions.act_window",
+ "res_model": "spp.scoring.result",
+ "view_mode": "list,form",
+ "domain": [("registrant_id", "=", self.id)],
+ "context": {"default_registrant_id": self.id},
+ }
+
+ def action_score_registrant(self):
+ """Open the batch scoring wizard pre-filled for the selected registrant(s).
+
+ Works for both single-record (form view) and multi-record
+ (list-view bulk selection) calls. The wizard's `registrant_ids`
+ m2m takes precedence over `registrant_domain` (see
+ wizard/batch_scoring_wizard.py:70), so we only need to seed the
+ m2m. The redundant `default_registrant_domain` keeps the wizard form's
+ "Domain" field readable when a user opens it on one record.
+ """
+ if not self:
+ return False
+ ctx = {"default_registrant_ids": self.ids}
+ if len(self) == 1:
+ ctx["default_registrant_domain"] = f"[('id', '=', {self.id})]"
+ return {
+ "name": "Score Registrant",
+ "type": "ir.actions.act_window",
+ "res_model": "spp.batch.scoring.wizard",
+ "view_mode": "form",
+ "target": "new",
+ "context": ctx,
+ }
diff --git a/spp_scoring/models/scoring_data_integration.py b/spp_scoring/models/scoring_data_integration.py
index 12842051..e855fe7c 100644
--- a/spp_scoring/models/scoring_data_integration.py
+++ b/spp_scoring/models/scoring_data_integration.py
@@ -151,8 +151,13 @@ def calculate_score(self, registrant, scoring_model, mode="manual"):
# Call original method
result = super().calculate_score(registrant, scoring_model, mode)
- # Store in unified cache if enabled
- if scoring_model.cache_scores:
+ # Only cache *complete* results — strict-mode required-indicator
+ # failures persist an audit row with is_complete=False and a
+ # zeroed score; publishing that to spp.data.value would let CEL
+ # consumers (e.g. `r.demo_hva_2025_score >= 0.6`) read 0 and
+ # treat the registrant as scored-low rather than not-scored.
+ # See OP#838.
+ if scoring_model.cache_scores and result.is_complete:
self._store_score_in_cache(result, registrant, scoring_model)
return result
diff --git a/spp_scoring/models/scoring_engine.py b/spp_scoring/models/scoring_engine.py
index 18ae8acc..1c00d52d 100644
--- a/spp_scoring/models/scoring_engine.py
+++ b/spp_scoring/models/scoring_engine.py
@@ -71,6 +71,12 @@ def calculate_score(self, registrant, scoring_model, mode="manual"):
# Get sorted indicators (prefetched)
indicators = scoring_model.indicator_ids.sorted(key=lambda i: i.sequence)
+ # Track required-indicator failures separately. In strict mode
+ # these mark the whole result as incomplete (audit row only — no
+ # score is published downstream); other indicator errors are
+ # recorded in error_messages but don't block the score.
+ required_failures = []
+
# Process each indicator
for indicator in indicators:
result = self._calculate_indicator(indicator, registrant)
@@ -79,6 +85,7 @@ def calculate_score(self, registrant, scoring_model, mode="manual"):
if result.get("error"):
errors.append(result["error"])
if is_strict_mode and indicator.is_required:
+ required_failures.append(indicator)
continue
total_score += result.get("weighted_score", 0.0)
@@ -115,13 +122,20 @@ def calculate_score(self, registrant, scoring_model, mode="manual"):
if calculation_method == "cel_formula":
total_score = self._calculate_cel_total(scoring_model, registrant, inputs_snapshot)
- # Determine classification (thresholds already prefetched)
- classification = self._get_classification(total_score, scoring_model)
-
- # Check for errors in strict mode
- is_complete = True
- if errors and is_strict_mode:
- is_complete = False
+ # Strict mode + at least one required-indicator failure → the
+ # result is persisted as an audit row but flagged incomplete.
+ # Score is zeroed and classification cleared so a downstream
+ # consumer that forgets to gate on `is_complete` still gets
+ # nothing meaningful. The cache layer in
+ # scoring_data_integration._store_score_in_cache and the
+ # cached-lookup path in scoring_result.get_latest_score both
+ # filter on is_complete=True. See OP#838.
+ is_complete = not (is_strict_mode and required_failures)
+ if not is_complete:
+ total_score = 0.0
+ classification = {"code": False, "label": False, "color": False}
+ else:
+ classification = self._get_classification(total_score, scoring_model)
# Create result record
Result = self.env["spp.scoring.result"]
@@ -164,8 +178,9 @@ def _prefetch_model_data(self, scoring_model):
This ensures indicators, value mappings, and thresholds are loaded
in a minimal number of queries before processing.
"""
- # Prefetch indicators with their value mappings
+ # Prefetch indicators with their value mappings + invalid values
scoring_model.indicator_ids.mapped("value_mapping_ids")
+ scoring_model.indicator_ids.mapped("invalid_value_ids")
# Prefetch thresholds
list(scoring_model.threshold_ids)
@@ -206,10 +221,25 @@ def _calculate_indicator(self, indicator, registrant):
result["field_value"] = field_value
- # Handle missing values
- if field_value is None:
+ # Handle missing or falsy values.
+ # For required indicators we treat None, boolean False, and
+ # empty containers/strings as "missing" — but numeric 0 / 0.0
+ stays a valid value (a real score). The previous
+ `field_value is None` check let boolean False — the ORM's
+ empty-field value for unset Char/Date fields — slip through
+ and bypass strict mode entirely. See OP#838.
+ #
+ # Each indicator can pick a subset of the curated Invalid
+ # Values library (spp.scoring.invalid.value) — only active
+ # rows count. Matching strings are treated as missing for
+ # both required and non-required indicators.
+ invalid_values = set(indicator.invalid_value_ids.filtered("active").mapped("name"))
+ if self._is_missing_value(field_value, indicator.is_required, invalid_values):
if indicator.is_required:
- result["error"] = _("Required field '%s' is missing.") % indicator.field_path
+ result["error"] = _("Required field '%(path)s' has no valid value (got: %(value)s).") % {
+ "path": indicator.field_path,
+ "value": repr(field_value),
+ }
return result
field_value = indicator._convert_default_value()
@@ -244,6 +274,37 @@ def _calculate_indicator(self, indicator, registrant):
return result
+ @staticmethod
+ def _is_missing_value(field_value, is_required, invalid_patterns=None):
+ """Decide whether a field value should be treated as 'missing'.
+
+ Non-required indicators only treat ``None`` as missing — anything
+ else (including 0, "", False) flows through to the indicator's
+ own calculation, which decides what to do with it.
+
+ Required indicators additionally treat boolean ``False`` and
+ empty strings/containers as missing. Numeric 0 (int or float),
+ and the literal ``True``, stay valid.
+
+ ``invalid_patterns`` is the set of curated invalid-value strings
+ linked to the indicator (rows of ``spp.scoring.invalid.value``).
+ Matching strings are treated as missing for both required and
+ non-required indicators.
+ """
+ if field_value is None:
+ return True
+ # Configured invalid-value strings always count as missing.
+ if invalid_patterns and isinstance(field_value, str) and field_value.strip() in invalid_patterns:
+ return True
+ if not is_required:
+ return False
+ # Booleans subclass int in Python, so check this branch first.
+ if isinstance(field_value, bool):
+ return not field_value
+ if isinstance(field_value, (int, float)):
+ return False
+ return not field_value
+
def _evaluate_indicator_formula(self, indicator, registrant):
"""Evaluate a CEL formula for an indicator."""
if not indicator.cel_expression:
@@ -401,6 +462,21 @@ def _batch_score_sync(self, registrants, scoring_model, options):
scoring_model,
mode="batch",
)
+ # Strict-mode required-indicator failures persist an
+ # incomplete audit row instead of raising. Count those as
+ # batch failures so the wizard summary shows the real
+ # picture and fail_fast still works.
+ if not result.is_complete:
+ errors.append(
+ {
+ "registrant_id": registrant.id,
+ "registrant_name": registrant.name,
+ "error": result.error_messages or _("Scoring incomplete"),
+ }
+ )
+ if options.get("fail_fast"):
+ raise UserError(result.error_messages or _("Scoring incomplete"))
+ continue
results.append(result)
except Exception as e:
@@ -590,7 +666,9 @@ def get_or_calculate_score(self, registrant, scoring_model, max_age_days=None):
"""
Result = self.env["spp.scoring.result"]
- if max_age_days:
+ # max_age_days > 0: reuse cached score if fresh enough
+ # max_age_days = 0 or None: always recalculate
+ if max_age_days and max_age_days > 0:
existing = Result.get_latest_score(registrant, scoring_model, max_age_days)
if existing:
return existing
diff --git a/spp_scoring/models/scoring_indicator.py b/spp_scoring/models/scoring_indicator.py
index 6eb36d1a..bcf4df3e 100644
--- a/spp_scoring/models/scoring_indicator.py
+++ b/spp_scoring/models/scoring_indicator.py
@@ -93,6 +93,16 @@ class SppScoringIndicator(models.Model):
default=0.0,
help="Score to use when value is missing or not mapped",
)
+ invalid_value_ids = fields.Many2many(
+ comodel_name="spp.scoring.invalid.value",
+ string="Invalid Values",
+ help=(
+ "Pick from the curated library of strings that should be "
+ "treated as missing for this indicator (e.g. 'No Birthdate!'). "
+ "Manage the library under Scoring → Configuration → "
+ "Invalid Values."
+ ),
+ )
# Value mappings
value_mapping_ids = fields.One2many(
diff --git a/spp_scoring/models/scoring_invalid_value.py b/spp_scoring/models/scoring_invalid_value.py
new file mode 100644
index 00000000..417f2dd2
--- /dev/null
+++ b/spp_scoring/models/scoring_invalid_value.py
@@ -0,0 +1,39 @@
+"""Curated list of string values that should be treated as missing during
+scoring (e.g. ``'No Birthdate!'`` returned by computed fields when their
+underlying source is empty). Indicators opt in individually via their
+``invalid_value_ids`` selection; only active linked entries are matched
+against each field value read. Toggle ``active`` to retire a sentinel without
+losing the audit trail."""
+
+from odoo import fields, models
+
+
+class SppScoringInvalidValue(models.Model):
+ _name = "spp.scoring.invalid.value"
+ _description = "Scoring Invalid Value"
+ _order = "name"
+
+ name = fields.Char(
+ string="Value",
+ required=True,
+ help="Exact string that should be treated as missing during scoring.",
+ )
+ description = fields.Char(
+ help="Optional note explaining why this value is treated as invalid.",
+ )
+ active = fields.Boolean(
+ default=True,
+ help=(
+ "Untick to disable this entry without deleting it — useful when "
+ "a previously-invalid value should be accepted as real data "
+ "going forward."
+ ),
+ )
+
+ _sql_constraints = [
+ (
+ "spp_scoring_invalid_value_name_uniq",
+ "unique(name)",
+ "An invalid-value entry with this string already exists.",
+ ),
+ ]
diff --git a/spp_scoring/models/scoring_model.py b/spp_scoring/models/scoring_model.py
index 0d141f14..569cfd0b 100644
--- a/spp_scoring/models/scoring_model.py
+++ b/spp_scoring/models/scoring_model.py
@@ -116,11 +116,13 @@ class SppScoringModel(models.Model):
comodel_name="spp.scoring.indicator",
inverse_name="model_id",
string="Indicators",
+ copy=True,
)
threshold_ids = fields.One2many(
comodel_name="spp.scoring.threshold",
inverse_name="model_id",
string="Thresholds",
+ copy=True,
)
result_ids = fields.One2many(
comodel_name="spp.scoring.result",
@@ -205,7 +207,10 @@ def action_activate(self):
for record in self:
errors = record._validate_configuration()
if errors:
- raise ValidationError(_("Cannot activate model. Validation errors:\n%s") % "\n".join(errors))
+ raise ValidationError(
+ _("Cannot activate model '%(name)s'. Validation errors:\n%(errors)s")
+ % {"name": record.name, "errors": "\n".join(f"• {e}" for e in errors)}
+ )
record.is_active = True
return True
@@ -221,11 +226,11 @@ def _validate_configuration(self):
# Check indicators exist
if not self.indicator_ids:
- errors.append(_("At least one indicator is required."))
+ errors.append(_("No indicators defined. Add at least one indicator in the Indicators tab."))
# Check thresholds exist
if not self.threshold_ids:
- errors.append(_("At least one threshold is required."))
+ errors.append(_("No thresholds defined. Add at least one threshold in the Thresholds tab."))
# Check weights sum correctly (if expected)
if self.expected_total_weight > 0:
@@ -258,21 +263,41 @@ def _validate_configuration(self):
return errors
def _validate_thresholds(self):
- """Check that thresholds cover the expected score range without gaps."""
+ """Check that thresholds cover the expected score range without gaps or overlaps."""
errors = []
if not self.threshold_ids:
return errors
sorted_thresholds = self.threshold_ids.sorted(key=lambda t: t.min_score)
- # Check for gaps between thresholds
+ # Check all consecutive threshold boundaries for gaps and overlaps.
+ # matches_score uses inclusive bounds on both ends, so shared
+ # boundaries (e.g. 0–20 / 20–40) overlap at the shared value.
+ # Round to 2 decimal places to avoid IEEE-754 false positives
+ # (e.g., 20.00 - 19.99 computing to 0.010000000000000009).
for i, threshold in enumerate(sorted_thresholds[:-1]):
next_threshold = sorted_thresholds[i + 1]
- gap = next_threshold.min_score - threshold.max_score
+ gap = round(next_threshold.min_score - threshold.max_score, 2)
+
if gap > 0.01:
errors.append(
- _("Gap detected between thresholds '%(current)s' and '%(next)s'.")
- % {"current": threshold.name, "next": next_threshold.name}
+ _("Gap detected between thresholds '%(current)s' (max %(max)s) and '%(next)s' (min %(min)s).")
+ % {
+ "current": threshold.name,
+ "max": threshold.max_score,
+ "next": next_threshold.name,
+ "min": next_threshold.min_score,
+ }
+ )
+ elif gap <= 0:
+ errors.append(
+ _("Overlap detected between thresholds '%(current)s' (max %(max)s) and '%(next)s' (min %(min)s).")
+ % {
+ "current": threshold.name,
+ "max": threshold.max_score,
+ "next": next_threshold.name,
+ "min": next_threshold.min_score,
+ }
)
return errors
diff --git a/spp_scoring/models/scoring_result.py b/spp_scoring/models/scoring_result.py
index 78df180a..45d2bc95 100644
--- a/spp_scoring/models/scoring_result.py
+++ b/spp_scoring/models/scoring_result.py
@@ -229,7 +229,14 @@ def get_age_in_days(self):
@api.model
def get_latest_score(self, registrant, model, max_age_days=None):
- """Get the most recent score for a registrant/model combination."""
+ """Get the most recent score for a registrant/model — including
+ incomplete rows. The latest attempt reflects the registrant's
+ current scoreability: if their data became invalid after a
+ previously successful score, the most recent (incomplete) row
+ is the truth, not the stale complete one. Callers that need to
+ gate behaviour on success must check ``result.is_complete``.
+ See OP#838.
+ """
domain = [
("registrant_id", "=", registrant.id),
("model_id", "=", model.id),
diff --git a/spp_scoring/security/ir.model.access.csv b/spp_scoring/security/ir.model.access.csv
index b7ca0ebf..4b949f26 100644
--- a/spp_scoring/security/ir.model.access.csv
+++ b/spp_scoring/security/ir.model.access.csv
@@ -22,3 +22,6 @@ access_spp_batch_scoring_wizard_manager,spp.batch.scoring.wizard manager,model_s
access_spp_scoring_batch_job_viewer,spp.scoring.batch.job viewer,model_spp_scoring_batch_job,group_scoring_viewer,1,0,0,0
access_spp_scoring_batch_job_officer,spp.scoring.batch.job officer,model_spp_scoring_batch_job,group_scoring_officer,1,1,1,0
access_spp_scoring_batch_job_manager,spp.scoring.batch.job manager,model_spp_scoring_batch_job,group_scoring_manager,1,1,1,1
+access_spp_scoring_invalid_value_viewer,spp.scoring.invalid.value viewer,model_spp_scoring_invalid_value,group_scoring_viewer,1,0,0,0
+access_spp_scoring_invalid_value_manager,spp.scoring.invalid.value manager,model_spp_scoring_invalid_value,group_scoring_manager,1,1,1,1
+access_spp_scoring_invalid_value_config_admin,spp.scoring.invalid.value config admin,model_spp_scoring_invalid_value,group_scoring_config_admin,1,1,1,1
diff --git a/spp_scoring/tests/test_scoring_engine.py b/spp_scoring/tests/test_scoring_engine.py
index 94ee9e4d..7a5a31c3 100644
--- a/spp_scoring/tests/test_scoring_engine.py
+++ b/spp_scoring/tests/test_scoring_engine.py
@@ -252,8 +252,172 @@ def test_batch_scoring_with_errors(self):
strict_model,
)
- # Should have 1 result but marked as incomplete
+ # Strict mode + required indicator missing → an audit row is
+ # persisted with is_complete=False, score=0, and the failures
+ # listed in error_messages. The wizard counts it as failed.
self.assertEqual(result["summary"]["total"], 1)
+ self.assertEqual(result["summary"]["successful"], 0)
+ self.assertEqual(result["summary"]["failed"], 1)
+ audit = self.env["spp.scoring.result"].search(
+ [("registrant_id", "=", self.registrant.id), ("model_id", "=", strict_model.id)]
+ )
+ self.assertEqual(len(audit), 1)
+ self.assertFalse(audit.is_complete)
+ self.assertEqual(audit.score, 0.0)
+ self.assertTrue(audit.error_messages)
+
+ def test_strict_mode_blocks_falsy_required_value(self):
+ """Strict mode treats boolean False / empty string from a required
+ field as missing — the previous ``field_value is None`` check
+ silently passed boolean False (the ORM's empty value for unset
+ Char/Date fields), letting it bypass strict validation. See OP#838."""
+ strict_model = self.ScoringModel.create(
+ {
+ "name": "Strict Model — Falsy",
+ "code": "STRICT_FALSY",
+ "is_active": True,
+ "is_strict_mode": True,
+ }
+ )
+ # An unset Char field (here `website`) returns False from the
+ # Odoo ORM — the same shape QA hit with an unset Date.
+ self.ScoringIndicator.create(
+ {
+ "model_id": strict_model.id,
+ "name": "Empty Char Field",
+ "code": "EMPTY_CHAR",
+ "field_path": "website",
+ "calculation_type": "direct",
+ "is_required": True,
+ }
+ )
+ self.ScoringThreshold.create(
+ {
+ "model_id": strict_model.id,
+ "name": "All",
+ "min_score": 0,
+ "max_score": 1000,
+ "classification_code": "ALL",
+ "classification_label": "All",
+ }
+ )
+
+ result = self.ScoringEngine.batch_score(self.registrant, strict_model)
+
+ # Falsy value on a required indicator must be treated as missing
+ # → strict mode persists an incomplete audit row → batch counts
+ # it as a failure.
+ self.assertEqual(result["summary"]["successful"], 0)
+ self.assertEqual(result["summary"]["failed"], 1)
+ audit = self.env["spp.scoring.result"].search(
+ [("registrant_id", "=", self.registrant.id), ("model_id", "=", strict_model.id)]
+ )
+ self.assertEqual(len(audit), 1)
+ self.assertFalse(audit.is_complete)
+
+ def test_is_missing_value_logic(self):
+ """Unit tests for the missing-value classifier. Covers OP#838 cases
+ — numeric 0 stays valid even on required, but boolean False does
+ not, and configured invalid-value strings count as missing on
+ both required and non-required."""
+ is_missing = self.ScoringEngine._is_missing_value
+
+ # None is always missing
+ self.assertTrue(is_missing(None, False))
+ self.assertTrue(is_missing(None, True))
+
+ # Required + falsy → missing
+ self.assertTrue(is_missing(False, True))
+ self.assertTrue(is_missing("", True))
+ self.assertTrue(is_missing([], True))
+
+ # Required + numeric 0 → valid (real score, not missing)
+ self.assertFalse(is_missing(0, True))
+ self.assertFalse(is_missing(0.0, True))
+
+ # Non-required → only None is missing; everything else flows through
+ self.assertFalse(is_missing(False, False))
+ self.assertFalse(is_missing("", False))
+ self.assertFalse(is_missing(0, False))
+
+ # Invalid-value patterns match both required and non-required
+ patterns = {"No Birthdate!", "Unknown"}
+ self.assertTrue(is_missing("No Birthdate!", True, patterns))
+ self.assertTrue(is_missing("No Birthdate!", False, patterns))
+ self.assertTrue(is_missing(" Unknown ", True, patterns))
+ # Non-matching string still passes when not required
+ self.assertFalse(is_missing("present", False, patterns))
+
+ def test_strict_mode_blocks_curated_invalid_value(self):
+ """A required indicator linked to one or more rows in the
+ ``spp.scoring.invalid.value`` library treats matching field
+ reads as missing (e.g. `age` returning 'No Birthdate!' when
+ `birthdate` is unset). Archiving the linked row turns the
+ check off without losing the entry. See OP#838."""
+ sentinel_model = self.ScoringModel.create(
+ {
+ "name": "Strict Model — Invalid Values",
+ "code": "STRICT_INVALID",
+ "is_active": True,
+ "is_strict_mode": True,
+ }
+ )
+ # Configure the registrant's display_name as a curated invalid
+ # value and link it to the indicator — the engine should treat
+ # the live read as missing.
+ invalid = self.env["spp.scoring.invalid.value"].create(
+ {
+ "name": self.registrant.name,
+ "description": "Test sentinel",
+ }
+ )
+ self.ScoringIndicator.create(
+ {
+ "model_id": sentinel_model.id,
+ "name": "Display Name",
+ "code": "INVALID_PATTERN",
+ "field_path": "display_name",
+ "calculation_type": "direct",
+ "is_required": True,
+ "invalid_value_ids": [(6, 0, [invalid.id])],
+ }
+ )
+ self.ScoringThreshold.create(
+ {
+ "model_id": sentinel_model.id,
+ "name": "All",
+ "min_score": 0,
+ "max_score": 1000,
+ "classification_code": "ALL",
+ "classification_label": "All",
+ }
+ )
+
+ result = self.ScoringEngine.batch_score(self.registrant, sentinel_model)
+ self.assertEqual(result["summary"]["successful"], 0)
+ self.assertEqual(result["summary"]["failed"], 1)
+ audit = self.env["spp.scoring.result"].search(
+ [("registrant_id", "=", self.registrant.id), ("model_id", "=", sentinel_model.id)]
+ )
+ self.assertEqual(len(audit), 1)
+ self.assertFalse(audit.is_complete)
+ self.assertEqual(audit.score, 0.0)
+
+ # `get_latest_score` returns the latest *attempt* — the
+ # incomplete row is the registrant's current state, and
+ # callers (eligibility, membership compute) gate on
+ # ``is_complete`` themselves rather than silently falling back
+ # to a stale complete row.
+ latest = self.env["spp.scoring.result"].get_latest_score(self.registrant, sentinel_model)
+ self.assertEqual(latest, audit)
+ self.assertFalse(latest.is_complete)
+
+ # Archive the invalid-value row → the same field value should
+ # now flow through and produce a real complete score.
+ invalid.active = False
+ result_after = self.ScoringEngine.batch_score(self.registrant, sentinel_model)
+ self.assertEqual(result_after["summary"]["successful"], 1)
+ self.assertEqual(result_after["summary"]["failed"], 0)
def test_get_or_calculate_score_caches(self):
"""Test get_or_calculate_score returns cached score."""
diff --git a/spp_scoring/tests/test_scoring_indicator_provider.py b/spp_scoring/tests/test_scoring_indicator_provider.py
index 561766f2..fbbb9349 100644
--- a/spp_scoring/tests/test_scoring_indicator_provider.py
+++ b/spp_scoring/tests/test_scoring_indicator_provider.py
@@ -54,7 +54,7 @@ def setUpClass(cls):
"model_id": cls.scoring_model.id,
"name": "Low",
"min_score": 0,
- "max_score": 50,
+ "max_score": 49.99,
"classification_code": "LOW",
"classification_label": "Low Score",
}
diff --git a/spp_scoring/views/menus.xml b/spp_scoring/views/menus.xml
index 48fe5a02..7626b51b 100644
--- a/spp_scoring/views/menus.xml
+++ b/spp_scoring/views/menus.xml
@@ -67,4 +67,14 @@
sequence="20"
groups="group_scoring_manager"
/>
+
+
+
diff --git a/spp_scoring/views/res_partner_views.xml b/spp_scoring/views/res_partner_views.xml
new file mode 100644
index 00000000..1373fe82
--- /dev/null
+++ b/spp_scoring/views/res_partner_views.xml
@@ -0,0 +1,55 @@
+
+
+
+
+ spp_registry.view_registrant_form.scoring
+ res.partner
+
+
+
+
+
+
+
+
+
+
+ Score Registrant
+
+
+ form,list
+
+ code
+ action = records.action_score_registrant() if records else None
+
+
diff --git a/spp_scoring/views/scoring_indicator_views.xml b/spp_scoring/views/scoring_indicator_views.xml
index 82b74544..2ab2ca07 100644
--- a/spp_scoring/views/scoring_indicator_views.xml
+++ b/spp_scoring/views/scoring_indicator_views.xml
@@ -41,6 +41,11 @@
+
diff --git a/spp_scoring/views/scoring_invalid_value_views.xml b/spp_scoring/views/scoring_invalid_value_views.xml
new file mode 100644
index 00000000..570a9634
--- /dev/null
+++ b/spp_scoring/views/scoring_invalid_value_views.xml
@@ -0,0 +1,73 @@
+
+
+
+
+ spp.scoring.invalid.value.list
+ spp.scoring.invalid.value
+
+
+
+
+
+
+
+
+
+
+
+ spp.scoring.invalid.value.form
+ spp.scoring.invalid.value
+
+
+
+
+
+
+
+ spp.scoring.invalid.value.search
+ spp.scoring.invalid.value
+
+
+
+
+
+
+
+
+
+
+
+ Invalid Values
+ spp.scoring.invalid.value
+ list,form
+
+
+ Add a curated invalid value
+
+
+ These string values (e.g. No Birthdate!,
+ Unknown, N/A) are treated as
+ missing by every scoring indicator.
+ Untick Active to retire an entry without losing it.
+