Add decision_engine/tests/test_core.py
parent 08b7308e1f
commit f044f465b7
1 changed file with 82 additions and 0 deletions
decision_engine/tests/test_core.py (new file, +82)
@@ -0,0 +1,82 @@
import pytest
import types

import decision_engine.core as core

def make_run(**overrides):
    base = {
        'run_id': 'r1',
        'pinned': False,
        'warn_rate': 0.02,
        'unknown_rate': 0.01,
        'unknown_class': 'none',
        'prev_label': 'PASS'
    }
    base.update(overrides)
    return base


def validate_result(result):
    assert isinstance(result, dict)
    assert 'final_decision' in result and 'reason' in result
    assert result['final_decision'] in {'PASS', 'WARN', 'FAIL'}
    assert isinstance(result['reason'], str)


def test_pass_case():
    run = make_run(warn_rate=0.01, unknown_rate=0.0)
    result = core.evaluate_run(run)
    validate_result(result)
    assert result['final_decision'] == 'PASS'


def test_warn_case_high_warn_rate():
    run = make_run(warn_rate=0.12, unknown_rate=0.02)
    result = core.evaluate_run(run)
    validate_result(result)
    assert result['final_decision'] in {'WARN', 'FAIL'}


def test_fail_case_unknown_rate():
    run = make_run(warn_rate=0.01, unknown_rate=0.5)
    result = core.evaluate_run(run)
    validate_result(result)
    assert result['final_decision'] == 'FAIL'


def test_unknown_class_affects_decision():
    run = make_run(unknown_class='schema_violation', unknown_rate=0.05)
    result = core.evaluate_run(run)
    validate_result(result)
    assert 'schema_violation' in result['reason'].lower()


def test_pinned_runs_handling():
    run = make_run(pinned=True, warn_rate=0.2)
    result = core.evaluate_run(run)
    validate_result(result)
    # pinned might downgrade fail to warn or pass depending on policy
    assert result['final_decision'] in {'PASS', 'WARN', 'FAIL'}


def test_missing_field_type_validation():
    # Invalid type or missing field should raise TypeError or ValueError
    with pytest.raises((TypeError, ValueError, KeyError)):
        bad_run = {'run_id': 'x', 'warn_rate': 'not_num'}
        core.evaluate_run(bad_run)


def test_prev_label_consistency(monkeypatch):
    run = make_run(prev_label='FAIL', warn_rate=0.02)
    result = core.evaluate_run(run)
    validate_result(result)
    assert isinstance(result['reason'], str)


def test_reason_field_contains_rationale():
    run = make_run(warn_rate=0.15, unknown_rate=0.1)
    result = core.evaluate_run(run)
    validate_result(result)
    # Should give human-readable rationale
    msg = result['reason'].lower()
    assert any(keyword in msg for keyword in ('warn', 'fail', 'unknown', 'threshold'))
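
These tests exercise decision_engine.core.evaluate_run, which is not part of this commit. For context, the sketch below shows one possible shape of evaluate_run that would satisfy the assertions above. It is a minimal illustrative sketch only: the threshold values (0.10 for warn_rate, 0.25 for unknown_rate), the pinned-downgrade rule, and the reason-string wording are assumptions, not values taken from decision_engine.core.

    # Illustrative sketch only -- thresholds and policy details are assumed,
    # not taken from decision_engine.core.
    WARN_THRESHOLD = 0.10          # assumed warn_rate cutoff
    UNKNOWN_FAIL_THRESHOLD = 0.25  # assumed unknown_rate cutoff

    def evaluate_run(run):
        # Missing keys raise KeyError; non-numeric rates raise TypeError,
        # which matches test_missing_field_type_validation.
        warn_rate = run['warn_rate']
        unknown_rate = run['unknown_rate']
        if not isinstance(warn_rate, (int, float)) or not isinstance(unknown_rate, (int, float)):
            raise TypeError('warn_rate and unknown_rate must be numeric')

        reasons = []
        decision = 'PASS'

        if unknown_rate >= UNKNOWN_FAIL_THRESHOLD:
            decision = 'FAIL'
            reasons.append(f'unknown_rate {unknown_rate} above threshold {UNKNOWN_FAIL_THRESHOLD}')
        elif warn_rate >= WARN_THRESHOLD:
            decision = 'WARN'
            reasons.append(f'warn_rate {warn_rate} above threshold {WARN_THRESHOLD}')

        # Surface non-trivial unknown classes in the rationale
        # (test_unknown_class_affects_decision checks for this).
        if run.get('unknown_class', 'none') != 'none':
            reasons.append(f"unknown_class={run['unknown_class']}")

        # Assumed pinned policy: downgrade FAIL to WARN for pinned runs.
        if run.get('pinned') and decision == 'FAIL':
            decision = 'WARN'
            reasons.append('downgraded because run is pinned')

        if not reasons:
            reasons.append('all rates below warn/fail thresholds')

        return {'final_decision': decision, 'reason': '; '.join(reasons)}

To run only this file: pytest decision_engine/tests/test_core.py -q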