Add drift_analysis/tests/test_core.py
This commit is contained in:
parent
7854afa614
commit
109d897328
1 changed file with 64 additions and 0 deletions
64
drift_analysis/tests/test_core.py
Normal file
64
drift_analysis/tests/test_core.py
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
import json
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
|
||||
import drift_analysis.core as core
|
||||
|
||||
@pytest.fixture
def sample_frozen_runs(tmp_path: Path):
    """Provide a small frozen-run dataset on disk and in memory.

    Writes four run records to a JSON file under ``tmp_path`` and returns a
    ``(file_path, expected_runs)`` pair, where ``expected_runs`` are the
    ``core.FrozenRun`` objects built from the same records.
    """
    records = [
        {"run_id": "r1", "status": "PASS", "is_pinned": True},
        {"run_id": "r2", "status": "WARN", "is_pinned": True},
        {"run_id": "r3", "status": "WARN", "is_pinned": False},
        {"run_id": "r4", "status": "FAIL", "is_pinned": True},
    ]
    sample_file = tmp_path / "frozen_runs_sample.json"
    sample_file.write_text(json.dumps(records), encoding="utf-8")
    return sample_file, [core.FrozenRun(**record) for record in records]
|
||||
|
||||
|
||||
def test_load_frozen_runs_valid(sample_frozen_runs):
    """load_frozen_runs parses the sample file into a list of FrozenRun objects."""
    file_path, expected = sample_frozen_runs
    loaded = core.load_frozen_runs(str(file_path))
    assert isinstance(loaded, list)
    # Every element must be a FrozenRun, and the count must match the input.
    for run in loaded:
        assert isinstance(run, core.FrozenRun)
    assert len(loaded) == len(expected)
    # Spot-check that per-record fields survive the round trip.
    assert loaded[1].status == "WARN"
|
||||
|
||||
|
||||
def test_calculate_warn_rate(sample_frozen_runs):
    """calculate_warn_rate returns (warn_count, total) over all sample runs.

    The fixture contains 2 WARN runs out of 4, so the rate is 0.5.
    """
    _, runs = sample_frozen_runs
    warn_count, total = core.calculate_warn_rate(runs, threshold=0.5)
    assert warn_count == 2
    assert total == 4
    # Fixed: use the conventional `actual == pytest.approx(expected)` form
    # (the original wrapped the actual value), with an explicit rel= keyword.
    assert warn_count / total == pytest.approx(0.5, rel=0.01)
|
||||
|
||||
|
||||
def test_generate_report_ok():
    """A warn rate below the threshold produces an OK report."""
    report = core.generate_report(warn_count=1, total_runs=10, threshold=0.3)
    assert isinstance(report, dict)
    # The report exposes exactly these three keys.
    assert sorted(report) == sorted(["warn_rate", "threshold", "status"])
    assert report["warn_rate"] == pytest.approx(0.1, 0.001)
    assert report["status"] == "OK"
|
||||
|
||||
|
||||
def test_generate_report_warn():
    """A warn rate above the threshold produces a WARN report."""
    report = core.generate_report(warn_count=4, total_runs=10, threshold=0.3)
    assert report["warn_rate"] == pytest.approx(0.4, 0.001)
    assert report["status"] == "WARN"
|
||||
|
||||
|
||||
def test_input_file_validation(tmp_path):
    """Loading a path that does not exist raises FileNotFoundError."""
    missing = tmp_path / "missing.json"
    with pytest.raises(FileNotFoundError):
        core.load_frozen_runs(str(missing))
|
||||
|
||||
|
||||
def test_invalid_data_structure(tmp_path):
    """A top-level JSON object (instead of a list of records) raises ValueError."""
    malformed_path = tmp_path / "bad.json"
    # A single dict, not a list — structurally wrong for load_frozen_runs.
    malformed_path.write_text(
        json.dumps({"run_id": "r1", "status": "WARN"}), encoding="utf-8"
    )
    with pytest.raises(ValueError):
        core.load_frozen_runs(str(malformed_path))
|
||||
Loading…
Reference in a new issue