Add rerun_evaluator/tests/test_core.py
parent 5cfabafcdd
commit e56234cbcd
1 changed file with 55 additions and 0 deletions
55
rerun_evaluator/tests/test_core.py
Normal file
@@ -0,0 +1,55 @@
import pytest

from rerun_evaluator import core


@pytest.fixture
def sample_runs():
    return [
        core.RunData(run_id="1", label_triggered=True, flappy=False, pinned=False, unknown_rate=0.1),
        core.RunData(run_id="2", label_triggered=False, flappy=False, pinned=True, unknown_rate=0.0),
        core.RunData(run_id="3", label_triggered=True, flappy=True, pinned=False, unknown_rate=0.3),
        core.RunData(run_id="4", label_triggered=False, flappy=False, pinned=False, unknown_rate=0.0),
    ]


@pytest.fixture
def empty_runs():
    return []


def test_evaluate_rerun_needs_basic(sample_runs):
    result = core.evaluate_rerun_needs(sample_runs)
    assert isinstance(result, dict)
    # Expected keys
    for key in ("total_runs", "label_trigger_rate", "flappy_rate", "avg_unknown_rate", "rerun_recommended"):
        assert key in result
    assert result["total_runs"] == len(sample_runs)
    assert 0 <= result["label_trigger_rate"] <= 1
    assert 0 <= result["flappy_rate"] <= 1
    assert 0 <= result["avg_unknown_rate"] <= 1
    assert isinstance(result["rerun_recommended"], bool)


def test_evaluate_rerun_needs_empty(empty_runs):
    result = core.evaluate_rerun_needs(empty_runs)
    assert isinstance(result, dict)
    assert result["total_runs"] == 0
    assert result["label_trigger_rate"] == 0
    assert result["flappy_rate"] == 0
    assert result["avg_unknown_rate"] == 0
    assert result["rerun_recommended"] is False


def test_rundata_initialization():
    rd = core.RunData(run_id="X99", label_triggered=True, flappy=False, pinned=True, unknown_rate=0.15)
    assert rd.run_id == "X99"
    assert isinstance(rd.label_triggered, bool)
    assert isinstance(rd.flappy, bool)
    assert isinstance(rd.pinned, bool)
    assert isinstance(rd.unknown_rate, float)


@pytest.mark.parametrize("unknown_rate,expected_recommendation", [
    (0.0, False),
    (0.5, True),
    (1.0, True),
])
def test_evaluate_rerun_needs_unknown_rate_threshold(unknown_rate, expected_recommendation):
    runs = [
        core.RunData(run_id=str(i), label_triggered=False, flappy=False, pinned=False, unknown_rate=unknown_rate)
        for i in range(5)
    ]
    result = core.evaluate_rerun_needs(runs)
    assert isinstance(result, dict)
    assert result["rerun_recommended"] == expected_recommendation
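The core module exercised here is not part of this commit. For reference, below is a minimal sketch of a rerun_evaluator/core.py that would satisfy these assertions; the dataclass layout, the rate formulas, and the 0.2 recommendation cutoff are assumptions inferred from the tests, not the project's actual implementation.

from dataclasses import dataclass
from typing import Dict, List, Union


@dataclass
class RunData:
    run_id: str
    label_triggered: bool
    flappy: bool
    pinned: bool
    unknown_rate: float


def evaluate_rerun_needs(runs: List[RunData]) -> Dict[str, Union[int, float, bool]]:
    # With no runs, every rate is 0 and no rerun is recommended
    # (matches test_evaluate_rerun_needs_empty).
    if not runs:
        return {
            "total_runs": 0,
            "label_trigger_rate": 0,
            "flappy_rate": 0,
            "avg_unknown_rate": 0,
            "rerun_recommended": False,
        }
    total = len(runs)
    avg_unknown = sum(r.unknown_rate for r in runs) / total
    return {
        "total_runs": total,
        # bools sum as 0/1, so these are fractions in [0, 1]
        "label_trigger_rate": sum(r.label_triggered for r in runs) / total,
        "flappy_rate": sum(r.flappy for r in runs) / total,
        "avg_unknown_rate": avg_unknown,
        # Assumed cutoff: recommend a rerun when the average unknown rate
        # exceeds 0.2 (0.0 -> False, 0.5 and 1.0 -> True in the parametrized test).
        "rerun_recommended": avg_unknown > 0.2,
    }

Note that any strict-greater-than cutoff in [0, 0.5) would satisfy the parametrized threshold test; 0.2 above is an arbitrary illustrative choice.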
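Assuming the package is importable from the repository root (for example after pip install -e .), the new tests can be run on their own with:

python -m pytest rerun_evaluator/tests/test_core.py -q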