Add metrics_reporting/tests/test_core.py
This commit is contained in:
parent
a199d1ab38
commit
ebfef34771
1 changed files with 61 additions and 0 deletions
61
metrics_reporting/tests/test_core.py
Normal file
61
metrics_reporting/tests/test_core.py
Normal file
|
|
@ -0,0 +1,61 @@
|
|||
import pytest
|
||||
from metrics_reporting import core
|
||||
|
||||
@pytest.fixture
def sample_logs_no_drift():
    """Four log records with stable ~1.2s durations and a single WARN entry."""
    timestamps = [
        "2024-03-01T10:00:00",
        "2024-03-01T10:01:00",
        "2024-03-01T10:02:00",
        "2024-03-01T10:03:00",
    ]
    levels = ["INFO", "WARN", "INFO", "INFO"]
    durations = [1.2, 1.3, 1.1, 1.4]
    return [
        {"level": level, "time": stamp, "duration": duration}
        for level, stamp, duration in zip(levels, timestamps, durations)
    ]
|
||||
|
||||
@pytest.fixture
def sample_logs_with_drift():
    """Four log records with elevated ~2.5s durations and mostly WARN levels."""
    timestamps = [
        "2024-03-01T10:00:00",
        "2024-03-01T10:01:00",
        "2024-03-01T10:02:00",
        "2024-03-01T10:03:00",
    ]
    levels = ["WARN", "WARN", "INFO", "WARN"]
    durations = [2.5, 2.3, 2.7, 2.8]
    return [
        {"level": level, "time": stamp, "duration": duration}
        for level, stamp, duration in zip(levels, timestamps, durations)
    ]
|
||||
|
||||
def test_report_metrics_basic_structure(sample_logs_no_drift):
    """report_metrics returns a dict exposing all three expected metric keys."""
    result = core.report_metrics(sample_logs_no_drift, 0.3)
    assert isinstance(result, dict)
    # Subset check on the keys view is equivalent to asserting each key in turn.
    required_keys = {"warn_rate", "drift_detected", "performance"}
    assert required_keys <= result.keys()
|
||||
|
||||
|
||||
def test_report_metrics_values_no_drift(sample_logs_no_drift):
    """With a generous threshold, the warn rate matches the WARN fraction,
    drift is not flagged, and performance is a positive number."""
    threshold = 0.5
    result = core.report_metrics(sample_logs_no_drift, threshold)
    warn_count = sum(1 for entry in sample_logs_no_drift if entry["level"] == "WARN")
    expected_warn_rate = warn_count / len(sample_logs_no_drift)
    # Use the conventional `actual == pytest.approx(expected)` order, matching
    # the with-drift test in this module; failure output then reads naturally.
    assert result["warn_rate"] == pytest.approx(expected_warn_rate, rel=1e-3)
    assert result["drift_detected"] is False
    assert result["performance"] > 0
|
||||
|
||||
|
||||
def test_report_metrics_values_with_drift(sample_logs_with_drift):
    """A tight threshold over drifting logs must trip the drift flag."""
    logs = sample_logs_with_drift
    result = core.report_metrics(logs, 0.2)
    warn_entries = [entry for entry in logs if entry["level"] == "WARN"]
    expected_warn_rate = len(warn_entries) / len(logs)
    assert result["warn_rate"] == pytest.approx(expected_warn_rate, rel=1e-3)
    assert isinstance(result["drift_detected"], bool)
    assert result["drift_detected"] is True
|
||||
|
||||
|
||||
def test_report_metrics_invalid_input_type():
    """Passing a plain string instead of a log list raises TypeError/ValueError."""
    bad_input = "invalid_input"
    with pytest.raises((TypeError, ValueError)):
        core.report_metrics(bad_input, 0.1)
|
||||
|
||||
|
||||
def test_report_metrics_empty_logs():
    """An empty log list yields zeroed metrics and no drift flag."""
    result = core.report_metrics([], 0.1)
    assert isinstance(result, dict)
    assert result["warn_rate"] == 0.0
    assert result["performance"] == 0.0
    assert result["drift_detected"] is False
|
||||
Loading…
Reference in a new issue