From ebfef34771b3aa2560b4807b8cd5db5831d3476a Mon Sep 17 00:00:00 2001
From: Mika
Date: Mon, 23 Feb 2026 14:48:39 +0000
Subject: [PATCH] Add metrics_reporting/tests/test_core.py

---
 metrics_reporting/tests/test_core.py | 61 ++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)
 create mode 100644 metrics_reporting/tests/test_core.py

diff --git a/metrics_reporting/tests/test_core.py b/metrics_reporting/tests/test_core.py
new file mode 100644
index 0000000..b5b1f97
--- /dev/null
+++ b/metrics_reporting/tests/test_core.py
@@ -0,0 +1,61 @@
+import pytest
+from metrics_reporting import core
+
+@pytest.fixture
+def sample_logs_no_drift():
+    return [
+        {"level": "INFO", "time": "2024-03-01T10:00:00", "duration": 1.2},
+        {"level": "WARN", "time": "2024-03-01T10:01:00", "duration": 1.3},
+        {"level": "INFO", "time": "2024-03-01T10:02:00", "duration": 1.1},
+        {"level": "INFO", "time": "2024-03-01T10:03:00", "duration": 1.4}
+    ]
+
+@pytest.fixture
+def sample_logs_with_drift():
+    return [
+        {"level": "WARN", "time": "2024-03-01T10:00:00", "duration": 2.5},
+        {"level": "WARN", "time": "2024-03-01T10:01:00", "duration": 2.3},
+        {"level": "INFO", "time": "2024-03-01T10:02:00", "duration": 2.7},
+        {"level": "WARN", "time": "2024-03-01T10:03:00", "duration": 2.8}
+    ]
+
+def test_report_metrics_basic_structure(sample_logs_no_drift):
+    threshold = 0.3
+    result = core.report_metrics(sample_logs_no_drift, threshold)
+    assert isinstance(result, dict)
+    for key in ("warn_rate", "drift_detected", "performance"):
+        assert key in result
+
+
+def test_report_metrics_values_no_drift(sample_logs_no_drift):
+    threshold = 0.5
+    result = core.report_metrics(sample_logs_no_drift, threshold)
+    warn_count = sum(1 for x in sample_logs_no_drift if x["level"] == "WARN")
+    expected_warn_rate = warn_count / len(sample_logs_no_drift)
+    assert result["warn_rate"] == pytest.approx(expected_warn_rate, rel=1e-3)
+    assert result["drift_detected"] is False
+    assert result["performance"] > 0
+
+
+def test_report_metrics_values_with_drift(sample_logs_with_drift):
+    threshold = 0.2
+    result = core.report_metrics(sample_logs_with_drift, threshold)
+    warn_count = sum(1 for x in sample_logs_with_drift if x["level"] == "WARN")
+    expected_warn_rate = warn_count / len(sample_logs_with_drift)
+    assert result["warn_rate"] == pytest.approx(expected_warn_rate, rel=1e-3)
+    assert isinstance(result["drift_detected"], bool)
+    assert result["drift_detected"] is True
+
+
+def test_report_metrics_invalid_input_type():
+    with pytest.raises((TypeError, ValueError)):
+        core.report_metrics("invalid_input", 0.1)
+
+
+def test_report_metrics_empty_logs():
+    logs = []
+    res = core.report_metrics(logs, 0.1)
+    assert isinstance(res, dict)
+    assert res["warn_rate"] == 0.0
+    assert res["performance"] == 0.0
+    assert res["drift_detected"] is False
\ No newline at end of file
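
The tests above pin down the contract of core.report_metrics(logs, threshold): it returns a dict with warn_rate, drift_detected, and performance keys, rejects non-list input, and yields zeroed metrics for an empty log list. For orientation only, a minimal sketch of a core module that would satisfy them, assuming performance is the mean of the per-entry duration values and that drift is flagged when the WARN rate exceeds the threshold; neither assumption is confirmed by this patch.

# metrics_reporting/core.py -- illustrative sketch only, not the implementation under review.
def report_metrics(logs, threshold):
    # Reject anything that is not a list of log entries (the test accepts TypeError or ValueError).
    if not isinstance(logs, list):
        raise TypeError("logs must be a list of log-entry dicts")
    # Empty input: all metrics default to zero and no drift is reported.
    if not logs:
        return {"warn_rate": 0.0, "drift_detected": False, "performance": 0.0}
    warn_rate = sum(1 for entry in logs if entry["level"] == "WARN") / len(logs)
    # Assumption: "performance" is the mean duration across entries.
    performance = sum(entry["duration"] for entry in logs) / len(logs)
    return {
        "warn_rate": warn_rate,
        # Assumption: drift means the WARN rate exceeds the caller-supplied threshold.
        "drift_detected": warn_rate > threshold,
        "performance": performance,
    }

Under those assumptions the fixtures evaluate to warn_rate 0.25 (no drift at thresholds 0.3 and 0.5) and 0.75 (drift at threshold 0.2), which is what the value tests assert.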