From befcf74909d22018aa5f8acd9daae825db6dcd38 Mon Sep 17 00:00:00 2001
From: Mika
Date: Thu, 26 Feb 2026 12:52:57 +0000
Subject: [PATCH] Add exit_metrics_logging/tests/test_core.py

---
 exit_metrics_logging/tests/test_core.py | 72 ++++++++++++++++++++++++
 1 file changed, 72 insertions(+)
 create mode 100644 exit_metrics_logging/tests/test_core.py

diff --git a/exit_metrics_logging/tests/test_core.py b/exit_metrics_logging/tests/test_core.py
new file mode 100644
index 0000000..007ba69
--- /dev/null
+++ b/exit_metrics_logging/tests/test_core.py
@@ -0,0 +1,72 @@
+import json
+
+import pytest
+
+import src.exit_metrics_logging.core as core
+
+
+@pytest.fixture
+def sample_metrics():
+    return {
+        "run_id": "run42",
+        "warn_rate": 0.01,
+        "unknown_rate": 0.02,
+        "delta_t": 1.23,
+    }
+
+
+def test_runmetrics_to_dict(sample_metrics):
+    rm = core.RunMetrics(**sample_metrics)
+    result = rm.to_dict()
+    assert isinstance(result, dict)
+    assert result["run_id"] == sample_metrics["run_id"]
+    assert result["warn_rate"] == pytest.approx(sample_metrics["warn_rate"])
+    assert set(result.keys()) == {"run_id", "warn_rate", "unknown_rate", "delta_t"}
+
+
+def test_runmetrics_initialization_validation():
+    # A non-numeric warn_rate (and a non-string run_id) should be rejected
+    # at construction time.
+    with pytest.raises((TypeError, ValueError)):
+        core.RunMetrics(run_id=123, warn_rate="bad", unknown_rate=0.1, delta_t=1.0)
+
+
+@pytest.mark.parametrize(
+    "warn_rate,unknown_rate,delta_t",
+    [
+        (0.0, 0.0, 0.0),
+        (0.999, 0.0001, 10.2),
+        (1e-6, 1e-6, 999.999),
+    ],
+)
+def test_log_metrics_json_output(tmp_path, monkeypatch, warn_rate, unknown_rate, delta_t):
+    output_file = tmp_path / "exit_metrics.json"
+    monkeypatch.setattr(core, "METRICS_FILE", output_file)
+
+    core.log_metrics("r123", warn_rate, unknown_rate, delta_t)
+
+    # The logged values should match a RunMetrics round-trip and stay in [0, 1].
+    rm = core.RunMetrics("r123", warn_rate, unknown_rate, delta_t)
+    d = rm.to_dict()
+    assert isinstance(d["warn_rate"], float)
+    assert 0.0 <= d["warn_rate"] <= 1.0
+
+
+def test_log_metrics_file_write(tmp_path, sample_metrics, monkeypatch):
+    output_file = tmp_path / "exit_metrics.json"
+    monkeypatch.setattr(core, "METRICS_FILE", output_file)
+
+    # Execute and verify that the written file round-trips through json.load.
+    core.log_metrics(**sample_metrics)
+    assert output_file.exists(), "Metrics JSON file should be created."
+    with output_file.open() as f:
+        data = json.load(f)
+
+    assert data["run_id"] == sample_metrics["run_id"]
+    assert isinstance(data["warn_rate"], float)
+    assert data["unknown_rate"] == pytest.approx(sample_metrics["unknown_rate"])
+
+
+def test_log_metrics_invalid_input_type():
+    with pytest.raises((TypeError, ValueError)):
+        core.log_metrics(run_id=5, warn_rate="invalid", unknown_rate=0.1, delta_t=0.2)
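
Reviewer note: for context, below is a minimal, hypothetical sketch of the
src/exit_metrics_logging/core.py interface these tests assume. RunMetrics,
to_dict(), METRICS_FILE, and log_metrics() are the names exercised by the
tests; every validation and serialization detail here is an assumption, not
the actual implementation.

# Hypothetical sketch only -- not the real module. Assumes a dataclass with
# basic type validation and a single JSON file write per run.
import json
from dataclasses import asdict, dataclass
from pathlib import Path

METRICS_FILE = Path("exit_metrics.json")  # the tests monkeypatch this to tmp_path


@dataclass
class RunMetrics:
    run_id: str
    warn_rate: float
    unknown_rate: float
    delta_t: float

    def __post_init__(self):
        # Reject non-numeric rates/durations so the TypeError/ValueError
        # expectations in the tests hold.
        for name in ("warn_rate", "unknown_rate", "delta_t"):
            value = getattr(self, name)
            if isinstance(value, bool) or not isinstance(value, (int, float)):
                raise TypeError(f"{name} must be a number, got {type(value).__name__}")

    def to_dict(self) -> dict:
        return asdict(self)


def log_metrics(run_id, warn_rate, unknown_rate, delta_t):
    # Validate via RunMetrics, then serialize one run's metrics as JSON.
    metrics = RunMetrics(run_id, warn_rate, unknown_rate, delta_t)
    with open(METRICS_FILE, "w") as f:
        json.dump(metrics.to_dict(), f)

Under a sketch like this, monkeypatching core.METRICS_FILE to a tmp_path file,
as the tests do, redirects the write without touching the real working
directory, which is why the earlier Path/open mocking could be dropped.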