Add exit_metrics_logging/tests/test_core.py
This commit is contained in:
parent
3e8795ebe7
commit
befcf74909
1 changed files with 82 additions and 0 deletions
82
exit_metrics_logging/tests/test_core.py
Normal file
82
exit_metrics_logging/tests/test_core.py
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
import json
|
||||
import builtins
|
||||
from pathlib import Path
|
||||
from unittest import mock
|
||||
import pytest
|
||||
|
||||
import src.exit_metrics_logging.core as core
|
||||
|
||||
|
||||
@pytest.fixture
def sample_metrics():
    """Canonical metrics payload shared by the tests in this module."""
    metrics = dict(
        run_id="run42",
        warn_rate=0.01,
        unknown_rate=0.02,
        delta_t=1.23,
    )
    return metrics
|
||||
|
||||
|
||||
def test_runmetrics_to_dict(sample_metrics):
    """RunMetrics.to_dict returns a plain dict mirroring every field."""
    metrics = core.RunMetrics(**sample_metrics)
    as_dict = metrics.to_dict()

    assert isinstance(as_dict, dict)
    assert as_dict["run_id"] == sample_metrics["run_id"]
    # approx: warn_rate is a float and may round-trip through serialization
    assert pytest.approx(as_dict["warn_rate"]) == sample_metrics["warn_rate"]
    expected_keys = {"run_id", "warn_rate", "unknown_rate", "delta_t"}
    assert set(as_dict.keys()) == expected_keys
|
||||
|
||||
|
||||
def test_runmetrics_initialization_validation():
    """A non-numeric warn_rate must be rejected at construction time."""
    bad_kwargs = dict(run_id=123, warn_rate="bad", unknown_rate=0.1, delta_t=1.0)
    with pytest.raises((TypeError, ValueError)):
        core.RunMetrics(**bad_kwargs)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "warn_rate,unknown_rate,delta_t",
    [
        (0.0, 0.0, 0.0),
        (0.999, 0.0001, 10.2),
        (1e-6, 1e-6, 999.999),
    ],
)
def test_log_metrics_json_output(tmp_path, warn_rate, unknown_rate, delta_t, monkeypatch):
    """log_metrics should persist a JSON record of the run metrics.

    The previous version patched ``Path``/``open`` but never asserted that
    ``log_metrics`` wrote anything; its trailing assertions only re-tested
    ``RunMetrics.to_dict`` (already covered by test_runmetrics_to_dict).
    Instead, redirect the metrics file into ``tmp_path`` and verify the
    JSON payload that ``log_metrics`` produces.
    """
    output_file = tmp_path / "exit_metrics.json"
    # NOTE(review): assumes core writes to the module-level METRICS_FILE
    # constant — the same redirection test_log_metrics_file_write relies on.
    monkeypatch.setattr(core, "METRICS_FILE", output_file)

    core.log_metrics("r123", warn_rate, unknown_rate, delta_t)

    assert output_file.exists(), "log_metrics should create the JSON file"
    with output_file.open() as f:
        data = json.load(f)

    assert data["run_id"] == "r123"
    assert data["warn_rate"] == pytest.approx(warn_rate)
    assert data["unknown_rate"] == pytest.approx(unknown_rate)
    assert data["delta_t"] == pytest.approx(delta_t)
    assert 0.0 <= data["warn_rate"] <= 1.0
|
||||
|
||||
|
||||
def test_log_metrics_file_write(tmp_path, sample_metrics, monkeypatch):
    """log_metrics writes a readable JSON file containing the given metrics.

    Bug fix: the original installed a zero-argument ``fake_exists`` on the
    ``Path`` *class*, so every bound ``p.exists()`` call raised TypeError
    ("takes 0 positional arguments but 1 was given"); even a correctly
    bound fake would have forced this test's own ``output_file.exists()``
    assertion to return False. The patch is unnecessary — the file in
    ``tmp_path`` genuinely does not exist yet — so it is removed.
    """
    output_file = tmp_path / "exit_metrics.json"
    monkeypatch.setattr(core, "METRICS_FILE", output_file)

    # execute and verify file content
    core.log_metrics(**sample_metrics)
    assert output_file.exists(), "Metrics JSON file should be created."
    with output_file.open() as f:
        data = json.load(f)

    assert data["run_id"] == sample_metrics["run_id"]
    assert isinstance(data["warn_rate"], float)
    assert data["unknown_rate"] == pytest.approx(sample_metrics["unknown_rate"])
|
||||
|
||||
|
||||
def test_log_metrics_invalid_input_type():
    """log_metrics must reject a non-numeric warn_rate."""
    with pytest.raises((TypeError, ValueError)):
        core.log_metrics(
            run_id=5,
            warn_rate="invalid",
            unknown_rate=0.1,
            delta_t=0.2,
        )
|
||||
Loading…
Reference in a new issue