Add artifact.data_logger/tests/test_core.py
This commit is contained in:
parent
63979723fe
commit
84d31f54a3
1 changed files with 66 additions and 0 deletions
66
artifact.data_logger/tests/test_core.py
Normal file
66
artifact.data_logger/tests/test_core.py
Normal file
|
|
@ -0,0 +1,66 @@
|
||||||
|
import json
|
||||||
|
import io
|
||||||
|
import sys
|
||||||
|
import pytest
|
||||||
|
from rich.console import Console
|
||||||
|
|
||||||
|
import artifact_data_logger.core as core
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def valid_metrics():
    """Return a fully-populated metrics dict that core.log_metrics accepts.

    Covers every required field with a representative, well-typed value;
    individual tests mutate copies of this baseline to probe failure modes.
    """
    metrics = {}
    metrics["setup_fingerprint"] = "fp_12345"
    metrics["policy_hash"] = "ph_abcdef"
    metrics["epoch_ms"] = 1710000000000
    metrics["near_expiry_unpinned"] = 0.05
    return metrics
|
||||||
|
|
||||||
|
|
||||||
|
def test_log_metrics_stdout_output(valid_metrics, capsys):
    """log_metrics must echo the run id plus every metric key and value to stdout."""
    run_id = "aux2_040"

    core.log_metrics(run_id, valid_metrics)

    output = capsys.readouterr().out.strip()
    assert run_id in output
    # The original loop bound `key` but never used it, so the key half of the
    # check was silently missing. Keys are expected in the output (see
    # test_epoch_ms_numeric, which asserts "policy_hash" appears).
    for key, value in valid_metrics.items():
        assert key in output
        assert str(value) in output
|
||||||
|
|
||||||
|
|
||||||
|
def test_invalid_metrics_type_raises():
    """A metrics dict whose field has the wrong type must be rejected."""
    bad_payload = {"setup_fingerprint": 123}
    with pytest.raises((TypeError, KeyError, AssertionError)):
        core.log_metrics("runX", bad_payload)
|
||||||
|
|
||||||
|
|
||||||
|
def test_missing_field_raises(valid_metrics):
    """Omitting a required field (policy_hash) must make log_metrics raise."""
    incomplete = {k: v for k, v in valid_metrics.items() if k != "policy_hash"}
    with pytest.raises((KeyError, AssertionError)):
        core.log_metrics("runY", incomplete)
|
||||||
|
|
||||||
|
|
||||||
|
def test_epoch_ms_numeric(valid_metrics, capsys):
    """A numeric epoch_ms is rendered verbatim alongside the other keys."""
    valid_metrics["epoch_ms"] = 999999

    core.log_metrics("runZ", valid_metrics)

    captured = capsys.readouterr().out
    # Both the overridden timestamp and an untouched key should appear.
    for expected in ("999999", "policy_hash"):
        assert expected in captured
|
||||||
|
assert "policy_hash" in out
|
||||||
|
|
||||||
|
|
||||||
|
def test_near_expiry_float_range(valid_metrics):
    """log_metrics accepts near_expiry_unpinned at both ends of [0.0, 1.0].

    The original test asserted ``0.0 <= value <= 1.0`` on a literal it had
    just assigned — a tautology that can never fail — and only exercised the
    upper bound. Calling log_metrics at both boundaries is the real check.
    """
    for boundary in (0.0, 1.0):
        valid_metrics["near_expiry_unpinned"] = boundary
        # Must not raise: both boundary values are within the valid range.
        core.log_metrics("runR", valid_metrics)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize(
    "field,value",
    [
        ("setup_fingerprint", None),
        ("policy_hash", None),
        ("epoch_ms", "notint"),
        ("near_expiry_unpinned", -0.1),
    ],
)
def test_invalid_field_values_raise(valid_metrics, field, value):
    """Each individually-corrupted field must make log_metrics raise."""
    corrupted = dict(valid_metrics)
    corrupted[field] = value
    with pytest.raises(Exception):
        core.log_metrics("runW", corrupted)
|
||||||
Loading…
Reference in a new issue