Add decision_table_generator/tests/test_core.py
This commit is contained in:
parent
ad48b148bd
commit
61e2fb762d
1 changed files with 89 additions and 0 deletions
89
decision_table_generator/tests/test_core.py
Normal file
89
decision_table_generator/tests/test_core.py
Normal file
|
|
@ -0,0 +1,89 @@
|
|||
import json
|
||||
import pytest
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
from typing import List, Dict
|
||||
|
||||
import src.decision_table_generator.core as core
|
||||
|
||||
|
||||
def load_sample_config() -> Dict:
    """Load the sample decision config, falling back to an inline default.

    The original lookup used a CWD-relative path only, which silently fell
    through to the fallback whenever pytest was invoked from any directory
    other than the repo root. The config is now looked up first relative to
    this test file, then via the original CWD-relative path, so existing
    invocations keep working.

    Returns:
        Dict with keys "N_values", "warn_threshold", "rerun_options",
        "unknown_handling".
    """
    candidates = [
        # Robust: resolve the data file relative to this test module.
        Path(__file__).parent / 'data' / 'decision_config_sample.json',
        # Original behavior: relative to the current working directory.
        Path('tests/data/decision_config_sample.json'),
    ]
    for sample_path in candidates:
        if sample_path.exists():
            with sample_path.open('r', encoding='utf-8') as f:
                return json.load(f)
    # fallback sample in case data file not present
    return {
        "N_values": [10, 20],
        "warn_threshold": 30.0,
        "rerun_options": ["off", "on"],
        "unknown_handling": "exclude"
    }
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
def sample_config() -> Dict:
    """Module-scoped fixture yielding a structurally validated decision config."""
    config = load_sample_config()
    # Sanity-check the basic DecisionConfig shape before handing it to tests.
    assert isinstance(config, dict)
    required = ["N_values", "warn_threshold", "rerun_options", "unknown_handling"]
    assert all(key in config for key in required)
    n_values = config["N_values"]
    assert isinstance(n_values, list)
    assert all(isinstance(n, int) for n in n_values)
    assert isinstance(config["warn_threshold"], (int, float))
    assert isinstance(config["rerun_options"], list)
    assert isinstance(config["unknown_handling"], str)
    return config
|
||||
|
||||
|
||||
def test_generate_decision_table_structure(sample_config):
    """Every generated row is a dict carrying the full expected column set."""
    rows = core.generate_decision_table(sample_config)
    assert isinstance(rows, list)
    assert rows, "Generated decision table should not be empty"
    expected_keys = {
        "N", "warn_threshold", "rerun_option", "unknown_handling",
        "warn_count", "rerun_helps", "rerun_shifts", "rerun_hurts",
    }
    for row in rows:
        assert isinstance(row, dict)
        # Rows may carry extra columns; the expected set must be present.
        assert expected_keys <= row.keys()
|
||||
|
||||
|
||||
def test_generate_decision_table_content_consistency(sample_config):
    """Table covers every (N, rerun_option) combination with sane numeric fields."""
    df = pd.DataFrame(core.generate_decision_table(sample_config))
    n_values = sorted(sample_config["N_values"])
    rerun_options = sample_config["rerun_options"]
    # Exactly one row per (N, rerun_option) combination is expected.
    assert len(df) == len(n_values) * len(rerun_options)
    assert df["N"].isin(n_values).all()
    assert df["rerun_option"].isin(rerun_options).all()
    # Count-style columns must never go negative.
    for column in ("warn_count", "rerun_helps", "rerun_shifts", "rerun_hurts"):
        assert (df[column] >= 0).all()
|
||||
|
||||
|
||||
def test_invalid_config_raises_error():
    """Malformed configs must raise instead of silently producing a table."""
    valid_tail = {"rerun_options": ["off"], "unknown_handling": "exclude"}
    invalid_configs = (
        {},  # missing every required key
        {"N_values": "notalist", "warn_threshold": 20.0, **valid_tail},  # wrong type
        {"N_values": [10, 20], "warn_threshold": None, **valid_tail},  # null threshold
    )
    for bad_config in invalid_configs:
        with pytest.raises((KeyError, TypeError, ValueError)):
            core.generate_decision_table(bad_config)
|
||||
|
||||
|
||||
def test_cli_integration(monkeypatch, tmp_path, sample_config):
    """End-to-end CLI check: write a config, run the CLI module, validate the CSV.

    Fix: the subprocess is launched with ``sys.executable`` (the interpreter
    running this test suite) instead of the bare string ``'python'``, which
    may be absent from PATH (e.g. systems that only ship ``python3``) or may
    resolve to a different interpreter without the project installed.
    """
    import subprocess
    import sys

    config_path = tmp_path / 'config.json'
    output_path = tmp_path / 'output.csv'
    with config_path.open('w', encoding='utf-8') as f:
        json.dump(sample_config, f)

    # NOTE(review): the `monkeypatch` fixture is requested but never used —
    # confirm whether it was meant to patch env/PATH or can be dropped.
    result = subprocess.run(
        [sys.executable, '-m', 'src.decision_table_generator.cli',
         '--config', str(config_path), '--output', str(output_path)],
        capture_output=True, text=True
    )
    assert result.returncode == 0, f"CLI execution failed: {result.stderr}"
    assert output_path.exists()

    df = pd.read_csv(output_path)
    assert not df.empty
    assert all(col in df.columns for col in ["N", "warn_threshold", "rerun_option", "unknown_handling"])
|
||||
Loading…
Reference in a new issue