Add data_collection_tool/tests/test_core.py
This commit is contained in:
parent
57167a4867
commit
3438eb636d
1 changed files with 81 additions and 0 deletions
81
data_collection_tool/tests/test_core.py
Normal file
81
data_collection_tool/tests/test_core.py
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
"""Pytest-Unit-Tests für die Datensammlungsfunktion collect_performance_data."""
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
import pytest
|
||||
|
||||
import src.data_collection_tool.core as core
|
||||
|
||||
|
||||
@pytest.fixture()
def temp_output_dir(tmp_path, monkeypatch):
    """Chdir into a fresh temporary directory so file writes are isolated.

    Returns the temporary path so tests can build expected file locations.
    """
    monkeypatch.chdir(tmp_path)
    return tmp_path
|
||||
|
||||
|
||||
def test_collect_performance_data_creates_file_and_returns_dict(temp_output_dir):
    """collect_performance_data returns a correctly typed dict and persists it as JSON."""
    run_id = "run_001"
    result = core.collect_performance_data(run_id)

    # The return value must be a dict carrying every expected metric key,
    # each with the documented type.
    assert isinstance(result, dict)
    expected_types = {
        "run_id": str,
        "p50": float,
        "p95": float,
        "max_alerts": int,
        "total_overhead": float,
    }
    for key, expected_type in expected_types.items():
        assert key in result
        assert isinstance(result[key], expected_type)

    # The function must also have written the results under output/.
    expected_path = temp_output_dir / "output" / f"{run_id}_performance.json"
    assert expected_path.exists(), f"Performance-Output-Datei fehlt: {expected_path}"

    # The persisted JSON must round-trip to exactly the returned dict.
    with open(expected_path, 'r', encoding='utf-8') as f:
        stored = json.load(f)

    assert stored == result
|
||||
|
||||
|
||||
def test_collect_performance_data_invalid_run_id_raises(tmp_path, monkeypatch):
    """An empty run_id is invalid input and must raise ValueError or AssertionError."""
    monkeypatch.chdir(tmp_path)

    with pytest.raises((ValueError, AssertionError)):
        core.collect_performance_data("")
|
||||
|
||||
|
||||
def test_collect_performance_data_output_is_valid_json(temp_output_dir):
    """The written performance file must contain valid JSON with consistent metrics.

    Runs a collection, reads the raw file back, parses it, and checks the
    stored percentile values satisfy the expected ordering invariant.
    """
    run_id = "42"
    _ = core.collect_performance_data(run_id)

    # The fixture chdir'd into temp_output_dir, so the relative path resolves there.
    output_file = Path("output") / f"{run_id}_performance.json"
    with open(output_file, 'r', encoding='utf-8') as f:
        content = f.read()

    # The raw file content must parse as JSON.
    try:
        data = json.loads(content)
    except json.JSONDecodeError as exc:
        pytest.fail(f"Output-Datei enthält kein gültiges JSON: {exc}")

    assert data["run_id"] == run_id
    assert isinstance(data["p50"], float)
    # BUG FIX: the original `assert 0 <= data["p95"] >= data["p50"]` chained as
    # `0 <= p95 and p95 >= p50`, so a negative p50 slipped through unchecked.
    # The intended percentile invariant is 0 <= p50 <= p95.
    assert 0 <= data["p50"] <= data["p95"]
|
||||
|
||||
|
||||
def test_collect_performance_data_does_not_overwrite_different_runs(temp_output_dir):
    """Distinct run ids must yield distinct results and separate output files."""
    run_ids = ("x10", "x20")
    results = [core.collect_performance_data(rid) for rid in run_ids]

    # Each result carries its own run id — nothing was clobbered in memory.
    assert results[0]["run_id"] != results[1]["run_id"]

    # Each run wrote its own file under output/ with differing content.
    paths = [Path("output") / f"{rid}_performance.json" for rid in run_ids]

    assert paths[0].exists() and paths[1].exists()
    assert paths[0].read_text() != paths[1].read_text()
|
||||
Loading…
Reference in a new issue