From 3438eb636d60c4f8fe886aee3f657480321557f2 Mon Sep 17 00:00:00 2001
From: Mika
Date: Sat, 14 Mar 2026 17:17:10 +0000
Subject: [PATCH] Add data_collection_tool/tests/test_core.py

---
 data_collection_tool/tests/test_core.py | 82 +++++++++++++++++++++++++
 1 file changed, 82 insertions(+)
 create mode 100644 data_collection_tool/tests/test_core.py

diff --git a/data_collection_tool/tests/test_core.py b/data_collection_tool/tests/test_core.py
new file mode 100644
index 0000000..db49056
--- /dev/null
+++ b/data_collection_tool/tests/test_core.py
@@ -0,0 +1,82 @@
+"""Pytest unit tests for the data collection function collect_performance_data."""
+import json
+from pathlib import Path
+import pytest
+
+import src.data_collection_tool.core as core
+
+
+@pytest.fixture()
+def temp_output_dir(tmp_path, monkeypatch):
+    # Switch the working directory to a temporary path so file writes are isolated.
+    monkeypatch.chdir(tmp_path)
+    return tmp_path
+
+
+def test_collect_performance_data_creates_file_and_returns_dict(temp_output_dir):
+    run_id = "run_001"
+    result = core.collect_performance_data(run_id)
+
+    # Check the returned mapping.
+    assert isinstance(result, dict)
+    for key in ("run_id", "p50", "p95", "max_alerts", "total_overhead"):
+        assert key in result
+
+    # Check the value types.
+    assert isinstance(result["run_id"], str)
+    assert isinstance(result["p50"], float)
+    assert isinstance(result["p95"], float)
+    assert isinstance(result["max_alerts"], int)
+    assert isinstance(result["total_overhead"], float)
+
+    # Check that the output file was created.
+    expected_path = temp_output_dir / "output" / f"{run_id}_performance.json"
+    assert expected_path.exists(), f"Performance-Output-Datei fehlt: {expected_path}"
+
+    # Load the file contents and compare against the returned dict.
+    with open(expected_path, 'r', encoding='utf-8') as f:
+        stored = json.load(f)
+
+    assert stored == result
+
+
+def test_collect_performance_data_invalid_run_id_raises(tmp_path, monkeypatch):
+    monkeypatch.chdir(tmp_path)
+    # Simulate invalid input: an empty run_id.
+    with pytest.raises((ValueError, AssertionError)):
+        core.collect_performance_data("")
+
+
+def test_collect_performance_data_output_is_valid_json(temp_output_dir):
+    run_id = "42"
+    _ = core.collect_performance_data(run_id)
+
+    output_file = Path("output") / f"{run_id}_performance.json"
+    with open(output_file, 'r', encoding='utf-8') as f:
+        content = f.read()
+
+    # The file must contain valid JSON.
+    try:
+        data = json.loads(content)
+    except json.JSONDecodeError as exc:
+        pytest.fail(f"Output-Datei enthält kein gültiges JSON: {exc}")
+
+    assert data["run_id"] == run_id
+    assert isinstance(data["p50"], float)
+    # p50 and p95 are percentiles, so 0 <= p50 <= p95 must hold.
+    assert 0 <= data["p50"] <= data["p95"]
+
+
+def test_collect_performance_data_does_not_overwrite_different_runs(temp_output_dir):
+    run_a, run_b = "x10", "x20"
+    result_a = core.collect_performance_data(run_a)
+    result_b = core.collect_performance_data(run_b)
+
+    assert result_a["run_id"] != result_b["run_id"]
+
+    file_a = Path("output") / f"{run_a}_performance.json"
+    file_b = Path("output") / f"{run_b}_performance.json"
+
+    # Separate asserts so a failure pinpoints which run's file is missing.
+    assert file_a.exists()
+    assert file_b.exists()
+    assert file_a.read_text() != file_b.read_text()