Add audit_data_processing/tests/test_core.py
parent 9c87dd9de4
commit 3a41302cde
1 changed file with 69 additions and 0 deletions
audit_data_processing/tests/test_core.py (new file, 69 lines)
@@ -0,0 +1,69 @@
import pytest
import pandas as pd
from audit_data_processing import core


@pytest.fixture
def sample_data():
    # Sample audit-run data with warn_rate and unknown_rate
    class RunData:
        def __init__(self, run_id, warn_rate, unknown_rate, pinned):
            self.run_id = run_id
            self.warn_rate = warn_rate
            self.unknown_rate = unknown_rate
            self.pinned = pinned

    return [
        RunData("r1", 0.1, 0.05, True),
        RunData("r2", 0.2, 0.10, False),
        RunData("r3", 0.3, 0.15, True),
        RunData("r4", 0.4, 0.20, False),
        RunData("r5", 0.5, 0.25, True)
    ]


def test_calculate_percentiles_basic(sample_data):
    percentiles = [50, 75, 90, 95]
    result = core.calculate_percentiles(sample_data, percentiles)

    assert isinstance(result, dict)
    assert set(result.keys()) == {"warn_rate", "unknown_rate"}

    # Check that every requested percentile was computed
    for metric in result.values():
        for p in percentiles:
            assert p in metric
            assert isinstance(metric[p], float)


def test_calculate_percentiles_values(sample_data):
    # Compute the expected values with pandas for comparison
    df = pd.DataFrame([
        {"warn_rate": d.warn_rate, "unknown_rate": d.unknown_rate}
        for d in sample_data
    ])
    percentiles = [50, 75, 90, 95]
    expected_warn = {
        p: float(df["warn_rate"].quantile(p / 100)) for p in percentiles
    }
    expected_unknown = {
        p: float(df["unknown_rate"].quantile(p / 100)) for p in percentiles
    }

    result = core.calculate_percentiles(sample_data, percentiles)

    for p in percentiles:
        assert pytest.approx(result["warn_rate"][p], rel=1e-9) == expected_warn[p]
        assert pytest.approx(result["unknown_rate"][p], rel=1e-9) == expected_unknown[p]


def test_calculate_percentiles_empty():
    # Error case: an empty list should raise an exception
    with pytest.raises((ValueError, ZeroDivisionError, KeyError, IndexError)):
        core.calculate_percentiles([], [50])


def test_calculate_percentiles_invalid_input():
    # Error case: data missing the required attributes
    with pytest.raises(Exception):
        core.calculate_percentiles([{"warn_rate": 0.1}], [50])
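For context, here is a minimal sketch of what core.calculate_percentiles could look like in order to satisfy the contract these tests exercise. It is an assumption for illustration, not the committed implementation: the pandas-based quantile computation, the function body, and the exact exception raised on empty input are guesses consistent with the assertions above.

import pandas as pd


def calculate_percentiles(runs, percentiles):
    # Hypothetical sketch -- not the actual audit_data_processing.core code.
    # Returns {"warn_rate": {p: value, ...}, "unknown_rate": {p: value, ...}}.
    if not runs:
        # The empty-input test accepts ValueError among other exceptions.
        raise ValueError("calculate_percentiles needs at least one run")
    # AttributeError propagates for records lacking warn_rate/unknown_rate,
    # which the invalid-input test catches via pytest.raises(Exception).
    df = pd.DataFrame(
        [{"warn_rate": r.warn_rate, "unknown_rate": r.unknown_rate} for r in runs]
    )
    return {
        metric: {p: float(df[metric].quantile(p / 100)) for p in percentiles}
        for metric in ("warn_rate", "unknown_rate")
    }

Because this sketch computes quantiles with the same pandas default (linear interpolation) the value test uses for its expected results, the pytest.approx comparisons would pass under these assumptions.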