Add policy_evaluation/tests/test_core.py
This commit is contained in:
parent
9c890b9304
commit
307c4fa0fb
1 changed files with 81 additions and 0 deletions
81
policy_evaluation/tests/test_core.py
Normal file
81
policy_evaluation/tests/test_core.py
Normal file
|
|
@ -0,0 +1,81 @@
|
||||||
|
import pytest
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
from policy_evaluation import core
|
||||||
|
|
||||||
|
@pytest.fixture
def sample_log_data():
    """Sample log entries: pinned and unpinned records with varied timestamps and drift signatures."""
    rows = [
        # (t_publish, t_gate_read, t_index_visible, pinned_flag, timeouts, drift_signature)
        (1.0, 2.0, 3.0, True, 0, 'normal'),
        (2.0, 3.5, 4.0, False, 1, 'mild_drift'),
        (3.0, 6.0, 7.0, True, 0, 'outlier'),
    ]
    return [
        {
            't_publish': t_pub,
            't_gate_read': t_gate,
            't_index_visible': t_idx,
            'pinned_flag': pinned,
            'timeouts': n_timeouts,
            'drift_signature': drift,
        }
        for t_pub, t_gate, t_idx, pinned, n_timeouts, drift in rows
    ]
|
||||||
|
|
||||||
|
|
||||||
|
def test_evaluate_policies_returns_policyresults(sample_log_data):
    """evaluate_policies must yield a PolicyResults instance for valid log data."""
    outcome = core.evaluate_policies(sample_log_data)
    assert isinstance(outcome, core.PolicyResults)
|
||||||
|
|
||||||
|
|
||||||
|
def test_policyresults_fields_are_valid(sample_log_data):
    """The JSON view of the result must carry every required field with the expected type."""
    outcome = core.evaluate_policies(sample_log_data)
    payload = outcome.to_json()

    # Expected field -> type mapping; insertion order matches the original check order.
    expected_types = {
        'p99_coverage': float,
        'remaining_missing_cases': int,
        'conversion_rates': float,
        'max_wait_time': float,
    }

    # First verify presence of every field, then verify types.
    for field in expected_types:
        assert field in payload, f"{field} fehlt im Resultat"

    for field, expected_type in expected_types.items():
        assert isinstance(payload[field], expected_type)
|
||||||
|
|
||||||
|
|
||||||
|
def test_evaluate_policies_values_are_reasonable(sample_log_data):
    """p99 coverage and timing are not precisely specified, so only value ranges are checked."""
    payload = core.evaluate_policies(sample_log_data).to_json()

    assert 0.0 <= payload['p99_coverage'] <= 1.0
    assert payload['remaining_missing_cases'] >= 0
    assert 0.0 <= payload['conversion_rates'] <= 1.0
    assert payload['max_wait_time'] >= 0.0
|
||||||
|
|
||||||
|
|
||||||
|
def test_evaluate_policies_invalid_input_raises():
    """A None payload must be rejected with TypeError or ValueError."""
    expected_errors = (TypeError, ValueError)
    with pytest.raises(expected_errors):
        core.evaluate_policies(None)
|
||||||
|
|
||||||
|
|
||||||
|
def test_evaluate_policies_empty_data():
    """Empty input must still produce a valid PolicyResults object with zeroed fields."""
    outcome = core.evaluate_policies([])
    payload = outcome.to_json()

    assert isinstance(payload, dict)
    # All required fields must be present even when there is nothing to evaluate.
    for key in ('p99_coverage', 'remaining_missing_cases', 'conversion_rates', 'max_wait_time'):
        assert key in payload
    # An empty log yields zero-valued metrics.
    assert payload['p99_coverage'] == 0.0
    assert payload['remaining_missing_cases'] == 0
    assert payload['conversion_rates'] == 0.0
    assert payload['max_wait_time'] == 0.0
|
||||||
Loading…
Reference in a new issue