Add artifact1/tests/test_core.py
This commit is contained in:
parent
eac4e0749b
commit
4bdc286978
1 changed files with 58 additions and 0 deletions
58
artifact1/tests/test_core.py
Normal file
58
artifact1/tests/test_core.py
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
import pytest
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
import json
|
||||
|
||||
import src.artifact1.core as core
|
||||
|
||||
@pytest.fixture
def sample_data():
    """Provide a minimal metrics payload: three runs, four numeric metrics each."""
    runs = [
        {'max_only_alerts': 3, 'outlier_frequency': 0.05, 'expires_at_dist_hours': 2.5, 'retry_total_overhead': 120.0},
        {'max_only_alerts': 4, 'outlier_frequency': 0.07, 'expires_at_dist_hours': 1.8, 'retry_total_overhead': 98.0},
        {'max_only_alerts': 2, 'outlier_frequency': 0.03, 'expires_at_dist_hours': 2.9, 'retry_total_overhead': 130.0},
    ]
    return {'runs': runs}
|
||||
|
||||
|
||||
def test_analyze_metrics_returns_dict(sample_data):
    """analyze_metrics must return a dict containing every expected metric key."""
    metrics = core.analyze_metrics(sample_data)
    assert isinstance(metrics, dict), "The result should be a dictionary"
    required = {'max_only_alerts', 'outlier_frequency', 'expires_at_dist_hours', 'retry_total_overhead'}
    # Set containment via the <= operator is equivalent to issubset().
    assert required <= set(metrics.keys()), "All expected metrics should be present"
|
||||
|
||||
|
||||
def test_analyze_metrics_values_reasonable(sample_data):
    """Means reported by analyze_metrics match hand-computed fixture averages."""
    stats = core.analyze_metrics(sample_data)
    # Hand-computed means over the three fixture runs.
    expected_means = {
        'max_only_alerts': 3.0,
        'outlier_frequency': 0.05,
        'expires_at_dist_hours': 2.4,
        'retry_total_overhead': 116.0,
    }
    for metric, mean in expected_means.items():
        assert stats[metric]['mean'] == pytest.approx(mean, rel=1e-2)
|
||||
|
||||
|
||||
def test_analyze_metrics_handles_invalid_input():
    """A payload lacking a usable 'runs' entry must raise rather than misbehave."""
    bad_payload = {'invalid': 123}
    with pytest.raises((KeyError, TypeError, ValueError)):
        core.analyze_metrics(bad_payload)
|
||||
|
||||
|
||||
def test_analyze_metrics_empty_data():
    """An empty run list must be rejected with ValueError."""
    empty_payload = {'runs': []}
    with pytest.raises(ValueError):
        core.analyze_metrics(empty_payload)
|
||||
|
||||
|
||||
def test_analyze_metrics_stat_consistency(sample_data):
    """Every per-metric entry carries a numeric, non-NaN mean alongside a std.

    BUG FIX: the original looped over ALL of ``result.items()``, but
    ``test_analyze_metrics_outlier_logic`` (same file) asserts the result
    also contains an ``'anomalies'`` section holding boolean flags — not a
    ``{'mean': ..., 'std': ...}`` dict — so this test contradicted its
    sibling and could never pass against an implementation satisfying both.
    The non-metric ``'anomalies'`` entry is now skipped.
    """
    result = core.analyze_metrics(sample_data)
    for metric, values in result.items():
        if metric == 'anomalies':
            # Boolean anomaly flags, not a statistics dict — checked elsewhere.
            continue
        assert 'mean' in values and 'std' in values, f"Metric {metric} should have mean and std"
        assert isinstance(values['mean'], (int, float))
        assert not pd.isna(values['mean']), f"Mean should not be NaN for {metric}"
|
||||
|
||||
|
||||
def test_analyze_metrics_outlier_logic(sample_data):
    """The result exposes an 'anomalies' dict whose values are boolean flags."""
    result = core.analyze_metrics(sample_data)
    assert 'anomalies' in result, "Result should contain an 'anomalies' section"
    flags = result['anomalies']
    assert isinstance(flags, dict), "Anomalies should be represented as dict"
    for key in flags:
        value = flags[key]
        assert isinstance(value, bool), f"Anomaly flag for {key} should be boolean"
|
||||
Loading…
Reference in a new issue