"""Tests for scheduling_analysis.core.analyze_scheduling_effects.

Covers the happy path, input validation (empty list, wrong type), and the
degenerate single-mechanism case.
"""

import pytest
import pandas as pd  # NOTE(review): unused in this file -- confirm before removing
from scheduling_analysis import core


@pytest.fixture
def sample_data():
    """Two runs, two metrics, one mechanism each.

    The expected effects below (15.0 and 2.5) match the sched_b - sched_a
    differences of the two metrics -- presumably how the analysis pairs
    mechanisms; verify against core.analyze_scheduling_effects.
    """
    return [
        {"run_id": "r1", "metric_name": "max_outlier_ms", "metric_value": 10.0, "mechanism": "sched_a"},
        {"run_id": "r2", "metric_name": "max_outlier_ms", "metric_value": 25.0, "mechanism": "sched_b"},
        {"run_id": "r1", "metric_name": "band_width_h", "metric_value": 5.0, "mechanism": "sched_a"},
        {"run_id": "r2", "metric_name": "band_width_h", "metric_value": 7.5, "mechanism": "sched_b"},
    ]


def test_analyze_scheduling_effects_basic(sample_data):
    """Result has the expected keys and the expected effect magnitudes."""
    result = core.analyze_scheduling_effects(sample_data)

    assert isinstance(result, dict)
    assert set(result.keys()) == {"max_outlier_effect", "resonance_band_shift", "metrics"}

    # numerical plausibility
    assert isinstance(result["max_outlier_effect"], float)
    assert isinstance(result["resonance_band_shift"], float)
    # 25.0 - 10.0 and 7.5 - 5.0 from the fixture; use pytest.approx for
    # consistency with the rest of this module (was a hand-rolled abs() check).
    assert result["max_outlier_effect"] == pytest.approx(15.0, abs=1e-6)
    assert result["resonance_band_shift"] == pytest.approx(2.5, abs=1e-6)


def test_empty_data_raises():
    """An empty record list is rejected with ValueError."""
    with pytest.raises(ValueError):
        core.analyze_scheduling_effects([])


def test_invalid_data_type():
    """A non-list argument is rejected with TypeError."""
    with pytest.raises(TypeError):
        core.analyze_scheduling_effects("invalid")


def test_partial_metrics_present():
    """With only one mechanism present, both effects degenerate to 0.0."""
    data = [
        {"run_id": "r1", "metric_name": "max_outlier_ms", "metric_value": 10.0, "mechanism": "sched_a"},
        {"run_id": "r1", "metric_name": "band_width_h", "metric_value": 5.0, "mechanism": "sched_a"},
    ]
    result = core.analyze_scheduling_effects(data)

    assert result["max_outlier_effect"] == pytest.approx(0.0)
    assert result["resonance_band_shift"] == pytest.approx(0.0)
    assert isinstance(result["metrics"], dict)