diff --git a/cpu_pinning_analysis/tests/test_core.py b/cpu_pinning_analysis/tests/test_core.py
new file mode 100644
index 0000000..16b6290
--- /dev/null
+++ b/cpu_pinning_analysis/tests/test_core.py
@@ -0,0 +1,44 @@
+import pytest
+from cpu_pinning_analysis import core
+
+
+@pytest.fixture
+def sample_latency_data():
+    pinned = {"latencies": [1.0, 1.2, 1.5, 1.7, 2.0, 2.1, 2.5, 2.8, 3.0, 3.5]}
+    unpinned = {"latencies": [1.1, 1.3, 1.4, 1.6, 1.9, 2.2, 2.4, 2.9, 3.1, 3.4]}
+    return pinned, unpinned
+
+
+def test_analyze_latencies_structure(sample_latency_data):
+    pinned, unpinned = sample_latency_data
+    result = core.analyze_latencies(pinned, unpinned)
+
+    # Basic structure checks
+    assert isinstance(result, dict)
+    assert set(result.keys()) == {"pinned", "unpinned"}
+
+    for key in ("pinned", "unpinned"):
+        metrics = result[key]
+        assert all(k in metrics for k in ("P95", "P99", "histogram"))
+        assert isinstance(metrics["P95"], float)
+        assert isinstance(metrics["P99"], float)
+        assert isinstance(metrics["histogram"], dict)
+
+
+def test_percentile_order(sample_latency_data):
+    pinned, unpinned = sample_latency_data
+    result = core.analyze_latencies(pinned, unpinned)
+
+    for key in ("pinned", "unpinned"):
+        metrics = result[key]
+        assert metrics["P99"] >= metrics["P95"], "P99 should be >= P95"
+
+
+def test_histogram_nonempty(sample_latency_data):
+    pinned, unpinned = sample_latency_data
+    result = core.analyze_latencies(pinned, unpinned)
+
+    for key in ("pinned", "unpinned"):
+        hist = result[key]["histogram"]
+        assert len(hist) > 0
+        assert all(isinstance(v, int) for v in hist.values())
\ No newline at end of file
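The tests above exercise core.analyze_latencies, whose implementation is not part of this diff. As a reading aid only, the following is a minimal sketch of one shape that function could take and still satisfy every assertion in the tests (dict keyed by "pinned"/"unpinned", float P95/P99, histogram with integer counts); the nearest-rank percentile method and the 0.5 ms bucket width are assumptions, not the project's actual implementation.

# Hypothetical sketch of the interface the tests assume; not the real core module.
from typing import Dict, List


def _percentile(sorted_values: List[float], pct: float) -> float:
    # Nearest-rank percentile on a pre-sorted list (assumed method).
    rank = int(round(pct / 100.0 * len(sorted_values))) - 1
    idx = min(len(sorted_values) - 1, max(0, rank))
    return float(sorted_values[idx])


def analyze_latencies(pinned: Dict[str, List[float]],
                      unpinned: Dict[str, List[float]]) -> Dict[str, dict]:
    result: Dict[str, dict] = {}
    for name, data in (("pinned", pinned), ("unpinned", unpinned)):
        values = sorted(data["latencies"])
        # Integer counts per 0.5 ms bucket, keyed by the bucket's lower bound
        # (bucket width is an assumption for illustration).
        histogram: Dict[float, int] = {}
        for v in values:
            bucket = round(int(v / 0.5) * 0.5, 1)
            histogram[bucket] = histogram.get(bucket, 0) + 1
        result[name] = {
            "P95": _percentile(values, 95),
            "P99": _percentile(values, 99),
            "histogram": histogram,
        }
    return result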