Add policy_eval/src/policy_eval/cli.py
parent ade86428c6
commit bae235cd3c
1 changed file with 63 additions and 0 deletions
63  policy_eval/src/policy_eval/cli.py  Normal file
@@ -0,0 +1,63 @@
import argparse
import json
import os
from pathlib import Path
from typing import Any

from policy_eval.core import evaluate_policy


def main() -> None:
    """Command-line interface for evaluating policy metrics.

    This CLI accepts input and output JSON file paths, evaluates the provided
    drift report using the core.evaluate_policy function, and writes the
    resulting evaluation metrics JSON.
    """
    parser = argparse.ArgumentParser(
        description="Evaluate policy metrics from a drift report."
    )
    parser.add_argument(
        "--input",
        required=True,
        help="Path to the drift report JSON file."
    )
    parser.add_argument(
        "--output",
        required=True,
        help="Path to the output evaluation metrics JSON file."
    )

    args = parser.parse_args()

    input_path = Path(args.input).expanduser().resolve()
    output_path = Path(args.output).expanduser().resolve()

    # Basic input validation
    if not input_path.exists() or not input_path.is_file():
        raise FileNotFoundError(f"Input file does not exist: {input_path}")

    with open(input_path, "r", encoding="utf-8") as f:
        drift_data: Any = json.load(f)

    if not isinstance(drift_data, dict):
        raise ValueError("Invalid drift report: expected a dict JSON structure.")

    metrics = evaluate_policy(drift_data)

    # CI validation: ensure output conforms to expected data model
    required_fields = {"total_warn", "total_fail", "unknowns", "manual_overrides"}
    if not isinstance(metrics, dict) or not required_fields.issubset(metrics.keys()):
        raise ValueError(
            f"Invalid evaluation output: missing required fields. Expected {required_fields}, got {set(metrics) if isinstance(metrics, dict) else type(metrics).__name__}."
        )

    # Ensure output directory exists
    os.makedirs(output_path.parent, exist_ok=True)

    with open(output_path, "w", encoding="utf-8") as out_file:
        json.dump(metrics, out_file, indent=2, sort_keys=True)
        out_file.write("\n")


if __name__ == "__main__":
    main()
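
Usage note, not part of the commit: a minimal end-to-end smoke test of the new entry point. The empty drift report below is an assumption made purely for illustration (the real input contract lives in policy_eval.core.evaluate_policy, which this diff does not touch); the four output keys are the ones the CLI itself enforces before writing.

import json
import subprocess
import sys
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    report_path = Path(tmp) / "drift_report.json"   # hypothetical sample input
    metrics_path = Path(tmp) / "metrics.json"
    report_path.write_text(json.dumps({"entries": []}), encoding="utf-8")

    # Invoke the CLI the way a CI job might (the package must be importable).
    subprocess.run(
        [sys.executable, "-m", "policy_eval.cli",
         "--input", str(report_path), "--output", str(metrics_path)],
        check=True,
    )

    metrics = json.loads(metrics_path.read_text(encoding="utf-8"))
    # The CLI guarantees these keys exist before it writes the file.
    assert {"total_warn", "total_fail", "unknowns", "manual_overrides"} <= metrics.keys()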
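For reference, a hypothetical sketch of the core.evaluate_policy contract implied by the CI validation above. This is not the committed implementation (policy_eval/core is untouched by this commit); the "entries", "status", and "manual_override" field names are invented here solely to show one report shape that would yield the four required metrics.

from typing import Any


def evaluate_policy(drift_report: dict[str, Any]) -> dict[str, int]:
    """Return the metrics dict the CLI validates; all four keys are required."""
    entries = drift_report.get("entries", [])
    statuses = [entry.get("status") for entry in entries]
    return {
        "total_warn": statuses.count("warn"),
        "total_fail": statuses.count("fail"),
        "unknowns": sum(s not in ("ok", "warn", "fail") for s in statuses),
        "manual_overrides": sum(bool(e.get("manual_override")) for e in entries),
    }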