Add rerun_evaluator/src/rerun_evaluator/cli.py
This commit is contained in:
parent
fa99a0b2bf
commit
5cfabafcdd
1 changed files with 75 additions and 0 deletions
75
rerun_evaluator/src/rerun_evaluator/cli.py
Normal file
75
rerun_evaluator/src/rerun_evaluator/cli.py
Normal file
|
|
@ -0,0 +1,75 @@
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, List
|
||||||
|
|
||||||
|
from rerun_evaluator.core import RunData, evaluate_rerun_needs
|
||||||
|
|
||||||
|
|
||||||
|
def _load_runs_data(input_path: Path) -> List[RunData]:
    """Load and validate run entries from a JSON file.

    The file must contain a JSON list of objects; each object must provide
    the keys ``run_id``, ``label_triggered``, ``flappy``, ``pinned`` and
    ``unknown_rate``.

    Args:
        input_path: Path to the input JSON file.

    Returns:
        A list of ``RunData`` built from the validated entries.

    Exits:
        With status 1 (after printing the reason to stderr) on I/O errors,
        malformed JSON, or entries that fail validation.
    """
    # Loop-invariant: build the required-key set once, not per entry.
    required_fields = frozenset(
        {"run_id", "label_triggered", "flappy", "pinned", "unknown_rate"}
    )
    try:
        with input_path.open('r', encoding='utf-8') as f:
            raw_data = json.load(f)
        if not isinstance(raw_data, list):
            raise ValueError('Input JSON must be a list of objects.')
        runs_data = []
        for idx, entry in enumerate(raw_data):
            if not isinstance(entry, dict):
                raise ValueError(f'Entry {idx} is not a dict.')
            missing = required_fields - entry.keys()
            if missing:
                raise ValueError(f'Entry {idx} missing required fields: {missing}')
            runs_data.append(
                RunData(
                    run_id=str(entry['run_id']),
                    label_triggered=bool(entry['label_triggered']),
                    flappy=bool(entry['flappy']),
                    pinned=bool(entry['pinned']),
                    unknown_rate=float(entry['unknown_rate'])
                )
            )
        return runs_data
    # TypeError is included because float() raises it (not ValueError) for
    # non-numeric JSON values such as null, lists, or objects; without it a
    # bad 'unknown_rate' would escape as a raw traceback.
    except (OSError, json.JSONDecodeError, TypeError, ValueError) as e:
        print(f'Error loading runs data: {e}', file=sys.stderr)
        sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def _write_output(output_path: Path, result: Any) -> None:
|
||||||
|
try:
|
||||||
|
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||||
|
with output_path.open('w', encoding='utf-8') as f:
|
||||||
|
json.dump(result, f, indent=2)
|
||||||
|
except OSError as e:
|
||||||
|
print(f'Error writing output: {e}', file=sys.stderr)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> None:
    """CLI entry point: load run data, evaluate rerun needs, write result.

    Parses ``--input`` and ``--output`` paths, validates the input, runs
    ``evaluate_rerun_needs`` and writes the resulting dict as JSON. All
    error paths print to stderr and exit with status 1.
    """
    parser = argparse.ArgumentParser(description='Evaluate Rerun Needs for CI Pipelines')
    parser.add_argument('--input', required=True, help='Path to input JSON file with RunData entries.')
    parser.add_argument('--output', required=True, help='Path to output JSON file for evaluation result.')
    args = parser.parse_args()

    input_path = Path(args.input)
    output_path = Path(args.output)

    # Fail early with a clear message rather than relying on the open()
    # inside _load_runs_data.
    if not input_path.exists():
        print(f'Input file not found: {input_path}', file=sys.stderr)
        sys.exit(1)

    runs_data = _load_runs_data(input_path)
    # Explicit check instead of `assert`: assertions are stripped under
    # `python -O`, and an empty input file is a user error, not a bug —
    # it should be reported like every other error path here.
    if not runs_data:
        print('Runs data list must not be empty.', file=sys.stderr)
        sys.exit(1)

    result = evaluate_rerun_needs(runs_data)

    if not isinstance(result, dict):
        print('Evaluation result must be a dict.', file=sys.stderr)
        sys.exit(1)

    _write_output(output_path, result)
|
||||||
|
|
||||||
|
|
||||||
|
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
||||||
Loading…
Reference in a new issue