Add artifact.1/src/artifact_1/cli.py
This commit is contained in:
parent
548d94a7f0
commit
fb7d47250d
1 changed files with 86 additions and 0 deletions
86
artifact.1/src/artifact_1/cli.py
Normal file
86
artifact.1/src/artifact_1/cli.py
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
import argparse
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Any, Dict
|
||||
|
||||
import pandas as pd # noqa: F401
|
||||
from scipy import stats # noqa: F401
|
||||
|
||||
from artifact_1.core import calculate_metrics, run_mann_whitney_test
|
||||
|
||||
|
||||
# Module-wide logging: timestamped INFO-level messages via the root handler.
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(levelname)s: %(message)s')
# Module-scoped logger so records carry this module's name.
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _validate_input_path(path: str) -> str:
|
||||
if not os.path.exists(path):
|
||||
raise FileNotFoundError(f"Eingabedatei {path} wurde nicht gefunden.")
|
||||
if not path.endswith('.json'):
|
||||
raise ValueError("Eingabedatei muss im JSON-Format vorliegen.")
|
||||
return path
|
||||
|
||||
|
||||
def _validate_output_path(path: str) -> str:
|
||||
directory = os.path.dirname(os.path.abspath(path))
|
||||
if not os.path.isdir(directory):
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
if not path.endswith('.json'):
|
||||
raise ValueError("Ausgabedatei muss im JSON-Format vorliegen.")
|
||||
return path
|
||||
|
||||
|
||||
def _load_run_data(path: str) -> Dict[str, Any]:
|
||||
with open(path, 'r', encoding='utf-8') as f:
|
||||
data = json.load(f)
|
||||
# Minimal Validation
|
||||
required_keys = {"timestamp", "pinned_flag", "runtime", "seqcount_retry_count", "mischfenster_dauer"}
|
||||
missing = required_keys - set(data.keys())
|
||||
if missing:
|
||||
raise ValueError(f"Fehlende Felder im Eingabedatensatz: {missing}")
|
||||
# Try to parse timestamp
|
||||
try:
|
||||
datetime.fromisoformat(data["timestamp"])
|
||||
except (ValueError, TypeError):
|
||||
raise ValueError("Ungültiges Datumsformat für Feld 'timestamp'. Erwartet ISO 8601 Format.")
|
||||
return data
|
||||
|
||||
|
||||
def main():
    """Entry point: parse CLI args, compute run metrics, write a JSON summary.

    Exits with status 1 (via ``SystemExit``) on any validation or I/O error.
    """
    parser = argparse.ArgumentParser(description="CLI für die N40 Run Performance Analysis.")
    parser.add_argument('--input', required=True, help='Pfad zur Eingabedatei mit Run-Daten im JSON-Format.')
    parser.add_argument('--output', required=True, help='Pfad zur Ausgabedatei mit berechneten Metriken.')
    args = parser.parse_args()

    try:
        input_path = _validate_input_path(args.input)
        output_path = _validate_output_path(args.output)
        # Lazy %-args: the message is only formatted if the record is emitted.
        logger.info("Lade Run-Daten von %s ...", input_path)
        run_data = _load_run_data(input_path)

        logger.info("Berechne Metriken ...")
        metrics_summary = calculate_metrics(run_data)

        # For demonstration: compare pinned vs. unpinned values (only when
        # data is available on both sides).
        # NOTE(review): with a single run record, exactly one of data1/data2
        # is non-empty, so the test below can never fire for one input file —
        # kept as-is to preserve behavior; confirm whether multi-run input is
        # planned.
        data1 = [run_data["seqcount_retry_count"]] if run_data["pinned_flag"] else []
        data2 = [run_data["seqcount_retry_count"]] if not run_data["pinned_flag"] else []

        p_value = None
        if data1 and data2:
            p_value = run_mann_whitney_test(data1, data2)
        metrics_summary['mann_whitney_p'] = p_value  # typical extended output

        logger.info("Speichere Ergebnisse unter %s ...", output_path)
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(metrics_summary, f, indent=2)

        logger.info("Analyse erfolgreich abgeschlossen.")
    except Exception as e:
        # Top-level boundary: log, then convert to a non-zero exit status,
        # chaining the cause so tracebacks stay informative under -X dev etc.
        logger.error("Fehler: %s", e)
        raise SystemExit(1) from e
|
||||
|
||||
|
||||
# Script entry-point guard: run the CLI only when executed directly,
# not when imported as a module.
if __name__ == '__main__':
    main()
|
||||
Loading…
Reference in a new issue