#!/usr/bin/env python3
"""Inference benchmark over provider runtime and policy stacks."""

from __future__ import annotations

import argparse
import json
import sys
import time
from pathlib import Path

# Make the repository root importable so the ``app`` packages resolve when this
# script is run directly.
ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

from app.env.env_core import PolyGuardEnv
from app.models.policy.provider_runtime import PolicyProviderRouter


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Benchmark local inference path.")
    parser.add_argument("--provider", default="transformers")
    parser.add_argument("--model", default="Qwen/Qwen2.5-0.5B-Instruct")
    parser.add_argument("--runs", type=int, default=5)
    return parser.parse_args()


def main() -> None:
    args = parse_args()
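    # Construct the environment and router once; both are reused for every run.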
    env = PolyGuardEnv()
    router = PolicyProviderRouter(hf_model=args.model)
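    # Always keep "transformers" available as a fallback when a different provider is requested.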
    provider_preference = (args.provider,) if args.provider == "transformers" else (args.provider, "transformers")

    rows = []
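    # Time one candidate selection per run, each with a distinct seed so the
    # environment state differs between runs.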
    for i in range(args.runs):
        env.reset(seed=7_100 + i, difficulty="medium")
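        # Pull the candidate actions straight from a freshly built observation;
        # _build_observation is private, hence the SLF001 suppression.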
        obs = env._build_observation()  # noqa: SLF001
        candidates = list(obs.candidate_action_set)
        start = time.monotonic()
        selection = router.select_candidate(candidates, prompt={"run": i}, provider_preference=provider_preference)
        latency = (time.monotonic() - start) * 1000.0
        rows.append(
            {
                "run": i,
                "provider": selection.provider,
                "candidate_id": selection.candidate_id,
                "latency_ms": round(latency, 3),
                "rationale": selection.rationale,
            }
        )

    avg_latency = sum(item["latency_ms"] for item in rows) / len(rows) if rows else 0.0
    payload = {
        "status": "ok",
        "runs": rows,
        "avg_latency_ms": round(avg_latency, 3),
        "provider_requested": args.provider,
        "model": args.model,
    }

    out = ROOT / "outputs" / "reports"
    out.mkdir(parents=True, exist_ok=True)
    (out / "inference_benchmark.json").write_text(json.dumps(payload, ensure_ascii=True, indent=2), encoding="utf-8")
    print("benchmark_inference_done")


if __name__ == "__main__":
    main()