Spaces:
Paused
Paused
Update CERNenv Space
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .dockerignore +9 -0
- .gitignore +21 -0
- .python-version +1 -0
- Dockerfile +31 -0
- README.md +115 -5
- [External] Apr ‘26 OpenEnv Hackathon Themes & Judging Criteria.txt +190 -0
- [External] Meta OpenEnv Hackathon Participant Help Guide.txt +291 -0
- client.py +40 -0
- models.py +694 -0
- openenv.yaml +6 -0
- pyproject.toml +57 -0
- scripts/__init__.py +0 -0
- scripts/_build_spaces.py +135 -0
- scripts/baseline_agents.py +305 -0
- scripts/push_to_hub.py +247 -0
- scripts/run_agent.py +129 -0
- server/Dockerfile +50 -0
- server/__init__.py +1 -0
- server/app.py +154 -0
- server/environment.py +369 -0
- server/requirements.txt +6 -0
- server/rewards/__init__.py +19 -0
- server/rewards/reward_function.py +408 -0
- server/rules/__init__.py +5 -0
- server/rules/engine.py +203 -0
- server/simulator/__init__.py +31 -0
- server/simulator/latent_state.py +171 -0
- server/simulator/noise.py +161 -0
- server/simulator/output_generator.py +586 -0
- server/simulator/transition.py +197 -0
- server/tasks/__init__.py +9 -0
- server/tasks/scenarios.py +422 -0
- space/__init__.py +0 -0
- space/env/Dockerfile +24 -0
- space/env/README.md +51 -0
- space/env/requirements.txt +6 -0
- space/training/Dockerfile +31 -0
- space/training/README.md +120 -0
- space/training/__init__.py +0 -0
- space/training/app.py +673 -0
- space/training/requirements.txt +31 -0
- tests/__init__.py +0 -0
- tests/conftest.py +13 -0
- tests/test_curriculum.py +75 -0
- tests/test_environment.py +163 -0
- tests/test_reward_components.py +151 -0
- tests/test_reward_hacking.py +189 -0
- tests/test_rewards.py +311 -0
- tests/test_rules_engine.py +160 -0
- tests/test_scenarios.py +60 -0
.dockerignore
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.venv
|
| 3 |
+
__pycache__
|
| 4 |
+
*.pyc
|
| 5 |
+
.pytest_cache
|
| 6 |
+
training/runs
|
| 7 |
+
training/grpo-output
|
| 8 |
+
training/rollouts
|
| 9 |
+
notebooks/.ipynb_checkpoints
|
.gitignore
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__/
|
| 2 |
+
*.pyc
|
| 3 |
+
.venv/
|
| 4 |
+
.env
|
| 5 |
+
.pytest_cache/
|
| 6 |
+
.coverage
|
| 7 |
+
htmlcov/
|
| 8 |
+
.DS_Store
|
| 9 |
+
training/runs/
|
| 10 |
+
training/grpo-output/
|
| 11 |
+
training/rollouts/
|
| 12 |
+
training/plots/
|
| 13 |
+
*.png
|
| 14 |
+
!docs/*.png
|
| 15 |
+
!assets/*.png
|
| 16 |
+
.ipynb_checkpoints/
|
| 17 |
+
.uv/
|
| 18 |
+
uv.lock
|
| 19 |
+
dist/
|
| 20 |
+
build/
|
| 21 |
+
*.egg-info/
|
.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.11
|
Dockerfile
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CERNenv trainer Space (Docker, A100)
|
| 2 |
+
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04
|
| 3 |
+
|
| 4 |
+
ENV DEBIAN_FRONTEND=noninteractive \
|
| 5 |
+
PYTHONUNBUFFERED=1 \
|
| 6 |
+
PIP_NO_CACHE_DIR=1 \
|
| 7 |
+
HF_HOME=/home/user/.cache/huggingface \
|
| 8 |
+
TRANSFORMERS_CACHE=/home/user/.cache/huggingface/transformers \
|
| 9 |
+
PYTHONPATH=/home/user/app
|
| 10 |
+
|
| 11 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 12 |
+
python3.11 python3.11-venv python3.11-dev python3-pip \
|
| 13 |
+
git curl ca-certificates build-essential \
|
| 14 |
+
&& rm -rf /var/lib/apt/lists/* \
|
| 15 |
+
&& ln -sf /usr/bin/python3.11 /usr/local/bin/python \
|
| 16 |
+
&& ln -sf /usr/bin/python3.11 /usr/local/bin/python3
|
| 17 |
+
|
| 18 |
+
RUN useradd -ms /bin/bash user
|
| 19 |
+
USER user
|
| 20 |
+
ENV PATH="/home/user/.local/bin:${PATH}"
|
| 21 |
+
WORKDIR /home/user/app
|
| 22 |
+
|
| 23 |
+
COPY --chown=user:user space/training/requirements.txt /tmp/requirements.txt
|
| 24 |
+
RUN python -m pip install --upgrade pip && \
|
| 25 |
+
python -m pip install --user -r /tmp/requirements.txt
|
| 26 |
+
|
| 27 |
+
COPY --chown=user:user . /home/user/app
|
| 28 |
+
|
| 29 |
+
EXPOSE 7860
|
| 30 |
+
|
| 31 |
+
CMD ["python", "-m", "uvicorn", "space.training.app:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
CHANGED
|
@@ -1,10 +1,120 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
|
|
|
|
|
|
| 7 |
pinned: false
|
|
|
|
|
|
|
| 8 |
---
|
| 9 |
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: CERNenv Trainer
|
| 3 |
+
emoji: ⚛️
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: pink
|
| 6 |
sdk: docker
|
| 7 |
+
suggested_hardware: a100x4
|
| 8 |
+
suggested_storage: medium
|
| 9 |
pinned: false
|
| 10 |
+
license: bsd-3-clause
|
| 11 |
+
short_description: GRPO trainer for CERNenv (Unsloth + LoRA, A100)
|
| 12 |
---
|
| 13 |
|
| 14 |
+
# CERNenv Trainer (Hugging Face Space, A100)
|
| 15 |
+
|
| 16 |
+
Fine-tunes a small instruction-tuned LLM (Large Language Model) to act as
|
| 17 |
+
an LHC (Large Hadron Collider) physicist inside the **CERNenv** OpenEnv
|
| 18 |
+
environment using **GRPO** (Group-Relative Policy Optimization),
|
| 19 |
+
**Unsloth**, and **LoRA** (Low-Rank Adaptation).
|
| 20 |
+
|
| 21 |
+
## Hardware
|
| 22 |
+
|
| 23 |
+
- Recommended: **4× A100 (`a100x4`, 320 GB VRAM, ~$10/hr)**
|
| 24 |
+
- Single GPU also supported: `a100-large` (slower, fewer episodes recommended)
|
| 25 |
+
- Minimum: T4 / L4 (use the Colab notebook fallback)
|
| 26 |
+
|
| 27 |
+
### Budget guidance (~$27 envelope, the default for this hackathon run)
|
| 28 |
+
|
| 29 |
+
A 1500-episode GRPO run with `MODEL_NAME=unsloth/Qwen2.5-3B-Instruct`,
|
| 30 |
+
`NUM_GENERATIONS=8`, `MAX_STEPS=18` typically lands as follows:
|
| 31 |
+
|
| 32 |
+
| Hardware | $/hr | Wall-clock | Cost (1× run) | Headroom in $27 |
|
| 33 |
+
| ------------ | ----- | ---------- | ------------- | --------------- |
|
| 34 |
+
| `a100x4` | ~$10 | ~1.5–2 h | ~$15–20 | 1 re-run |
|
| 35 |
+
| `a100-large` | ~$4 | ~2.5–3 h | ~$10–12 | 2+ re-runs |
|
| 36 |
+
| `l40sx4` | ~$8 | ~2 h | ~$16 | 1 re-run |
|
| 37 |
+
|
| 38 |
+
`a100x4` gets the trained adapters + evidence into your hands fastest; the
|
| 39 |
+
multi-GPU launcher (`accelerate launch --num_processes 4`) is already wired
|
| 40 |
+
in `_build_training_cmd`. If you want extra safety margin in case anything
|
| 41 |
+
needs a re-run, drop to `a100-large` — wall-clock is ~2× longer but cost
|
| 42 |
+
is ~50% lower, leaving you with budget for two complete attempts.
|
| 43 |
+
|
| 44 |
+
## Required Space secrets
|
| 45 |
+
| Secret | Purpose |
|
| 46 |
+
| --- | --- |
|
| 47 |
+
| `HF_TOKEN` | Hugging Face token with `write` access for model push |
|
| 48 |
+
| `HF_USERNAME` | Hub username, used as the default model-repo owner |
|
| 49 |
+
|
| 50 |
+
## Optional environment variables
|
| 51 |
+
| Variable | Default | Notes |
|
| 52 |
+
| --- | --- | --- |
|
| 53 |
+
| `MODEL_NAME` | `unsloth/Qwen2.5-3B-Instruct` | Any chat model Unsloth supports |
|
| 54 |
+
| `TOTAL_EPISODES` | `1500` | Prompts × generations rollouts |
|
| 55 |
+
| `DIFFICULTY` | `easy` | Starting tier when `CURRICULUM=1`; static tier when `CURRICULUM=0` |
|
| 56 |
+
| `CURRICULUM` | `1` | `1` enables easy→medium→hard prompt-ramp + adaptive eval-tier |
|
| 57 |
+
| `CURRICULUM_PROMOTE` | `0.55` | Held-out success rate that promotes the eval tier one step |
|
| 58 |
+
| `CURRICULUM_DEMOTE` | `0.10` | Rolling success rate that demotes the eval tier one step |
|
| 59 |
+
| `MAX_STEPS` | `18` | Max steps per episode |
|
| 60 |
+
| `NUM_GENERATIONS` | `8` | GRPO group size (bigger = better signal) |
|
| 61 |
+
| `NUM_GPUS` | auto-detected | `accelerate launch --num_processes` value |
|
| 62 |
+
| `CHECKPOINT_EVAL_STEPS` | `25` | Run a held-out eval every N updates |
|
| 63 |
+
| `CHECKPOINT_EVAL_EPISODES` | `8` | Episodes per mid-training eval |
|
| 64 |
+
| `EVAL_EPISODES` | `32` | Episodes for pre/post eval (statistical power) |
|
| 65 |
+
| `OUTPUT_DIR` | `runs/unsloth-grpo` | LoRA adapter output |
|
| 66 |
+
| `EVIDENCE_DIR` | `evidence` | Where curves, CSVs, plots are written |
|
| 67 |
+
| `PUSH_REPO` | `${HF_USERNAME}/cernenv-grpo-qwen2.5-3b` | Hub repo for adapters + evidence |
|
| 68 |
+
| `AUTOSTART` | `0` | Set to `1` to start training on Space boot |
|
| 69 |
+
|
| 70 |
+
## How to use
|
| 71 |
+
|
| 72 |
+
This Space exposes a tiny FastAPI control panel:
|
| 73 |
+
- `GET /` — status + run info + **live training-progress evidence** (curves, before/after metrics, plots)
|
| 74 |
+
- `POST /train` — start / restart a training run
|
| 75 |
+
- `GET /logs?tail=N` — live tail of `training.log`
|
| 76 |
+
- `GET /metrics` — pre / post / Δ metrics JSON
|
| 77 |
+
- `GET /evidence` — list of evidence artifacts on disk
|
| 78 |
+
- `GET /evidence/{name}` — download an artifact (`training_curve.png`, `training_log.csv`, etc.)
|
| 79 |
+
|
| 80 |
+
### Training-progress evidence saved (and pushed to Hub)
|
| 81 |
+
- `training_log.csv` — per-step reward, loss, KL, lr, grad-norm
|
| 82 |
+
- `training_curve.png` — reward + loss vs step
|
| 83 |
+
- `reward_components.csv` — per-rollout terminal vs shaping reward, plus
|
| 84 |
+
discovery / mass / channel / parsed-action rates per logging step.
|
| 85 |
+
This is the "watch individual reward function columns" view recommended
|
| 86 |
+
in the hackathon FAQ — it makes verifier hacks visible (rising mean
|
| 87 |
+
reward without rising mass/channel correctness ⇒ red flag).
|
| 88 |
+
- `reward_components.png` — 2-panel plot rendered from the above CSV
|
| 89 |
+
- `checkpoint_evals.csv` — held-out eval every `CHECKPOINT_EVAL_STEPS` updates
|
| 90 |
+
- `checkpoint_progression.png` — mean reward + success/mass/channel accuracy vs step
|
| 91 |
+
- `pre_eval.jsonl` / `post_eval.jsonl` — full per-episode rollouts before vs after
|
| 92 |
+
- `before_after_summary.png` — pre/post bar chart with Δ annotations
|
| 93 |
+
- `reward_distribution.png` — pre vs post reward histogram
|
| 94 |
+
- `before_after_metrics.json` — machine-readable metrics + deltas
|
| 95 |
+
- `sample_trajectories.md` — cherry-picked pre vs post agent traces
|
| 96 |
+
- `curriculum_state.json` — adaptive-curriculum tier/promotion log
|
| 97 |
+
|
| 98 |
+
Click **"Start training"** in the UI, or set `AUTOSTART=1` in the Space variables to kick off immediately on boot.
|
| 99 |
+
|
| 100 |
+
When training finishes, the LoRA adapters are pushed to `PUSH_REPO`.
|
| 101 |
+
|
| 102 |
+
## Local equivalent
|
| 103 |
+
|
| 104 |
+
The same training run is reproducible locally with:
|
| 105 |
+
|
| 106 |
+
```bash
|
| 107 |
+
# single GPU (with curriculum)
|
| 108 |
+
PYTHONPATH=. python -m training.training_unsloth \
|
| 109 |
+
--model_name unsloth/Qwen2.5-3B-Instruct \
|
| 110 |
+
--difficulty easy --curriculum --total_episodes 1500 --max_steps 18 \
|
| 111 |
+
--num_generations 8 --output_dir runs/unsloth-grpo \
|
| 112 |
+
--evidence_dir evidence
|
| 113 |
+
|
| 114 |
+
# multi-GPU (e.g. 4× A100, with curriculum)
|
| 115 |
+
PYTHONPATH=. accelerate launch --num_processes 4 --mixed_precision bf16 \
|
| 116 |
+
-m training.training_unsloth \
|
| 117 |
+
--difficulty easy --curriculum \
|
| 118 |
+
--total_episodes 1500 --num_generations 8 \
|
| 119 |
+
--output_dir runs/unsloth-grpo --evidence_dir evidence
|
| 120 |
+
```
|
[External] Apr ‘26 OpenEnv Hackathon Themes & Judging Criteria.txt
ADDED
|
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Theme #1 - Multi-Agent Interactions
|
| 2 |
+
Environments for this theme involve cooperation, competition, negotiation, and coalition formation. Learning from these environments will enable agents to model the beliefs and incentives of others in partially observable settings. This drives theory-of-mind reasoning and emergent strategic behavior.
|
| 3 |
+
Expected Outcome: an environment that can be used to train multi-agent task handling in a LLM
|
| 4 |
+
Example environments: Market simulations, compute-allocation negotiations, collaborative puzzle worlds, mixed cooperative/competitive strategy games.
|
| 5 |
+
Theme #2 - (Super) Long-Horizon Planning & Instruction Following
|
| 6 |
+
You will build environments that require deep, multi-step reasoning with sparse or delayed rewards. After using these environments, the goal is to enable agents to decompose goals, track state over extended trajectories, and recover from early mistakes. The aim is to push beyond shallow next-token reasoning toward structured planning and durable internal representations.
|
| 7 |
+
Expected Outcome: an environment that can capture and improve LLM behaviour on challenging long horizon tasks that need long running sessions beyond context memory limits.
|
| 8 |
+
Example environments: (Think of OpenClaw workflows with Multi-turn tasks). Research-planning simulators, large-scale codebase refactoring tasks, strategic resource management worlds, long-horizon logistics optimization, extremely complicated long-horizon instruction following (e.g., 300 instructions scattered around).
|
| 9 |
+
Theme #3 - World Modeling
|
| 10 |
+
#3.1 Professional Tasks
|
| 11 |
+
Here you will develop environments that require real interaction with tools, APIs, or dynamic systems where the model is expected to do real hard work instead of exploiting short-cuts to arrive at the desired outcome. Learning from these environments will enable agents to maintain consistent internal state, update beliefs based on outcomes, and orchestrate multi-step workflows. The goal is to strengthen causal reasoning and persistent world models.
|
| 12 |
+
Expected Outcome: an environment capturing nuances of a defined partially observable world and improve LLM interaction with it
|
| 13 |
+
Example environments: Dynamic browser/API ecosystems, enterprise applications, scientific workflow loops (papers → code → experiments), economic simulations with feedback, tool-discovery benchmarks.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
#3.2 Personalized Tasks
|
| 17 |
+
Here we will develop an environment that offers real personalized task handling, imagine replying to personal messages or handling dinner conflicts due to work conflicts, replying to tough emails. Think any personal assistant tasks
|
| 18 |
+
|
| 19 |
+
Expected Outcome: An environment that gives the model a realistic simulation of handling personal tasks, conflicts and managing them as delegations
|
| 20 |
+
|
| 21 |
+
Example environments: Executive Assistant Meeting Planner, Dinner and drive planning, email and message replying, shopping, etc
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
Theme #4 - Self-Improvement
|
| 25 |
+
The focus here is to create environments where agents can learn to generate new challenges, escalate difficulty, and improve through self-play or adaptive curricula. Rather than optimizing fixed tasks, the goal is for agents to learn to drive their own capability growth. The objective is recursive skill amplification.
|
| 26 |
+
Expected Outcome: an environment for improving self-play of a LLM over a defined set of tasks
|
| 27 |
+
Example environments: Self-play negotiation arenas, auto-generated math/proof tasks, evolving coding competitions, adaptive RL curricula.
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
Theme #5: Wild Card - Impress Us!
|
| 31 |
+
We do not want to limit your focus if your idea doesn’t fit the boxes above, we want and WILL reward out of box tasks, please be creative but remember to add submissions that meaningfully add value to LLM training on a certain task.
|
| 32 |
+
Guidelines for Problem Statement
|
| 33 |
+
* It is NOT mandatory to choose the same problem statement as Round 1. Only choose the same problem statement if it aligns with the above provided Hackathon themes.
|
| 34 |
+
* You can start working on your problem statement once you have finalized it. Post-training can be done onsite on 25th & 26th when you receive compute credits for HuggingFace.
|
| 35 |
+
* Before the onsite, we suggest you work on building the environment, agent behaviours, reward model and evaluate if your work aligns with the judging criteria given below.
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
Judging Criteria
|
| 41 |
+
Minimum requirements:
|
| 42 |
+
* Usage of OpenEnv (latest release)
|
| 43 |
+
* Show a minimal training script for your environment using Unsloth or HF TRL in Colab
|
| 44 |
+
* Write a mini-blog on HuggingFace or mini-video on YouTube talking about your submission, <2 minutes
|
| 45 |
+
* Your OpenEnv compliant environment should be hosted on Hugging Face Spaces.
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
Judging Overview
|
| 49 |
+
* Evaluation: Teams will be scored based on the following criteria:
|
| 50 |
+
1. Environment Innovation (40%): Is the environment novel, creative, or challenging? Does it meaningfully test the agent’s behavior?
|
| 51 |
+
2. Storytelling (30%): Does the team clearly explain the problem, environment, and agent behavior? Is the demo engaging and easy to follow?
|
| 52 |
+
3. Showing Improvement in Rewards (20%): Does the demo provide observable evidence of training progress (reward curves, metrics, or before/after behavior)?
|
| 53 |
+
4. Reward and Training Script/Pipeline Setup (10%): Is the reward logic coherent, and does the pipeline produce meaningful improvement in the agent’s inference (how it acts in the environment)?
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
OpenEnv Hackathon - What Judges Look For
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
This guide tells you what makes a strong submission for the OpenEnv Hackathon (India 2026).
|
| 60 |
+
Read it before you start building, and again before you submit.
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
For the list of themes and example problems, refer to the top sections.
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
NOTE: Please remember only one submission per team. If you have multiple ideas, pick the best one and go for it. Please make sure that the URL link of your environment is submitted as judges will pull the environment from the URL to evaluate it. Changes or commits after the submission deadline will not be considered.
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
TL;DR
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
Build an environment that an LLM could actually be trained on to get measurably better at
|
| 73 |
+
something interesting. Then show that training. Then tell the story.
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
A messy but ambitious environment with real training evidence beats a polished but boring one.
|
| 77 |
+
Pick a problem that excites you (that energy comes through in the pitch).
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
Judging Criteria
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
Criterion: Environment Innovation
|
| 84 |
+
Weight: 40%
|
| 85 |
+
What it means:
|
| 86 |
+
Is the environment novel, creative, or genuinely challenging?
|
| 87 |
+
Does it meaningfully test agent behavior in a way that hasn't been done before?
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
Criterion: Storytelling & Presentation
|
| 91 |
+
Weight: 30%
|
| 92 |
+
What it means:
|
| 93 |
+
Can you clearly explain the problem, the environment, and what the agent learned?
|
| 94 |
+
Is the demo engaging and easy to follow for a non-technical audience?
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
Criterion: Showing Improvement in Rewards
|
| 98 |
+
Weight: 20%
|
| 99 |
+
What it means:
|
| 100 |
+
Is there observable evidence of training progress? Reward curves, before/after behavior,
|
| 101 |
+
comparison against a baseline -- anything that proves the agent learned something.
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
Criterion: Reward & Training Pipeline
|
| 105 |
+
Weight: 10%
|
| 106 |
+
What it means:
|
| 107 |
+
Is the reward logic coherent? Does the pipeline produce meaningful improvement in the trained
|
| 108 |
+
agent's behavior?
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
Minimum Submission Requirements
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
NOTE: These are non-negotiable. Submissions missing any of these are at a serious disadvantage.
|
| 115 |
+
* Use OpenEnv (latest release). Build on top of the framework; don’t reinvent the wheel.
|
| 116 |
+
* A working training script using Unsloth or Hugging Face TRL, ideally as a Colab notebook so judges can re-run it.
|
| 117 |
+
* Evidence that you actually trained; at minimum, loss and reward plots from a real run.
|
| 118 |
+
* A short writeup: a mini-blog on Hugging Face or a < 2 minute video on YouTube explaining what your environment does and what you trained, or a short slide deck of presentation. Please make sure that all materials are linked from your README file so that judges can access them easily.
|
| 119 |
+
* Push your environment to a Hugging Face Space so it’s discoverable and runnable.
|
| 120 |
+
* A README that motivates the problem, explains how the env works, and shows results.
|
| 121 |
+
* README should have a link to the environment in the Hugging Face Space. It should also have all additional references to other materials (e.g. videos, blog posts, slides, presentations, etc.) that you want to include.
|
| 122 |
+
* Please do not include big video files in your Env submission on HF Hub as we would like to have a small size for each env (Please use url as reference link to additional materials).
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
What Makes a Submission Stand Out
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
Pick an ambitious, original problem
|
| 129 |
+
The themes (problems) are deliberately open. Use them as launching pads, not boxes. Judges have seen a lot of chess, snake, tic-tac-toe, and grid-world clones. To score well on innovation,
|
| 130 |
+
you need a genuinely fresh angle. Some questions to ask yourself:
|
| 131 |
+
* Does this environment exist to teach an LLM something it currently can’t do well?
|
| 132 |
+
* Is the domain underexplored in RL/LLM training?
|
| 133 |
+
* Could a researcher write a paper about training on this?
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
Design a reward signal that actually teaches
|
| 137 |
+
A great environment has a reward function that:
|
| 138 |
+
* Provides a rich, informative signal (not just 0/1 at the end)
|
| 139 |
+
* Captures something hard to measure in a clever way
|
| 140 |
+
* Uses OpenEnv’s Rubric system thoughtfully (composable rubrics > monolithic scoring)
|
| 141 |
+
* Is hard to game; an agent that exploits the reward without solving the task should not get high scores
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
Show real training, end to end
|
| 145 |
+
The bar isn’t “training script exists.” The bar is “training script runs against the environment, the
|
| 146 |
+
agent learns, and you can show it.” Concretely:
|
| 147 |
+
* Your training loop should connect to your environment (not a static dataset)
|
| 148 |
+
* Train long enough that the curves mean something
|
| 149 |
+
* Compare a trained agent vs. a random/untrained baseline; quantitative and/or qualitative
|
| 150 |
+
* Include the plots and numbers in your README and writeup
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
Make your plots readable
|
| 154 |
+
Reviewers spend seconds, not minutes, on each plot. Help them out:
|
| 155 |
+
* Label both axes (e.g. “training step” / “episode” on x, “reward” / “loss” on y) and include units where they apply
|
| 156 |
+
* Save plots as .png or .jpg and commit them to the repo (don’t leave them only in a Colab cell or a deleted Wandb run) (if you ran via Wandb, please include the link to that specific run of your plots)
|
| 157 |
+
* Embed the key plots in your README with a one-line caption explaining what each one shows If you have multiple runs (baseline vs. trained, ablations, etc.), put them on the same axes so the comparison is obvious
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
Tell a story, not an API doc
|
| 161 |
+
Your README, blog, and pitch should answer:
|
| 162 |
+
1. Problem) what capability gap or interesting domain are you targeting?
|
| 163 |
+
2. Environment) what does the agent see, do, and get rewarded for?
|
| 164 |
+
3. Results) what changed after training? Show it.
|
| 165 |
+
4. Why does it matter) who would care, and why?
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
A reviewer should be able to read your README in 3~5 minutes and want to try your
|
| 169 |
+
environment.
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
NOTE: If you have a video, HF post, or anything else interesting, please make sure that it’s linked
|
| 173 |
+
from your README as a link.
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
Engineer it cleanly (table stakes)
|
| 177 |
+
Engineering quality matters less than ambition, but sloppy work hurts. Make sure you:
|
| 178 |
+
* Use OpenEnv’s Environment / MCPEnvironment base classes properly
|
| 179 |
+
* Respect the client / server separation (clients should never import server internals)
|
| 180 |
+
* Follow the standard Gym-style API (reset, step, state)
|
| 181 |
+
* Have a valid openenv.yaml manifest
|
| 182 |
+
* Don’t use reserved tool names (reset, step, state, close) for MCP tools
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
Final Note
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
Judges are looking for environments that push the frontier of what we can train LLMs to do. Be
|
| 189 |
+
ambitious. Pick a problem you find genuinely interesting; that almost always produces better
|
| 190 |
+
work than chasing what you think judges want. Good luck.
|
[External] Meta OpenEnv Hackathon Participant Help Guide.txt
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Hackathon Self-Serve Guide: Build an RL Environment, Train an LLM, Ship a Demo
|
| 2 |
+
0) What you are building
|
| 3 |
+
The core idea is not just to fine-tune a text model, but to build a specialized LLM system that can act inside an environment, get feedback, and improve through reinforcement learning. The practical stack discussed here is:
|
| 4 |
+
Environment → verifier/reward functions → TRL trainer → Unsloth for efficiency → deployment on OpenEnv / Spaces.
|
| 5 |
+
A strong project usually looks like one of these,
|
| 6 |
+
Please refer to [External] Apr ‘26 OpenEnv Hackathon Themes for theme guidelines on selecting & forming problem statements.
|
| 7 |
+
1) Start with the right project idea
|
| 8 |
+
Pick a task that has all three of these properties:
|
| 9 |
+
1. The model can act step by step
|
| 10 |
+
2. You can verify success programmatically
|
| 11 |
+
3. The task is hard enough to be interesting, but not so hard that the model never succeeds
|
| 12 |
+
This last point matters a lot. RL only works if the probability of getting a good answer is greater than zero. If your task is so hard that the model never gets any reward, you will burn compute and learn nothing.
|
| 13 |
+
Please refer to [External] Apr ‘26 OpenEnv Hackathon Themes for theme guidelines on selecting & forming problem statements.
|
| 14 |
+
A useful rule: prefer tasks with crisp verification over tasks that only “look good” to a human. RL gets easier when the reward is objective.
|
| 15 |
+
2) Understand the minimum RL loop before you build
|
| 16 |
+
At a high level, your loop is:
|
| 17 |
+
1. Give the model a prompt
|
| 18 |
+
2. Let it generate an action, strategy, answer, or code
|
| 19 |
+
3. Execute that output in an environment or verifier
|
| 20 |
+
4. Convert the result into a reward
|
| 21 |
+
5. Update the model so higher-reward behavior becomes more likely
|
| 22 |
+
That is the practical mental model for RL here. The system samples many outputs, scores them, and shifts probability mass away from bad outputs and toward better ones.
|
| 23 |
+
One especially useful framing is that RL is like a more efficient version of repeated in-context improvement. Instead of repeatedly stuffing previous examples into the context, you let backpropagation store what worked into the weights.
|
| 24 |
+
3) Decide whether you need SFT first
|
| 25 |
+
Use this simple rule:
|
| 26 |
+
* If you have a lot of good data, use SFT
|
| 27 |
+
* If you do not have data but can verify outputs, use RL
|
| 28 |
+
* In many practical cases, do a little SFT first, then RL
|
| 29 |
+
Why this matters:
|
| 30 |
+
* SFT is generally more sample-efficient
|
| 31 |
+
* RL is useful when you can test outcomes but cannot cheaply author ideal traces
|
| 32 |
+
* RL often needs some warm start, formatting priming, or easy tasks first so that good rollouts happen at all
|
| 33 |
+
For hackathon teams, the best path is usually:
|
| 34 |
+
1. Start from a capable base/instruct model
|
| 35 |
+
2. Add light formatting or task scaffolding if needed
|
| 36 |
+
3. Use RL for improvement, not as magic from scratch
|
| 37 |
+
4) Design the environment before you design the trainer
|
| 38 |
+
Treat the environment as a first-class artifact. It should define:
|
| 39 |
+
* reset(): start a fresh episode
|
| 40 |
+
* step(action): apply an action and return the next result
|
| 41 |
+
* state() / observation: what the agent sees
|
| 42 |
+
* reward: what counts as progress or success
|
| 43 |
+
OpenEnv standardizes this so the same training code can work across many environments, instead of every team inventing a different API. That is one of the main reasons to use it in a hackathon.
|
| 44 |
+
Think about your environment in this order:
|
| 45 |
+
1. What does the agent observe?
|
| 46 |
+
2. What actions can it take?
|
| 47 |
+
3. What ends an episode?
|
| 48 |
+
4. How do you compute reward?
|
| 49 |
+
5. How do you stop abuse, infinite loops, or cheating?
|
| 50 |
+
5) Build the environment using OpenEnv
|
| 51 |
+
The intended workflow is to bootstrap an environment skeleton and then fill in the behavior. OpenEnv’s CLI creates the scaffolding for you. The environment is implemented as a Python package and exposed via a FastAPI app.
|
| 52 |
+
Your implementation typically defines:
|
| 53 |
+
* action dataclass
|
| 54 |
+
* observation dataclass
|
| 55 |
+
* state representation
|
| 56 |
+
* environment methods like reset and step
|
| 57 |
+
* FastAPI wrapper / client-server interface
|
| 58 |
+
That gives you a clean separation:
|
| 59 |
+
* the environment handles world dynamics and scoring,
|
| 60 |
+
* the trainer handles optimization,
|
| 61 |
+
* and the model just learns to act inside the interface.
|
| 62 |
+
6) Keep the task simple at first
|
| 63 |
+
Do not begin with your hardest benchmark. Start with the easiest version of your environment that still proves the concept. This is where curriculum learning helps.
|
| 64 |
+
A good progression:
|
| 65 |
+
1. easy tasks with short horizons,
|
| 66 |
+
2. medium tasks with a little more branching,
|
| 67 |
+
3. harder tasks only after the model starts getting non-zero reward.
|
| 68 |
+
The principle is simple: make success possible early. If the model never sees successful trajectories, learning stalls.
|
| 69 |
+
7) Design rewards carefully
|
| 70 |
+
Your reward function is your task specification. If it is weak, incomplete, or easy to exploit, the model will optimize the wrong thing very efficiently.
|
| 71 |
+
A strong reward design usually includes multiple components, for example:
|
| 72 |
+
* execution success,
|
| 73 |
+
* correctness,
|
| 74 |
+
* format compliance,
|
| 75 |
+
* timeouts,
|
| 76 |
+
* resource usage,
|
| 77 |
+
* safety constraints,
|
| 78 |
+
* and anti-cheating checks.
|
| 79 |
+
One explicit recommendation was to use multiple independent reward functions, not just one. If you only have a single reward signal, it is easier for the model to hack it. Multiple independent checks reduce that risk.
|
| 80 |
+
For example, for a coding environment:
|
| 81 |
+
* reward passing tests,
|
| 82 |
+
* penalize timeouts,
|
| 83 |
+
* reward format compliance,
|
| 84 |
+
* reject use of forbidden globals,
|
| 85 |
+
* and separately verify the function contract.
|
| 86 |
+
8) Protect yourself against reward hacking
|
| 87 |
+
Reward hacking is one of the biggest practical failure modes. The model may learn shortcuts that maximize your reward without solving the real task. Examples mentioned include:
|
| 88 |
+
* editing timers,
|
| 89 |
+
* caching results,
|
| 90 |
+
* abusing globals,
|
| 91 |
+
* mutating protected state,
|
| 92 |
+
* or exploiting environment bugs.
|
| 93 |
+
What to do:
|
| 94 |
+
1. Use multiple independent reward functions
|
| 95 |
+
2. Lock down execution where possible
|
| 96 |
+
3. Add time limits
|
| 97 |
+
4. Avoid unrestricted global state
|
| 98 |
+
5. Sample outputs frequently and inspect them
|
| 99 |
+
6. Terminate or roll back runs if behavior drifts badly
|
| 100 |
+
A particularly practical recommendation was to use a locked-down function or restricted execution approach so the model cannot rely on undeclared globals or hidden cached state.
|
| 101 |
+
Also, do not just let training run forever without checking generations. Periodic human inspection is still necessary.
|
| 102 |
+
9) Use process-aware feedback when you can
|
| 103 |
+
Naively assigning the same final reward to every token is inefficient. If possible, use richer supervision that distinguishes good intermediate steps from bad ones. That is the idea behind process supervision.
|
| 104 |
+
In practice, this can be approximated by:
|
| 105 |
+
* line-by-line checks,
|
| 106 |
+
* step-level verifiers,
|
| 107 |
+
* program trace analysis,
|
| 108 |
+
* or LLM-as-a-judge for intermediate reasoning.
|
| 109 |
+
But be careful: LLM-as-a-judge can itself be gamed. Use it as one signal, not the only signal.
|
| 110 |
+
For a hackathon, outcome-based verification plus a few lightweight process checks is usually the sweet spot.
|
| 111 |
+
10) Pick the right training stack
|
| 112 |
+
The intended stack here is:
|
| 113 |
+
* TRL for RL training algorithms
|
| 114 |
+
* Unsloth to make RL training and inference more efficient
|
| 115 |
+
* OpenEnv to standardize environment interaction
|
| 116 |
+
This combination works because:
|
| 117 |
+
* OpenEnv gives you a common environment interface
|
| 118 |
+
* TRL gives you RL trainers like GRPO
|
| 119 |
+
* Unsloth reduces memory use and improves efficiency on top of TRL
|
| 120 |
+
One of the practical examples used the same prompt repeated many times, routed through an environment, with TRL driving training and Unsloth helping with performance.
|
| 121 |
+
11) Prefer GRPO / RLVR style training for verifiable tasks
|
| 122 |
+
The RL setup discussed here leans toward RL with verifiable rewards:
|
| 123 |
+
* instead of a learned reward model,
|
| 124 |
+
* use a verifier, test harness, regex check, executor, or environment.
|
| 125 |
+
GRPO was described as a more efficient evolution relative to older PPO-style setups, especially by simplifying away parts like the value model.
|
| 126 |
+
For hackathon purposes, the key practical takeaway is:
|
| 127 |
+
* if the task is verifiable,
|
| 128 |
+
* build the verifier first,
|
| 129 |
+
* then plug that verifier into RL training.
|
| 130 |
+
12) Keep inference fast
|
| 131 |
+
One important point: in RL for LLMs, inference can dominate total runtime. Over time, rollout generation often becomes the bottleneck, not the optimizer step.
|
| 132 |
+
That means your project speed depends heavily on:
|
| 133 |
+
* fast sampling,
|
| 134 |
+
* tight environment loops,
|
| 135 |
+
* low-overhead execution,
|
| 136 |
+
* and efficient model runtime.
|
| 137 |
+
This is one reason Unsloth matters in the stack, and another reason to avoid overly heavy environments early in the hackathon.
|
| 138 |
+
13) Deploy your environment early
|
| 139 |
+
OpenEnv environments are designed to be deployed as Hugging Face Spaces, which provide:
|
| 140 |
+
* a running server,
|
| 141 |
+
* a Git repository,
|
| 142 |
+
* and a container registry.
|
| 143 |
+
That gives you several ways to work:
|
| 144 |
+
* interact with the remote Space directly,
|
| 145 |
+
* install the client code from the repo,
|
| 146 |
+
* pull and run the container locally,
|
| 147 |
+
* or run the FastAPI app locally via Python/Uvicorn.
|
| 148 |
+
Why this is good for a hackathon:
|
| 149 |
+
* one shared source of truth,
|
| 150 |
+
* easier collaboration,
|
| 151 |
+
* easier demos,
|
| 152 |
+
* easier switching between local and remote execution.
|
| 153 |
+
A good habit is to deploy an early version of the environment before training seriously. That catches API and packaging issues early.
|
| 154 |
+
14) Scale only after the environment is stable
|
| 155 |
+
There was a dedicated tutorial flow around:
|
| 156 |
+
1. environment,
|
| 157 |
+
2. deployment,
|
| 158 |
+
3. scaling,
|
| 159 |
+
4. training with TRL and Wordle.
|
| 160 |
+
Follow the same order.
|
| 161 |
+
Do not start with scale. First confirm:
|
| 162 |
+
* reset works,
|
| 163 |
+
* step works,
|
| 164 |
+
* rewards are sensible,
|
| 165 |
+
* timeouts work,
|
| 166 |
+
* logs are visible,
|
| 167 |
+
* and the environment can be run locally and remotely.
|
| 168 |
+
Only then:
|
| 169 |
+
* increase batch sizes,
|
| 170 |
+
* duplicate prompts or tasks,
|
| 171 |
+
* expand task diversity,
|
| 172 |
+
* and benchmark throughput.
|
| 173 |
+
15) Monitor the right things during training
|
| 174 |
+
Do not watch only one scalar. Monitor:
|
| 175 |
+
* overall reward,
|
| 176 |
+
* individual reward function columns,
|
| 177 |
+
* success indicators,
|
| 178 |
+
* timeout frequency,
|
| 179 |
+
* and generated strategies over time.
|
| 180 |
+
A very concrete suggestion was:
|
| 181 |
+
* watch whether the reward is going up,
|
| 182 |
+
* and separately watch critical columns like “function works.”
|
| 183 |
+
Also inspect actual generations during training. A rising reward is not enough if the model is learning to exploit bugs.
|
| 184 |
+
16) Save models correctly
|
| 185 |
+
If you use QLoRA / LoRA-style training, be careful when saving. One explicit warning was:
|
| 186 |
+
Do not upcast a 4-bit model to 16-bit and then merge the LoRA weights naively. That can badly damage model quality. Instead, use the proper merged-save path, or use the adapters directly.
|
| 187 |
+
For participants, that means:
|
| 188 |
+
* keep your training save path simple,
|
| 189 |
+
* test post-training inference immediately,
|
| 190 |
+
* and do not leave export until the end.
|
| 191 |
+
17) How to structure your team over the hackathon
|
| 192 |
+
A very effective team split is:
|
| 193 |
+
Person A: Environment
|
| 194 |
+
* builds reset/step/state
|
| 195 |
+
* adds timeouts and safety constraints
|
| 196 |
+
* makes local and remote execution work
|
| 197 |
+
Person B: Verifier / Rewards
|
| 198 |
+
* writes multiple reward functions
|
| 199 |
+
* adds anti-hacking checks
|
| 200 |
+
* makes failure cases visible
|
| 201 |
+
Person C: Training
|
| 202 |
+
* sets up TRL + Unsloth
|
| 203 |
+
* runs experiments
|
| 204 |
+
* tracks metrics and generations
|
| 205 |
+
Person D: Demo / Product
|
| 206 |
+
* prepares the Space demo
|
| 207 |
+
* creates a simple interface
|
| 208 |
+
* records examples and final benchmarks
|
| 209 |
+
This split matches the way the stack naturally decomposes in practice.
|
| 210 |
+
18) A practical 1-day execution plan
|
| 211 |
+
Phase 1: Pick a narrow task
|
| 212 |
+
Choose a small, verifiable environment. Avoid huge long-horizon tasks first.
|
| 213 |
+
Phase 2: Build the environment
|
| 214 |
+
Use OpenEnv init, implement reset/step/state, and get a local loop working.
|
| 215 |
+
Phase 3: Build rewards
|
| 216 |
+
Add at least 2–4 independent reward checks, plus timeout and anti-cheat logic.
|
| 217 |
+
Phase 4: Deploy
|
| 218 |
+
Push to a Space or run locally via container/Uvicorn so teammates can use the same environment.
|
| 219 |
+
Phase 5: Train small
|
| 220 |
+
Run a tiny TRL + Unsloth experiment first. Look at outputs, not just metrics.
|
| 221 |
+
Phase 6: Inspect for hacking
|
| 222 |
+
Sample generations. Check for globals, hacks, environment abuse, or suspicious shortcuts.
|
| 223 |
+
Phase 7: Add curriculum
|
| 224 |
+
If the model gets zero reward too often, simplify tasks or add easier start states.
|
| 225 |
+
Phase 8: Train bigger
|
| 226 |
+
Only after the loop is stable should you increase scale, batch size, or environment diversity.
|
| 227 |
+
Phase 9: Save and demo
|
| 228 |
+
Export the trained model correctly, test inference, and show before/after behavior.
|
| 229 |
+
19) What judges or reviewers will likely find compelling
|
| 230 |
+
The strongest hackathon projects usually show:
|
| 231 |
+
* a clear environment design,
|
| 232 |
+
* objective reward functions,
|
| 233 |
+
* evidence that the model improved,
|
| 234 |
+
* prevention against reward hacking,
|
| 235 |
+
* a reproducible deployment story,
|
| 236 |
+
* and a sharp demo.
|
| 237 |
+
A simple but strong demo format is:
|
| 238 |
+
1. baseline model attempt,
|
| 239 |
+
2. reward/verifier output,
|
| 240 |
+
3. trained model attempt,
|
| 241 |
+
4. measurable improvement,
|
| 242 |
+
5. short explanation of safeguards.
|
| 243 |
+
20) Suggested problem statement theme directions
|
| 244 |
+
Please Refer to [External] Apr ‘26 OpenEnv Hackathon Themes
|
| 245 |
+
21) Common mistakes to avoid
|
| 246 |
+
* Picking a task so hard that success probability is zero
|
| 247 |
+
* Using only one reward function
|
| 248 |
+
* Not checking for reward hacking
|
| 249 |
+
* Training before the environment is stable
|
| 250 |
+
* Relying only on average reward and not inspecting outputs
|
| 251 |
+
* Forgetting timeouts and sandbox limits
|
| 252 |
+
* Saving LoRA/QLoRA models incorrectly
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
22) Learning Resources
|
| 256 |
+
|
| 257 |
+
|
| 258 |
+
(Recommended) RL Environment Lecture Chapters:
|
| 259 |
+
RL Mega Lecture
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
Module 1: Why OpenEnv? (~7 min)
|
| 265 |
+
▸ Workshop 8:02–15:05 — https://www.youtube.com/watch?v=1jU05MlENOI&t=482s
|
| 266 |
+
▸ Sanyam: RL loop, fragmented env APIs, OpenEnv as universal interface, Gymnasium spec + Docker
|
| 267 |
+
▸ Alt: Mega Lecture 40:01–46:00 — https://www.youtube.com/watch?v=Jew4lhAiqnw&t=2401s
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
Module 2: Using Existing Envs (~7.5 min)
|
| 271 |
+
▸ Workshop 35:33–43:05 — https://www.youtube.com/watch?v=1jU05MlENOI&t=2133s
|
| 272 |
+
▸ Ben: Hub org, env collections, 3 Space interfaces (server/repo/registry), from_hub
|
| 273 |
+
▸ Alt: Mega Lecture 1:24:11–1:30:00 — https://www.youtube.com/watch?v=Jew4lhAiqnw&t=5051s
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
Module 3: Deploying Envs (~9 min)
|
| 277 |
+
▸ Mega Lecture 1:30:00–1:39:07 — https://www.youtube.com/watch?v=Jew4lhAiqnw&t=5400s
|
| 278 |
+
▸ Ben: live openenv init, scaffold, running locally, openenv push, Docker run from Space
|
| 279 |
+
▸ Alt: Workshop 43:05–48:30 — https://www.youtube.com/watch?v=1jU05MlENOI&t=2585s
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
Module 4: Building Your Own (~6.5 min)
|
| 283 |
+
▸ Workshop 43:45–50:20 — https://www.youtube.com/watch?v=1jU05MlENOI&t=2625s
|
| 284 |
+
▸ Ben: scaffold files, business logic (reset/step), models, client, publishing
|
| 285 |
+
▸ Alt: Mega Lecture 1:33:30–1:39:07 — https://www.youtube.com/watch?v=Jew4lhAiqnw&t=5610s
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
Module 5: Training + TRL (~14 min)
|
| 289 |
+
▸ Mega Lecture 1:53:20–2:07:12 — https://www.youtube.com/watch?v=Jew4lhAiqnw&t=6800s
|
| 290 |
+
▸ Lewis: Wordle GRPO walkthrough — rollout function, reward shaping, GRPOTrainer, live training
|
| 291 |
+
▸ Alt: Workshop 22:24–34:12 — https://www.youtube.com/watch?v=1jU05MlENOI&t=1344s
|
client.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""WebSocket client for CERNenv.
|
| 2 |
+
|
| 3 |
+
Wraps OpenEnv's ``EnvClient`` so users can ``await client.reset()`` and
|
| 4 |
+
``await client.step(action)`` against a running CERNenv server.
|
| 5 |
+
|
| 6 |
+
Only public schemas from ``models`` are imported here — by design this
|
| 7 |
+
client never reaches into ``server.*`` internals so it can be installed
|
| 8 |
+
on the consumer side without pulling the simulator code.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
from typing import Any, Dict
|
| 14 |
+
|
| 15 |
+
from openenv.core import EnvClient
|
| 16 |
+
from openenv.core.client_types import StepResult
|
| 17 |
+
|
| 18 |
+
from models import CernState, CollisionObservation, ExperimentAction
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class CernEnv(EnvClient[ExperimentAction, CollisionObservation, CernState]):
|
| 22 |
+
"""Async WebSocket client for the CERN environment."""
|
| 23 |
+
|
| 24 |
+
def _step_payload(self, action: ExperimentAction) -> Dict[str, Any]:
|
| 25 |
+
return action.model_dump()
|
| 26 |
+
|
| 27 |
+
def _parse_result(self, payload: Dict[str, Any]) -> StepResult[CollisionObservation]:
|
| 28 |
+
obs_data = payload.get("observation", payload)
|
| 29 |
+
observation = CollisionObservation(**obs_data)
|
| 30 |
+
return StepResult(
|
| 31 |
+
observation=observation,
|
| 32 |
+
reward=payload.get("reward", observation.reward),
|
| 33 |
+
done=payload.get("done", observation.done),
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
def _parse_state(self, payload: Dict[str, Any]) -> CernState:
|
| 37 |
+
return CernState(**payload)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
__all__ = ["CernEnv"]
|
models.py
ADDED
|
@@ -0,0 +1,694 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Data models for CERNenv: an LHC (Large Hadron Collider) style particle
|
| 3 |
+
physics discovery POMDP (Partially Observable Markov Decision Process).
|
| 4 |
+
|
| 5 |
+
The agent is a Large Language Model (LLM) acting as a high-energy physicist.
|
| 6 |
+
Each step it picks one structured action (configure beams, allocate
|
| 7 |
+
luminosity, run a trigger, fit a spectrum, request systematics, submit a
|
| 8 |
+
discovery claim, etc.) and receives a noisy detector-style observation.
|
| 9 |
+
The latent particle and detector parameters are the hidden ground truth.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
from __future__ import annotations
|
| 13 |
+
|
| 14 |
+
from enum import Enum
|
| 15 |
+
from typing import Any, Dict, List, Optional
|
| 16 |
+
|
| 17 |
+
from pydantic import BaseModel, Field
|
| 18 |
+
|
| 19 |
+
from openenv.core.env_server.types import Action, Observation, State
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# ── Action vocabulary ───────────────────────────────────────────────────────
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
class ActionType(str, Enum):
|
| 26 |
+
# ── Beam & data acquisition (DAQ) ─────────────────────────────────
|
| 27 |
+
CONFIGURE_BEAM = "configure_beam"
|
| 28 |
+
ALLOCATE_LUMINOSITY = "allocate_luminosity"
|
| 29 |
+
SET_TRIGGER = "set_trigger"
|
| 30 |
+
COLLECT_COLLISIONS = "collect_collisions"
|
| 31 |
+
|
| 32 |
+
# ── Reconstruction & calibration ─────────────────────────────────
|
| 33 |
+
CALIBRATE_DETECTOR = "calibrate_detector"
|
| 34 |
+
RECONSTRUCT_TRACKS = "reconstruct_tracks"
|
| 35 |
+
SELECT_CHANNEL = "select_channel"
|
| 36 |
+
|
| 37 |
+
# ── Analysis ──────────────────────────────────────────────────────
|
| 38 |
+
BUILD_INVARIANT_MASS = "build_invariant_mass"
|
| 39 |
+
SUBTRACT_BACKGROUND = "subtract_background"
|
| 40 |
+
FIT_RESONANCE = "fit_resonance"
|
| 41 |
+
SCAN_BUMP = "scan_bump"
|
| 42 |
+
MEASURE_ANGULAR = "measure_angular"
|
| 43 |
+
ESTIMATE_SIGNIFICANCE = "estimate_significance"
|
| 44 |
+
|
| 45 |
+
# ── Systematics & meta ───────────────────────────────────────────
|
| 46 |
+
REQUEST_SYSTEMATICS = "request_systematics"
|
| 47 |
+
REQUEST_THEORY_REVIEW = "request_theory_review"
|
| 48 |
+
|
| 49 |
+
# ── Final ─────────────────────────────────────────────────────────
|
| 50 |
+
SUBMIT_DISCOVERY_CLAIM = "submit_discovery_claim"
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
DAQ_ACTIONS = frozenset({
|
| 54 |
+
ActionType.CONFIGURE_BEAM,
|
| 55 |
+
ActionType.ALLOCATE_LUMINOSITY,
|
| 56 |
+
ActionType.SET_TRIGGER,
|
| 57 |
+
ActionType.COLLECT_COLLISIONS,
|
| 58 |
+
})
|
| 59 |
+
|
| 60 |
+
RECO_ACTIONS = frozenset({
|
| 61 |
+
ActionType.CALIBRATE_DETECTOR,
|
| 62 |
+
ActionType.RECONSTRUCT_TRACKS,
|
| 63 |
+
ActionType.SELECT_CHANNEL,
|
| 64 |
+
})
|
| 65 |
+
|
| 66 |
+
ANALYSIS_ACTIONS = frozenset({
|
| 67 |
+
ActionType.BUILD_INVARIANT_MASS,
|
| 68 |
+
ActionType.SUBTRACT_BACKGROUND,
|
| 69 |
+
ActionType.FIT_RESONANCE,
|
| 70 |
+
ActionType.SCAN_BUMP,
|
| 71 |
+
ActionType.MEASURE_ANGULAR,
|
| 72 |
+
ActionType.ESTIMATE_SIGNIFICANCE,
|
| 73 |
+
})
|
| 74 |
+
|
| 75 |
+
META_ACTIONS = frozenset({
|
| 76 |
+
ActionType.REQUEST_SYSTEMATICS,
|
| 77 |
+
ActionType.REQUEST_THEORY_REVIEW,
|
| 78 |
+
ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 79 |
+
})
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# ── Detector channels & physics primitives ────────────────────────────────
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class DetectorChannel(str, Enum):
|
| 86 |
+
"""Final-state decay channel the agent reconstructs in.
|
| 87 |
+
|
| 88 |
+
Channels affect signal acceptance and background composition. Picking a
|
| 89 |
+
channel where the true particle does not decay yields low signal yield
|
| 90 |
+
no matter how much luminosity is collected — this is intentional.
|
| 91 |
+
"""
|
| 92 |
+
|
| 93 |
+
DIPHOTON = "diphoton" # γγ
|
| 94 |
+
DILEPTON_EE = "dilepton_ee" # e+ e-
|
| 95 |
+
DILEPTON_MUMU = "dilepton_mumu" # μ+ μ-
|
| 96 |
+
DIJET = "dijet" # jj
|
| 97 |
+
FOUR_LEPTON = "four_lepton" # 4ℓ
|
| 98 |
+
BB = "bb" # b b-bar
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class TriggerType(str, Enum):
|
| 102 |
+
"""Hardware-level event selection."""
|
| 103 |
+
|
| 104 |
+
LOW_PT = "low_pt" # broad acceptance, lots of background
|
| 105 |
+
HIGH_PT = "high_pt" # high-mass focus, lower QCD
|
| 106 |
+
DIPHOTON_HLT = "diphoton_hlt"
|
| 107 |
+
DILEPTON_HLT = "dilepton_hlt"
|
| 108 |
+
JET_HLT = "jet_hlt"
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class BeamEnergy(str, Enum):
|
| 112 |
+
"""LHC-style center-of-mass energies (TeV)."""
|
| 113 |
+
|
| 114 |
+
E_7 = "7TeV"
|
| 115 |
+
E_8 = "8TeV"
|
| 116 |
+
E_13 = "13TeV"
|
| 117 |
+
E_14 = "14TeV"
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# ── Tool / instrument registry (for prompts and tool-fit reward) ──────────
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class ToolCategory(str, Enum):
|
| 124 |
+
DAQ = "daq"
|
| 125 |
+
RECONSTRUCTION = "reconstruction"
|
| 126 |
+
CALIBRATION = "calibration"
|
| 127 |
+
ANALYSIS = "analysis"
|
| 128 |
+
STATISTICS = "statistics"
|
| 129 |
+
SYSTEMATICS = "systematics"
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
class ToolSpec(BaseModel):
|
| 133 |
+
name: str
|
| 134 |
+
category: ToolCategory
|
| 135 |
+
description: str = ""
|
| 136 |
+
typical_runtime_hours: float = 0.5
|
| 137 |
+
typical_cost_musd: float = 0.0 # in millions of USD (compute / beam time proxy)
|
| 138 |
+
requires_gpu: bool = False
|
| 139 |
+
channels: List[str] = Field(default_factory=list)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
TOOL_REGISTRY: Dict[str, ToolSpec] = {
|
| 143 |
+
"ATLAS_HLT": ToolSpec(
|
| 144 |
+
name="ATLAS_HLT",
|
| 145 |
+
category=ToolCategory.DAQ,
|
| 146 |
+
description="ATLAS High-Level Trigger system for online event selection",
|
| 147 |
+
typical_runtime_hours=0.0,
|
| 148 |
+
channels=["diphoton", "dilepton_ee", "dilepton_mumu", "four_lepton", "dijet", "bb"],
|
| 149 |
+
),
|
| 150 |
+
"CMS_HLT": ToolSpec(
|
| 151 |
+
name="CMS_HLT",
|
| 152 |
+
category=ToolCategory.DAQ,
|
| 153 |
+
description="CMS High-Level Trigger system",
|
| 154 |
+
typical_runtime_hours=0.0,
|
| 155 |
+
channels=["diphoton", "dilepton_ee", "dilepton_mumu", "four_lepton", "dijet", "bb"],
|
| 156 |
+
),
|
| 157 |
+
"GEANT4": ToolSpec(
|
| 158 |
+
name="GEANT4",
|
| 159 |
+
category=ToolCategory.RECONSTRUCTION,
|
| 160 |
+
description="Detector simulation toolkit for full event reconstruction",
|
| 161 |
+
typical_runtime_hours=1.0,
|
| 162 |
+
typical_cost_musd=0.05,
|
| 163 |
+
requires_gpu=False,
|
| 164 |
+
),
|
| 165 |
+
"Athena": ToolSpec(
|
| 166 |
+
name="Athena",
|
| 167 |
+
category=ToolCategory.RECONSTRUCTION,
|
| 168 |
+
description="ATLAS reconstruction framework",
|
| 169 |
+
typical_runtime_hours=0.8,
|
| 170 |
+
),
|
| 171 |
+
"CMSSW": ToolSpec(
|
| 172 |
+
name="CMSSW",
|
| 173 |
+
category=ToolCategory.RECONSTRUCTION,
|
| 174 |
+
description="CMS reconstruction software",
|
| 175 |
+
typical_runtime_hours=0.8,
|
| 176 |
+
),
|
| 177 |
+
"ECAL_calibration": ToolSpec(
|
| 178 |
+
name="ECAL_calibration",
|
| 179 |
+
category=ToolCategory.CALIBRATION,
|
| 180 |
+
description="Electromagnetic calorimeter energy-scale calibration",
|
| 181 |
+
typical_runtime_hours=0.3,
|
| 182 |
+
),
|
| 183 |
+
"Tracker_alignment": ToolSpec(
|
| 184 |
+
name="Tracker_alignment",
|
| 185 |
+
category=ToolCategory.CALIBRATION,
|
| 186 |
+
description="Inner tracker alignment for momentum precision",
|
| 187 |
+
typical_runtime_hours=0.4,
|
| 188 |
+
),
|
| 189 |
+
"ROOT_RooFit": ToolSpec(
|
| 190 |
+
name="ROOT_RooFit",
|
| 191 |
+
category=ToolCategory.ANALYSIS,
|
| 192 |
+
description="Maximum-likelihood spectrum fitting toolkit",
|
| 193 |
+
typical_runtime_hours=0.2,
|
| 194 |
+
),
|
| 195 |
+
"MadGraph": ToolSpec(
|
| 196 |
+
name="MadGraph",
|
| 197 |
+
category=ToolCategory.ANALYSIS,
|
| 198 |
+
description="Matrix-element generator for signal+background templates",
|
| 199 |
+
typical_runtime_hours=1.5,
|
| 200 |
+
typical_cost_musd=0.02,
|
| 201 |
+
),
|
| 202 |
+
"Pythia8": ToolSpec(
|
| 203 |
+
name="Pythia8",
|
| 204 |
+
category=ToolCategory.ANALYSIS,
|
| 205 |
+
description="Parton-shower and hadronisation generator",
|
| 206 |
+
typical_runtime_hours=0.5,
|
| 207 |
+
),
|
| 208 |
+
"BumpHunter": ToolSpec(
|
| 209 |
+
name="BumpHunter",
|
| 210 |
+
category=ToolCategory.STATISTICS,
|
| 211 |
+
description="Sliding-window local-significance bump-hunting algorithm",
|
| 212 |
+
typical_runtime_hours=0.1,
|
| 213 |
+
),
|
| 214 |
+
"CLs_fit": ToolSpec(
|
| 215 |
+
name="CLs_fit",
|
| 216 |
+
category=ToolCategory.STATISTICS,
|
| 217 |
+
description="Modified-frequentist CLs limits and significance",
|
| 218 |
+
typical_runtime_hours=0.1,
|
| 219 |
+
),
|
| 220 |
+
"Asimov_significance": ToolSpec(
|
| 221 |
+
name="Asimov_significance",
|
| 222 |
+
category=ToolCategory.STATISTICS,
|
| 223 |
+
description="Asymptotic significance from Asimov dataset",
|
| 224 |
+
typical_runtime_hours=0.05,
|
| 225 |
+
),
|
| 226 |
+
"JES_systematics": ToolSpec(
|
| 227 |
+
name="JES_systematics",
|
| 228 |
+
category=ToolCategory.SYSTEMATICS,
|
| 229 |
+
description="Jet energy-scale systematic study",
|
| 230 |
+
typical_runtime_hours=0.4,
|
| 231 |
+
),
|
| 232 |
+
"Luminosity_calibration": ToolSpec(
|
| 233 |
+
name="Luminosity_calibration",
|
| 234 |
+
category=ToolCategory.SYSTEMATICS,
|
| 235 |
+
description="Van der Meer scan luminosity calibration",
|
| 236 |
+
typical_runtime_hours=0.3,
|
| 237 |
+
),
|
| 238 |
+
}
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
# Forward decl used by the next mapping; the actual ActionType class is
|
| 242 |
+
# defined above. Keeping this immediately after TOOL_REGISTRY makes the
|
| 243 |
+
# tool-category contract a single, auditable block.
|
| 244 |
+
ACTION_TOOL_CATEGORIES: Dict["ActionType", List[ToolCategory]] = {
|
| 245 |
+
# DAQ-style actions don't normally take a named external tool, but if
|
| 246 |
+
# the agent supplies one we expect a DAQ-flavoured method.
|
| 247 |
+
ActionType.CONFIGURE_BEAM: [ToolCategory.DAQ],
|
| 248 |
+
ActionType.ALLOCATE_LUMINOSITY: [ToolCategory.DAQ],
|
| 249 |
+
ActionType.SET_TRIGGER: [ToolCategory.DAQ],
|
| 250 |
+
ActionType.COLLECT_COLLISIONS: [ToolCategory.DAQ],
|
| 251 |
+
# Reconstruction & calibration
|
| 252 |
+
ActionType.RECONSTRUCT_TRACKS: [ToolCategory.RECONSTRUCTION],
|
| 253 |
+
ActionType.CALIBRATE_DETECTOR: [ToolCategory.CALIBRATION, ToolCategory.RECONSTRUCTION],
|
| 254 |
+
ActionType.SELECT_CHANNEL: [ToolCategory.RECONSTRUCTION, ToolCategory.ANALYSIS],
|
| 255 |
+
# Analysis
|
| 256 |
+
ActionType.BUILD_INVARIANT_MASS: [ToolCategory.ANALYSIS],
|
| 257 |
+
ActionType.SUBTRACT_BACKGROUND: [ToolCategory.ANALYSIS, ToolCategory.STATISTICS],
|
| 258 |
+
ActionType.FIT_RESONANCE: [ToolCategory.ANALYSIS, ToolCategory.STATISTICS],
|
| 259 |
+
ActionType.SCAN_BUMP: [ToolCategory.STATISTICS, ToolCategory.ANALYSIS],
|
| 260 |
+
ActionType.MEASURE_ANGULAR: [ToolCategory.ANALYSIS, ToolCategory.STATISTICS],
|
| 261 |
+
ActionType.ESTIMATE_SIGNIFICANCE: [ToolCategory.STATISTICS],
|
| 262 |
+
# Meta
|
| 263 |
+
ActionType.REQUEST_SYSTEMATICS: [ToolCategory.SYSTEMATICS],
|
| 264 |
+
ActionType.REQUEST_THEORY_REVIEW: [],
|
| 265 |
+
ActionType.SUBMIT_DISCOVERY_CLAIM: [],
|
| 266 |
+
}
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def is_recommended_tool(action_type: "ActionType", method: Optional[str]) -> bool:
|
| 270 |
+
"""Return True iff ``method`` is a real tool whose category matches ``action_type``.
|
| 271 |
+
|
| 272 |
+
This is the gate the reward function uses to credit "tool fit". It
|
| 273 |
+
intentionally rejects bogus method strings (so a model can't farm
|
| 274 |
+
shaping reward by setting ``method='whatever'``) and rejects mismatches
|
| 275 |
+
(e.g. running ``BumpHunter`` for a calibration step).
|
| 276 |
+
"""
|
| 277 |
+
if not method:
|
| 278 |
+
return False
|
| 279 |
+
spec = TOOL_REGISTRY.get(method)
|
| 280 |
+
if spec is None:
|
| 281 |
+
return False
|
| 282 |
+
expected = ACTION_TOOL_CATEGORIES.get(action_type, [])
|
| 283 |
+
if not expected:
|
| 284 |
+
return False
|
| 285 |
+
return spec.category in expected
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
# ── Action schema ──────────────────────────────────────────────────────────
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class ExperimentAction(Action):
|
| 292 |
+
"""One structured experimental step at the LHC."""
|
| 293 |
+
|
| 294 |
+
action_type: ActionType = Field(
|
| 295 |
+
...,
|
| 296 |
+
description=(
|
| 297 |
+
"Discrete LHC pipeline step. The environment enforces physics "
|
| 298 |
+
"prerequisites: you cannot fit a spectrum before collecting data, "
|
| 299 |
+
"or claim a discovery before estimating significance."
|
| 300 |
+
),
|
| 301 |
+
)
|
| 302 |
+
method: Optional[str] = Field(
|
| 303 |
+
None,
|
| 304 |
+
description=(
|
| 305 |
+
"Optional named instrument or framework (e.g. 'ROOT_RooFit', "
|
| 306 |
+
"'BumpHunter', 'Pythia8'). Affects cost, runtime, and tool-fit reward."
|
| 307 |
+
),
|
| 308 |
+
)
|
| 309 |
+
parameters: Dict[str, Any] = Field(
|
| 310 |
+
default_factory=dict,
|
| 311 |
+
description=(
|
| 312 |
+
"Action-specific settings such as beam energy, integrated luminosity "
|
| 313 |
+
"(fb^-1), trigger selection, decay channel, mass window, fit model."
|
| 314 |
+
),
|
| 315 |
+
)
|
| 316 |
+
justification: Optional[str] = Field(
|
| 317 |
+
None,
|
| 318 |
+
description="Short scientific rationale for picking this step now.",
|
| 319 |
+
)
|
| 320 |
+
confidence: float = Field(
|
| 321 |
+
0.5, ge=0.0, le=1.0,
|
| 322 |
+
description="Agent confidence in the chosen step.",
|
| 323 |
+
)
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
# ── Outputs ────────────────────────────────────────────────────────────────
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
class OutputType(str, Enum):
|
| 330 |
+
BEAM_CONFIG = "beam_config"
|
| 331 |
+
LUMINOSITY_LOG = "luminosity_log"
|
| 332 |
+
TRIGGER_REPORT = "trigger_report"
|
| 333 |
+
COLLISION_BATCH = "collision_batch"
|
| 334 |
+
CALIBRATION_REPORT = "calibration_report"
|
| 335 |
+
RECONSTRUCTION = "reconstruction"
|
| 336 |
+
CHANNEL_SELECTION = "channel_selection"
|
| 337 |
+
INVARIANT_MASS_HIST = "invariant_mass_hist"
|
| 338 |
+
BACKGROUND_SUBTRACTION = "background_subtraction"
|
| 339 |
+
FIT_RESULT = "fit_result"
|
| 340 |
+
BUMP_SCAN = "bump_scan"
|
| 341 |
+
ANGULAR_RESULT = "angular_result"
|
| 342 |
+
SIGNIFICANCE = "significance"
|
| 343 |
+
SYSTEMATICS_REPORT = "systematics_report"
|
| 344 |
+
THEORY_REVIEW = "theory_review"
|
| 345 |
+
DISCOVERY_CLAIM = "discovery_claim"
|
| 346 |
+
FAILURE_REPORT = "failure_report"
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
class IntermediateOutput(BaseModel):
|
| 350 |
+
"""A single noisy detector or analysis artifact."""
|
| 351 |
+
|
| 352 |
+
output_type: OutputType
|
| 353 |
+
step_index: int
|
| 354 |
+
success: bool = True
|
| 355 |
+
quality_score: float = Field(1.0, ge=0.0, le=1.0)
|
| 356 |
+
summary: str = ""
|
| 357 |
+
data: Dict[str, Any] = Field(default_factory=dict)
|
| 358 |
+
uncertainty: float = Field(0.0, ge=0.0, le=1.0)
|
| 359 |
+
warnings: List[str] = Field(default_factory=list)
|
| 360 |
+
artifacts_available: List[str] = Field(default_factory=list)
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
# ── Observable state components ───────────────────────────────────────────
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
class ResourceUsage(BaseModel):
|
| 367 |
+
"""Agent-visible resource counters."""
|
| 368 |
+
|
| 369 |
+
budget_used_musd: float = 0.0
|
| 370 |
+
budget_remaining_musd: float = 100.0
|
| 371 |
+
luminosity_used_fb: float = 0.0
|
| 372 |
+
luminosity_remaining_fb: float = 300.0
|
| 373 |
+
time_used_days: float = 0.0
|
| 374 |
+
time_remaining_days: float = 365.0
|
| 375 |
+
compute_hours_used: float = 0.0
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
class PipelineStepRecord(BaseModel):
|
| 379 |
+
step_index: int
|
| 380 |
+
action_type: ActionType
|
| 381 |
+
method: Optional[str] = None
|
| 382 |
+
parameters: Dict[str, Any] = Field(default_factory=dict)
|
| 383 |
+
output_summary: str = ""
|
| 384 |
+
output_type: OutputType
|
| 385 |
+
success: bool = True
|
| 386 |
+
quality_score: float = 1.0
|
| 387 |
+
cost_musd: float = 0.0
|
| 388 |
+
luminosity_cost_fb: float = 0.0
|
| 389 |
+
time_cost_days: float = 0.0
|
| 390 |
+
|
| 391 |
+
|
| 392 |
+
class PaperReference(BaseModel):
|
| 393 |
+
title: str
|
| 394 |
+
citation: Optional[str] = None
|
| 395 |
+
doi: Optional[str] = None
|
| 396 |
+
arxiv_id: Optional[str] = None
|
| 397 |
+
url: Optional[str] = None
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
class ExpectedFinding(BaseModel):
|
| 401 |
+
finding: str
|
| 402 |
+
category: str = "claim"
|
| 403 |
+
keywords: List[str] = Field(default_factory=list)
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
class TaskSpec(BaseModel):
|
| 407 |
+
"""The physics question the agent is given for this episode."""
|
| 408 |
+
|
| 409 |
+
problem_statement: str = "Discover and characterise an unknown resonance."
|
| 410 |
+
target_collider: str = "LHC"
|
| 411 |
+
beam_energy_options: List[str] = Field(
|
| 412 |
+
default_factory=lambda: [e.value for e in BeamEnergy],
|
| 413 |
+
)
|
| 414 |
+
available_channels: List[str] = Field(
|
| 415 |
+
default_factory=lambda: [c.value for c in DetectorChannel],
|
| 416 |
+
)
|
| 417 |
+
available_triggers: List[str] = Field(
|
| 418 |
+
default_factory=lambda: [t.value for t in TriggerType],
|
| 419 |
+
)
|
| 420 |
+
available_tools: List[str] = Field(
|
| 421 |
+
default_factory=lambda: list(TOOL_REGISTRY.keys()),
|
| 422 |
+
)
|
| 423 |
+
mass_search_window_gev: List[float] = Field(default_factory=lambda: [50.0, 1000.0])
|
| 424 |
+
budget_limit_musd: float = 100.0
|
| 425 |
+
luminosity_budget_fb: float = 300.0
|
| 426 |
+
time_limit_days: float = 365.0
|
| 427 |
+
prior_observations: List[str] = Field(default_factory=list)
|
| 428 |
+
success_criteria: List[str] = Field(default_factory=list)
|
| 429 |
+
paper_references: List[PaperReference] = Field(default_factory=list)
|
| 430 |
+
expected_findings: List[ExpectedFinding] = Field(default_factory=list)
|
| 431 |
+
difficulty: str = "medium"
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
class DiscoveryClaim(BaseModel):
|
| 435 |
+
"""Structured final claim graded against hidden truth."""
|
| 436 |
+
|
| 437 |
+
claim: str = ""
|
| 438 |
+
mass_estimate_gev: Optional[float] = None
|
| 439 |
+
mass_uncertainty_gev: Optional[float] = None
|
| 440 |
+
width_estimate_gev: Optional[float] = None
|
| 441 |
+
significance_sigma: Optional[float] = None
|
| 442 |
+
decay_channel: Optional[str] = None
|
| 443 |
+
spin_hypothesis: Optional[int] = None # 0, 1, 2
|
| 444 |
+
parity: Optional[str] = None # "+", "-"
|
| 445 |
+
cross_section_fb: Optional[float] = None
|
| 446 |
+
confidence: float = Field(0.5, ge=0.0, le=1.0)
|
| 447 |
+
evidence_steps: List[int] = Field(default_factory=list)
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
class CollisionObservation(Observation):
|
| 451 |
+
"""Full observable state returned to the agent each step.
|
| 452 |
+
|
| 453 |
+
Excludes the hidden particle truth and hidden detector systematics.
|
| 454 |
+
"""
|
| 455 |
+
|
| 456 |
+
task: TaskSpec = Field(default_factory=TaskSpec)
|
| 457 |
+
step_index: int = 0
|
| 458 |
+
pipeline_history: List[PipelineStepRecord] = Field(default_factory=list)
|
| 459 |
+
available_channels: List[str] = Field(default_factory=list)
|
| 460 |
+
available_triggers: List[str] = Field(default_factory=list)
|
| 461 |
+
available_tools: List[str] = Field(default_factory=list)
|
| 462 |
+
resource_usage: ResourceUsage = Field(default_factory=ResourceUsage)
|
| 463 |
+
latest_output: Optional[IntermediateOutput] = None
|
| 464 |
+
all_outputs: List[IntermediateOutput] = Field(default_factory=list)
|
| 465 |
+
candidate_masses_gev: List[float] = Field(default_factory=list)
|
| 466 |
+
candidate_significances: List[float] = Field(default_factory=list)
|
| 467 |
+
selected_channel: Optional[str] = None
|
| 468 |
+
selected_beam_energy: Optional[str] = None
|
| 469 |
+
cumulative_significance: float = 0.0
|
| 470 |
+
uncertainty_summary: Dict[str, float] = Field(default_factory=dict)
|
| 471 |
+
rule_violations: List[str] = Field(default_factory=list)
|
| 472 |
+
step_reward_breakdown: Dict[str, float] = Field(default_factory=dict)
|
| 473 |
+
|
| 474 |
+
|
| 475 |
+
# ── Public state snapshot ─────────────────────────────────────────────────
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
class CernState(State):
|
| 479 |
+
"""OpenEnv ``State`` snapshot for CERNenv.
|
| 480 |
+
|
| 481 |
+
This is the public, agent-facing summary of where an episode currently
|
| 482 |
+
stands. It is defined here in ``models.py`` (not in ``server.environment``)
|
| 483 |
+
so that the WebSocket / HTTP client can deserialize episode state without
|
| 484 |
+
importing any server internals.
|
| 485 |
+
"""
|
| 486 |
+
|
| 487 |
+
scenario_name: Optional[str] = None
|
| 488 |
+
difficulty: Optional[str] = None
|
| 489 |
+
episode_done: bool = False
|
| 490 |
+
cumulative_reward: float = 0.0
|
| 491 |
+
terminal_reward: Optional[float] = None
|
| 492 |
+
discovered: Optional[bool] = None
|
| 493 |
+
correct_mass: Optional[bool] = None
|
| 494 |
+
correct_channel: Optional[bool] = None
|
| 495 |
+
correct_spin: Optional[bool] = None
|
| 496 |
+
truth_mass_gev: Optional[float] = None
|
| 497 |
+
truth_channel: Optional[str] = None
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
# ── Agent-facing prompt helpers ───────────────────────────────────────────
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
AGENT_ACTION_GUIDANCE: Dict[ActionType, str] = {
|
| 504 |
+
ActionType.CONFIGURE_BEAM: (
|
| 505 |
+
"Pick the LHC center-of-mass energy. Higher energy reaches heavier "
|
| 506 |
+
"resonances but costs more per fb^-1. Required before collecting data."
|
| 507 |
+
),
|
| 508 |
+
ActionType.ALLOCATE_LUMINOSITY: (
|
| 509 |
+
"Schedule a chunk of integrated luminosity (fb^-1). More luminosity "
|
| 510 |
+
"means more events but uses budget and time. Required before collecting."
|
| 511 |
+
),
|
| 512 |
+
ActionType.SET_TRIGGER: (
|
| 513 |
+
"Choose a hardware/HLT trigger. Match the trigger to the channel of "
|
| 514 |
+
"interest; mismatched triggers throw away signal."
|
| 515 |
+
),
|
| 516 |
+
ActionType.COLLECT_COLLISIONS: (
|
| 517 |
+
"Run the experiment. Returns a noisy raw event count plus background "
|
| 518 |
+
"estimate, conditioned on beam, luminosity, trigger, and channel."
|
| 519 |
+
),
|
| 520 |
+
ActionType.CALIBRATE_DETECTOR: (
|
| 521 |
+
"Apply ECAL/tracker calibration. Reduces systematic uncertainty; "
|
| 522 |
+
"neglecting it inflates fit uncertainty later."
|
| 523 |
+
),
|
| 524 |
+
ActionType.RECONSTRUCT_TRACKS: (
|
| 525 |
+
"Reconstruct charged-particle tracks and physics objects. Required "
|
| 526 |
+
"before any analysis-level step."
|
| 527 |
+
),
|
| 528 |
+
ActionType.SELECT_CHANNEL: (
|
| 529 |
+
"Pick the decay channel to study (γγ, ℓℓ, jj, 4ℓ, bb). Wrong channel "
|
| 530 |
+
"= small signal acceptance regardless of luminosity."
|
| 531 |
+
),
|
| 532 |
+
ActionType.BUILD_INVARIANT_MASS: (
|
| 533 |
+
"Construct the invariant-mass histogram in the chosen channel and "
|
| 534 |
+
"mass window."
|
| 535 |
+
),
|
| 536 |
+
ActionType.SUBTRACT_BACKGROUND: (
|
| 537 |
+
"Fit a smooth background model and subtract it to expose any peak."
|
| 538 |
+
),
|
| 539 |
+
ActionType.FIT_RESONANCE: (
|
| 540 |
+
"Fit a Breit-Wigner / Crystal Ball line shape. Returns mass, width, "
|
| 541 |
+
"and statistical uncertainty."
|
| 542 |
+
),
|
| 543 |
+
ActionType.SCAN_BUMP: (
|
| 544 |
+
"Run a sliding-window bump hunt over the mass window. Reports the "
|
| 545 |
+
"most-significant candidate region."
|
| 546 |
+
),
|
| 547 |
+
ActionType.MEASURE_ANGULAR: (
|
| 548 |
+
"Measure decay angular distribution to constrain spin/parity. "
|
| 549 |
+
"Useful only after a peak is identified."
|
| 550 |
+
),
|
| 551 |
+
ActionType.ESTIMATE_SIGNIFICANCE: (
|
| 552 |
+
"Compute the statistical significance of a candidate signal in σ. "
|
| 553 |
+
"Required before claiming a discovery."
|
| 554 |
+
),
|
| 555 |
+
ActionType.REQUEST_SYSTEMATICS: (
|
| 556 |
+
"Run a systematics study (JES, luminosity, calibration). Improves "
|
| 557 |
+
"uncertainty estimates and reduces overconfidence penalty."
|
| 558 |
+
),
|
| 559 |
+
ActionType.REQUEST_THEORY_REVIEW: (
|
| 560 |
+
"Ask a theorist sub-agent to review the evidence; small extra signal "
|
| 561 |
+
"but not a substitute for missing data."
|
| 562 |
+
),
|
| 563 |
+
ActionType.SUBMIT_DISCOVERY_CLAIM: (
|
| 564 |
+
"Submit a structured discovery claim. Graded on mass calibration, "
|
| 565 |
+
"significance, channel, spin hypothesis, and overconfidence."
|
| 566 |
+
),
|
| 567 |
+
}
|
| 568 |
+
|
| 569 |
+
|
| 570 |
+
AGENT_ENVIRONMENT_RULES: List[str] = [
|
| 571 |
+
"Each successful action returns summarized evidence; do not repeat steps.",
|
| 572 |
+
"Hard prerequisites are enforced: data collection requires beam+luminosity+trigger; "
|
| 573 |
+
"analysis requires reconstruction and a chosen channel.",
|
| 574 |
+
"A discovery claim requires a fitted resonance and an estimated significance.",
|
| 575 |
+
"Tools listed in available_tools are pre-filtered for this episode; prefer them.",
|
| 576 |
+
"Submitting an overconfident wrong claim is heavily penalised.",
|
| 577 |
+
]
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
def build_agent_system_prompt() -> str:
|
| 581 |
+
lines = [
|
| 582 |
+
"You are an expert high-energy physicist running an analysis at the LHC.",
|
| 583 |
+
"",
|
| 584 |
+
"At each turn you observe the experiment state and pick one structured next step",
|
| 585 |
+
"to maximise the probability of correctly characterising a hidden resonance.",
|
| 586 |
+
"",
|
| 587 |
+
"Environment rules:",
|
| 588 |
+
]
|
| 589 |
+
lines.extend(f" - {rule}" for rule in AGENT_ENVIRONMENT_RULES)
|
| 590 |
+
lines.append("")
|
| 591 |
+
lines.append("Action guidance:")
|
| 592 |
+
lines.extend(
|
| 593 |
+
f" - {a.value}: {AGENT_ACTION_GUIDANCE[a]}" for a in ActionType
|
| 594 |
+
)
|
| 595 |
+
lines.extend([
|
| 596 |
+
"",
|
| 597 |
+
"Respond with ONLY a single valid JSON object, no extra prose:",
|
| 598 |
+
'{"action_type": "...", "method": null, "parameters": {}, "justification": "...", "confidence": 0.8}',
|
| 599 |
+
"",
|
| 600 |
+
"For submit_discovery_claim, structure parameters['claim'] as:",
|
| 601 |
+
'{"mass_estimate_gev": 125.0, "mass_uncertainty_gev": 0.5, "width_estimate_gev": 0.004,'
|
| 602 |
+
' "significance_sigma": 5.2, "decay_channel": "diphoton", "spin_hypothesis": 0,'
|
| 603 |
+
' "parity": "+", "cross_section_fb": 50.0, "confidence": 0.9}',
|
| 604 |
+
])
|
| 605 |
+
return "\n".join(lines)
|
| 606 |
+
|
| 607 |
+
|
| 608 |
+
def build_agent_observation_context(
|
| 609 |
+
obs: CollisionObservation,
|
| 610 |
+
*,
|
| 611 |
+
max_tools: int = 6,
|
| 612 |
+
max_channels: int = 4,
|
| 613 |
+
) -> str:
|
| 614 |
+
parts: List[str] = []
|
| 615 |
+
|
| 616 |
+
parts.append(
|
| 617 |
+
f"Mass search window: [{obs.task.mass_search_window_gev[0]:.0f}, "
|
| 618 |
+
f"{obs.task.mass_search_window_gev[1]:.0f}] GeV; "
|
| 619 |
+
f"difficulty={obs.task.difficulty}."
|
| 620 |
+
)
|
| 621 |
+
|
| 622 |
+
chans = list(dict.fromkeys(obs.available_channels or obs.task.available_channels))
|
| 623 |
+
if chans:
|
| 624 |
+
parts.append("Available channels: " + ", ".join(chans[:max_channels]))
|
| 625 |
+
|
| 626 |
+
tools = list(dict.fromkeys(obs.available_tools or obs.task.available_tools))
|
| 627 |
+
if tools:
|
| 628 |
+
parts.append("Available tools: " + ", ".join(tools[:max_tools]))
|
| 629 |
+
|
| 630 |
+
if obs.selected_channel:
|
| 631 |
+
parts.append(f"Selected channel: {obs.selected_channel}")
|
| 632 |
+
if obs.selected_beam_energy:
|
| 633 |
+
parts.append(f"Beam energy: {obs.selected_beam_energy}")
|
| 634 |
+
|
| 635 |
+
if obs.candidate_masses_gev:
|
| 636 |
+
masses = [f"{m:.1f}" for m in obs.candidate_masses_gev[:3]]
|
| 637 |
+
sigmas = [f"{s:.1f}" for s in obs.candidate_significances[:3]]
|
| 638 |
+
parts.append(
|
| 639 |
+
"Candidate peaks (GeV / σ): "
|
| 640 |
+
+ ", ".join(f"{m}/{s}" for m, s in zip(masses, sigmas))
|
| 641 |
+
)
|
| 642 |
+
|
| 643 |
+
return "\n".join(parts)
|
| 644 |
+
|
| 645 |
+
|
| 646 |
+
__all__ = [
|
| 647 |
+
"ActionType",
|
| 648 |
+
"DAQ_ACTIONS",
|
| 649 |
+
"RECO_ACTIONS",
|
| 650 |
+
"ANALYSIS_ACTIONS",
|
| 651 |
+
"META_ACTIONS",
|
| 652 |
+
"DetectorChannel",
|
| 653 |
+
"TriggerType",
|
| 654 |
+
"BeamEnergy",
|
| 655 |
+
"ToolCategory",
|
| 656 |
+
"ToolSpec",
|
| 657 |
+
"TOOL_REGISTRY",
|
| 658 |
+
"ACTION_TOOL_CATEGORIES",
|
| 659 |
+
"is_recommended_tool",
|
| 660 |
+
"ExperimentAction",
|
| 661 |
+
"OutputType",
|
| 662 |
+
"IntermediateOutput",
|
| 663 |
+
"ResourceUsage",
|
| 664 |
+
"PipelineStepRecord",
|
| 665 |
+
"PaperReference",
|
| 666 |
+
"ExpectedFinding",
|
| 667 |
+
"TaskSpec",
|
| 668 |
+
"DiscoveryClaim",
|
| 669 |
+
"CollisionObservation",
|
| 670 |
+
"CernState",
|
| 671 |
+
"AGENT_ACTION_GUIDANCE",
|
| 672 |
+
"AGENT_ENVIRONMENT_RULES",
|
| 673 |
+
"build_agent_system_prompt",
|
| 674 |
+
"build_agent_observation_context",
|
| 675 |
+
]
|
| 676 |
+
|
| 677 |
+
|
| 678 |
+
# ── Reserved-name guard ───────────────────────────────────────────────────
|
| 679 |
+
#
|
| 680 |
+
# OpenEnv reserves these names for its built-in HTTP/MCP routes; no custom
|
| 681 |
+
# action_type, tool, or output_type may collide with them. We enforce this
|
| 682 |
+
# at import time so any future schema change is caught instantly rather
|
| 683 |
+
# than silently breaking the MCP layer.
|
| 684 |
+
_OPENENV_RESERVED_NAMES: frozenset = frozenset({"reset", "step", "state", "close"})
|
| 685 |
+
_collisions = (
|
| 686 |
+
{a.value for a in ActionType}
|
| 687 |
+
| set(TOOL_REGISTRY)
|
| 688 |
+
| {o.value for o in OutputType}
|
| 689 |
+
) & _OPENENV_RESERVED_NAMES
|
| 690 |
+
if _collisions: # pragma: no cover - defensive
|
| 691 |
+
raise RuntimeError(
|
| 692 |
+
f"CERNenv schema collides with OpenEnv reserved names: {_collisions}. "
|
| 693 |
+
"Rename the offending entry in ActionType, TOOL_REGISTRY, or OutputType."
|
| 694 |
+
)
|
openenv.yaml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
spec_version: 1
|
| 2 |
+
name: cernenv
|
| 3 |
+
type: space
|
| 4 |
+
runtime: fastapi
|
| 5 |
+
app: server.app:app
|
| 6 |
+
port: 8000
|
pyproject.toml
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["setuptools>=45", "wheel"]
|
| 3 |
+
build-backend = "setuptools.build_meta"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "openenv-cernenv"
|
| 7 |
+
version = "0.1.0"
|
| 8 |
+
description = "RL environment for autonomous particle physics agents at the LHC"
|
| 9 |
+
requires-python = ">=3.10,<3.13"
|
| 10 |
+
dependencies = [
|
| 11 |
+
"openenv-core[core]>=0.2.3",
|
| 12 |
+
"numpy>=1.24.0",
|
| 13 |
+
"scipy>=1.10.0",
|
| 14 |
+
"pydantic>=2.0.0",
|
| 15 |
+
]
|
| 16 |
+
|
| 17 |
+
[project.optional-dependencies]
|
| 18 |
+
dev = [
|
| 19 |
+
"pytest>=8.0.0",
|
| 20 |
+
"pytest-cov>=4.0.0",
|
| 21 |
+
]
|
| 22 |
+
train = [
|
| 23 |
+
"accelerate>=1.0.0",
|
| 24 |
+
"datasets>=2.18.0",
|
| 25 |
+
"matplotlib>=3.8.0",
|
| 26 |
+
"peft>=0.10.0",
|
| 27 |
+
"torch>=2.2.0",
|
| 28 |
+
"transformers>=4.44.0",
|
| 29 |
+
"trl>=0.9.0",
|
| 30 |
+
]
|
| 31 |
+
|
| 32 |
+
[project.scripts]
|
| 33 |
+
cernenv-server = "server.app:main"
|
| 34 |
+
|
| 35 |
+
[tool.uv]
|
| 36 |
+
package = false
|
| 37 |
+
|
| 38 |
+
[tool.setuptools]
|
| 39 |
+
include-package-data = true
|
| 40 |
+
packages = [
|
| 41 |
+
"cernenv",
|
| 42 |
+
"cernenv.server",
|
| 43 |
+
"cernenv.server.simulator",
|
| 44 |
+
"cernenv.server.rules",
|
| 45 |
+
"cernenv.server.rewards",
|
| 46 |
+
"cernenv.server.tasks",
|
| 47 |
+
"cernenv.training",
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
[tool.setuptools.package-dir]
|
| 51 |
+
cernenv = "."
|
| 52 |
+
"cernenv.server" = "server"
|
| 53 |
+
"cernenv.server.simulator" = "server/simulator"
|
| 54 |
+
"cernenv.server.rules" = "server/rules"
|
| 55 |
+
"cernenv.server.rewards" = "server/rewards"
|
| 56 |
+
"cernenv.server.tasks" = "server/tasks"
|
| 57 |
+
"cernenv.training" = "training"
|
scripts/__init__.py
ADDED
|
File without changes
|
scripts/_build_spaces.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Stage env- and trainer-Space directories from the repo root.
|
| 2 |
+
|
| 3 |
+
Each Space needs a *single* directory containing the full repo plus the
|
| 4 |
+
right Dockerfile + README front-matter at its root. This script copies
|
| 5 |
+
the repo into a staging directory, drops in the Space-specific
|
| 6 |
+
``Dockerfile`` / ``README.md``, and prints the staging path.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import argparse
|
| 12 |
+
import shutil
|
| 13 |
+
import sys
|
| 14 |
+
from pathlib import Path
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
REPO_ROOT = Path(__file__).resolve().parent.parent
|
| 18 |
+
|
| 19 |
+
EXCLUDES = {
|
| 20 |
+
".venv",
|
| 21 |
+
"__pycache__",
|
| 22 |
+
".git",
|
| 23 |
+
".cursor",
|
| 24 |
+
".DS_Store",
|
| 25 |
+
"runs",
|
| 26 |
+
"wandb",
|
| 27 |
+
"node_modules",
|
| 28 |
+
".pytest_cache",
|
| 29 |
+
".mypy_cache",
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def _ignore(_dir: str, names):
|
| 34 |
+
return [n for n in names if n in EXCLUDES or n.endswith((".pyc", ".log"))]
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _stage(stage_dir: Path) -> None:
|
| 38 |
+
if stage_dir.exists():
|
| 39 |
+
shutil.rmtree(stage_dir)
|
| 40 |
+
shutil.copytree(REPO_ROOT, stage_dir, ignore=_ignore, symlinks=False)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def build_env_space(stage_dir: Path) -> None:
|
| 44 |
+
_stage(stage_dir)
|
| 45 |
+
|
| 46 |
+
dockerfile = """\
|
| 47 |
+
# CERNenv environment Space (Docker, CPU)
|
| 48 |
+
FROM python:3.11-slim
|
| 49 |
+
|
| 50 |
+
ENV PYTHONUNBUFFERED=1 \\
|
| 51 |
+
PIP_NO_CACHE_DIR=1 \\
|
| 52 |
+
PYTHONPATH=/home/user/app
|
| 53 |
+
|
| 54 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \\
|
| 55 |
+
git curl ca-certificates build-essential \\
|
| 56 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 57 |
+
|
| 58 |
+
RUN useradd -ms /bin/bash user
|
| 59 |
+
USER user
|
| 60 |
+
WORKDIR /home/user/app
|
| 61 |
+
|
| 62 |
+
COPY --chown=user:user space/env/requirements.txt /tmp/requirements.txt
|
| 63 |
+
RUN python -m pip install --upgrade pip && \\
|
| 64 |
+
python -m pip install --user -r /tmp/requirements.txt
|
| 65 |
+
|
| 66 |
+
COPY --chown=user:user . /home/user/app
|
| 67 |
+
|
| 68 |
+
EXPOSE 7860
|
| 69 |
+
|
| 70 |
+
CMD [\"python\", \"-m\", \"uvicorn\", \"server.app:app\", \"--host\", \"0.0.0.0\", \"--port\", \"7860\"]
|
| 71 |
+
"""
|
| 72 |
+
(stage_dir / "Dockerfile").write_text(dockerfile)
|
| 73 |
+
|
| 74 |
+
readme = (stage_dir / "space" / "env" / "README.md").read_text()
|
| 75 |
+
(stage_dir / "README.md").write_text(readme)
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def build_trainer_space(stage_dir: Path) -> None:
|
| 79 |
+
_stage(stage_dir)
|
| 80 |
+
|
| 81 |
+
dockerfile = """\
|
| 82 |
+
# CERNenv trainer Space (Docker, A100)
|
| 83 |
+
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04
|
| 84 |
+
|
| 85 |
+
ENV DEBIAN_FRONTEND=noninteractive \\
|
| 86 |
+
PYTHONUNBUFFERED=1 \\
|
| 87 |
+
PIP_NO_CACHE_DIR=1 \\
|
| 88 |
+
HF_HOME=/home/user/.cache/huggingface \\
|
| 89 |
+
TRANSFORMERS_CACHE=/home/user/.cache/huggingface/transformers \\
|
| 90 |
+
PYTHONPATH=/home/user/app
|
| 91 |
+
|
| 92 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \\
|
| 93 |
+
python3.11 python3.11-venv python3.11-dev python3-pip \\
|
| 94 |
+
git curl ca-certificates build-essential \\
|
| 95 |
+
&& rm -rf /var/lib/apt/lists/* \\
|
| 96 |
+
&& ln -sf /usr/bin/python3.11 /usr/local/bin/python \\
|
| 97 |
+
&& ln -sf /usr/bin/python3.11 /usr/local/bin/python3
|
| 98 |
+
|
| 99 |
+
RUN useradd -ms /bin/bash user
|
| 100 |
+
USER user
|
| 101 |
+
ENV PATH=\"/home/user/.local/bin:${PATH}\"
|
| 102 |
+
WORKDIR /home/user/app
|
| 103 |
+
|
| 104 |
+
COPY --chown=user:user space/training/requirements.txt /tmp/requirements.txt
|
| 105 |
+
RUN python -m pip install --upgrade pip && \\
|
| 106 |
+
python -m pip install --user -r /tmp/requirements.txt
|
| 107 |
+
|
| 108 |
+
COPY --chown=user:user . /home/user/app
|
| 109 |
+
|
| 110 |
+
EXPOSE 7860
|
| 111 |
+
|
| 112 |
+
CMD [\"python\", \"-m\", \"uvicorn\", \"space.training.app:app\", \"--host\", \"0.0.0.0\", \"--port\", \"7860\"]
|
| 113 |
+
"""
|
| 114 |
+
(stage_dir / "Dockerfile").write_text(dockerfile)
|
| 115 |
+
|
| 116 |
+
readme = (stage_dir / "space" / "training" / "README.md").read_text()
|
| 117 |
+
(stage_dir / "README.md").write_text(readme)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def main() -> None: # pragma: no cover
|
| 121 |
+
parser = argparse.ArgumentParser()
|
| 122 |
+
parser.add_argument("kind", choices=["env", "trainer"])
|
| 123 |
+
parser.add_argument("--stage_dir", required=True)
|
| 124 |
+
args = parser.parse_args()
|
| 125 |
+
|
| 126 |
+
stage_dir = Path(args.stage_dir).resolve()
|
| 127 |
+
if args.kind == "env":
|
| 128 |
+
build_env_space(stage_dir)
|
| 129 |
+
else:
|
| 130 |
+
build_trainer_space(stage_dir)
|
| 131 |
+
print(stage_dir)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
if __name__ == "__main__": # pragma: no cover
|
| 135 |
+
main()
|
scripts/baseline_agents.py
ADDED
|
@@ -0,0 +1,305 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Built-in agents for evaluating CERNenv.
|
| 2 |
+
|
| 3 |
+
These do **not** use any neural model — they are deterministic / random
|
| 4 |
+
policies you can use as baselines and oracles. They consume a
|
| 5 |
+
``CollisionObservation`` and return an ``ExperimentAction``.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import random
|
| 11 |
+
from dataclasses import dataclass
|
| 12 |
+
from typing import List, Optional, Protocol
|
| 13 |
+
|
| 14 |
+
from models import ActionType, CollisionObservation, ExperimentAction
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CernAgent(Protocol):
|
| 18 |
+
name: str
|
| 19 |
+
|
| 20 |
+
def reset(self) -> None: ...
|
| 21 |
+
|
| 22 |
+
def act(self, obs: CollisionObservation) -> ExperimentAction: ...
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
# ── Random agent ─────────────────────────────────────────────────────────
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@dataclass
|
| 29 |
+
class RandomAgent:
|
| 30 |
+
"""Picks a uniformly random valid action; useful as a worst-case baseline."""
|
| 31 |
+
|
| 32 |
+
name: str = "random"
|
| 33 |
+
seed: int = 0
|
| 34 |
+
|
| 35 |
+
def __post_init__(self) -> None:
|
| 36 |
+
self._rng = random.Random(self.seed)
|
| 37 |
+
|
| 38 |
+
def reset(self) -> None:
|
| 39 |
+
self._rng = random.Random(self.seed)
|
| 40 |
+
|
| 41 |
+
def act(self, obs: CollisionObservation) -> ExperimentAction:
|
| 42 |
+
action_type = self._rng.choice(list(ActionType))
|
| 43 |
+
params: dict = {}
|
| 44 |
+
if action_type == ActionType.CONFIGURE_BEAM:
|
| 45 |
+
params = {"beam_energy": self._rng.choice(obs.task.beam_energy_options or ["13TeV"])}
|
| 46 |
+
elif action_type == ActionType.SELECT_CHANNEL:
|
| 47 |
+
params = {"channel": self._rng.choice(obs.task.available_channels or ["diphoton"])}
|
| 48 |
+
elif action_type == ActionType.SET_TRIGGER:
|
| 49 |
+
params = {"trigger": self._rng.choice(obs.task.available_triggers or ["high_pt"])}
|
| 50 |
+
elif action_type == ActionType.ALLOCATE_LUMINOSITY:
|
| 51 |
+
params = {"luminosity_fb": self._rng.uniform(20.0, 100.0)}
|
| 52 |
+
elif action_type == ActionType.COLLECT_COLLISIONS:
|
| 53 |
+
params = {"luminosity_fb": self._rng.uniform(20.0, 100.0)}
|
| 54 |
+
elif action_type == ActionType.BUILD_INVARIANT_MASS:
|
| 55 |
+
lo, hi = obs.task.mass_search_window_gev
|
| 56 |
+
params = {"mass_window_gev": [lo, hi]}
|
| 57 |
+
elif action_type == ActionType.SUBMIT_DISCOVERY_CLAIM:
|
| 58 |
+
mass = obs.candidate_masses_gev[-1] if obs.candidate_masses_gev else (
|
| 59 |
+
0.5 * (obs.task.mass_search_window_gev[0] + obs.task.mass_search_window_gev[1])
|
| 60 |
+
)
|
| 61 |
+
params = {
|
| 62 |
+
"claim": {
|
| 63 |
+
"mass_estimate_gev": mass,
|
| 64 |
+
"mass_uncertainty_gev": 5.0,
|
| 65 |
+
"significance_sigma": obs.cumulative_significance,
|
| 66 |
+
"decay_channel": obs.selected_channel or "diphoton",
|
| 67 |
+
"spin_hypothesis": int(self._rng.choice([0, 1, 2])),
|
| 68 |
+
"parity": self._rng.choice(["+", "-"]),
|
| 69 |
+
"confidence": self._rng.uniform(0.4, 0.9),
|
| 70 |
+
}
|
| 71 |
+
}
|
| 72 |
+
return ExperimentAction(
|
| 73 |
+
action_type=action_type,
|
| 74 |
+
parameters=params,
|
| 75 |
+
confidence=0.4,
|
| 76 |
+
justification="random baseline",
|
| 77 |
+
)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# ── Heuristic agent ──────────────────────────────────────────────────────
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@dataclass
|
| 84 |
+
class HeuristicAgent:
|
| 85 |
+
"""A scripted analysis-flow agent using high-yield channels and
|
| 86 |
+
sensible default parameters. Acts as the strong non-LLM baseline.
|
| 87 |
+
"""
|
| 88 |
+
|
| 89 |
+
name: str = "heuristic"
|
| 90 |
+
|
| 91 |
+
def __post_init__(self) -> None:
|
| 92 |
+
self._reset_plan()
|
| 93 |
+
|
| 94 |
+
def reset(self) -> None:
|
| 95 |
+
self._reset_plan()
|
| 96 |
+
|
| 97 |
+
def _reset_plan(self) -> None:
|
| 98 |
+
self._plan: List[ExperimentAction] = [
|
| 99 |
+
ExperimentAction(
|
| 100 |
+
action_type=ActionType.CONFIGURE_BEAM,
|
| 101 |
+
parameters={"beam_energy": "13TeV"},
|
| 102 |
+
confidence=0.9,
|
| 103 |
+
justification="13 TeV maximises reach within budget",
|
| 104 |
+
),
|
| 105 |
+
ExperimentAction(
|
| 106 |
+
action_type=ActionType.SELECT_CHANNEL,
|
| 107 |
+
parameters={"channel": "diphoton"},
|
| 108 |
+
confidence=0.7,
|
| 109 |
+
justification="diphoton has clean low-background signature",
|
| 110 |
+
),
|
| 111 |
+
ExperimentAction(
|
| 112 |
+
action_type=ActionType.SET_TRIGGER,
|
| 113 |
+
parameters={"trigger": "diphoton_hlt"},
|
| 114 |
+
confidence=0.9,
|
| 115 |
+
justification="match trigger to channel",
|
| 116 |
+
),
|
| 117 |
+
ExperimentAction(
|
| 118 |
+
action_type=ActionType.ALLOCATE_LUMINOSITY,
|
| 119 |
+
parameters={"luminosity_fb": 80.0},
|
| 120 |
+
confidence=0.8,
|
| 121 |
+
justification="bulk allocation for the first run",
|
| 122 |
+
),
|
| 123 |
+
ExperimentAction(
|
| 124 |
+
action_type=ActionType.COLLECT_COLLISIONS,
|
| 125 |
+
parameters={"luminosity_fb": 80.0},
|
| 126 |
+
confidence=0.8,
|
| 127 |
+
justification="run physics",
|
| 128 |
+
),
|
| 129 |
+
ExperimentAction(
|
| 130 |
+
action_type=ActionType.RECONSTRUCT_TRACKS,
|
| 131 |
+
method="Athena",
|
| 132 |
+
confidence=0.9,
|
| 133 |
+
justification="reconstruct objects",
|
| 134 |
+
),
|
| 135 |
+
ExperimentAction(
|
| 136 |
+
action_type=ActionType.CALIBRATE_DETECTOR,
|
| 137 |
+
method="ECAL_calibration",
|
| 138 |
+
confidence=0.8,
|
| 139 |
+
justification="reduce systematic uncertainty",
|
| 140 |
+
),
|
| 141 |
+
ExperimentAction(
|
| 142 |
+
action_type=ActionType.BUILD_INVARIANT_MASS,
|
| 143 |
+
parameters={"mass_window_gev": [80.0, 800.0], "n_bins": 60},
|
| 144 |
+
confidence=0.8,
|
| 145 |
+
justification="broad-window histogram",
|
| 146 |
+
),
|
| 147 |
+
ExperimentAction(
|
| 148 |
+
action_type=ActionType.SUBTRACT_BACKGROUND,
|
| 149 |
+
confidence=0.7,
|
| 150 |
+
justification="smooth-fit subtraction",
|
| 151 |
+
),
|
| 152 |
+
ExperimentAction(
|
| 153 |
+
action_type=ActionType.SCAN_BUMP,
|
| 154 |
+
method="BumpHunter",
|
| 155 |
+
confidence=0.8,
|
| 156 |
+
justification="locate candidate peak",
|
| 157 |
+
),
|
| 158 |
+
ExperimentAction(
|
| 159 |
+
action_type=ActionType.FIT_RESONANCE,
|
| 160 |
+
method="ROOT_RooFit",
|
| 161 |
+
confidence=0.85,
|
| 162 |
+
justification="fit Breit-Wigner peak",
|
| 163 |
+
),
|
| 164 |
+
ExperimentAction(
|
| 165 |
+
action_type=ActionType.REQUEST_SYSTEMATICS,
|
| 166 |
+
method="Luminosity_calibration",
|
| 167 |
+
confidence=0.7,
|
| 168 |
+
justification="pin down dominant systematics",
|
| 169 |
+
),
|
| 170 |
+
ExperimentAction(
|
| 171 |
+
action_type=ActionType.ESTIMATE_SIGNIFICANCE,
|
| 172 |
+
method="Asimov_significance",
|
| 173 |
+
confidence=0.85,
|
| 174 |
+
justification="quantify discovery significance",
|
| 175 |
+
),
|
| 176 |
+
ExperimentAction(
|
| 177 |
+
action_type=ActionType.MEASURE_ANGULAR,
|
| 178 |
+
confidence=0.7,
|
| 179 |
+
justification="probe spin",
|
| 180 |
+
),
|
| 181 |
+
]
|
| 182 |
+
self._idx = 0
|
| 183 |
+
self._claim_submitted = False
|
| 184 |
+
|
| 185 |
+
def act(self, obs: CollisionObservation) -> ExperimentAction:
|
| 186 |
+
if self._idx < len(self._plan):
|
| 187 |
+
a = self._plan[self._idx]
|
| 188 |
+
self._idx += 1
|
| 189 |
+
return a
|
| 190 |
+
if not self._claim_submitted:
|
| 191 |
+
self._claim_submitted = True
|
| 192 |
+
mass = obs.candidate_masses_gev[-1] if obs.candidate_masses_gev else 125.0
|
| 193 |
+
sig = obs.cumulative_significance or 5.0
|
| 194 |
+
return ExperimentAction(
|
| 195 |
+
action_type=ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 196 |
+
parameters={
|
| 197 |
+
"claim": {
|
| 198 |
+
"mass_estimate_gev": mass,
|
| 199 |
+
"mass_uncertainty_gev": 1.0,
|
| 200 |
+
"width_estimate_gev": 0.01,
|
| 201 |
+
"significance_sigma": sig,
|
| 202 |
+
"decay_channel": obs.selected_channel or "diphoton",
|
| 203 |
+
"spin_hypothesis": 0,
|
| 204 |
+
"parity": "+",
|
| 205 |
+
"cross_section_fb": 50.0,
|
| 206 |
+
"confidence": 0.8,
|
| 207 |
+
}
|
| 208 |
+
},
|
| 209 |
+
confidence=0.85,
|
| 210 |
+
justification="submit best calibrated claim",
|
| 211 |
+
)
|
| 212 |
+
return ExperimentAction(
|
| 213 |
+
action_type=ActionType.REQUEST_THEORY_REVIEW,
|
| 214 |
+
confidence=0.3,
|
| 215 |
+
justification="filler step (claim already submitted)",
|
| 216 |
+
)
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
# ── Oracle agent ─────────────────────────────────────────────────────────
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
@dataclass
|
| 223 |
+
class OracleAgent:
|
| 224 |
+
"""An oracle that *peeks* at the latent particle truth (only available
|
| 225 |
+
for in-process evaluation; never used remotely). This is the upper bound
|
| 226 |
+
of what a perfect agent could achieve given the noise budget.
|
| 227 |
+
"""
|
| 228 |
+
|
| 229 |
+
name: str = "oracle"
|
| 230 |
+
truth: Optional[dict] = None # set externally before the episode
|
| 231 |
+
|
| 232 |
+
def reset(self) -> None:
|
| 233 |
+
self._stage = 0
|
| 234 |
+
self._claim_submitted = False
|
| 235 |
+
|
| 236 |
+
def act(self, obs: CollisionObservation) -> ExperimentAction:
|
| 237 |
+
truth = self.truth or {}
|
| 238 |
+
true_channel = truth.get("primary_channel", obs.selected_channel or "diphoton")
|
| 239 |
+
trigger_for_channel = {
|
| 240 |
+
"diphoton": "diphoton_hlt",
|
| 241 |
+
"dilepton_ee": "dilepton_hlt",
|
| 242 |
+
"dilepton_mumu": "dilepton_hlt",
|
| 243 |
+
"four_lepton": "dilepton_hlt",
|
| 244 |
+
"dijet": "jet_hlt",
|
| 245 |
+
"bb": "jet_hlt",
|
| 246 |
+
}.get(true_channel, "high_pt")
|
| 247 |
+
|
| 248 |
+
plan = [
|
| 249 |
+
ExperimentAction(action_type=ActionType.CONFIGURE_BEAM, parameters={"beam_energy": "13TeV"}, confidence=0.95),
|
| 250 |
+
ExperimentAction(action_type=ActionType.SELECT_CHANNEL, parameters={"channel": true_channel}, confidence=0.99),
|
| 251 |
+
ExperimentAction(action_type=ActionType.SET_TRIGGER, parameters={"trigger": trigger_for_channel}, confidence=0.95),
|
| 252 |
+
ExperimentAction(action_type=ActionType.ALLOCATE_LUMINOSITY, parameters={"luminosity_fb": 120.0}, confidence=0.9),
|
| 253 |
+
ExperimentAction(action_type=ActionType.COLLECT_COLLISIONS, parameters={"luminosity_fb": 120.0}, confidence=0.9),
|
| 254 |
+
ExperimentAction(action_type=ActionType.RECONSTRUCT_TRACKS, method="Athena", confidence=0.95),
|
| 255 |
+
ExperimentAction(action_type=ActionType.CALIBRATE_DETECTOR, method="ECAL_calibration", confidence=0.9),
|
| 256 |
+
ExperimentAction(
|
| 257 |
+
action_type=ActionType.BUILD_INVARIANT_MASS,
|
| 258 |
+
parameters={
|
| 259 |
+
"mass_window_gev": [
|
| 260 |
+
max(50.0, float(truth.get("mass_gev", 100.0)) - 50.0),
|
| 261 |
+
float(truth.get("mass_gev", 100.0)) + 80.0,
|
| 262 |
+
],
|
| 263 |
+
"n_bins": 80,
|
| 264 |
+
},
|
| 265 |
+
confidence=0.95,
|
| 266 |
+
),
|
| 267 |
+
ExperimentAction(action_type=ActionType.SUBTRACT_BACKGROUND, confidence=0.9),
|
| 268 |
+
ExperimentAction(action_type=ActionType.FIT_RESONANCE, method="ROOT_RooFit", confidence=0.95),
|
| 269 |
+
ExperimentAction(action_type=ActionType.REQUEST_SYSTEMATICS, method="Luminosity_calibration", confidence=0.9),
|
| 270 |
+
ExperimentAction(action_type=ActionType.ESTIMATE_SIGNIFICANCE, method="Asimov_significance", confidence=0.95),
|
| 271 |
+
ExperimentAction(action_type=ActionType.MEASURE_ANGULAR, confidence=0.85),
|
| 272 |
+
]
|
| 273 |
+
if self._stage < len(plan):
|
| 274 |
+
a = plan[self._stage]
|
| 275 |
+
self._stage += 1
|
| 276 |
+
return a
|
| 277 |
+
|
| 278 |
+
if not self._claim_submitted:
|
| 279 |
+
self._claim_submitted = True
|
| 280 |
+
return ExperimentAction(
|
| 281 |
+
action_type=ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 282 |
+
parameters={
|
| 283 |
+
"claim": {
|
| 284 |
+
"mass_estimate_gev": float(truth.get("mass_gev", 125.0)),
|
| 285 |
+
"mass_uncertainty_gev": 0.5,
|
| 286 |
+
"width_estimate_gev": float(truth.get("width_gev", 0.01)),
|
| 287 |
+
"significance_sigma": max(obs.cumulative_significance, 5.0),
|
| 288 |
+
"decay_channel": true_channel,
|
| 289 |
+
"spin_hypothesis": int(truth.get("spin", 0)),
|
| 290 |
+
"parity": str(truth.get("parity", "+")),
|
| 291 |
+
"cross_section_fb": float(truth.get("cross_section_fb", 50.0)),
|
| 292 |
+
"confidence": 0.95,
|
| 293 |
+
}
|
| 294 |
+
},
|
| 295 |
+
confidence=0.95,
|
| 296 |
+
justification="oracle claim from hidden truth",
|
| 297 |
+
)
|
| 298 |
+
return ExperimentAction(
|
| 299 |
+
action_type=ActionType.REQUEST_THEORY_REVIEW,
|
| 300 |
+
confidence=0.5,
|
| 301 |
+
justification="oracle filler",
|
| 302 |
+
)
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
__all__ = ["CernAgent", "RandomAgent", "HeuristicAgent", "OracleAgent"]
|
scripts/push_to_hub.py
ADDED
|
@@ -0,0 +1,247 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Push CERNenv artefacts to the Hugging Face Hub.
|
| 2 |
+
|
| 3 |
+
Two subcommands:
|
| 4 |
+
|
| 5 |
+
* ``model`` — push trained LoRA adapters (output of ``training_unsloth.py``)
|
| 6 |
+
to a model repo. Generates a model card describing the run.
|
| 7 |
+
|
| 8 |
+
* ``space`` — push a directory as a Hugging Face Space
|
| 9 |
+
(e.g. ``space/training`` for the trainer Space, or the project root
|
| 10 |
+
to publish the env Space). Front-matter is taken from the README.md
|
| 11 |
+
inside the directory.
|
| 12 |
+
|
| 13 |
+
Usage:
|
| 14 |
+
python -m scripts.push_to_hub model \\
|
| 15 |
+
--adapter_dir runs/unsloth-grpo \\
|
| 16 |
+
--repo_id YOUR_HF_USERNAME/cernenv-grpo-qwen2.5-3b \\
|
| 17 |
+
--base_model unsloth/Qwen2.5-3B-Instruct
|
| 18 |
+
|
| 19 |
+
python -m scripts.push_to_hub space \\
|
| 20 |
+
--space_dir space/training \\
|
| 21 |
+
--repo_id YOUR_HF_USERNAME/cernenv-trainer \\
|
| 22 |
+
--hardware a100-large
|
| 23 |
+
|
| 24 |
+
python -m scripts.push_to_hub space \\
|
| 25 |
+
--space_dir . \\
|
| 26 |
+
--repo_id YOUR_HF_USERNAME/cernenv \\
|
| 27 |
+
--include "models.py" "server/**" "openenv.yaml" "pyproject.toml" "client.py" "README.md"
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
from __future__ import annotations
|
| 31 |
+
|
| 32 |
+
import argparse
|
| 33 |
+
import logging
|
| 34 |
+
import os
|
| 35 |
+
import sys
|
| 36 |
+
from pathlib import Path
|
| 37 |
+
from typing import Iterable, List, Optional
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
|
| 41 |
+
logger = logging.getLogger(__name__)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
DEFAULT_SPACE_EXCLUDES: List[str] = [
|
| 45 |
+
".venv/**",
|
| 46 |
+
"__pycache__/**",
|
| 47 |
+
"**/__pycache__/**",
|
| 48 |
+
"*.pyc",
|
| 49 |
+
".cursor/**",
|
| 50 |
+
".git/**",
|
| 51 |
+
".DS_Store",
|
| 52 |
+
"**/.DS_Store",
|
| 53 |
+
"runs/**",
|
| 54 |
+
"training/runs/**",
|
| 55 |
+
"training/plots/**",
|
| 56 |
+
"wandb/**",
|
| 57 |
+
"*.zip",
|
| 58 |
+
"*.apk",
|
| 59 |
+
"*.png",
|
| 60 |
+
"*.jpg",
|
| 61 |
+
"*.jpeg",
|
| 62 |
+
"[External]*.txt",
|
| 63 |
+
"Hackathon FAQs*.txt",
|
| 64 |
+
"*.log",
|
| 65 |
+
]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _hf_login() -> None:
|
| 69 |
+
from huggingface_hub import login
|
| 70 |
+
|
| 71 |
+
token = os.environ.get("HF_TOKEN")
|
| 72 |
+
if not token:
|
| 73 |
+
raise SystemExit(
|
| 74 |
+
"HF_TOKEN environment variable is required (write-scoped Hugging Face token)."
|
| 75 |
+
)
|
| 76 |
+
login(token=token)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _model_card(*, repo_id: str, base_model: str, run_dir: Path) -> str:
|
| 80 |
+
return f"""---
|
| 81 |
+
license: bsd-3-clause
|
| 82 |
+
library_name: peft
|
| 83 |
+
base_model: {base_model}
|
| 84 |
+
tags:
|
| 85 |
+
- cernenv
|
| 86 |
+
- openenv
|
| 87 |
+
- reinforcement-learning
|
| 88 |
+
- grpo
|
| 89 |
+
- unsloth
|
| 90 |
+
- lora
|
| 91 |
+
- particle-physics
|
| 92 |
+
---
|
| 93 |
+
|
| 94 |
+
# {repo_id}
|
| 95 |
+
|
| 96 |
+
LoRA (Low-Rank Adaptation) adapters trained with **GRPO** (Group-Relative
|
| 97 |
+
Policy Optimization) inside the **CERNenv** OpenEnv environment — an
|
| 98 |
+
LHC (Large Hadron Collider) particle-discovery POMDP (Partially Observable
|
| 99 |
+
Markov Decision Process).
|
| 100 |
+
|
| 101 |
+
The agent (this model) plays the role of a high-energy physicist running an
|
| 102 |
+
analysis: it configures the beam, allocates luminosity, picks decay
|
| 103 |
+
channels and triggers, reconstructs events, fits resonances, estimates
|
| 104 |
+
significance, and finally submits a structured discovery claim that is
|
| 105 |
+
graded against a hidden ground-truth particle.
|
| 106 |
+
|
| 107 |
+
* Base model: `{base_model}`
|
| 108 |
+
* RL framework: TRL (Transformer Reinforcement Learning) GRPO
|
| 109 |
+
* Acceleration: Unsloth + 4-bit + LoRA
|
| 110 |
+
* Environment: [CERNenv](https://huggingface.co/spaces/{repo_id.split('/')[0]}/cernenv)
|
| 111 |
+
|
| 112 |
+
## Usage
|
| 113 |
+
|
| 114 |
+
```python
|
| 115 |
+
from peft import PeftModel
|
| 116 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
| 117 |
+
|
| 118 |
+
base = "{base_model}"
|
| 119 |
+
adapter = "{repo_id}"
|
| 120 |
+
|
| 121 |
+
tokenizer = AutoTokenizer.from_pretrained(base)
|
| 122 |
+
model = AutoModelForCausalLM.from_pretrained(base, device_map="auto")
|
| 123 |
+
model = PeftModel.from_pretrained(model, adapter)
|
| 124 |
+
```
|
| 125 |
+
|
| 126 |
+
See the CERNenv repo for full evaluation, plots, and the `LLMAgent` wrapper.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def push_model(
|
| 131 |
+
*,
|
| 132 |
+
adapter_dir: str,
|
| 133 |
+
repo_id: str,
|
| 134 |
+
base_model: str,
|
| 135 |
+
private: bool,
|
| 136 |
+
) -> None:
|
| 137 |
+
from huggingface_hub import HfApi, create_repo
|
| 138 |
+
|
| 139 |
+
_hf_login()
|
| 140 |
+
api = HfApi()
|
| 141 |
+
|
| 142 |
+
run_dir = Path(adapter_dir)
|
| 143 |
+
if not run_dir.exists():
|
| 144 |
+
raise SystemExit(f"adapter_dir not found: {run_dir}")
|
| 145 |
+
|
| 146 |
+
create_repo(repo_id=repo_id, repo_type="model", private=private, exist_ok=True)
|
| 147 |
+
|
| 148 |
+
card_path = run_dir / "README.md"
|
| 149 |
+
card_path.write_text(_model_card(repo_id=repo_id, base_model=base_model, run_dir=run_dir))
|
| 150 |
+
|
| 151 |
+
logger.info("uploading %s → %s", run_dir, repo_id)
|
| 152 |
+
api.upload_folder(
|
| 153 |
+
folder_path=str(run_dir),
|
| 154 |
+
repo_id=repo_id,
|
| 155 |
+
repo_type="model",
|
| 156 |
+
commit_message="Upload CERNenv GRPO LoRA adapters",
|
| 157 |
+
)
|
| 158 |
+
logger.info("done: https://huggingface.co/%s", repo_id)
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def push_space(
|
| 162 |
+
*,
|
| 163 |
+
space_dir: str,
|
| 164 |
+
repo_id: str,
|
| 165 |
+
hardware: Optional[str],
|
| 166 |
+
private: bool,
|
| 167 |
+
include: Optional[List[str]],
|
| 168 |
+
exclude: Optional[List[str]],
|
| 169 |
+
) -> None:
|
| 170 |
+
from huggingface_hub import HfApi, create_repo
|
| 171 |
+
|
| 172 |
+
_hf_login()
|
| 173 |
+
api = HfApi()
|
| 174 |
+
|
| 175 |
+
src = Path(space_dir).resolve()
|
| 176 |
+
if not src.exists():
|
| 177 |
+
raise SystemExit(f"space_dir not found: {src}")
|
| 178 |
+
|
| 179 |
+
create_repo(
|
| 180 |
+
repo_id=repo_id,
|
| 181 |
+
repo_type="space",
|
| 182 |
+
space_sdk="docker",
|
| 183 |
+
space_hardware=hardware,
|
| 184 |
+
private=private,
|
| 185 |
+
exist_ok=True,
|
| 186 |
+
)
|
| 187 |
+
|
| 188 |
+
effective_exclude = list(DEFAULT_SPACE_EXCLUDES)
|
| 189 |
+
if exclude:
|
| 190 |
+
effective_exclude.extend(exclude)
|
| 191 |
+
|
| 192 |
+
logger.info("uploading %s → space:%s", src, repo_id)
|
| 193 |
+
logger.info("ignore patterns: %s", effective_exclude)
|
| 194 |
+
api.upload_folder(
|
| 195 |
+
folder_path=str(src),
|
| 196 |
+
repo_id=repo_id,
|
| 197 |
+
repo_type="space",
|
| 198 |
+
commit_message="Update CERNenv Space",
|
| 199 |
+
allow_patterns=include,
|
| 200 |
+
ignore_patterns=effective_exclude,
|
| 201 |
+
)
|
| 202 |
+
logger.info("done: https://huggingface.co/spaces/%s", repo_id)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def main() -> None: # pragma: no cover
|
| 206 |
+
parser = argparse.ArgumentParser()
|
| 207 |
+
sub = parser.add_subparsers(dest="cmd", required=True)
|
| 208 |
+
|
| 209 |
+
m = sub.add_parser("model", help="push trained LoRA adapters to the Hub")
|
| 210 |
+
m.add_argument("--adapter_dir", required=True)
|
| 211 |
+
m.add_argument("--repo_id", required=True)
|
| 212 |
+
m.add_argument("--base_model", required=True)
|
| 213 |
+
m.add_argument("--private", action="store_true")
|
| 214 |
+
|
| 215 |
+
s = sub.add_parser("space", help="push a directory as an HF Space")
|
| 216 |
+
s.add_argument("--space_dir", required=True)
|
| 217 |
+
s.add_argument("--repo_id", required=True)
|
| 218 |
+
s.add_argument("--hardware", default=None,
|
| 219 |
+
help="e.g. a100-large, t4-small, l4-medium")
|
| 220 |
+
s.add_argument("--private", action="store_true")
|
| 221 |
+
s.add_argument("--include", nargs="*", default=None,
|
| 222 |
+
help="glob patterns to include")
|
| 223 |
+
s.add_argument("--exclude", nargs="*", default=None,
|
| 224 |
+
help="glob patterns to exclude")
|
| 225 |
+
|
| 226 |
+
args = parser.parse_args()
|
| 227 |
+
|
| 228 |
+
if args.cmd == "model":
|
| 229 |
+
push_model(
|
| 230 |
+
adapter_dir=args.adapter_dir,
|
| 231 |
+
repo_id=args.repo_id,
|
| 232 |
+
base_model=args.base_model,
|
| 233 |
+
private=args.private,
|
| 234 |
+
)
|
| 235 |
+
elif args.cmd == "space":
|
| 236 |
+
push_space(
|
| 237 |
+
space_dir=args.space_dir,
|
| 238 |
+
repo_id=args.repo_id,
|
| 239 |
+
hardware=args.hardware,
|
| 240 |
+
private=args.private,
|
| 241 |
+
include=args.include,
|
| 242 |
+
exclude=args.exclude,
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
if __name__ == "__main__": # pragma: no cover
|
| 247 |
+
main()
|
scripts/run_agent.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Run a (non-LLM) baseline agent against the in-process environment.
|
| 2 |
+
|
| 3 |
+
Usage:
|
| 4 |
+
python -m scripts.run_agent --agent heuristic --scenario easy_diphoton_160 --seed 7
|
| 5 |
+
python -m scripts.run_agent --agent oracle --difficulty hard --episodes 5
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import argparse
|
| 11 |
+
import json
|
| 12 |
+
from dataclasses import asdict
|
| 13 |
+
from typing import Any, Dict, List
|
| 14 |
+
|
| 15 |
+
from server.environment import CERNCollisionEnvironment
|
| 16 |
+
from scripts.baseline_agents import (
|
| 17 |
+
HeuristicAgent,
|
| 18 |
+
OracleAgent,
|
| 19 |
+
RandomAgent,
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
AGENT_REGISTRY = {
|
| 24 |
+
"random": RandomAgent,
|
| 25 |
+
"heuristic": HeuristicAgent,
|
| 26 |
+
"oracle": OracleAgent,
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def run_episode(
|
| 31 |
+
*,
|
| 32 |
+
agent_name: str,
|
| 33 |
+
difficulty: str | None,
|
| 34 |
+
scenario: str | None,
|
| 35 |
+
seed: int,
|
| 36 |
+
max_steps: int,
|
| 37 |
+
verbose: bool,
|
| 38 |
+
) -> Dict[str, Any]:
|
| 39 |
+
env = CERNCollisionEnvironment(max_steps=max_steps)
|
| 40 |
+
obs = env.reset(seed=seed, scenario=scenario, difficulty=difficulty)
|
| 41 |
+
|
| 42 |
+
agent_cls = AGENT_REGISTRY[agent_name]
|
| 43 |
+
if agent_name == "random":
|
| 44 |
+
agent = agent_cls(seed=seed)
|
| 45 |
+
else:
|
| 46 |
+
agent = agent_cls()
|
| 47 |
+
if agent_name == "oracle":
|
| 48 |
+
agent.truth = env.hidden_truth()
|
| 49 |
+
|
| 50 |
+
agent.reset()
|
| 51 |
+
|
| 52 |
+
total_reward = 0.0
|
| 53 |
+
step_log: List[Dict[str, Any]] = []
|
| 54 |
+
while not obs.done:
|
| 55 |
+
action = agent.act(obs)
|
| 56 |
+
obs = env.step(action)
|
| 57 |
+
total_reward += float(obs.reward or 0.0)
|
| 58 |
+
if verbose:
|
| 59 |
+
print(
|
| 60 |
+
f" step {obs.step_index:2d} {action.action_type.value:24s} "
|
| 61 |
+
f"rew={obs.reward:+.3f} done={obs.done}"
|
| 62 |
+
)
|
| 63 |
+
step_log.append(
|
| 64 |
+
{
|
| 65 |
+
"step": obs.step_index,
|
| 66 |
+
"action": action.action_type.value,
|
| 67 |
+
"reward": float(obs.reward or 0.0),
|
| 68 |
+
"violations": obs.rule_violations,
|
| 69 |
+
}
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
summary = {
|
| 73 |
+
"agent": agent_name,
|
| 74 |
+
"scenario": env.state.scenario_name,
|
| 75 |
+
"difficulty": env.state.difficulty,
|
| 76 |
+
"seed": seed,
|
| 77 |
+
"total_reward": total_reward,
|
| 78 |
+
"cumulative_reward": float(env.state.cumulative_reward),
|
| 79 |
+
"terminal_reward": env.state.terminal_reward,
|
| 80 |
+
"discovered": env.state.discovered,
|
| 81 |
+
"correct_mass": env.state.correct_mass,
|
| 82 |
+
"correct_channel": env.state.correct_channel,
|
| 83 |
+
"correct_spin": env.state.correct_spin,
|
| 84 |
+
"steps": len(step_log),
|
| 85 |
+
"truth": env.hidden_truth(),
|
| 86 |
+
"log": step_log,
|
| 87 |
+
}
|
| 88 |
+
return summary
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def main() -> None:
|
| 92 |
+
parser = argparse.ArgumentParser()
|
| 93 |
+
parser.add_argument("--agent", choices=list(AGENT_REGISTRY), default="heuristic")
|
| 94 |
+
parser.add_argument("--scenario", default=None)
|
| 95 |
+
parser.add_argument("--difficulty", choices=["easy", "medium", "hard"], default=None)
|
| 96 |
+
parser.add_argument("--seed", type=int, default=0)
|
| 97 |
+
parser.add_argument("--episodes", type=int, default=1)
|
| 98 |
+
parser.add_argument("--max-steps", type=int, default=40)
|
| 99 |
+
parser.add_argument("--out", default=None, help="Optional path to dump JSON results")
|
| 100 |
+
parser.add_argument("--quiet", action="store_true")
|
| 101 |
+
args = parser.parse_args()
|
| 102 |
+
|
| 103 |
+
rollouts: List[Dict[str, Any]] = []
|
| 104 |
+
for ep in range(args.episodes):
|
| 105 |
+
seed = args.seed + ep
|
| 106 |
+
summary = run_episode(
|
| 107 |
+
agent_name=args.agent,
|
| 108 |
+
difficulty=args.difficulty,
|
| 109 |
+
scenario=args.scenario,
|
| 110 |
+
seed=seed,
|
| 111 |
+
max_steps=args.max_steps,
|
| 112 |
+
verbose=not args.quiet and args.episodes == 1,
|
| 113 |
+
)
|
| 114 |
+
rollouts.append(summary)
|
| 115 |
+
print(
|
| 116 |
+
f"[{ep + 1}/{args.episodes}] agent={args.agent} "
|
| 117 |
+
f"scenario={summary['scenario']} reward={summary['total_reward']:+.3f} "
|
| 118 |
+
f"discovered={summary['discovered']} correct_mass={summary['correct_mass']} "
|
| 119 |
+
f"correct_channel={summary['correct_channel']}"
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
if args.out:
|
| 123 |
+
with open(args.out, "w") as f:
|
| 124 |
+
json.dump(rollouts, f, indent=2, default=str)
|
| 125 |
+
print(f"saved → {args.out}")
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
if __name__ == "__main__":
|
| 129 |
+
main()
|
server/Dockerfile
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CERNenv server: OpenEnv FastAPI image
|
| 2 |
+
ARG BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest
|
| 3 |
+
FROM ${BASE_IMAGE} AS builder
|
| 4 |
+
|
| 5 |
+
WORKDIR /app
|
| 6 |
+
|
| 7 |
+
RUN apt-get update && \
|
| 8 |
+
apt-get install -y --no-install-recommends git curl && \
|
| 9 |
+
rm -rf /var/lib/apt/lists/*
|
| 10 |
+
|
| 11 |
+
ARG ENV_NAME=cernenv
|
| 12 |
+
|
| 13 |
+
COPY . /app/env
|
| 14 |
+
|
| 15 |
+
WORKDIR /app/env
|
| 16 |
+
|
| 17 |
+
RUN if ! command -v uv >/dev/null 2>&1; then \
|
| 18 |
+
curl -LsSf https://astral.sh/uv/install.sh | sh && \
|
| 19 |
+
mv /root/.local/bin/uv /usr/local/bin/uv && \
|
| 20 |
+
mv /root/.local/bin/uvx /usr/local/bin/uvx; \
|
| 21 |
+
fi
|
| 22 |
+
|
| 23 |
+
RUN --mount=type=cache,target=/root/.cache/uv \
|
| 24 |
+
if [ -f uv.lock ]; then \
|
| 25 |
+
uv sync --frozen --no-install-project --no-editable; \
|
| 26 |
+
else \
|
| 27 |
+
uv sync --no-install-project --no-editable; \
|
| 28 |
+
fi
|
| 29 |
+
|
| 30 |
+
RUN --mount=type=cache,target=/root/.cache/uv \
|
| 31 |
+
if [ -f uv.lock ]; then \
|
| 32 |
+
uv sync --frozen --no-editable; \
|
| 33 |
+
else \
|
| 34 |
+
uv sync --no-editable; \
|
| 35 |
+
fi
|
| 36 |
+
|
| 37 |
+
FROM ${BASE_IMAGE}
|
| 38 |
+
|
| 39 |
+
WORKDIR /app
|
| 40 |
+
|
| 41 |
+
COPY --from=builder /app/env/.venv /app/.venv
|
| 42 |
+
COPY --from=builder /app/env /app/env
|
| 43 |
+
|
| 44 |
+
ENV PATH="/app/.venv/bin:$PATH"
|
| 45 |
+
ENV PYTHONPATH="/app/env:$PYTHONPATH"
|
| 46 |
+
|
| 47 |
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
| 48 |
+
CMD curl -f http://localhost:8000/health || exit 1
|
| 49 |
+
|
| 50 |
+
CMD ["sh", "-c", "cd /app/env && uvicorn server.app:app --host 0.0.0.0 --port 8000"]
|
server/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""CERNenv server package."""
|
server/app.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI app exposing ``CERNCollisionEnvironment`` over the OpenEnv HTTP API.
|
| 2 |
+
|
| 3 |
+
We delegate the standard OpenEnv routes (``/reset``, ``/step``, ``/state``,
|
| 4 |
+
``/schema``, ``/health``, ``/mcp``) to ``create_fastapi_app`` and add a
|
| 5 |
+
human-friendly landing page at ``/`` so the Hugging Face Space preview
|
| 6 |
+
shows the project description instead of a 404.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
from typing import Optional
|
| 13 |
+
|
| 14 |
+
from fastapi.responses import HTMLResponse
|
| 15 |
+
from openenv.core.env_server import create_fastapi_app
|
| 16 |
+
|
| 17 |
+
from models import CollisionObservation, ExperimentAction
|
| 18 |
+
from server.environment import CERNCollisionEnvironment
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
_LANDING_PAGE = """\
|
| 22 |
+
<!doctype html>
|
| 23 |
+
<html lang=en>
|
| 24 |
+
<head>
|
| 25 |
+
<meta charset=utf-8>
|
| 26 |
+
<title>CERNenv — LHC Discovery RL Environment</title>
|
| 27 |
+
<meta name=viewport content="width=device-width,initial-scale=1">
|
| 28 |
+
<style>
|
| 29 |
+
body { font-family: ui-sans-serif, system-ui, -apple-system, sans-serif;
|
| 30 |
+
margin: 2rem auto; max-width: 780px; color:#111; padding: 0 1rem; line-height:1.5 }
|
| 31 |
+
h1 { margin-bottom: .25rem }
|
| 32 |
+
h2 { margin-top: 2rem; border-bottom: 1px solid #eee; padding-bottom: .25rem }
|
| 33 |
+
code { background:#f4f4f4; padding:.05rem .35rem; border-radius:4px; font-size:.95em }
|
| 34 |
+
pre { background:#0e1116; color:#e6edf3; padding:1rem; border-radius:6px; overflow-x:auto }
|
| 35 |
+
pre code { background:transparent; color:inherit; padding:0 }
|
| 36 |
+
.pill { display:inline-block; padding:.1rem .55rem; border-radius:999px;
|
| 37 |
+
background:#e8f0ff; color:#1d4ed8; font-size:.85em; margin-right:.25rem }
|
| 38 |
+
.muted { color:#666 }
|
| 39 |
+
a { color:#1d4ed8 }
|
| 40 |
+
table { border-collapse:collapse; margin: .5rem 0 }
|
| 41 |
+
th,td { text-align:left; padding:.25rem .9rem .25rem 0; vertical-align: top }
|
| 42 |
+
th { color:#444; font-weight:600 }
|
| 43 |
+
</style>
|
| 44 |
+
</head>
|
| 45 |
+
<body>
|
| 46 |
+
<h1>⚛️ CERNenv</h1>
|
| 47 |
+
<p class=muted>An LHC (Large Hadron Collider) particle-discovery RL environment for autonomous physicist agents — built for the Meta OpenEnv Hackathon.</p>
|
| 48 |
+
|
| 49 |
+
<p>
|
| 50 |
+
<span class=pill>OpenEnv</span>
|
| 51 |
+
<span class=pill>POMDP</span>
|
| 52 |
+
<span class=pill>16 action types</span>
|
| 53 |
+
<span class=pill>3 difficulty levels</span>
|
| 54 |
+
<span class=pill>HTTP + WebSocket</span>
|
| 55 |
+
</p>
|
| 56 |
+
|
| 57 |
+
<h2>What this is</h2>
|
| 58 |
+
<p>
|
| 59 |
+
A Large Language Model (LLM) agent plays a high-energy physicist running an
|
| 60 |
+
analysis at the LHC. Each step it picks one structured action — configure
|
| 61 |
+
the beam, allocate luminosity, set a trigger, collect collisions, fit a
|
| 62 |
+
resonance, estimate significance, submit a discovery claim, and so on —
|
| 63 |
+
and receives a noisy detector-style observation. The latent particle
|
| 64 |
+
(mass, decay channel, branching ratios, width) is hidden ground truth.
|
| 65 |
+
Reward decomposes into per-step shaping + a dominant terminal calibration
|
| 66 |
+
against the truth particle.
|
| 67 |
+
</p>
|
| 68 |
+
|
| 69 |
+
<h2>API</h2>
|
| 70 |
+
<table>
|
| 71 |
+
<tr><th><code>GET /health</code></th><td>liveness probe</td></tr>
|
| 72 |
+
<tr><th><code>GET /schema</code></th><td>JSON schemas for actions, observations, state</td></tr>
|
| 73 |
+
<tr><th><code>POST /reset</code></th><td>start a new episode (e.g. <code>{"seed": 7, "scenario": "easy_diphoton_160"}</code>)</td></tr>
|
| 74 |
+
<tr><th><code>POST /step</code></th><td>execute one action (<code>{"action": {"action_type": ..., "parameters": {...}, "justification": "..."}}</code>)</td></tr>
|
| 75 |
+
<tr><th><code>GET /state</code></th><td>current public state snapshot</td></tr>
|
| 76 |
+
<tr><th><code>GET /docs</code></th><td>interactive Swagger UI</td></tr>
|
| 77 |
+
<tr><th><code>GET /metadata</code></th><td>environment metadata</td></tr>
|
| 78 |
+
</table>
|
| 79 |
+
|
| 80 |
+
<h2>Quickstart</h2>
|
| 81 |
+
<pre><code># reset
|
| 82 |
+
curl -X POST $URL/reset \\
|
| 83 |
+
-H 'Content-Type: application/json' \\
|
| 84 |
+
-d '{"seed": 7, "scenario": "easy_diphoton_160"}'
|
| 85 |
+
|
| 86 |
+
# step
|
| 87 |
+
curl -X POST $URL/step \\
|
| 88 |
+
-H 'Content-Type: application/json' \\
|
| 89 |
+
-d '{"action": {"action_type": "configure_beam",
|
| 90 |
+
"parameters": {"sqrt_s_tev": 13.0},
|
| 91 |
+
"justification": "set 13 TeV"}}'</code></pre>
|
| 92 |
+
|
| 93 |
+
<h2>Companion Spaces</h2>
|
| 94 |
+
<ul>
|
| 95 |
+
<li>📓 Trainer (Unsloth + LoRA + GRPO on A100): <a href="https://huggingface.co/spaces/anugrah55/cernenv-trainer">anugrah55/cernenv-trainer</a></li>
|
| 96 |
+
<li>🎯 Trained adapters (LoRA): <a href="https://huggingface.co/anugrah55/cernenv-grpo-qwen2.5-3b">anugrah55/cernenv-grpo-qwen2.5-3b</a></li>
|
| 97 |
+
</ul>
|
| 98 |
+
|
| 99 |
+
<p class=muted style="margin-top:2rem">CERNenv · OpenEnv-compatible · BSD-3-Clause</p>
|
| 100 |
+
</body>
|
| 101 |
+
</html>
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def make_env_factory(
|
| 106 |
+
max_steps: int,
|
| 107 |
+
default_difficulty: Optional[str],
|
| 108 |
+
):
|
| 109 |
+
def factory() -> CERNCollisionEnvironment:
|
| 110 |
+
return CERNCollisionEnvironment(
|
| 111 |
+
max_steps=max_steps,
|
| 112 |
+
default_difficulty=default_difficulty,
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
return factory
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def build_app(
|
| 119 |
+
*,
|
| 120 |
+
max_steps: int = 40,
|
| 121 |
+
default_difficulty: Optional[str] = None,
|
| 122 |
+
):
|
| 123 |
+
"""Construct the FastAPI app with a per-session environment factory.
|
| 124 |
+
|
| 125 |
+
The OpenEnv-provided routes (`/reset`, `/step`, `/state`, `/schema`,
|
| 126 |
+
`/health`, `/mcp`) come from ``create_fastapi_app``. We then mount a
|
| 127 |
+
friendly landing page at ``/`` so the Space preview is informative.
|
| 128 |
+
"""
|
| 129 |
+
factory = make_env_factory(max_steps=max_steps, default_difficulty=default_difficulty)
|
| 130 |
+
fa_app = create_fastapi_app(factory, ExperimentAction, CollisionObservation)
|
| 131 |
+
|
| 132 |
+
@fa_app.get("/", response_class=HTMLResponse, include_in_schema=False)
|
| 133 |
+
def landing() -> HTMLResponse: # pragma: no cover - trivial
|
| 134 |
+
return HTMLResponse(_LANDING_PAGE)
|
| 135 |
+
|
| 136 |
+
return fa_app
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
app = build_app(
|
| 140 |
+
max_steps=int(os.getenv("CERNENV_MAX_STEPS", "40")),
|
| 141 |
+
default_difficulty=os.getenv("CERNENV_DEFAULT_DIFFICULTY") or None,
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def main() -> None: # pragma: no cover - CLI entrypoint
|
| 146 |
+
import uvicorn
|
| 147 |
+
|
| 148 |
+
host = os.getenv("HOST", "0.0.0.0")
|
| 149 |
+
port = int(os.getenv("PORT", "8000"))
|
| 150 |
+
uvicorn.run("server.app:app", host=host, port=port, log_level="info")
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
if __name__ == "__main__": # pragma: no cover
|
| 154 |
+
main()
|
server/environment.py
ADDED
|
@@ -0,0 +1,369 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""``CERNCollisionEnvironment``: orchestrates simulator + rules + rewards.
|
| 2 |
+
|
| 3 |
+
This is the OpenEnv-compatible ``Environment`` that the FastAPI app exposes.
|
| 4 |
+
It owns one episode at a time:
|
| 5 |
+
|
| 6 |
+
reset(seed) → builds a fresh latent state from a sampled scenario.
|
| 7 |
+
step(action) → validates → generates noisy output → updates state →
|
| 8 |
+
computes reward → builds the agent observation.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
import logging
|
| 14 |
+
import uuid
|
| 15 |
+
from typing import Any, List, Optional
|
| 16 |
+
|
| 17 |
+
from openenv.core.env_server import Environment
|
| 18 |
+
|
| 19 |
+
from models import (
|
| 20 |
+
AGENT_ENVIRONMENT_RULES,
|
| 21 |
+
ActionType,
|
| 22 |
+
CernState,
|
| 23 |
+
CollisionObservation,
|
| 24 |
+
DiscoveryClaim,
|
| 25 |
+
ExperimentAction,
|
| 26 |
+
IntermediateOutput,
|
| 27 |
+
OutputType,
|
| 28 |
+
PipelineStepRecord,
|
| 29 |
+
ResourceUsage,
|
| 30 |
+
TaskSpec,
|
| 31 |
+
build_agent_system_prompt,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
from server.rewards import (
|
| 35 |
+
RewardWeights,
|
| 36 |
+
compute_step_reward,
|
| 37 |
+
compute_terminal_reward,
|
| 38 |
+
)
|
| 39 |
+
from server.rules import RulesEngine, ViolationCode
|
| 40 |
+
from server.simulator import (
|
| 41 |
+
NoiseModel,
|
| 42 |
+
OutputGenerator,
|
| 43 |
+
TransitionEngine,
|
| 44 |
+
compute_action_cost,
|
| 45 |
+
)
|
| 46 |
+
from server.simulator.latent_state import FullLatentState
|
| 47 |
+
from server.tasks import sample_scenario, Scenario
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
logger = logging.getLogger(__name__)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# ── Environment ──────────────────────────────────────────────────────────
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class CERNCollisionEnvironment(Environment[ExperimentAction, CollisionObservation, CernState]):
|
| 57 |
+
"""LHC particle-discovery POMDP environment."""
|
| 58 |
+
|
| 59 |
+
SUPPORTS_CONCURRENT_SESSIONS = True
|
| 60 |
+
|
| 61 |
+
def __init__(
|
| 62 |
+
self,
|
| 63 |
+
*,
|
| 64 |
+
max_steps: int = 40,
|
| 65 |
+
default_difficulty: Optional[str] = None,
|
| 66 |
+
default_scenario_name: Optional[str] = None,
|
| 67 |
+
reward_weights: Optional[RewardWeights] = None,
|
| 68 |
+
) -> None:
|
| 69 |
+
super().__init__()
|
| 70 |
+
self.max_steps = max_steps
|
| 71 |
+
self.default_difficulty = default_difficulty
|
| 72 |
+
self.default_scenario_name = default_scenario_name
|
| 73 |
+
self.reward_weights = reward_weights or RewardWeights()
|
| 74 |
+
|
| 75 |
+
self._state = CernState()
|
| 76 |
+
self._scenario: Optional[Scenario] = None
|
| 77 |
+
self._latent: Optional[FullLatentState] = None
|
| 78 |
+
self._task: Optional[TaskSpec] = None
|
| 79 |
+
self._noise: Optional[NoiseModel] = None
|
| 80 |
+
self._output_gen: Optional[OutputGenerator] = None
|
| 81 |
+
self._transition: Optional[TransitionEngine] = None
|
| 82 |
+
self._rules: Optional[RulesEngine] = None
|
| 83 |
+
self._history: List[PipelineStepRecord] = []
|
| 84 |
+
self._all_outputs: List[IntermediateOutput] = []
|
| 85 |
+
|
| 86 |
+
# ── Environment API ────────────────────────────────────────────────
|
| 87 |
+
|
| 88 |
+
@property
|
| 89 |
+
def state(self) -> CernState:
|
| 90 |
+
return self._state
|
| 91 |
+
|
| 92 |
+
def reset(
|
| 93 |
+
self,
|
| 94 |
+
seed: Optional[int] = None,
|
| 95 |
+
episode_id: Optional[str] = None,
|
| 96 |
+
**kwargs: Any,
|
| 97 |
+
) -> CollisionObservation:
|
| 98 |
+
difficulty = kwargs.get("difficulty") or self.default_difficulty
|
| 99 |
+
scenario_name = kwargs.get("scenario") or self.default_scenario_name
|
| 100 |
+
|
| 101 |
+
scenario = sample_scenario(
|
| 102 |
+
difficulty=difficulty,
|
| 103 |
+
name=scenario_name,
|
| 104 |
+
seed=seed,
|
| 105 |
+
)
|
| 106 |
+
self._scenario = scenario
|
| 107 |
+
self._latent = scenario.fresh_latent()
|
| 108 |
+
self._task = scenario.task
|
| 109 |
+
if seed is not None:
|
| 110 |
+
self._latent.rng_seed = int(seed)
|
| 111 |
+
self._noise = NoiseModel(seed=self._latent.rng_seed)
|
| 112 |
+
self._output_gen = OutputGenerator(self._noise)
|
| 113 |
+
self._transition = TransitionEngine()
|
| 114 |
+
self._rules = RulesEngine(
|
| 115 |
+
mass_search_window_gev=tuple(self._task.mass_search_window_gev),
|
| 116 |
+
)
|
| 117 |
+
self._history = []
|
| 118 |
+
self._all_outputs = []
|
| 119 |
+
|
| 120 |
+
self._state = CernState(
|
| 121 |
+
episode_id=episode_id or f"ep-{uuid.uuid4().hex[:8]}",
|
| 122 |
+
step_count=0,
|
| 123 |
+
scenario_name=scenario.name,
|
| 124 |
+
difficulty=scenario.difficulty,
|
| 125 |
+
episode_done=False,
|
| 126 |
+
cumulative_reward=0.0,
|
| 127 |
+
truth_mass_gev=self._latent.particle.mass_gev,
|
| 128 |
+
truth_channel=self._latent.particle.primary_channel,
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
obs = self._build_observation(
|
| 132 |
+
latest_output=None,
|
| 133 |
+
done=False,
|
| 134 |
+
reward=0.0,
|
| 135 |
+
step_breakdown={},
|
| 136 |
+
rule_violations=[],
|
| 137 |
+
)
|
| 138 |
+
return obs
|
| 139 |
+
|
| 140 |
+
def step(
|
| 141 |
+
self,
|
| 142 |
+
action: ExperimentAction,
|
| 143 |
+
timeout_s: Optional[float] = None,
|
| 144 |
+
**kwargs: Any,
|
| 145 |
+
) -> CollisionObservation:
|
| 146 |
+
"""Apply one action and return the next observation.
|
| 147 |
+
|
| 148 |
+
``timeout_s`` is accepted for OpenEnv API compatibility but is a
|
| 149 |
+
no-op for this environment: each ``step`` is pure-compute (numpy
|
| 150 |
+
ops on a small latent state, sub-millisecond) and cannot hang.
|
| 151 |
+
The episode-level "sandbox" enforced here is *resource* exhaustion
|
| 152 |
+
— budget (M$), integrated luminosity (fb⁻¹), and wall-time-days —
|
| 153 |
+
which is checked at the bottom of this method and terminates the
|
| 154 |
+
episode via ``done=True`` when any limit is crossed. That is the
|
| 155 |
+
meaningful timeout for an LHC-discovery rollout.
|
| 156 |
+
|
| 157 |
+
If ``timeout_s`` is non-None we log it once at debug level so
|
| 158 |
+
callers can confirm their value is being received without changing
|
| 159 |
+
any runtime behaviour.
|
| 160 |
+
"""
|
| 161 |
+
|
| 162 |
+
if timeout_s is not None:
|
| 163 |
+
logger.debug(
|
| 164 |
+
"step() received timeout_s=%.3fs (informational; "
|
| 165 |
+
"actual cutoff is resource-exhaustion based)",
|
| 166 |
+
float(timeout_s),
|
| 167 |
+
)
|
| 168 |
+
|
| 169 |
+
if self._latent is None:
|
| 170 |
+
self.reset()
|
| 171 |
+
if self._state.episode_done:
|
| 172 |
+
return self._build_terminal_observation(reason="episode already complete")
|
| 173 |
+
|
| 174 |
+
assert self._rules is not None
|
| 175 |
+
assert self._output_gen is not None
|
| 176 |
+
assert self._transition is not None
|
| 177 |
+
|
| 178 |
+
prev_state = self._latent.model_copy(deep=True)
|
| 179 |
+
rule_result = self._rules.validate(action, self._latent)
|
| 180 |
+
|
| 181 |
+
if not rule_result.allowed:
|
| 182 |
+
output = IntermediateOutput(
|
| 183 |
+
output_type=OutputType.FAILURE_REPORT,
|
| 184 |
+
step_index=self._state.step_count,
|
| 185 |
+
success=False,
|
| 186 |
+
quality_score=0.0,
|
| 187 |
+
summary="Action rejected: " + "; ".join(rule_result.messages),
|
| 188 |
+
warnings=rule_result.messages,
|
| 189 |
+
)
|
| 190 |
+
else:
|
| 191 |
+
output = self._output_gen.generate(
|
| 192 |
+
action=action,
|
| 193 |
+
state=self._latent,
|
| 194 |
+
step_index=self._state.step_count,
|
| 195 |
+
)
|
| 196 |
+
|
| 197 |
+
# Apply transition (state mutation + cost accounting)
|
| 198 |
+
if rule_result.allowed:
|
| 199 |
+
self._transition.step(self._latent, action, output)
|
| 200 |
+
else:
|
| 201 |
+
cost = compute_action_cost(action, output)
|
| 202 |
+
self._latent.resources.budget_used_musd += cost["musd"]
|
| 203 |
+
self._latent.resources.time_used_days += cost["days"]
|
| 204 |
+
self._latent.step_count += 1
|
| 205 |
+
|
| 206 |
+
self._all_outputs.append(output)
|
| 207 |
+
cost = compute_action_cost(action, output)
|
| 208 |
+
record = PipelineStepRecord(
|
| 209 |
+
step_index=self._state.step_count,
|
| 210 |
+
action_type=action.action_type,
|
| 211 |
+
method=action.method,
|
| 212 |
+
parameters=action.parameters,
|
| 213 |
+
output_summary=output.summary,
|
| 214 |
+
output_type=output.output_type,
|
| 215 |
+
success=output.success,
|
| 216 |
+
quality_score=float(output.quality_score),
|
| 217 |
+
cost_musd=float(cost["musd"]),
|
| 218 |
+
luminosity_cost_fb=float(cost["luminosity_fb"]),
|
| 219 |
+
time_cost_days=float(cost["days"]),
|
| 220 |
+
)
|
| 221 |
+
self._history.append(record)
|
| 222 |
+
|
| 223 |
+
step_reward = compute_step_reward(
|
| 224 |
+
action=action,
|
| 225 |
+
output=output,
|
| 226 |
+
state_before=prev_state,
|
| 227 |
+
state_after=self._latent,
|
| 228 |
+
rule_result=rule_result,
|
| 229 |
+
weights=self.reward_weights,
|
| 230 |
+
history=self._history[:-1], # exclude the record we just appended
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
self._state.cumulative_reward += step_reward.reward
|
| 234 |
+
self._state.step_count += 1
|
| 235 |
+
|
| 236 |
+
terminal_now = (
|
| 237 |
+
action.action_type == ActionType.SUBMIT_DISCOVERY_CLAIM
|
| 238 |
+
and rule_result.allowed
|
| 239 |
+
)
|
| 240 |
+
time_up = (
|
| 241 |
+
self._state.step_count >= self.max_steps
|
| 242 |
+
or self._latent.resources.budget_exhausted
|
| 243 |
+
or self._latent.resources.time_exhausted
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
terminal_reward_value = 0.0
|
| 247 |
+
if terminal_now:
|
| 248 |
+
claim = self._claim_from_action(action)
|
| 249 |
+
term = compute_terminal_reward(
|
| 250 |
+
state=self._latent,
|
| 251 |
+
claim=claim,
|
| 252 |
+
weights=self.reward_weights,
|
| 253 |
+
)
|
| 254 |
+
terminal_reward_value = term.reward
|
| 255 |
+
self._state.cumulative_reward += terminal_reward_value
|
| 256 |
+
self._state.terminal_reward = terminal_reward_value
|
| 257 |
+
self._state.discovered = term.discovered
|
| 258 |
+
self._state.correct_mass = term.correct_mass
|
| 259 |
+
self._state.correct_channel = term.correct_channel
|
| 260 |
+
self._state.correct_spin = term.correct_spin
|
| 261 |
+
|
| 262 |
+
done = terminal_now or time_up
|
| 263 |
+
if done:
|
| 264 |
+
self._state.episode_done = True
|
| 265 |
+
|
| 266 |
+
observation = self._build_observation(
|
| 267 |
+
latest_output=output,
|
| 268 |
+
done=done,
|
| 269 |
+
reward=step_reward.reward + terminal_reward_value,
|
| 270 |
+
step_breakdown=step_reward.breakdown.components,
|
| 271 |
+
rule_violations=[
|
| 272 |
+
*(v.value for v in rule_result.violations),
|
| 273 |
+
*(v.value for v in rule_result.soft_violations),
|
| 274 |
+
],
|
| 275 |
+
)
|
| 276 |
+
return observation
|
| 277 |
+
|
| 278 |
+
# ── Helpers ────────────────────────────────────────────────────────
|
| 279 |
+
|
| 280 |
+
def _claim_from_action(self, action: ExperimentAction) -> DiscoveryClaim:
|
| 281 |
+
raw = action.parameters.get("claim") or {}
|
| 282 |
+
try:
|
| 283 |
+
return DiscoveryClaim(**raw)
|
| 284 |
+
except Exception as exc: # pragma: no cover - defensive
|
| 285 |
+
logger.warning("Malformed claim, defaulting: %s", exc)
|
| 286 |
+
return DiscoveryClaim()
|
| 287 |
+
|
| 288 |
+
def _build_terminal_observation(self, reason: str) -> CollisionObservation:
|
| 289 |
+
obs = self._build_observation(
|
| 290 |
+
latest_output=IntermediateOutput(
|
| 291 |
+
output_type=OutputType.FAILURE_REPORT,
|
| 292 |
+
step_index=self._state.step_count,
|
| 293 |
+
success=False,
|
| 294 |
+
summary=reason,
|
| 295 |
+
),
|
| 296 |
+
done=True,
|
| 297 |
+
reward=0.0,
|
| 298 |
+
step_breakdown={},
|
| 299 |
+
rule_violations=["episode_terminated"],
|
| 300 |
+
)
|
| 301 |
+
return obs
|
| 302 |
+
|
| 303 |
+
def _build_observation(
|
| 304 |
+
self,
|
| 305 |
+
*,
|
| 306 |
+
latest_output: Optional[IntermediateOutput],
|
| 307 |
+
done: bool,
|
| 308 |
+
reward: float,
|
| 309 |
+
step_breakdown: dict,
|
| 310 |
+
rule_violations: list,
|
| 311 |
+
) -> CollisionObservation:
|
| 312 |
+
assert self._latent is not None
|
| 313 |
+
assert self._task is not None
|
| 314 |
+
|
| 315 |
+
res = self._latent.resources
|
| 316 |
+
usage = ResourceUsage(
|
| 317 |
+
budget_used_musd=res.budget_used_musd,
|
| 318 |
+
budget_remaining_musd=res.budget_remaining,
|
| 319 |
+
luminosity_used_fb=res.luminosity_used_fb,
|
| 320 |
+
luminosity_remaining_fb=res.luminosity_remaining,
|
| 321 |
+
time_used_days=res.time_used_days,
|
| 322 |
+
time_remaining_days=res.time_remaining,
|
| 323 |
+
compute_hours_used=res.compute_hours_used,
|
| 324 |
+
)
|
| 325 |
+
|
| 326 |
+
obs = CollisionObservation(
|
| 327 |
+
done=done,
|
| 328 |
+
reward=float(reward),
|
| 329 |
+
task=self._task,
|
| 330 |
+
step_index=self._state.step_count,
|
| 331 |
+
pipeline_history=list(self._history),
|
| 332 |
+
available_channels=self._task.available_channels,
|
| 333 |
+
available_triggers=self._task.available_triggers,
|
| 334 |
+
available_tools=self._task.available_tools,
|
| 335 |
+
resource_usage=usage,
|
| 336 |
+
latest_output=latest_output,
|
| 337 |
+
all_outputs=list(self._all_outputs),
|
| 338 |
+
candidate_masses_gev=list(self._latent.candidate_masses_gev),
|
| 339 |
+
candidate_significances=list(self._latent.candidate_significances),
|
| 340 |
+
selected_channel=self._latent.selected_channel,
|
| 341 |
+
selected_beam_energy=self._latent.selected_beam_energy,
|
| 342 |
+
cumulative_significance=float(
|
| 343 |
+
self._latent.progress.best_significance_sigma or 0.0
|
| 344 |
+
),
|
| 345 |
+
uncertainty_summary={
|
| 346 |
+
"energy_scale_unc_gev": self._latent.detector.energy_scale_uncertainty,
|
| 347 |
+
"luminosity_unc": self._latent.detector.luminosity_uncertainty,
|
| 348 |
+
"resolution_gev": self._latent.detector.detector_resolution_gev,
|
| 349 |
+
},
|
| 350 |
+
rule_violations=rule_violations,
|
| 351 |
+
step_reward_breakdown=dict(step_breakdown),
|
| 352 |
+
)
|
| 353 |
+
return obs
|
| 354 |
+
|
| 355 |
+
# ── Convenience for diagnostics ────────────────────────────────────
|
| 356 |
+
|
| 357 |
+
def hidden_truth(self) -> Optional[dict]:
|
| 358 |
+
"""Reveal the hidden particle (debug / evaluation only)."""
|
| 359 |
+
if self._latent is None:
|
| 360 |
+
return None
|
| 361 |
+
return self._latent.particle.model_dump()
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
__all__ = [
|
| 365 |
+
"CernState",
|
| 366 |
+
"CERNCollisionEnvironment",
|
| 367 |
+
"AGENT_ENVIRONMENT_RULES",
|
| 368 |
+
"build_agent_system_prompt",
|
| 369 |
+
]
|
server/requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
openenv-core[core]>=0.2.3
|
| 2 |
+
numpy>=1.24.0
|
| 3 |
+
scipy>=1.10.0
|
| 4 |
+
pydantic>=2.0.0
|
| 5 |
+
fastapi>=0.110.0
|
| 6 |
+
uvicorn>=0.27.0
|
server/rewards/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Reward components for CERNenv."""
|
| 2 |
+
|
| 3 |
+
from .reward_function import (
|
| 4 |
+
RewardBreakdown,
|
| 5 |
+
RewardWeights,
|
| 6 |
+
StepReward,
|
| 7 |
+
TerminalReward,
|
| 8 |
+
compute_step_reward,
|
| 9 |
+
compute_terminal_reward,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"RewardBreakdown",
|
| 14 |
+
"RewardWeights",
|
| 15 |
+
"StepReward",
|
| 16 |
+
"TerminalReward",
|
| 17 |
+
"compute_step_reward",
|
| 18 |
+
"compute_terminal_reward",
|
| 19 |
+
]
|
server/rewards/reward_function.py
ADDED
|
@@ -0,0 +1,408 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Decomposed reward function.
|
| 2 |
+
|
| 3 |
+
Two stages:
|
| 4 |
+
1. **Per-step reward** ``compute_step_reward``: shapes behaviour with small
|
| 5 |
+
incentives (progress, evidence quality, valid prerequisites) and
|
| 6 |
+
penalties (rule violations, repeated work, wasted resources).
|
| 7 |
+
2. **Terminal reward** ``compute_terminal_reward``: graded only when the
|
| 8 |
+
agent submits a discovery claim or runs out of resources. Compares the
|
| 9 |
+
submitted claim against the hidden ``LatentParticle`` truth.
|
| 10 |
+
|
| 11 |
+
The terminal reward is intentionally dominant so the policy must care about
|
| 12 |
+
the *correct* discovery, not just looking busy.
|
| 13 |
+
|
| 14 |
+
Anti-reward-hacking design notes
|
| 15 |
+
--------------------------------
|
| 16 |
+
The shaping reward is layered with several independent checks so that
|
| 17 |
+
exploiting any single one alone cannot dominate the terminal grade
|
| 18 |
+
(see hackathon guidance: *"use multiple independent reward functions"*):
|
| 19 |
+
|
| 20 |
+
* ``tool_fit`` is **gated**: the agent only earns it when ``method`` is
|
| 21 |
+
in ``TOOL_REGISTRY`` *and* the tool's category matches the action's
|
| 22 |
+
expected category. Bogus method strings get **penalized**, not rewarded.
|
| 23 |
+
* ``valid_action`` is gated on a parsed structured action that the rules
|
| 24 |
+
engine accepts — pure JSON-shaped junk does not earn it.
|
| 25 |
+
* ``progress_milestone`` only fires on the *first* time a milestone is
|
| 26 |
+
unlocked, so re-doing already-completed steps cannot farm it.
|
| 27 |
+
* ``redundancy`` and the new ``repeat_action_penalty`` punish loops that
|
| 28 |
+
re-emit the same action type many times in a row.
|
| 29 |
+
* The terminal grade dominates total reward via ``terminal_scale``, and
|
| 30 |
+
the overconfident-wrong penalty also fires when the claim *significance*
|
| 31 |
+
exceeds what was actually measured.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
from __future__ import annotations
|
| 35 |
+
|
| 36 |
+
from collections import deque
|
| 37 |
+
from dataclasses import dataclass, field
|
| 38 |
+
from typing import Deque, Dict, List, Optional
|
| 39 |
+
|
| 40 |
+
import numpy as np
|
| 41 |
+
|
| 42 |
+
from models import (
|
| 43 |
+
ActionType,
|
| 44 |
+
DiscoveryClaim,
|
| 45 |
+
ExperimentAction,
|
| 46 |
+
IntermediateOutput,
|
| 47 |
+
TOOL_REGISTRY,
|
| 48 |
+
is_recommended_tool,
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
from server.rules.engine import RuleResult, ViolationCode
|
| 52 |
+
from server.simulator.latent_state import FullLatentState
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# ── Configuration ────────────────────────────────────────────────────────
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@dataclass
|
| 59 |
+
class RewardWeights:
|
| 60 |
+
# ── per-step shaping ────────────────────────────────────────
|
| 61 |
+
valid_action: float = 0.05
|
| 62 |
+
progress_milestone: float = 0.25
|
| 63 |
+
evidence_quality: float = 0.20
|
| 64 |
+
tool_fit: float = 0.10 # paid only on a method ∈ TOOL_REGISTRY
|
| 65 |
+
# whose category matches the action.
|
| 66 |
+
bogus_method_penalty: float = -0.05 # penalises method strings outside
|
| 67 |
+
# TOOL_REGISTRY (anti-string-spam).
|
| 68 |
+
repeat_action_penalty: float = -0.08 # per consecutive repeat beyond the
|
| 69 |
+
# second identical action_type in a row.
|
| 70 |
+
soft_violation: float = -0.05
|
| 71 |
+
hard_violation: float = -0.50
|
| 72 |
+
redundancy: float = -0.10
|
| 73 |
+
resource_overspend: float = -0.30
|
| 74 |
+
failure: float = -0.30
|
| 75 |
+
|
| 76 |
+
# Hard cap on what a single shaping step can earn. Without this a
|
| 77 |
+
# policy could in principle stack milestone + evidence_quality +
|
| 78 |
+
# tool_fit + valid_action and approach the terminal reward magnitude.
|
| 79 |
+
step_reward_clip: float = 0.75
|
| 80 |
+
|
| 81 |
+
# ── terminal grading ────────────────────────────────────────
|
| 82 |
+
terminal_scale: float = 5.0 # multiplied with the convex sum below
|
| 83 |
+
|
| 84 |
+
mass_calibration: float = 0.30
|
| 85 |
+
significance_quality: float = 0.20
|
| 86 |
+
channel_correctness: float = 0.20
|
| 87 |
+
spin_correctness: float = 0.10
|
| 88 |
+
width_calibration: float = 0.05
|
| 89 |
+
confidence_calibration: float = 0.10
|
| 90 |
+
efficiency_bonus: float = 0.05
|
| 91 |
+
|
| 92 |
+
overconfident_wrong_penalty: float = 4.0 # subtracted from terminal
|
| 93 |
+
overclaim_significance_penalty: float = 1.5 # claim_sigma >> measured_sigma
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# ── Outputs ──────────────────────────────────────────────────────────────
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
@dataclass
|
| 100 |
+
class RewardBreakdown:
|
| 101 |
+
components: Dict[str, float] = field(default_factory=dict)
|
| 102 |
+
total: float = 0.0
|
| 103 |
+
|
| 104 |
+
def add(self, key: str, value: float) -> None:
|
| 105 |
+
self.components[key] = self.components.get(key, 0.0) + value
|
| 106 |
+
self.total += value
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@dataclass
|
| 110 |
+
class StepReward:
|
| 111 |
+
reward: float
|
| 112 |
+
breakdown: RewardBreakdown
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
@dataclass
|
| 116 |
+
class TerminalReward:
|
| 117 |
+
reward: float
|
| 118 |
+
breakdown: RewardBreakdown
|
| 119 |
+
discovered: bool
|
| 120 |
+
correct_mass: bool
|
| 121 |
+
correct_channel: bool
|
| 122 |
+
correct_spin: bool
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
# ── Per-step ─────────────────────────────────────────────────────────────
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
_PROGRESS_FLAGS = [
|
| 129 |
+
"beam_configured",
|
| 130 |
+
"luminosity_allocated",
|
| 131 |
+
"trigger_set",
|
| 132 |
+
"collisions_collected",
|
| 133 |
+
"channel_selected",
|
| 134 |
+
"tracks_reconstructed",
|
| 135 |
+
"detector_calibrated",
|
| 136 |
+
"invariant_mass_built",
|
| 137 |
+
"background_subtracted",
|
| 138 |
+
"resonance_fitted",
|
| 139 |
+
"significance_estimated",
|
| 140 |
+
]
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def _milestone_progress(state_before: FullLatentState, state_after: FullLatentState) -> int:
|
| 144 |
+
"""Number of new progress milestones unlocked this step."""
|
| 145 |
+
delta = 0
|
| 146 |
+
for flag in _PROGRESS_FLAGS:
|
| 147 |
+
was = getattr(state_before.progress, flag)
|
| 148 |
+
now = getattr(state_after.progress, flag)
|
| 149 |
+
if now and not was:
|
| 150 |
+
delta += 1
|
| 151 |
+
return delta
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
def _consecutive_repeat_count(
|
| 155 |
+
history: List, action_type: ActionType, look_back: int = 6
|
| 156 |
+
) -> int:
|
| 157 |
+
"""How many times this action_type appeared *consecutively* most recently
|
| 158 |
+
(excluding the just-applied action). Used to dampen loops.
|
| 159 |
+
"""
|
| 160 |
+
if not history:
|
| 161 |
+
return 0
|
| 162 |
+
n = 0
|
| 163 |
+
for rec in reversed(history[-look_back:]):
|
| 164 |
+
if getattr(rec, "action_type", None) == action_type:
|
| 165 |
+
n += 1
|
| 166 |
+
else:
|
| 167 |
+
break
|
| 168 |
+
return n
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def compute_step_reward(
|
| 172 |
+
*,
|
| 173 |
+
action: ExperimentAction,
|
| 174 |
+
output: IntermediateOutput,
|
| 175 |
+
state_before: FullLatentState,
|
| 176 |
+
state_after: FullLatentState,
|
| 177 |
+
rule_result: RuleResult,
|
| 178 |
+
weights: RewardWeights = RewardWeights(),
|
| 179 |
+
history: Optional[List] = None,
|
| 180 |
+
) -> StepReward:
|
| 181 |
+
"""Compute the per-step shaping reward.
|
| 182 |
+
|
| 183 |
+
``history`` is the list of ``PipelineStepRecord`` *before* this step. We
|
| 184 |
+
use it to detect consecutive-repeat loops (e.g. a model spamming the
|
| 185 |
+
same action_type to farm shaping). All other fields are local.
|
| 186 |
+
"""
|
| 187 |
+
|
| 188 |
+
breakdown = RewardBreakdown()
|
| 189 |
+
|
| 190 |
+
# ── basic validity / failure ────────────────────────────────────
|
| 191 |
+
if rule_result.allowed and output.success:
|
| 192 |
+
breakdown.add("valid_action", weights.valid_action)
|
| 193 |
+
if not output.success:
|
| 194 |
+
breakdown.add("failure", weights.failure)
|
| 195 |
+
|
| 196 |
+
# ── milestone progress (one-shot per flag, anti-farming) ────────
|
| 197 |
+
new_milestones = _milestone_progress(state_before, state_after)
|
| 198 |
+
if new_milestones > 0:
|
| 199 |
+
breakdown.add("progress", weights.progress_milestone * new_milestones)
|
| 200 |
+
|
| 201 |
+
# ── evidence quality ────────────────────────────────────────────
|
| 202 |
+
if output.success:
|
| 203 |
+
breakdown.add(
|
| 204 |
+
"evidence_quality",
|
| 205 |
+
weights.evidence_quality * float(output.quality_score),
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
# ── tool fit: gated on TOOL_REGISTRY membership + category match ─
|
| 209 |
+
# Bogus or mismatched method strings are explicitly penalised so the
|
| 210 |
+
# model can't farm shaping reward by setting method='whatever'.
|
| 211 |
+
if action.method:
|
| 212 |
+
if is_recommended_tool(action.action_type, action.method):
|
| 213 |
+
breakdown.add("tool_fit", weights.tool_fit)
|
| 214 |
+
elif action.method not in TOOL_REGISTRY:
|
| 215 |
+
breakdown.add("bogus_method", weights.bogus_method_penalty)
|
| 216 |
+
# If the tool exists but the category doesn't match the action,
|
| 217 |
+
# we silently award nothing (no penalty, no reward).
|
| 218 |
+
|
| 219 |
+
# ── rule penalties ──────────────────────────────────────────────
|
| 220 |
+
if rule_result.violations:
|
| 221 |
+
breakdown.add(
|
| 222 |
+
"hard_violation",
|
| 223 |
+
weights.hard_violation * len(rule_result.violations),
|
| 224 |
+
)
|
| 225 |
+
if rule_result.soft_violations:
|
| 226 |
+
soft_redundant = sum(
|
| 227 |
+
1 for v in rule_result.soft_violations if v == ViolationCode.REDUNDANT
|
| 228 |
+
)
|
| 229 |
+
soft_other = len(rule_result.soft_violations) - soft_redundant
|
| 230 |
+
if soft_redundant:
|
| 231 |
+
breakdown.add("redundancy", weights.redundancy * soft_redundant)
|
| 232 |
+
if soft_other:
|
| 233 |
+
breakdown.add("soft_violation", weights.soft_violation * soft_other)
|
| 234 |
+
|
| 235 |
+
# ── consecutive-repeat penalty (catches loop hacks) ─────────────
|
| 236 |
+
# Two-in-a-row is mildly OK (sometimes you re-collect data); three
|
| 237 |
+
# or more identical action_types in a row earns escalating penalty.
|
| 238 |
+
repeats = _consecutive_repeat_count(history or [], action.action_type)
|
| 239 |
+
if repeats >= 2:
|
| 240 |
+
breakdown.add(
|
| 241 |
+
"repeat_action",
|
| 242 |
+
weights.repeat_action_penalty * (repeats - 1),
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
# ── resource overspend ──────────────────────────────────────────
|
| 246 |
+
res = state_after.resources
|
| 247 |
+
if res.budget_used_musd > res.budget_total_musd:
|
| 248 |
+
breakdown.add("budget_overspend", weights.resource_overspend)
|
| 249 |
+
if res.luminosity_used_fb > res.luminosity_total_fb:
|
| 250 |
+
breakdown.add("lumi_overspend", weights.resource_overspend)
|
| 251 |
+
if res.time_used_days > res.time_limit_days:
|
| 252 |
+
breakdown.add("time_overspend", weights.resource_overspend)
|
| 253 |
+
|
| 254 |
+
# ── total + soft cap ────────────────────────────────────────────
|
| 255 |
+
total = float(breakdown.total)
|
| 256 |
+
if weights.step_reward_clip > 0:
|
| 257 |
+
total = float(np.clip(total, -10.0, weights.step_reward_clip))
|
| 258 |
+
return StepReward(reward=total, breakdown=breakdown)
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
# ── Terminal grading ─────────────────────────────────────────────────────
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def _mass_score(true_mass: float, claim_mass: Optional[float], unc: Optional[float]) -> float:
|
| 265 |
+
"""1.0 within 1σ, smoothly decays to 0 by 5 GeV (or 5σ, whichever larger)."""
|
| 266 |
+
if claim_mass is None or true_mass <= 0:
|
| 267 |
+
return 0.0
|
| 268 |
+
err = abs(claim_mass - true_mass)
|
| 269 |
+
# Tolerance: max(1.0 GeV, 1% of true mass, claimed unc)
|
| 270 |
+
tol = max(1.0, 0.01 * true_mass)
|
| 271 |
+
if unc is not None and unc > 0:
|
| 272 |
+
tol = max(tol, float(unc))
|
| 273 |
+
if err <= tol:
|
| 274 |
+
return 1.0
|
| 275 |
+
if err >= 5 * tol:
|
| 276 |
+
return 0.0
|
| 277 |
+
return float(np.clip(1.0 - (err - tol) / (4 * tol), 0.0, 1.0))
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
def _significance_score(state: FullLatentState, claim_sigma: Optional[float]) -> float:
|
| 281 |
+
"""High score when claimed σ matches measured σ and is ≥ 5.
|
| 282 |
+
|
| 283 |
+
A claim_sigma far above the measured significance is a classic
|
| 284 |
+
reward-hacking pattern (just write '50' in the field), so we penalise
|
| 285 |
+
over-claiming proportionally.
|
| 286 |
+
"""
|
| 287 |
+
measured = state.progress.best_significance_sigma or 0.0
|
| 288 |
+
if claim_sigma is None:
|
| 289 |
+
return 0.0
|
| 290 |
+
over_claim = max(0.0, claim_sigma - measured)
|
| 291 |
+
base = float(np.clip(measured / 5.0, 0.0, 1.0))
|
| 292 |
+
penalty = float(np.clip(over_claim / 3.0, 0.0, 1.0))
|
| 293 |
+
return float(np.clip(base - 0.5 * penalty, 0.0, 1.0))
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
def _significance_overclaim(
|
| 297 |
+
state: FullLatentState, claim_sigma: Optional[float], threshold_sigma: float = 1.5
|
| 298 |
+
) -> float:
|
| 299 |
+
"""How many σ the claim *exceeds* what the env actually measured.
|
| 300 |
+
|
| 301 |
+
Used as an extra penalty — distinct from ``_significance_score`` —
|
| 302 |
+
so that a model can't compensate a giant over-claim by getting the
|
| 303 |
+
mass slightly more accurate. Returns ``max(0, claim - measured - τ)``.
|
| 304 |
+
"""
|
| 305 |
+
if claim_sigma is None:
|
| 306 |
+
return 0.0
|
| 307 |
+
measured = state.progress.best_significance_sigma or 0.0
|
| 308 |
+
return float(max(0.0, claim_sigma - measured - threshold_sigma))
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def _confidence_calibration(claim_conf: float, mass_score: float, channel_correct: bool) -> float:
|
| 312 |
+
"""Reward agents whose confidence tracks their actual accuracy."""
|
| 313 |
+
actual = 0.5 * mass_score + 0.5 * (1.0 if channel_correct else 0.0)
|
| 314 |
+
err = abs(actual - claim_conf)
|
| 315 |
+
return float(np.clip(1.0 - err, 0.0, 1.0))
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def _efficiency_bonus(state: FullLatentState) -> float:
|
| 319 |
+
"""Reward leftover budget (encourages succinct experiments)."""
|
| 320 |
+
res = state.resources
|
| 321 |
+
score = 0.0
|
| 322 |
+
score += np.clip(res.budget_remaining / res.budget_total_musd, 0.0, 1.0)
|
| 323 |
+
score += np.clip(res.luminosity_remaining / res.luminosity_total_fb, 0.0, 1.0)
|
| 324 |
+
score += np.clip(res.time_remaining / res.time_limit_days, 0.0, 1.0)
|
| 325 |
+
return float(score / 3.0)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def compute_terminal_reward(
|
| 329 |
+
*,
|
| 330 |
+
state: FullLatentState,
|
| 331 |
+
claim: DiscoveryClaim,
|
| 332 |
+
weights: RewardWeights = RewardWeights(),
|
| 333 |
+
) -> TerminalReward:
|
| 334 |
+
breakdown = RewardBreakdown()
|
| 335 |
+
truth = state.particle
|
| 336 |
+
|
| 337 |
+
mass_score = _mass_score(truth.mass_gev, claim.mass_estimate_gev, claim.mass_uncertainty_gev)
|
| 338 |
+
breakdown.add("mass_calibration", weights.mass_calibration * mass_score)
|
| 339 |
+
|
| 340 |
+
sig_score = _significance_score(state, claim.significance_sigma)
|
| 341 |
+
breakdown.add("significance_quality", weights.significance_quality * sig_score)
|
| 342 |
+
|
| 343 |
+
channel_ok = claim.decay_channel == truth.primary_channel
|
| 344 |
+
breakdown.add("channel_correctness", weights.channel_correctness * (1.0 if channel_ok else 0.0))
|
| 345 |
+
|
| 346 |
+
spin_ok = claim.spin_hypothesis is not None and claim.spin_hypothesis == truth.spin
|
| 347 |
+
breakdown.add("spin_correctness", weights.spin_correctness * (1.0 if spin_ok else 0.0))
|
| 348 |
+
|
| 349 |
+
width_score = 0.0
|
| 350 |
+
if claim.width_estimate_gev is not None and truth.width_gev > 0:
|
| 351 |
+
rel = abs(claim.width_estimate_gev - truth.width_gev) / max(truth.width_gev, 1e-3)
|
| 352 |
+
width_score = float(np.clip(1.0 - rel, 0.0, 1.0))
|
| 353 |
+
breakdown.add("width_calibration", weights.width_calibration * width_score)
|
| 354 |
+
|
| 355 |
+
conf_score = _confidence_calibration(claim.confidence, mass_score, channel_ok)
|
| 356 |
+
breakdown.add("confidence_calibration", weights.confidence_calibration * conf_score)
|
| 357 |
+
|
| 358 |
+
eff_score = _efficiency_bonus(state)
|
| 359 |
+
breakdown.add("efficiency_bonus", weights.efficiency_bonus * eff_score)
|
| 360 |
+
|
| 361 |
+
discovered = (
|
| 362 |
+
mass_score >= 0.5
|
| 363 |
+
and channel_ok
|
| 364 |
+
and (claim.significance_sigma or 0.0) >= 4.5
|
| 365 |
+
)
|
| 366 |
+
|
| 367 |
+
raw = breakdown.total * weights.terminal_scale
|
| 368 |
+
|
| 369 |
+
# Overconfident-wrong penalty: high confidence but wrong channel & far mass
|
| 370 |
+
if claim.confidence >= 0.8 and (mass_score < 0.2 or not channel_ok):
|
| 371 |
+
raw -= weights.overconfident_wrong_penalty
|
| 372 |
+
breakdown.add("overconfident_wrong", -weights.overconfident_wrong_penalty)
|
| 373 |
+
|
| 374 |
+
# Significance-overclaim penalty (anti-reward-hacking): discourages the
|
| 375 |
+
# model from just writing a giant σ in the claim regardless of evidence.
|
| 376 |
+
overclaim_sigma = _significance_overclaim(state, claim.significance_sigma)
|
| 377 |
+
if overclaim_sigma > 0:
|
| 378 |
+
pen = weights.overclaim_significance_penalty * float(
|
| 379 |
+
np.clip(overclaim_sigma / 3.0, 0.0, 2.0)
|
| 380 |
+
)
|
| 381 |
+
raw -= pen
|
| 382 |
+
breakdown.add("overclaim_significance", -pen)
|
| 383 |
+
|
| 384 |
+
# If the claim has zero/None mass and zero/None significance, treat it
|
| 385 |
+
# as a "no-information" submission — clamp the raw reward so the model
|
| 386 |
+
# can't pass the rules engine and then submit garbage to end early.
|
| 387 |
+
if (claim.mass_estimate_gev is None) and (claim.significance_sigma in (None, 0.0)):
|
| 388 |
+
raw = float(min(raw, 0.0))
|
| 389 |
+
breakdown.add("no_information_claim", 0.0)
|
| 390 |
+
|
| 391 |
+
return TerminalReward(
|
| 392 |
+
reward=float(raw),
|
| 393 |
+
breakdown=breakdown,
|
| 394 |
+
discovered=discovered,
|
| 395 |
+
correct_mass=mass_score >= 0.5,
|
| 396 |
+
correct_channel=channel_ok,
|
| 397 |
+
correct_spin=spin_ok,
|
| 398 |
+
)
|
| 399 |
+
|
| 400 |
+
|
| 401 |
+
__all__ = [
|
| 402 |
+
"RewardBreakdown",
|
| 403 |
+
"RewardWeights",
|
| 404 |
+
"StepReward",
|
| 405 |
+
"TerminalReward",
|
| 406 |
+
"compute_step_reward",
|
| 407 |
+
"compute_terminal_reward",
|
| 408 |
+
]
|
server/rules/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Rules engine: prerequisites, resources, redundancy, claim validity."""
|
| 2 |
+
|
| 3 |
+
from .engine import RuleResult, RulesEngine, ViolationCode
|
| 4 |
+
|
| 5 |
+
__all__ = ["RuleResult", "RulesEngine", "ViolationCode"]
|
server/rules/engine.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""RulesEngine for CERNenv.
|
| 2 |
+
|
| 3 |
+
Validates an incoming ``ExperimentAction`` against the current latent state
|
| 4 |
+
*before* it is executed. Rule violations are reported back as warnings on the
|
| 5 |
+
observation and feed into the per-step penalty in the reward function.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from dataclasses import dataclass, field
|
| 11 |
+
from enum import Enum
|
| 12 |
+
from typing import List, Optional
|
| 13 |
+
|
| 14 |
+
from models import (
|
| 15 |
+
ActionType,
|
| 16 |
+
DetectorChannel,
|
| 17 |
+
ExperimentAction,
|
| 18 |
+
TriggerType,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
from server.simulator.latent_state import FullLatentState
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class ViolationCode(str, Enum):
|
| 25 |
+
PREREQ_MISSING = "prerequisite_missing"
|
| 26 |
+
BUDGET_EXHAUSTED = "budget_exhausted"
|
| 27 |
+
LUMI_EXHAUSTED = "luminosity_exhausted"
|
| 28 |
+
TIME_EXHAUSTED = "time_exhausted"
|
| 29 |
+
REDUNDANT = "redundant"
|
| 30 |
+
INVALID_PARAMS = "invalid_parameters"
|
| 31 |
+
INVALID_CLAIM = "invalid_claim"
|
| 32 |
+
CHANNEL_MISMATCH = "channel_mismatch"
|
| 33 |
+
OUT_OF_WINDOW = "out_of_search_window"
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@dataclass
|
| 37 |
+
class RuleResult:
|
| 38 |
+
allowed: bool
|
| 39 |
+
violations: List[ViolationCode] = field(default_factory=list)
|
| 40 |
+
messages: List[str] = field(default_factory=list)
|
| 41 |
+
soft_violations: List[ViolationCode] = field(default_factory=list)
|
| 42 |
+
|
| 43 |
+
def add(self, code: ViolationCode, msg: str, soft: bool = False) -> None:
|
| 44 |
+
self.messages.append(msg)
|
| 45 |
+
if soft:
|
| 46 |
+
self.soft_violations.append(code)
|
| 47 |
+
else:
|
| 48 |
+
self.violations.append(code)
|
| 49 |
+
self.allowed = False
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class RulesEngine:
|
| 53 |
+
"""Stateless validator (state is passed in)."""
|
| 54 |
+
|
| 55 |
+
def __init__(
|
| 56 |
+
self,
|
| 57 |
+
mass_search_window_gev: tuple[float, float] = (50.0, 1000.0),
|
| 58 |
+
) -> None:
|
| 59 |
+
self.mass_search_window_gev = mass_search_window_gev
|
| 60 |
+
|
| 61 |
+
# ── Public API ─────────────────────────────────────────────────────
|
| 62 |
+
|
| 63 |
+
def validate(
|
| 64 |
+
self,
|
| 65 |
+
action: ExperimentAction,
|
| 66 |
+
state: FullLatentState,
|
| 67 |
+
) -> RuleResult:
|
| 68 |
+
result = RuleResult(allowed=True)
|
| 69 |
+
|
| 70 |
+
# ── resource gating (hard) ────────────────────────────────
|
| 71 |
+
if state.resources.budget_exhausted:
|
| 72 |
+
result.add(ViolationCode.BUDGET_EXHAUSTED, "Budget fully spent.")
|
| 73 |
+
if state.resources.time_exhausted:
|
| 74 |
+
result.add(ViolationCode.TIME_EXHAUSTED, "Time budget exhausted.")
|
| 75 |
+
# luminosity exhaustion only blocks DAQ-style actions
|
| 76 |
+
if (
|
| 77 |
+
state.resources.luminosity_exhausted
|
| 78 |
+
and action.action_type in {
|
| 79 |
+
ActionType.ALLOCATE_LUMINOSITY,
|
| 80 |
+
ActionType.COLLECT_COLLISIONS,
|
| 81 |
+
}
|
| 82 |
+
):
|
| 83 |
+
result.add(ViolationCode.LUMI_EXHAUSTED, "Integrated luminosity budget spent.")
|
| 84 |
+
|
| 85 |
+
if not result.allowed:
|
| 86 |
+
return result
|
| 87 |
+
|
| 88 |
+
a = action.action_type
|
| 89 |
+
prog = state.progress
|
| 90 |
+
|
| 91 |
+
# ── prerequisites ──────────────────────────────────────────
|
| 92 |
+
if a == ActionType.COLLECT_COLLISIONS:
|
| 93 |
+
if not prog.beam_configured:
|
| 94 |
+
result.add(ViolationCode.PREREQ_MISSING, "Configure the beam first.")
|
| 95 |
+
if not prog.luminosity_allocated:
|
| 96 |
+
result.add(ViolationCode.PREREQ_MISSING, "Allocate luminosity first.")
|
| 97 |
+
if not prog.trigger_set:
|
| 98 |
+
result.add(ViolationCode.PREREQ_MISSING, "Set a trigger first.")
|
| 99 |
+
if not state.selected_channel:
|
| 100 |
+
result.add(ViolationCode.PREREQ_MISSING, "Select a decay channel first.")
|
| 101 |
+
|
| 102 |
+
elif a == ActionType.BUILD_INVARIANT_MASS:
|
| 103 |
+
if not prog.collisions_collected:
|
| 104 |
+
result.add(ViolationCode.PREREQ_MISSING, "Collect collisions before building histograms.")
|
| 105 |
+
if not prog.tracks_reconstructed:
|
| 106 |
+
result.add(ViolationCode.PREREQ_MISSING, "Reconstruct tracks before building histograms.")
|
| 107 |
+
|
| 108 |
+
elif a == ActionType.SUBTRACT_BACKGROUND:
|
| 109 |
+
if not prog.invariant_mass_built:
|
| 110 |
+
result.add(ViolationCode.PREREQ_MISSING, "Build invariant-mass histogram first.")
|
| 111 |
+
|
| 112 |
+
elif a == ActionType.FIT_RESONANCE:
|
| 113 |
+
if not prog.invariant_mass_built:
|
| 114 |
+
result.add(ViolationCode.PREREQ_MISSING, "Build the histogram before fitting.")
|
| 115 |
+
|
| 116 |
+
elif a == ActionType.MEASURE_ANGULAR:
|
| 117 |
+
if not (prog.resonance_fitted or prog.bump_scanned):
|
| 118 |
+
result.add(
|
| 119 |
+
ViolationCode.PREREQ_MISSING,
|
| 120 |
+
"Identify a peak (fit or bump scan) before angular analysis.",
|
| 121 |
+
)
|
| 122 |
+
|
| 123 |
+
elif a == ActionType.ESTIMATE_SIGNIFICANCE:
|
| 124 |
+
if not prog.collisions_collected:
|
| 125 |
+
result.add(ViolationCode.PREREQ_MISSING, "Collect data before significance estimation.")
|
| 126 |
+
|
| 127 |
+
elif a == ActionType.SUBMIT_DISCOVERY_CLAIM:
|
| 128 |
+
if not prog.resonance_fitted and not prog.bump_scanned:
|
| 129 |
+
result.add(ViolationCode.PREREQ_MISSING, "No fitted resonance or bump scan; cannot claim a discovery.")
|
| 130 |
+
if not prog.significance_estimated:
|
| 131 |
+
result.add(ViolationCode.PREREQ_MISSING, "Estimate significance before submitting a claim.")
|
| 132 |
+
|
| 133 |
+
# ── parameter & search-window validation (soft) ────────────
|
| 134 |
+
if a == ActionType.SELECT_CHANNEL:
|
| 135 |
+
channel = action.parameters.get("channel")
|
| 136 |
+
if channel:
|
| 137 |
+
try:
|
| 138 |
+
DetectorChannel(channel)
|
| 139 |
+
except ValueError:
|
| 140 |
+
result.add(ViolationCode.INVALID_PARAMS, f"Unknown channel '{channel}'.", soft=True)
|
| 141 |
+
|
| 142 |
+
if a == ActionType.SET_TRIGGER:
|
| 143 |
+
trig = action.parameters.get("trigger")
|
| 144 |
+
if trig:
|
| 145 |
+
try:
|
| 146 |
+
TriggerType(trig)
|
| 147 |
+
except ValueError:
|
| 148 |
+
result.add(ViolationCode.INVALID_PARAMS, f"Unknown trigger '{trig}'.", soft=True)
|
| 149 |
+
|
| 150 |
+
if a == ActionType.BUILD_INVARIANT_MASS:
|
| 151 |
+
window = action.parameters.get("mass_window_gev")
|
| 152 |
+
if window and len(window) == 2:
|
| 153 |
+
lo, hi = float(window[0]), float(window[1])
|
| 154 |
+
if hi <= lo:
|
| 155 |
+
result.add(
|
| 156 |
+
ViolationCode.INVALID_PARAMS,
|
| 157 |
+
f"Mass window [{lo}, {hi}] is non-positive.",
|
| 158 |
+
soft=True,
|
| 159 |
+
)
|
| 160 |
+
if lo > self.mass_search_window_gev[1] or hi < self.mass_search_window_gev[0]:
|
| 161 |
+
result.add(
|
| 162 |
+
ViolationCode.OUT_OF_WINDOW,
|
| 163 |
+
f"Histogram window [{lo}, {hi}] is outside the task search window "
|
| 164 |
+
f"{self.mass_search_window_gev}.",
|
| 165 |
+
soft=True,
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
# ── redundancy (soft) ─────────────────────────────────────
|
| 169 |
+
if a == ActionType.CONFIGURE_BEAM and prog.beam_configured:
|
| 170 |
+
result.add(ViolationCode.REDUNDANT, "Beam already configured; reconfiguring wastes budget.", soft=True)
|
| 171 |
+
if a == ActionType.SELECT_CHANNEL and prog.channel_selected:
|
| 172 |
+
result.add(ViolationCode.REDUNDANT, "Channel already selected.", soft=True)
|
| 173 |
+
if a == ActionType.RECONSTRUCT_TRACKS and prog.tracks_reconstructed:
|
| 174 |
+
result.add(ViolationCode.REDUNDANT, "Tracks already reconstructed.", soft=True)
|
| 175 |
+
if a == ActionType.CALIBRATE_DETECTOR and prog.detector_calibrated:
|
| 176 |
+
result.add(ViolationCode.REDUNDANT, "Detector already calibrated.", soft=True)
|
| 177 |
+
|
| 178 |
+
# ── claim sanity ──────────────────────────────────────────
|
| 179 |
+
if a == ActionType.SUBMIT_DISCOVERY_CLAIM:
|
| 180 |
+
claim = action.parameters.get("claim") or {}
|
| 181 |
+
mass = claim.get("mass_estimate_gev")
|
| 182 |
+
if mass is None:
|
| 183 |
+
result.add(ViolationCode.INVALID_CLAIM, "Claim missing mass estimate.")
|
| 184 |
+
else:
|
| 185 |
+
try:
|
| 186 |
+
m = float(mass)
|
| 187 |
+
except Exception:
|
| 188 |
+
result.add(ViolationCode.INVALID_CLAIM, "Claim mass is not numeric.")
|
| 189 |
+
else:
|
| 190 |
+
lo, hi = self.mass_search_window_gev
|
| 191 |
+
if not (lo <= m <= hi):
|
| 192 |
+
result.add(
|
| 193 |
+
ViolationCode.INVALID_CLAIM,
|
| 194 |
+
f"Claim mass {m} outside search window [{lo}, {hi}].",
|
| 195 |
+
soft=True,
|
| 196 |
+
)
|
| 197 |
+
if claim.get("significance_sigma") is None:
|
| 198 |
+
result.add(ViolationCode.INVALID_CLAIM, "Claim missing significance.", soft=True)
|
| 199 |
+
|
| 200 |
+
return result
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
__all__ = ["RuleResult", "RulesEngine", "ViolationCode"]
|
server/simulator/__init__.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Simulator: latent particle truth, noise model, output generation."""
|
| 2 |
+
|
| 3 |
+
from .latent_state import (
|
| 4 |
+
DetectorState,
|
| 5 |
+
ExperimentProgress,
|
| 6 |
+
FullLatentState,
|
| 7 |
+
LatentParticle,
|
| 8 |
+
ResourceState,
|
| 9 |
+
)
|
| 10 |
+
from .noise import NoiseModel
|
| 11 |
+
from .output_generator import OutputGenerator
|
| 12 |
+
from .transition import (
|
| 13 |
+
ACTION_COSTS,
|
| 14 |
+
TransitionEngine,
|
| 15 |
+
TransitionResult,
|
| 16 |
+
compute_action_cost,
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"ACTION_COSTS",
|
| 21 |
+
"DetectorState",
|
| 22 |
+
"ExperimentProgress",
|
| 23 |
+
"FullLatentState",
|
| 24 |
+
"LatentParticle",
|
| 25 |
+
"NoiseModel",
|
| 26 |
+
"OutputGenerator",
|
| 27 |
+
"ResourceState",
|
| 28 |
+
"TransitionEngine",
|
| 29 |
+
"TransitionResult",
|
| 30 |
+
"compute_action_cost",
|
| 31 |
+
]
|
server/simulator/latent_state.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Latent (hidden) state of the LHC simulator.
|
| 2 |
+
|
| 3 |
+
The agent never sees these structures. They define the ground-truth particle
|
| 4 |
+
properties, detector imperfections, experiment progress flags, and the live
|
| 5 |
+
resource budget.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from typing import Dict, List, Optional
|
| 11 |
+
|
| 12 |
+
from pydantic import BaseModel, Field
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
# ── Particle truth ────────────────────────────────────────────────────────
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class LatentParticle(BaseModel):
|
| 19 |
+
"""The hidden mystery particle that the agent must discover.
|
| 20 |
+
|
| 21 |
+
Defines the true mass, width, decay branching ratios, spin, parity,
|
| 22 |
+
production cross-section, and dominant decay channel. The agent has to
|
| 23 |
+
recover these values from noisy observations.
|
| 24 |
+
"""
|
| 25 |
+
|
| 26 |
+
name: str = "X"
|
| 27 |
+
mass_gev: float = 125.0
|
| 28 |
+
width_gev: float = 0.004
|
| 29 |
+
spin: int = 0 # 0, 1, or 2
|
| 30 |
+
parity: str = "+" # "+" or "-"
|
| 31 |
+
cross_section_fb: float = 50.0 # signal cross-section in femtobarns
|
| 32 |
+
decay_branching: Dict[str, float] = Field(
|
| 33 |
+
default_factory=lambda: {
|
| 34 |
+
"diphoton": 0.0023,
|
| 35 |
+
"dilepton_ee": 0.00003,
|
| 36 |
+
"dilepton_mumu": 0.00022,
|
| 37 |
+
"four_lepton": 0.000125,
|
| 38 |
+
"bb": 0.58,
|
| 39 |
+
"dijet": 0.30,
|
| 40 |
+
},
|
| 41 |
+
description="Branching ratio (BR) per decay channel, sums to ~1.",
|
| 42 |
+
)
|
| 43 |
+
primary_channel: str = "diphoton"
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# ── Detector & accelerator state ─────────────────────────────────────────
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class DetectorState(BaseModel):
|
| 50 |
+
"""Hidden detector and accelerator parameters that shape noise.
|
| 51 |
+
|
| 52 |
+
These influence resolution, trigger efficiency, pileup, and systematic
|
| 53 |
+
uncertainties applied to every observation.
|
| 54 |
+
"""
|
| 55 |
+
|
| 56 |
+
detector_resolution_gev: float = 1.5 # absolute mass resolution σ_m
|
| 57 |
+
pileup_mu: float = 30.0 # average pileup interactions per crossing
|
| 58 |
+
trigger_efficiency: float = 0.85
|
| 59 |
+
luminosity_uncertainty: float = 0.025 # 2.5% relative uncertainty
|
| 60 |
+
energy_scale_offset: float = 0.0 # systematic shift in GeV
|
| 61 |
+
energy_scale_uncertainty: float = 0.3 # σ on the scale
|
| 62 |
+
background_shape_alpha: float = -2.5 # exponent of background ~ 1/m^|α|
|
| 63 |
+
qcd_background_strength: float = 1.0 # scale factor for hadronic background
|
| 64 |
+
detector_calibrated: bool = False
|
| 65 |
+
tracker_aligned: bool = False
|
| 66 |
+
# Channel-dependent reconstruction efficiency
|
| 67 |
+
channel_efficiency: Dict[str, float] = Field(
|
| 68 |
+
default_factory=lambda: {
|
| 69 |
+
"diphoton": 0.45,
|
| 70 |
+
"dilepton_ee": 0.55,
|
| 71 |
+
"dilepton_mumu": 0.70,
|
| 72 |
+
"four_lepton": 0.40,
|
| 73 |
+
"dijet": 0.80,
|
| 74 |
+
"bb": 0.50,
|
| 75 |
+
}
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
# ── Experiment progress flags ────────────────────────────────────────────
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class ExperimentProgress(BaseModel):
|
| 83 |
+
"""Boolean milestones used by rules and reward shaping."""
|
| 84 |
+
|
| 85 |
+
beam_configured: bool = False
|
| 86 |
+
luminosity_allocated: bool = False
|
| 87 |
+
trigger_set: bool = False
|
| 88 |
+
collisions_collected: bool = False
|
| 89 |
+
detector_calibrated: bool = False
|
| 90 |
+
tracks_reconstructed: bool = False
|
| 91 |
+
channel_selected: bool = False
|
| 92 |
+
invariant_mass_built: bool = False
|
| 93 |
+
background_subtracted: bool = False
|
| 94 |
+
resonance_fitted: bool = False
|
| 95 |
+
bump_scanned: bool = False
|
| 96 |
+
angular_measured: bool = False
|
| 97 |
+
significance_estimated: bool = False
|
| 98 |
+
systematics_requested: bool = False
|
| 99 |
+
theory_review_requested: bool = False
|
| 100 |
+
claim_submitted: bool = False
|
| 101 |
+
|
| 102 |
+
n_events_collected: int = 0
|
| 103 |
+
n_signal_candidates: int = 0
|
| 104 |
+
n_background_estimate: int = 0
|
| 105 |
+
best_fit_mass_gev: Optional[float] = None
|
| 106 |
+
best_fit_width_gev: Optional[float] = None
|
| 107 |
+
best_significance_sigma: Optional[float] = None
|
| 108 |
+
best_channel: Optional[str] = None
|
| 109 |
+
best_beam_energy: Optional[str] = None
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
# ── Resources ─────────────────────────────────────────────────────────────
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
class ResourceState(BaseModel):
|
| 116 |
+
"""Live resource accounting (superset of the agent-visible ResourceUsage)."""
|
| 117 |
+
|
| 118 |
+
budget_total_musd: float = 100.0
|
| 119 |
+
budget_used_musd: float = 0.0
|
| 120 |
+
luminosity_total_fb: float = 300.0
|
| 121 |
+
luminosity_used_fb: float = 0.0
|
| 122 |
+
time_limit_days: float = 365.0
|
| 123 |
+
time_used_days: float = 0.0
|
| 124 |
+
compute_hours_used: float = 0.0
|
| 125 |
+
|
| 126 |
+
@property
|
| 127 |
+
def budget_remaining(self) -> float:
|
| 128 |
+
return max(0.0, self.budget_total_musd - self.budget_used_musd)
|
| 129 |
+
|
| 130 |
+
@property
|
| 131 |
+
def luminosity_remaining(self) -> float:
|
| 132 |
+
return max(0.0, self.luminosity_total_fb - self.luminosity_used_fb)
|
| 133 |
+
|
| 134 |
+
@property
|
| 135 |
+
def time_remaining(self) -> float:
|
| 136 |
+
return max(0.0, self.time_limit_days - self.time_used_days)
|
| 137 |
+
|
| 138 |
+
@property
|
| 139 |
+
def budget_exhausted(self) -> bool:
|
| 140 |
+
return self.budget_remaining <= 0
|
| 141 |
+
|
| 142 |
+
@property
|
| 143 |
+
def luminosity_exhausted(self) -> bool:
|
| 144 |
+
return self.luminosity_remaining <= 0
|
| 145 |
+
|
| 146 |
+
@property
|
| 147 |
+
def time_exhausted(self) -> bool:
|
| 148 |
+
return self.time_remaining <= 0
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# ── Aggregate hidden state ───────────────────────────────────────────────
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
class FullLatentState(BaseModel):
|
| 155 |
+
"""Complete hidden state of the simulated LHC analysis world."""
|
| 156 |
+
|
| 157 |
+
particle: LatentParticle = Field(default_factory=LatentParticle)
|
| 158 |
+
detector: DetectorState = Field(default_factory=DetectorState)
|
| 159 |
+
progress: ExperimentProgress = Field(default_factory=ExperimentProgress)
|
| 160 |
+
resources: ResourceState = Field(default_factory=ResourceState)
|
| 161 |
+
|
| 162 |
+
selected_channel: Optional[str] = None
|
| 163 |
+
selected_beam_energy: Optional[str] = None
|
| 164 |
+
selected_trigger: Optional[str] = None
|
| 165 |
+
|
| 166 |
+
candidate_masses_gev: List[float] = Field(default_factory=list)
|
| 167 |
+
candidate_significances: List[float] = Field(default_factory=list)
|
| 168 |
+
|
| 169 |
+
hidden_failure_conditions: List[str] = Field(default_factory=list)
|
| 170 |
+
rng_seed: int = 42
|
| 171 |
+
step_count: int = 0
|
server/simulator/noise.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Stochastic noise model for the LHC simulator.
|
| 2 |
+
|
| 3 |
+
All randomness is funneled through a single seeded ``numpy.Generator`` so
|
| 4 |
+
episodes are reproducible. The methods are physics-flavoured: Poisson event
|
| 5 |
+
counts, Gaussian-smeared masses, log-normal cross-sections, false discovery
|
| 6 |
+
helpers, and quality degradation.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
from typing import List
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class NoiseModel:
|
| 17 |
+
"""Centralised noise generator for the CERN simulator."""
|
| 18 |
+
|
| 19 |
+
def __init__(self, seed: int = 42):
|
| 20 |
+
self.rng = np.random.default_rng(seed)
|
| 21 |
+
|
| 22 |
+
def reseed(self, seed: int) -> None:
|
| 23 |
+
self.rng = np.random.default_rng(seed)
|
| 24 |
+
|
| 25 |
+
# ── counting / Poisson statistics ─────────────────────────────────
|
| 26 |
+
|
| 27 |
+
def poisson(self, lam: float) -> int:
|
| 28 |
+
return int(self.rng.poisson(max(lam, 0.0)))
|
| 29 |
+
|
| 30 |
+
def signal_yield(
|
| 31 |
+
self,
|
| 32 |
+
cross_section_fb: float,
|
| 33 |
+
luminosity_fb: float,
|
| 34 |
+
branching: float,
|
| 35 |
+
efficiency: float,
|
| 36 |
+
trigger_efficiency: float,
|
| 37 |
+
) -> int:
|
| 38 |
+
"""Expected signal events ~ σ × L × BR × ε_reco × ε_trig + Poisson noise.
|
| 39 |
+
|
| 40 |
+
BR = branching ratio of the decay channel.
|
| 41 |
+
ε_reco = channel reconstruction efficiency.
|
| 42 |
+
ε_trig = trigger acceptance.
|
| 43 |
+
"""
|
| 44 |
+
mu = cross_section_fb * luminosity_fb * branching * efficiency * trigger_efficiency
|
| 45 |
+
return self.poisson(mu)
|
| 46 |
+
|
| 47 |
+
def background_yield(
|
| 48 |
+
self,
|
| 49 |
+
baseline_per_fb: float,
|
| 50 |
+
luminosity_fb: float,
|
| 51 |
+
qcd_strength: float,
|
| 52 |
+
trigger_efficiency: float,
|
| 53 |
+
) -> int:
|
| 54 |
+
"""Expected background events scale linearly with luminosity."""
|
| 55 |
+
mu = baseline_per_fb * luminosity_fb * qcd_strength * trigger_efficiency
|
| 56 |
+
return self.poisson(mu)
|
| 57 |
+
|
| 58 |
+
# ── mass smearing ──────────────────────────────────────────────────
|
| 59 |
+
|
| 60 |
+
def smear_mass(
|
| 61 |
+
self,
|
| 62 |
+
true_mass_gev: float,
|
| 63 |
+
resolution_gev: float,
|
| 64 |
+
scale_offset_gev: float = 0.0,
|
| 65 |
+
) -> float:
|
| 66 |
+
return float(self.rng.normal(true_mass_gev + scale_offset_gev, resolution_gev))
|
| 67 |
+
|
| 68 |
+
def fit_mass_estimate(
|
| 69 |
+
self,
|
| 70 |
+
true_mass_gev: float,
|
| 71 |
+
n_signal: int,
|
| 72 |
+
resolution_gev: float,
|
| 73 |
+
scale_offset_gev: float,
|
| 74 |
+
) -> float:
|
| 75 |
+
"""Fitted mass ≈ true mass + Gaussian error scaling like 1/√N_signal."""
|
| 76 |
+
n_eff = max(n_signal, 1)
|
| 77 |
+
sigma = resolution_gev / np.sqrt(n_eff)
|
| 78 |
+
return float(self.rng.normal(true_mass_gev + scale_offset_gev, sigma))
|
| 79 |
+
|
| 80 |
+
def fit_mass_uncertainty(
|
| 81 |
+
self,
|
| 82 |
+
n_signal: int,
|
| 83 |
+
resolution_gev: float,
|
| 84 |
+
) -> float:
|
| 85 |
+
"""Statistical mass uncertainty from a peak with N_signal events."""
|
| 86 |
+
n_eff = max(n_signal, 1)
|
| 87 |
+
return float(resolution_gev / np.sqrt(n_eff))
|
| 88 |
+
|
| 89 |
+
# ── significance ───────────────────────────────────────────────────
|
| 90 |
+
|
| 91 |
+
def asimov_significance(
|
| 92 |
+
self,
|
| 93 |
+
n_signal: int,
|
| 94 |
+
n_background: int,
|
| 95 |
+
nuisance_inflation: float = 0.0,
|
| 96 |
+
) -> float:
|
| 97 |
+
"""Asymptotic Asimov-style significance Z = √(2[(s+b) ln(1+s/b) - s]).
|
| 98 |
+
|
| 99 |
+
A small nuisance_inflation term in [0,1] shrinks Z to mimic systematic
|
| 100 |
+
penalties when calibration / systematics studies are skipped.
|
| 101 |
+
"""
|
| 102 |
+
if n_background <= 0:
|
| 103 |
+
return 0.0
|
| 104 |
+
s = float(n_signal)
|
| 105 |
+
b = float(n_background)
|
| 106 |
+
if s <= 0:
|
| 107 |
+
return 0.0
|
| 108 |
+
term = (s + b) * np.log(1.0 + s / b) - s
|
| 109 |
+
z = float(np.sqrt(max(2.0 * term, 0.0)))
|
| 110 |
+
return float(z * (1.0 - nuisance_inflation))
|
| 111 |
+
|
| 112 |
+
# ── helpers ─────────────────────────────────────────────────────────
|
| 113 |
+
|
| 114 |
+
def coin_flip(self, p: float) -> bool:
|
| 115 |
+
return bool(self.rng.random() < p)
|
| 116 |
+
|
| 117 |
+
def jitter(self, mean: float, sigma: float) -> float:
|
| 118 |
+
return float(self.rng.normal(mean, sigma))
|
| 119 |
+
|
| 120 |
+
def quality_degradation(self, base_quality: float, factors: List[float]) -> float:
|
| 121 |
+
q = base_quality
|
| 122 |
+
for f in factors:
|
| 123 |
+
q *= f
|
| 124 |
+
return float(np.clip(q + self.rng.normal(0, 0.02), 0.0, 1.0))
|
| 125 |
+
|
| 126 |
+
def sample_qc_metric(
|
| 127 |
+
self, mean: float, std: float, clip_lo: float = 0.0, clip_hi: float = 1.0
|
| 128 |
+
) -> float:
|
| 129 |
+
return float(np.clip(self.rng.normal(mean, std), clip_lo, clip_hi))
|
| 130 |
+
|
| 131 |
+
def histogram(
|
| 132 |
+
self,
|
| 133 |
+
n_signal: int,
|
| 134 |
+
n_background: int,
|
| 135 |
+
true_mass_gev: float,
|
| 136 |
+
resolution_gev: float,
|
| 137 |
+
window_lo_gev: float,
|
| 138 |
+
window_hi_gev: float,
|
| 139 |
+
n_bins: int = 40,
|
| 140 |
+
background_alpha: float = -2.5,
|
| 141 |
+
) -> List[int]:
|
| 142 |
+
"""Generate a coarse invariant-mass histogram.
|
| 143 |
+
|
| 144 |
+
Signal is Gaussian around the (smeared) true mass with width
|
| 145 |
+
=resolution; background is a falling power-law shape.
|
| 146 |
+
"""
|
| 147 |
+
if window_hi_gev <= window_lo_gev:
|
| 148 |
+
return [0] * n_bins
|
| 149 |
+
edges = np.linspace(window_lo_gev, window_hi_gev, n_bins + 1)
|
| 150 |
+
centers = 0.5 * (edges[:-1] + edges[1:])
|
| 151 |
+
|
| 152 |
+
sig_mu = true_mass_gev
|
| 153 |
+
sig_pdf = np.exp(-0.5 * ((centers - sig_mu) / max(resolution_gev, 1e-3)) ** 2)
|
| 154 |
+
sig_pdf /= max(sig_pdf.sum(), 1e-9)
|
| 155 |
+
|
| 156 |
+
bg_pdf = np.power(np.clip(centers, 1.0, None), background_alpha)
|
| 157 |
+
bg_pdf /= max(bg_pdf.sum(), 1e-9)
|
| 158 |
+
|
| 159 |
+
sig_counts = self.rng.multinomial(max(n_signal, 0), sig_pdf)
|
| 160 |
+
bg_counts = self.rng.multinomial(max(n_background, 0), bg_pdf)
|
| 161 |
+
return (sig_counts + bg_counts).astype(int).tolist()
|
server/simulator/output_generator.py
ADDED
|
@@ -0,0 +1,586 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Builds the noisy ``IntermediateOutput`` returned to the agent each step.
|
| 2 |
+
|
| 3 |
+
The OutputGenerator never mutates state; it only inspects the latent state
|
| 4 |
+
plus the action and produces a structured artifact. State changes happen in
|
| 5 |
+
``TransitionEngine``.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
from typing import Any, Dict, List, Optional
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
from models import (
|
| 15 |
+
ActionType,
|
| 16 |
+
DetectorChannel,
|
| 17 |
+
ExperimentAction,
|
| 18 |
+
IntermediateOutput,
|
| 19 |
+
OutputType,
|
| 20 |
+
TriggerType,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
from .latent_state import FullLatentState
|
| 24 |
+
from .noise import NoiseModel
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# ── Channel-specific background per fb^-1 (very rough physics-flavoured) ─
|
| 28 |
+
BACKGROUND_PER_FB: Dict[str, float] = {
|
| 29 |
+
"diphoton": 1500.0,
|
| 30 |
+
"dilepton_ee": 8000.0,
|
| 31 |
+
"dilepton_mumu": 9000.0,
|
| 32 |
+
"four_lepton": 80.0,
|
| 33 |
+
"dijet": 250000.0,
|
| 34 |
+
"bb": 50000.0,
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# ── Trigger ↔ channel affinity ───────────────────────────────────────────
|
| 39 |
+
TRIGGER_AFFINITY: Dict[str, Dict[str, float]] = {
|
| 40 |
+
"low_pt": {
|
| 41 |
+
"diphoton": 0.5,
|
| 42 |
+
"dilepton_ee": 0.6,
|
| 43 |
+
"dilepton_mumu": 0.6,
|
| 44 |
+
"four_lepton": 0.5,
|
| 45 |
+
"dijet": 0.9,
|
| 46 |
+
"bb": 0.7,
|
| 47 |
+
},
|
| 48 |
+
"high_pt": {
|
| 49 |
+
"diphoton": 0.9,
|
| 50 |
+
"dilepton_ee": 0.8,
|
| 51 |
+
"dilepton_mumu": 0.85,
|
| 52 |
+
"four_lepton": 0.85,
|
| 53 |
+
"dijet": 0.7,
|
| 54 |
+
"bb": 0.55,
|
| 55 |
+
},
|
| 56 |
+
"diphoton_hlt": {
|
| 57 |
+
"diphoton": 1.0,
|
| 58 |
+
"dilepton_ee": 0.05,
|
| 59 |
+
"dilepton_mumu": 0.05,
|
| 60 |
+
"four_lepton": 0.1,
|
| 61 |
+
"dijet": 0.05,
|
| 62 |
+
"bb": 0.05,
|
| 63 |
+
},
|
| 64 |
+
"dilepton_hlt": {
|
| 65 |
+
"diphoton": 0.05,
|
| 66 |
+
"dilepton_ee": 1.0,
|
| 67 |
+
"dilepton_mumu": 1.0,
|
| 68 |
+
"four_lepton": 0.85,
|
| 69 |
+
"dijet": 0.05,
|
| 70 |
+
"bb": 0.05,
|
| 71 |
+
},
|
| 72 |
+
"jet_hlt": {
|
| 73 |
+
"diphoton": 0.1,
|
| 74 |
+
"dilepton_ee": 0.1,
|
| 75 |
+
"dilepton_mumu": 0.1,
|
| 76 |
+
"four_lepton": 0.1,
|
| 77 |
+
"dijet": 1.0,
|
| 78 |
+
"bb": 0.85,
|
| 79 |
+
},
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# ── Beam-energy luminosity & cross-section scaling ───────────────────────
|
| 84 |
+
BEAM_SCALING: Dict[str, Dict[str, float]] = {
|
| 85 |
+
"7TeV": {"xsec_scale": 0.45, "cost_per_fb": 0.05, "days_per_fb": 0.6},
|
| 86 |
+
"8TeV": {"xsec_scale": 0.65, "cost_per_fb": 0.08, "days_per_fb": 0.7},
|
| 87 |
+
"13TeV": {"xsec_scale": 1.00, "cost_per_fb": 0.12, "days_per_fb": 0.8},
|
| 88 |
+
"14TeV": {"xsec_scale": 1.15, "cost_per_fb": 0.18, "days_per_fb": 0.9},
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _trigger_efficiency(trigger: Optional[str], channel: Optional[str]) -> float:
|
| 93 |
+
if not trigger or not channel:
|
| 94 |
+
return 0.0
|
| 95 |
+
table = TRIGGER_AFFINITY.get(trigger, {})
|
| 96 |
+
return float(table.get(channel, 0.1))
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class OutputGenerator:
|
| 100 |
+
"""Translates an action + latent state into a noisy observable artifact."""
|
| 101 |
+
|
| 102 |
+
def __init__(self, noise: NoiseModel):
|
| 103 |
+
self.noise = noise
|
| 104 |
+
|
| 105 |
+
# ── Public API ────────────────────────────────────────────────────
|
| 106 |
+
|
| 107 |
+
def generate(
|
| 108 |
+
self,
|
| 109 |
+
action: ExperimentAction,
|
| 110 |
+
state: FullLatentState,
|
| 111 |
+
step_index: int,
|
| 112 |
+
) -> IntermediateOutput:
|
| 113 |
+
a = action.action_type
|
| 114 |
+
|
| 115 |
+
if a == ActionType.CONFIGURE_BEAM:
|
| 116 |
+
return self._beam(action, state, step_index)
|
| 117 |
+
if a == ActionType.ALLOCATE_LUMINOSITY:
|
| 118 |
+
return self._luminosity(action, state, step_index)
|
| 119 |
+
if a == ActionType.SET_TRIGGER:
|
| 120 |
+
return self._trigger(action, state, step_index)
|
| 121 |
+
if a == ActionType.COLLECT_COLLISIONS:
|
| 122 |
+
return self._collect(action, state, step_index)
|
| 123 |
+
if a == ActionType.CALIBRATE_DETECTOR:
|
| 124 |
+
return self._calibrate(action, state, step_index)
|
| 125 |
+
if a == ActionType.RECONSTRUCT_TRACKS:
|
| 126 |
+
return self._reconstruct(action, state, step_index)
|
| 127 |
+
if a == ActionType.SELECT_CHANNEL:
|
| 128 |
+
return self._select_channel(action, state, step_index)
|
| 129 |
+
if a == ActionType.BUILD_INVARIANT_MASS:
|
| 130 |
+
return self._invariant_mass(action, state, step_index)
|
| 131 |
+
if a == ActionType.SUBTRACT_BACKGROUND:
|
| 132 |
+
return self._subtract_background(action, state, step_index)
|
| 133 |
+
if a == ActionType.FIT_RESONANCE:
|
| 134 |
+
return self._fit_resonance(action, state, step_index)
|
| 135 |
+
if a == ActionType.SCAN_BUMP:
|
| 136 |
+
return self._scan_bump(action, state, step_index)
|
| 137 |
+
if a == ActionType.MEASURE_ANGULAR:
|
| 138 |
+
return self._measure_angular(action, state, step_index)
|
| 139 |
+
if a == ActionType.ESTIMATE_SIGNIFICANCE:
|
| 140 |
+
return self._estimate_significance(action, state, step_index)
|
| 141 |
+
if a == ActionType.REQUEST_SYSTEMATICS:
|
| 142 |
+
return self._request_systematics(action, state, step_index)
|
| 143 |
+
if a == ActionType.REQUEST_THEORY_REVIEW:
|
| 144 |
+
return self._request_theory(action, state, step_index)
|
| 145 |
+
if a == ActionType.SUBMIT_DISCOVERY_CLAIM:
|
| 146 |
+
return self._submit_claim(action, state, step_index)
|
| 147 |
+
|
| 148 |
+
return self._failure(step_index, f"Unhandled action: {a}")
|
| 149 |
+
|
| 150 |
+
# ── helpers ────────────────────────────────────────────────────────
|
| 151 |
+
|
| 152 |
+
def _failure(self, step_index: int, msg: str) -> IntermediateOutput:
|
| 153 |
+
return IntermediateOutput(
|
| 154 |
+
output_type=OutputType.FAILURE_REPORT,
|
| 155 |
+
step_index=step_index,
|
| 156 |
+
success=False,
|
| 157 |
+
quality_score=0.0,
|
| 158 |
+
summary=msg,
|
| 159 |
+
warnings=[msg],
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
# ── DAQ (Data Acquisition) outputs ────────────────────────────────
|
| 163 |
+
|
| 164 |
+
def _beam(
|
| 165 |
+
self,
|
| 166 |
+
action: ExperimentAction,
|
| 167 |
+
state: FullLatentState,
|
| 168 |
+
step_index: int,
|
| 169 |
+
) -> IntermediateOutput:
|
| 170 |
+
beam = action.parameters.get("beam_energy") or state.selected_beam_energy or "13TeV"
|
| 171 |
+
scaling = BEAM_SCALING.get(beam, BEAM_SCALING["13TeV"])
|
| 172 |
+
return IntermediateOutput(
|
| 173 |
+
output_type=OutputType.BEAM_CONFIG,
|
| 174 |
+
step_index=step_index,
|
| 175 |
+
success=True,
|
| 176 |
+
quality_score=0.9,
|
| 177 |
+
summary=f"LHC configured at √s={beam}; effective xsec scale={scaling['xsec_scale']:.2f}.",
|
| 178 |
+
data={
|
| 179 |
+
"beam_energy": beam,
|
| 180 |
+
"xsec_scale": scaling["xsec_scale"],
|
| 181 |
+
"cost_per_fb_musd": scaling["cost_per_fb"],
|
| 182 |
+
"days_per_fb": scaling["days_per_fb"],
|
| 183 |
+
},
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
def _luminosity(
|
| 187 |
+
self,
|
| 188 |
+
action: ExperimentAction,
|
| 189 |
+
state: FullLatentState,
|
| 190 |
+
step_index: int,
|
| 191 |
+
) -> IntermediateOutput:
|
| 192 |
+
requested = float(action.parameters.get("luminosity_fb", 30.0))
|
| 193 |
+
granted = max(0.0, min(requested, state.resources.luminosity_remaining))
|
| 194 |
+
warnings: List[str] = []
|
| 195 |
+
if granted < requested:
|
| 196 |
+
warnings.append(
|
| 197 |
+
f"Luminosity capped: requested {requested:.1f} fb^-1, "
|
| 198 |
+
f"granted {granted:.1f} fb^-1."
|
| 199 |
+
)
|
| 200 |
+
return IntermediateOutput(
|
| 201 |
+
output_type=OutputType.LUMINOSITY_LOG,
|
| 202 |
+
step_index=step_index,
|
| 203 |
+
success=granted > 0,
|
| 204 |
+
quality_score=1.0 if granted > 0 else 0.0,
|
| 205 |
+
summary=f"Allocated {granted:.1f} fb^-1 of integrated luminosity.",
|
| 206 |
+
data={"luminosity_fb": granted, "requested_fb": requested},
|
| 207 |
+
warnings=warnings,
|
| 208 |
+
)
|
| 209 |
+
|
| 210 |
+
def _trigger(
|
| 211 |
+
self,
|
| 212 |
+
action: ExperimentAction,
|
| 213 |
+
state: FullLatentState,
|
| 214 |
+
step_index: int,
|
| 215 |
+
) -> IntermediateOutput:
|
| 216 |
+
trigger = action.parameters.get("trigger") or state.selected_trigger or "high_pt"
|
| 217 |
+
try:
|
| 218 |
+
TriggerType(trigger)
|
| 219 |
+
except ValueError:
|
| 220 |
+
return self._failure(step_index, f"Unknown trigger: {trigger}")
|
| 221 |
+
eff = state.detector.trigger_efficiency
|
| 222 |
+
return IntermediateOutput(
|
| 223 |
+
output_type=OutputType.TRIGGER_REPORT,
|
| 224 |
+
step_index=step_index,
|
| 225 |
+
success=True,
|
| 226 |
+
quality_score=eff,
|
| 227 |
+
summary=f"Trigger {trigger} armed; ε_trig={eff:.2f}.",
|
| 228 |
+
data={"trigger": trigger, "trigger_efficiency": eff},
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
def _collect(
|
| 232 |
+
self,
|
| 233 |
+
action: ExperimentAction,
|
| 234 |
+
state: FullLatentState,
|
| 235 |
+
step_index: int,
|
| 236 |
+
) -> IntermediateOutput:
|
| 237 |
+
beam = state.selected_beam_energy or "13TeV"
|
| 238 |
+
scaling = BEAM_SCALING.get(beam, BEAM_SCALING["13TeV"])
|
| 239 |
+
lumi_request = float(action.parameters.get("luminosity_fb", 0.0))
|
| 240 |
+
if lumi_request <= 0:
|
| 241 |
+
lumi_request = max(0.0, state.resources.luminosity_remaining * 0.2)
|
| 242 |
+
lumi = max(0.0, min(lumi_request, state.resources.luminosity_remaining))
|
| 243 |
+
if lumi <= 0:
|
| 244 |
+
return self._failure(step_index, "No luminosity remaining to collect.")
|
| 245 |
+
|
| 246 |
+
channel = state.selected_channel or state.particle.primary_channel
|
| 247 |
+
try:
|
| 248 |
+
DetectorChannel(channel)
|
| 249 |
+
except ValueError:
|
| 250 |
+
return self._failure(step_index, f"Invalid channel: {channel}")
|
| 251 |
+
|
| 252 |
+
trig = state.selected_trigger or "high_pt"
|
| 253 |
+
trig_eff = _trigger_efficiency(trig, channel)
|
| 254 |
+
reco_eff = state.detector.channel_efficiency.get(channel, 0.4)
|
| 255 |
+
if not state.detector.tracker_aligned and channel in {"dilepton_ee", "dilepton_mumu", "four_lepton"}:
|
| 256 |
+
reco_eff *= 0.7
|
| 257 |
+
if not state.detector.detector_calibrated and channel in {"diphoton"}:
|
| 258 |
+
reco_eff *= 0.8
|
| 259 |
+
|
| 260 |
+
br = state.particle.decay_branching.get(channel, 0.0)
|
| 261 |
+
eff_xsec = state.particle.cross_section_fb * scaling["xsec_scale"]
|
| 262 |
+
|
| 263 |
+
n_sig = self.noise.signal_yield(
|
| 264 |
+
cross_section_fb=eff_xsec,
|
| 265 |
+
luminosity_fb=lumi,
|
| 266 |
+
branching=br,
|
| 267 |
+
efficiency=reco_eff,
|
| 268 |
+
trigger_efficiency=trig_eff,
|
| 269 |
+
)
|
| 270 |
+
n_bg = self.noise.background_yield(
|
| 271 |
+
baseline_per_fb=BACKGROUND_PER_FB.get(channel, 1000.0),
|
| 272 |
+
luminosity_fb=lumi,
|
| 273 |
+
qcd_strength=state.detector.qcd_background_strength,
|
| 274 |
+
trigger_efficiency=trig_eff,
|
| 275 |
+
)
|
| 276 |
+
|
| 277 |
+
cost = lumi * scaling["cost_per_fb"]
|
| 278 |
+
days = lumi * scaling["days_per_fb"]
|
| 279 |
+
|
| 280 |
+
return IntermediateOutput(
|
| 281 |
+
output_type=OutputType.COLLISION_BATCH,
|
| 282 |
+
step_index=step_index,
|
| 283 |
+
success=True,
|
| 284 |
+
quality_score=float(np.clip(reco_eff * trig_eff + 0.1, 0.0, 1.0)),
|
| 285 |
+
summary=(
|
| 286 |
+
f"Collected {lumi:.1f} fb^-1 in {channel} with trigger {trig}: "
|
| 287 |
+
f"~{n_sig + n_bg} reconstructed events."
|
| 288 |
+
),
|
| 289 |
+
data={
|
| 290 |
+
"luminosity_fb": lumi,
|
| 291 |
+
"beam_energy": beam,
|
| 292 |
+
"channel": channel,
|
| 293 |
+
"trigger": trig,
|
| 294 |
+
"n_signal_candidates": int(n_sig),
|
| 295 |
+
"n_background_estimate": int(n_bg),
|
| 296 |
+
"cost_musd": cost,
|
| 297 |
+
"time_days": days,
|
| 298 |
+
"trigger_efficiency": trig_eff,
|
| 299 |
+
"reco_efficiency": reco_eff,
|
| 300 |
+
},
|
| 301 |
+
uncertainty=float(np.clip(0.05 + (1.0 - reco_eff) * 0.2, 0.0, 0.5)),
|
| 302 |
+
)
|
| 303 |
+
|
| 304 |
+
# ── Reconstruction outputs ────────────────────────────────────────
|
| 305 |
+
|
| 306 |
+
def _calibrate(
|
| 307 |
+
self,
|
| 308 |
+
action: ExperimentAction,
|
| 309 |
+
state: FullLatentState,
|
| 310 |
+
step_index: int,
|
| 311 |
+
) -> IntermediateOutput:
|
| 312 |
+
method = action.method or "ECAL_calibration"
|
| 313 |
+
improvement = self.noise.sample_qc_metric(0.5, 0.1, 0.0, 0.95)
|
| 314 |
+
return IntermediateOutput(
|
| 315 |
+
output_type=OutputType.CALIBRATION_REPORT,
|
| 316 |
+
step_index=step_index,
|
| 317 |
+
success=True,
|
| 318 |
+
quality_score=0.9,
|
| 319 |
+
summary=f"Detector calibrated using {method}; resolution improved by {improvement*100:.1f}%.",
|
| 320 |
+
data={
|
| 321 |
+
"method": method,
|
| 322 |
+
"resolution_improvement": improvement,
|
| 323 |
+
},
|
| 324 |
+
uncertainty=0.05,
|
| 325 |
+
)
|
| 326 |
+
|
| 327 |
+
def _reconstruct(
|
| 328 |
+
self,
|
| 329 |
+
action: ExperimentAction,
|
| 330 |
+
state: FullLatentState,
|
| 331 |
+
step_index: int,
|
| 332 |
+
) -> IntermediateOutput:
|
| 333 |
+
method = action.method or "Athena"
|
| 334 |
+
return IntermediateOutput(
|
| 335 |
+
output_type=OutputType.RECONSTRUCTION,
|
| 336 |
+
step_index=step_index,
|
| 337 |
+
success=True,
|
| 338 |
+
quality_score=0.85,
|
| 339 |
+
summary=f"Tracks and physics objects reconstructed via {method}.",
|
| 340 |
+
data={"method": method},
|
| 341 |
+
uncertainty=0.05,
|
| 342 |
+
)
|
| 343 |
+
|
| 344 |
+
def _select_channel(
|
| 345 |
+
self,
|
| 346 |
+
action: ExperimentAction,
|
| 347 |
+
state: FullLatentState,
|
| 348 |
+
step_index: int,
|
| 349 |
+
) -> IntermediateOutput:
|
| 350 |
+
channel = action.parameters.get("channel") or state.selected_channel
|
| 351 |
+
if not channel:
|
| 352 |
+
return self._failure(step_index, "No channel specified.")
|
| 353 |
+
try:
|
| 354 |
+
DetectorChannel(channel)
|
| 355 |
+
except ValueError:
|
| 356 |
+
return self._failure(step_index, f"Unknown channel: {channel}")
|
| 357 |
+
return IntermediateOutput(
|
| 358 |
+
output_type=OutputType.CHANNEL_SELECTION,
|
| 359 |
+
step_index=step_index,
|
| 360 |
+
success=True,
|
| 361 |
+
quality_score=0.95,
|
| 362 |
+
summary=f"Analysis channel set to {channel}.",
|
| 363 |
+
data={"channel": channel},
|
| 364 |
+
)
|
| 365 |
+
|
| 366 |
+
# ── Analysis outputs ──────────────────────────────────────────────
|
| 367 |
+
|
| 368 |
+
def _invariant_mass(
|
| 369 |
+
self,
|
| 370 |
+
action: ExperimentAction,
|
| 371 |
+
state: FullLatentState,
|
| 372 |
+
step_index: int,
|
| 373 |
+
) -> IntermediateOutput:
|
| 374 |
+
if state.progress.n_events_collected <= 0:
|
| 375 |
+
return self._failure(step_index, "No collisions collected yet.")
|
| 376 |
+
window = action.parameters.get("mass_window_gev") or [50.0, 1000.0]
|
| 377 |
+
n_bins = int(action.parameters.get("n_bins", 40))
|
| 378 |
+
true_m = state.particle.mass_gev
|
| 379 |
+
in_window = window[0] <= true_m <= window[1]
|
| 380 |
+
n_sig = state.progress.n_signal_candidates if in_window else 0
|
| 381 |
+
hist = self.noise.histogram(
|
| 382 |
+
n_signal=n_sig,
|
| 383 |
+
n_background=state.progress.n_background_estimate,
|
| 384 |
+
true_mass_gev=true_m,
|
| 385 |
+
resolution_gev=state.detector.detector_resolution_gev,
|
| 386 |
+
window_lo_gev=window[0],
|
| 387 |
+
window_hi_gev=window[1],
|
| 388 |
+
n_bins=n_bins,
|
| 389 |
+
background_alpha=state.detector.background_shape_alpha,
|
| 390 |
+
)
|
| 391 |
+
return IntermediateOutput(
|
| 392 |
+
output_type=OutputType.INVARIANT_MASS_HIST,
|
| 393 |
+
step_index=step_index,
|
| 394 |
+
success=True,
|
| 395 |
+
quality_score=0.85 if in_window else 0.4,
|
| 396 |
+
summary=(
|
| 397 |
+
f"Invariant-mass histogram in [{window[0]:.0f}, {window[1]:.0f}] GeV "
|
| 398 |
+
f"with {n_bins} bins, total {sum(hist)} entries."
|
| 399 |
+
),
|
| 400 |
+
data={
|
| 401 |
+
"window_gev": window,
|
| 402 |
+
"bin_counts": hist,
|
| 403 |
+
"n_signal_in_window": n_sig,
|
| 404 |
+
"n_background_in_window": state.progress.n_background_estimate,
|
| 405 |
+
},
|
| 406 |
+
uncertainty=0.1,
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
def _subtract_background(
|
| 410 |
+
self,
|
| 411 |
+
action: ExperimentAction,
|
| 412 |
+
state: FullLatentState,
|
| 413 |
+
step_index: int,
|
| 414 |
+
) -> IntermediateOutput:
|
| 415 |
+
if not state.progress.invariant_mass_built:
|
| 416 |
+
return self._failure(step_index, "Build the invariant-mass histogram first.")
|
| 417 |
+
residual = self.noise.sample_qc_metric(0.05, 0.02, 0.0, 0.5)
|
| 418 |
+
return IntermediateOutput(
|
| 419 |
+
output_type=OutputType.BACKGROUND_SUBTRACTION,
|
| 420 |
+
step_index=step_index,
|
| 421 |
+
success=True,
|
| 422 |
+
quality_score=0.85,
|
| 423 |
+
summary=f"Smooth background subtracted; residual fraction ≈ {residual*100:.1f}%.",
|
| 424 |
+
data={"residual_fraction": residual},
|
| 425 |
+
uncertainty=0.08,
|
| 426 |
+
)
|
| 427 |
+
|
| 428 |
+
def _fit_resonance(
|
| 429 |
+
self,
|
| 430 |
+
action: ExperimentAction,
|
| 431 |
+
state: FullLatentState,
|
| 432 |
+
step_index: int,
|
| 433 |
+
) -> IntermediateOutput:
|
| 434 |
+
if not state.progress.background_subtracted and not state.progress.invariant_mass_built:
|
| 435 |
+
return self._failure(step_index, "Need a histogram (and ideally background subtraction) before fitting.")
|
| 436 |
+
n_sig = max(state.progress.n_signal_candidates, 1)
|
| 437 |
+
true_m = state.particle.mass_gev
|
| 438 |
+
scale = state.detector.energy_scale_offset
|
| 439 |
+
res = state.detector.detector_resolution_gev
|
| 440 |
+
m_fit = self.noise.fit_mass_estimate(true_m, n_sig, res, scale)
|
| 441 |
+
m_unc = self.noise.fit_mass_uncertainty(n_sig, res)
|
| 442 |
+
w_fit = max(0.001, abs(self.noise.jitter(state.particle.width_gev, 0.1 * res)))
|
| 443 |
+
return IntermediateOutput(
|
| 444 |
+
output_type=OutputType.FIT_RESULT,
|
| 445 |
+
step_index=step_index,
|
| 446 |
+
success=True,
|
| 447 |
+
quality_score=0.9,
|
| 448 |
+
summary=f"Resonance fit: m={m_fit:.2f} ± {m_unc:.2f} GeV, Γ≈{w_fit:.3f} GeV.",
|
| 449 |
+
data={
|
| 450 |
+
"fit_mass_gev": m_fit,
|
| 451 |
+
"fit_mass_unc_gev": m_unc,
|
| 452 |
+
"fit_width_gev": w_fit,
|
| 453 |
+
"n_signal_used": int(n_sig),
|
| 454 |
+
},
|
| 455 |
+
uncertainty=float(np.clip(m_unc / max(true_m, 1.0), 0.0, 1.0)),
|
| 456 |
+
)
|
| 457 |
+
|
| 458 |
+
def _scan_bump(
|
| 459 |
+
self,
|
| 460 |
+
action: ExperimentAction,
|
| 461 |
+
state: FullLatentState,
|
| 462 |
+
step_index: int,
|
| 463 |
+
) -> IntermediateOutput:
|
| 464 |
+
if state.progress.n_events_collected <= 0:
|
| 465 |
+
return self._failure(step_index, "Collect data before bump-hunting.")
|
| 466 |
+
true_m = state.particle.mass_gev
|
| 467 |
+
m_obs = self.noise.smear_mass(true_m, state.detector.detector_resolution_gev * 1.2)
|
| 468 |
+
return IntermediateOutput(
|
| 469 |
+
output_type=OutputType.BUMP_SCAN,
|
| 470 |
+
step_index=step_index,
|
| 471 |
+
success=True,
|
| 472 |
+
quality_score=0.7,
|
| 473 |
+
summary=f"Bump scan most-significant region near m≈{m_obs:.1f} GeV.",
|
| 474 |
+
data={"candidate_mass_gev": m_obs},
|
| 475 |
+
uncertainty=0.15,
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
def _measure_angular(
|
| 479 |
+
self,
|
| 480 |
+
action: ExperimentAction,
|
| 481 |
+
state: FullLatentState,
|
| 482 |
+
step_index: int,
|
| 483 |
+
) -> IntermediateOutput:
|
| 484 |
+
spin_truth = state.particle.spin
|
| 485 |
+
# Returns posterior over {0,1,2} biased by truth + noise
|
| 486 |
+
weights = np.array([0.1, 0.1, 0.1])
|
| 487 |
+
weights[spin_truth] += 0.6
|
| 488 |
+
weights += self.noise.rng.normal(0, 0.05, size=3)
|
| 489 |
+
weights = np.clip(weights, 0.01, None)
|
| 490 |
+
weights /= weights.sum()
|
| 491 |
+
return IntermediateOutput(
|
| 492 |
+
output_type=OutputType.ANGULAR_RESULT,
|
| 493 |
+
step_index=step_index,
|
| 494 |
+
success=True,
|
| 495 |
+
quality_score=0.8,
|
| 496 |
+
summary=(
|
| 497 |
+
"Angular distribution favours spin-"
|
| 498 |
+
f"{int(np.argmax(weights))} ({weights.max():.2f} posterior)."
|
| 499 |
+
),
|
| 500 |
+
data={
|
| 501 |
+
"spin_posterior": weights.tolist(),
|
| 502 |
+
"favoured_spin": int(np.argmax(weights)),
|
| 503 |
+
"parity_estimate": state.particle.parity,
|
| 504 |
+
},
|
| 505 |
+
uncertainty=float(1.0 - weights.max()),
|
| 506 |
+
)
|
| 507 |
+
|
| 508 |
+
def _estimate_significance(
|
| 509 |
+
self,
|
| 510 |
+
action: ExperimentAction,
|
| 511 |
+
state: FullLatentState,
|
| 512 |
+
step_index: int,
|
| 513 |
+
) -> IntermediateOutput:
|
| 514 |
+
n_sig = state.progress.n_signal_candidates
|
| 515 |
+
n_bg = state.progress.n_background_estimate
|
| 516 |
+
nuisance = 0.0
|
| 517 |
+
if not state.progress.systematics_requested:
|
| 518 |
+
nuisance += 0.15
|
| 519 |
+
if not state.progress.detector_calibrated:
|
| 520 |
+
nuisance += 0.10
|
| 521 |
+
z = self.noise.asimov_significance(n_sig, n_bg, nuisance_inflation=nuisance)
|
| 522 |
+
return IntermediateOutput(
|
| 523 |
+
output_type=OutputType.SIGNIFICANCE,
|
| 524 |
+
step_index=step_index,
|
| 525 |
+
success=True,
|
| 526 |
+
quality_score=0.9,
|
| 527 |
+
summary=f"Estimated local significance Z = {z:.2f} σ.",
|
| 528 |
+
data={
|
| 529 |
+
"significance_sigma": z,
|
| 530 |
+
"n_signal": int(n_sig),
|
| 531 |
+
"n_background": int(n_bg),
|
| 532 |
+
"nuisance_inflation": nuisance,
|
| 533 |
+
},
|
| 534 |
+
uncertainty=float(np.clip(0.05 + nuisance, 0.0, 0.5)),
|
| 535 |
+
)
|
| 536 |
+
|
| 537 |
+
# ── Meta outputs ──────────────────────────────────────────────────
|
| 538 |
+
|
| 539 |
+
def _request_systematics(
|
| 540 |
+
self,
|
| 541 |
+
action: ExperimentAction,
|
| 542 |
+
state: FullLatentState,
|
| 543 |
+
step_index: int,
|
| 544 |
+
) -> IntermediateOutput:
|
| 545 |
+
method = action.method or "Luminosity_calibration"
|
| 546 |
+
return IntermediateOutput(
|
| 547 |
+
output_type=OutputType.SYSTEMATICS_REPORT,
|
| 548 |
+
step_index=step_index,
|
| 549 |
+
success=True,
|
| 550 |
+
quality_score=0.85,
|
| 551 |
+
summary=f"Systematics study via {method}; nuisance band tightened.",
|
| 552 |
+
data={"method": method},
|
| 553 |
+
uncertainty=0.04,
|
| 554 |
+
)
|
| 555 |
+
|
| 556 |
+
def _request_theory(
|
| 557 |
+
self,
|
| 558 |
+
action: ExperimentAction,
|
| 559 |
+
state: FullLatentState,
|
| 560 |
+
step_index: int,
|
| 561 |
+
) -> IntermediateOutput:
|
| 562 |
+
return IntermediateOutput(
|
| 563 |
+
output_type=OutputType.THEORY_REVIEW,
|
| 564 |
+
step_index=step_index,
|
| 565 |
+
success=True,
|
| 566 |
+
quality_score=0.7,
|
| 567 |
+
summary="Theory review: candidate consistent with Standard-Model-extension scalar / vector hypotheses.",
|
| 568 |
+
data={"hypotheses": ["BSM scalar", "BSM vector", "SM background fluctuation"]},
|
| 569 |
+
uncertainty=0.2,
|
| 570 |
+
)
|
| 571 |
+
|
| 572 |
+
def _submit_claim(
|
| 573 |
+
self,
|
| 574 |
+
action: ExperimentAction,
|
| 575 |
+
state: FullLatentState,
|
| 576 |
+
step_index: int,
|
| 577 |
+
) -> IntermediateOutput:
|
| 578 |
+
claim: Dict[str, Any] = action.parameters.get("claim") or {}
|
| 579 |
+
return IntermediateOutput(
|
| 580 |
+
output_type=OutputType.DISCOVERY_CLAIM,
|
| 581 |
+
step_index=step_index,
|
| 582 |
+
success=True,
|
| 583 |
+
quality_score=1.0,
|
| 584 |
+
summary="Discovery claim submitted for grading.",
|
| 585 |
+
data=claim,
|
| 586 |
+
)
|
server/simulator/transition.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pure-function transition engine.
|
| 2 |
+
|
| 3 |
+
Given a (latent_state, action, generated_output) triple, produces the next
|
| 4 |
+
latent state plus the deltas needed for the agent-visible observation. The
|
| 5 |
+
``TransitionEngine`` does **not** generate randomness directly; it consumes
|
| 6 |
+
artifacts from the ``OutputGenerator``.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
from dataclasses import dataclass
|
| 12 |
+
from typing import Dict
|
| 13 |
+
|
| 14 |
+
from models import (
|
| 15 |
+
ActionType,
|
| 16 |
+
ExperimentAction,
|
| 17 |
+
IntermediateOutput,
|
| 18 |
+
OutputType,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
from .latent_state import FullLatentState
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
# Per-action default cost in (millions of USD, days, compute hours)
|
| 25 |
+
ACTION_COSTS: Dict[ActionType, Dict[str, float]] = {
|
| 26 |
+
ActionType.CONFIGURE_BEAM: {"musd": 0.10, "days": 0.5, "compute": 0.1},
|
| 27 |
+
ActionType.ALLOCATE_LUMINOSITY: {"musd": 0.05, "days": 0.2, "compute": 0.0},
|
| 28 |
+
ActionType.SET_TRIGGER: {"musd": 0.05, "days": 0.1, "compute": 0.0},
|
| 29 |
+
ActionType.COLLECT_COLLISIONS: {"musd": 0.00, "days": 0.0, "compute": 1.0}, # main cost is in luminosity
|
| 30 |
+
ActionType.CALIBRATE_DETECTOR: {"musd": 0.20, "days": 1.0, "compute": 1.5},
|
| 31 |
+
ActionType.RECONSTRUCT_TRACKS: {"musd": 0.15, "days": 0.8, "compute": 5.0},
|
| 32 |
+
ActionType.SELECT_CHANNEL: {"musd": 0.00, "days": 0.05, "compute": 0.0},
|
| 33 |
+
ActionType.BUILD_INVARIANT_MASS: {"musd": 0.05, "days": 0.3, "compute": 1.0},
|
| 34 |
+
ActionType.SUBTRACT_BACKGROUND: {"musd": 0.05, "days": 0.3, "compute": 0.5},
|
| 35 |
+
ActionType.FIT_RESONANCE: {"musd": 0.10, "days": 0.4, "compute": 0.5},
|
| 36 |
+
ActionType.SCAN_BUMP: {"musd": 0.05, "days": 0.2, "compute": 0.5},
|
| 37 |
+
ActionType.MEASURE_ANGULAR: {"musd": 0.10, "days": 0.4, "compute": 0.5},
|
| 38 |
+
ActionType.ESTIMATE_SIGNIFICANCE: {"musd": 0.05, "days": 0.1, "compute": 0.2},
|
| 39 |
+
ActionType.REQUEST_SYSTEMATICS: {"musd": 0.30, "days": 1.5, "compute": 1.0},
|
| 40 |
+
ActionType.REQUEST_THEORY_REVIEW: {"musd": 0.05, "days": 0.5, "compute": 0.0},
|
| 41 |
+
ActionType.SUBMIT_DISCOVERY_CLAIM:{"musd": 0.0, "days": 0.1, "compute": 0.0},
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def compute_action_cost(action: ExperimentAction, output: IntermediateOutput) -> Dict[str, float]:
|
| 46 |
+
"""Return realised (musd, days, compute_hours, luminosity_fb) for this action."""
|
| 47 |
+
base = ACTION_COSTS.get(action.action_type, {"musd": 0.0, "days": 0.0, "compute": 0.0})
|
| 48 |
+
musd = float(base.get("musd", 0.0))
|
| 49 |
+
days = float(base.get("days", 0.0))
|
| 50 |
+
compute = float(base.get("compute", 0.0))
|
| 51 |
+
lumi_fb = 0.0
|
| 52 |
+
|
| 53 |
+
data = output.data or {}
|
| 54 |
+
if action.action_type == ActionType.COLLECT_COLLISIONS:
|
| 55 |
+
lumi_fb = float(data.get("luminosity_fb", 0.0))
|
| 56 |
+
musd += float(data.get("cost_musd", 0.0))
|
| 57 |
+
days += float(data.get("time_days", 0.0))
|
| 58 |
+
|
| 59 |
+
return {
|
| 60 |
+
"musd": musd,
|
| 61 |
+
"days": days,
|
| 62 |
+
"compute_hours": compute,
|
| 63 |
+
"luminosity_fb": lumi_fb,
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@dataclass
|
| 68 |
+
class TransitionResult:
|
| 69 |
+
next_state: FullLatentState
|
| 70 |
+
realised_cost: Dict[str, float]
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
class TransitionEngine:
|
| 74 |
+
"""Applies an action's output to evolve the latent state."""
|
| 75 |
+
|
| 76 |
+
def step(
|
| 77 |
+
self,
|
| 78 |
+
state: FullLatentState,
|
| 79 |
+
action: ExperimentAction,
|
| 80 |
+
output: IntermediateOutput,
|
| 81 |
+
) -> TransitionResult:
|
| 82 |
+
# We mutate the live state in place, then return it. This is fine
|
| 83 |
+
# because the environment owns the only reference.
|
| 84 |
+
cost = compute_action_cost(action, output)
|
| 85 |
+
state.resources.budget_used_musd += cost["musd"]
|
| 86 |
+
state.resources.time_used_days += cost["days"]
|
| 87 |
+
state.resources.compute_hours_used += cost["compute_hours"]
|
| 88 |
+
state.resources.luminosity_used_fb += cost["luminosity_fb"]
|
| 89 |
+
|
| 90 |
+
if not output.success:
|
| 91 |
+
state.step_count += 1
|
| 92 |
+
return TransitionResult(next_state=state, realised_cost=cost)
|
| 93 |
+
|
| 94 |
+
a = action.action_type
|
| 95 |
+
data = output.data or {}
|
| 96 |
+
|
| 97 |
+
if a == ActionType.CONFIGURE_BEAM:
|
| 98 |
+
beam = data.get("beam_energy")
|
| 99 |
+
state.selected_beam_energy = beam
|
| 100 |
+
state.progress.beam_configured = True
|
| 101 |
+
|
| 102 |
+
elif a == ActionType.ALLOCATE_LUMINOSITY:
|
| 103 |
+
state.progress.luminosity_allocated = True
|
| 104 |
+
|
| 105 |
+
elif a == ActionType.SET_TRIGGER:
|
| 106 |
+
trig = data.get("trigger")
|
| 107 |
+
state.selected_trigger = trig
|
| 108 |
+
state.progress.trigger_set = True
|
| 109 |
+
|
| 110 |
+
elif a == ActionType.COLLECT_COLLISIONS:
|
| 111 |
+
state.progress.collisions_collected = True
|
| 112 |
+
state.progress.n_events_collected += int(
|
| 113 |
+
data.get("n_signal_candidates", 0)
|
| 114 |
+
) + int(data.get("n_background_estimate", 0))
|
| 115 |
+
state.progress.n_signal_candidates += int(data.get("n_signal_candidates", 0))
|
| 116 |
+
state.progress.n_background_estimate += int(data.get("n_background_estimate", 0))
|
| 117 |
+
state.progress.best_channel = data.get("channel") or state.progress.best_channel
|
| 118 |
+
state.progress.best_beam_energy = (
|
| 119 |
+
data.get("beam_energy") or state.progress.best_beam_energy
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
elif a == ActionType.CALIBRATE_DETECTOR:
|
| 123 |
+
state.progress.detector_calibrated = True
|
| 124 |
+
state.detector.detector_calibrated = True
|
| 125 |
+
improvement = float(data.get("resolution_improvement", 0.0))
|
| 126 |
+
state.detector.detector_resolution_gev = max(
|
| 127 |
+
0.05,
|
| 128 |
+
state.detector.detector_resolution_gev * (1.0 - improvement),
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
elif a == ActionType.RECONSTRUCT_TRACKS:
|
| 132 |
+
state.progress.tracks_reconstructed = True
|
| 133 |
+
state.detector.tracker_aligned = True
|
| 134 |
+
|
| 135 |
+
elif a == ActionType.SELECT_CHANNEL:
|
| 136 |
+
channel = data.get("channel")
|
| 137 |
+
if channel:
|
| 138 |
+
state.selected_channel = channel
|
| 139 |
+
state.progress.channel_selected = True
|
| 140 |
+
|
| 141 |
+
elif a == ActionType.BUILD_INVARIANT_MASS:
|
| 142 |
+
state.progress.invariant_mass_built = True
|
| 143 |
+
|
| 144 |
+
elif a == ActionType.SUBTRACT_BACKGROUND:
|
| 145 |
+
state.progress.background_subtracted = True
|
| 146 |
+
|
| 147 |
+
elif a == ActionType.FIT_RESONANCE:
|
| 148 |
+
state.progress.resonance_fitted = True
|
| 149 |
+
m = float(data.get("fit_mass_gev", 0.0))
|
| 150 |
+
unc = float(data.get("fit_mass_unc_gev", 0.0))
|
| 151 |
+
w = float(data.get("fit_width_gev", 0.0))
|
| 152 |
+
if m > 0:
|
| 153 |
+
state.candidate_masses_gev.append(m)
|
| 154 |
+
state.candidate_significances.append(0.0)
|
| 155 |
+
state.progress.best_fit_mass_gev = m
|
| 156 |
+
state.progress.best_fit_width_gev = w
|
| 157 |
+
|
| 158 |
+
elif a == ActionType.SCAN_BUMP:
|
| 159 |
+
state.progress.bump_scanned = True
|
| 160 |
+
cm = float(data.get("candidate_mass_gev", 0.0))
|
| 161 |
+
if cm > 0:
|
| 162 |
+
state.candidate_masses_gev.append(cm)
|
| 163 |
+
state.candidate_significances.append(0.0)
|
| 164 |
+
|
| 165 |
+
elif a == ActionType.MEASURE_ANGULAR:
|
| 166 |
+
state.progress.angular_measured = True
|
| 167 |
+
|
| 168 |
+
elif a == ActionType.ESTIMATE_SIGNIFICANCE:
|
| 169 |
+
state.progress.significance_estimated = True
|
| 170 |
+
sig = float(data.get("significance_sigma", 0.0))
|
| 171 |
+
state.progress.best_significance_sigma = max(
|
| 172 |
+
state.progress.best_significance_sigma or 0.0, sig
|
| 173 |
+
)
|
| 174 |
+
if state.candidate_significances:
|
| 175 |
+
state.candidate_significances[-1] = sig
|
| 176 |
+
|
| 177 |
+
elif a == ActionType.REQUEST_SYSTEMATICS:
|
| 178 |
+
state.progress.systematics_requested = True
|
| 179 |
+
state.detector.energy_scale_uncertainty *= 0.6
|
| 180 |
+
state.detector.luminosity_uncertainty *= 0.7
|
| 181 |
+
|
| 182 |
+
elif a == ActionType.REQUEST_THEORY_REVIEW:
|
| 183 |
+
state.progress.theory_review_requested = True
|
| 184 |
+
|
| 185 |
+
elif a == ActionType.SUBMIT_DISCOVERY_CLAIM:
|
| 186 |
+
state.progress.claim_submitted = True
|
| 187 |
+
|
| 188 |
+
state.step_count += 1
|
| 189 |
+
return TransitionResult(next_state=state, realised_cost=cost)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
__all__ = [
|
| 193 |
+
"ACTION_COSTS",
|
| 194 |
+
"TransitionEngine",
|
| 195 |
+
"TransitionResult",
|
| 196 |
+
"compute_action_cost",
|
| 197 |
+
]
|
server/tasks/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Task generator: curated scenarios + procedural curriculum."""
|
| 2 |
+
|
| 3 |
+
from .scenarios import (
|
| 4 |
+
CURATED_SCENARIOS,
|
| 5 |
+
Scenario,
|
| 6 |
+
sample_scenario,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
__all__ = ["CURATED_SCENARIOS", "Scenario", "sample_scenario"]
|
server/tasks/scenarios.py
ADDED
|
@@ -0,0 +1,422 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Built-in physics scenarios + procedural sampling.
|
| 2 |
+
|
| 3 |
+
Each scenario binds a hidden ``LatentParticle`` truth and a public
|
| 4 |
+
``TaskSpec`` (search window, available channels, resource budgets, expected
|
| 5 |
+
findings, paper references). Curated scenarios are inspired by famous LHC
|
| 6 |
+
discoveries; procedural ones randomise mass, channel, width and budgets to
|
| 7 |
+
build a curriculum.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
from dataclasses import dataclass
|
| 13 |
+
from typing import List, Optional
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
|
| 17 |
+
from models import (
|
| 18 |
+
DetectorChannel,
|
| 19 |
+
ExpectedFinding,
|
| 20 |
+
PaperReference,
|
| 21 |
+
TOOL_REGISTRY,
|
| 22 |
+
TaskSpec,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
from server.simulator.latent_state import (
|
| 26 |
+
DetectorState,
|
| 27 |
+
FullLatentState,
|
| 28 |
+
LatentParticle,
|
| 29 |
+
ResourceState,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@dataclass
|
| 34 |
+
class Scenario:
|
| 35 |
+
name: str
|
| 36 |
+
difficulty: str
|
| 37 |
+
task: TaskSpec
|
| 38 |
+
latent: FullLatentState
|
| 39 |
+
|
| 40 |
+
def fresh_latent(self) -> FullLatentState:
|
| 41 |
+
# Pydantic deep-copy so the env can mutate freely
|
| 42 |
+
return self.latent.model_copy(deep=True)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# ── Curated, story-driven scenarios ──────────────────────────────────────
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _higgs_like_scenario() -> Scenario:
|
| 49 |
+
particle = LatentParticle(
|
| 50 |
+
name="HiggsLike",
|
| 51 |
+
mass_gev=125.0,
|
| 52 |
+
width_gev=0.004,
|
| 53 |
+
spin=0,
|
| 54 |
+
parity="+",
|
| 55 |
+
cross_section_fb=55.0,
|
| 56 |
+
decay_branching={
|
| 57 |
+
"diphoton": 0.0023,
|
| 58 |
+
"dilepton_ee": 0.00003,
|
| 59 |
+
"dilepton_mumu": 0.00022,
|
| 60 |
+
"four_lepton": 0.000125,
|
| 61 |
+
"bb": 0.58,
|
| 62 |
+
"dijet": 0.30,
|
| 63 |
+
},
|
| 64 |
+
primary_channel="diphoton",
|
| 65 |
+
)
|
| 66 |
+
detector = DetectorState(
|
| 67 |
+
detector_resolution_gev=1.5,
|
| 68 |
+
pileup_mu=30.0,
|
| 69 |
+
trigger_efficiency=0.85,
|
| 70 |
+
)
|
| 71 |
+
resources = ResourceState(
|
| 72 |
+
budget_total_musd=120.0,
|
| 73 |
+
luminosity_total_fb=300.0,
|
| 74 |
+
time_limit_days=365.0,
|
| 75 |
+
)
|
| 76 |
+
latent = FullLatentState(
|
| 77 |
+
particle=particle,
|
| 78 |
+
detector=detector,
|
| 79 |
+
resources=resources,
|
| 80 |
+
rng_seed=125,
|
| 81 |
+
)
|
| 82 |
+
task = TaskSpec(
|
| 83 |
+
problem_statement=(
|
| 84 |
+
"An anomalous excess at ~125 GeV is rumoured in early 13 TeV runs. "
|
| 85 |
+
"Plan a campaign to confirm or refute a Standard-Model Higgs-like scalar. "
|
| 86 |
+
"Pick channels, allocate luminosity, fit, and submit a calibrated discovery claim."
|
| 87 |
+
),
|
| 88 |
+
target_collider="LHC",
|
| 89 |
+
mass_search_window_gev=[100.0, 200.0],
|
| 90 |
+
budget_limit_musd=120.0,
|
| 91 |
+
luminosity_budget_fb=300.0,
|
| 92 |
+
time_limit_days=365.0,
|
| 93 |
+
prior_observations=[
|
| 94 |
+
"Earlier Tevatron data shows a mild diphoton excess near 125 GeV.",
|
| 95 |
+
"ATLAS/CMS rumour mills suggest a 4ℓ excess at low mass.",
|
| 96 |
+
],
|
| 97 |
+
success_criteria=[
|
| 98 |
+
"Identify a resonance within 1 GeV of the truth.",
|
| 99 |
+
"Reach ≥5σ local significance.",
|
| 100 |
+
"Submit confidence consistent with calibration.",
|
| 101 |
+
],
|
| 102 |
+
paper_references=[
|
| 103 |
+
PaperReference(
|
| 104 |
+
title="Observation of a new particle in the search for the SM Higgs boson",
|
| 105 |
+
arxiv_id="1207.7214",
|
| 106 |
+
doi="10.1016/j.physletb.2012.08.020",
|
| 107 |
+
),
|
| 108 |
+
],
|
| 109 |
+
expected_findings=[
|
| 110 |
+
ExpectedFinding(finding="Diphoton resonance at ~125 GeV", category="discovery"),
|
| 111 |
+
ExpectedFinding(finding="Spin-0, even parity", category="property"),
|
| 112 |
+
],
|
| 113 |
+
difficulty="medium",
|
| 114 |
+
available_tools=list(TOOL_REGISTRY.keys()),
|
| 115 |
+
)
|
| 116 |
+
return Scenario(name="higgs_like_125", difficulty="medium", task=task, latent=latent)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
def _hidden_zprime_scenario() -> Scenario:
|
| 120 |
+
particle = LatentParticle(
|
| 121 |
+
name="ZPrime",
|
| 122 |
+
mass_gev=600.0,
|
| 123 |
+
width_gev=18.0,
|
| 124 |
+
spin=1,
|
| 125 |
+
parity="-",
|
| 126 |
+
cross_section_fb=12.0,
|
| 127 |
+
decay_branching={
|
| 128 |
+
"diphoton": 0.0,
|
| 129 |
+
"dilepton_ee": 0.04,
|
| 130 |
+
"dilepton_mumu": 0.04,
|
| 131 |
+
"four_lepton": 0.0,
|
| 132 |
+
"bb": 0.20,
|
| 133 |
+
"dijet": 0.70,
|
| 134 |
+
},
|
| 135 |
+
primary_channel="dilepton_mumu",
|
| 136 |
+
)
|
| 137 |
+
detector = DetectorState(
|
| 138 |
+
detector_resolution_gev=8.0,
|
| 139 |
+
pileup_mu=45.0,
|
| 140 |
+
trigger_efficiency=0.78,
|
| 141 |
+
qcd_background_strength=1.2,
|
| 142 |
+
)
|
| 143 |
+
resources = ResourceState(
|
| 144 |
+
budget_total_musd=140.0,
|
| 145 |
+
luminosity_total_fb=200.0,
|
| 146 |
+
time_limit_days=400.0,
|
| 147 |
+
)
|
| 148 |
+
latent = FullLatentState(
|
| 149 |
+
particle=particle, detector=detector, resources=resources, rng_seed=600,
|
| 150 |
+
)
|
| 151 |
+
task = TaskSpec(
|
| 152 |
+
problem_statement=(
|
| 153 |
+
"Run-2 dilepton spectra hint at a high-mass excess. Hunt for a heavy "
|
| 154 |
+
"Z'-like vector resonance and characterise spin-1, parity-odd hypothesis."
|
| 155 |
+
),
|
| 156 |
+
mass_search_window_gev=[300.0, 1500.0],
|
| 157 |
+
budget_limit_musd=140.0,
|
| 158 |
+
luminosity_budget_fb=200.0,
|
| 159 |
+
time_limit_days=400.0,
|
| 160 |
+
prior_observations=[
|
| 161 |
+
"High-pT dilepton tail shows a 2.7σ shoulder near 600 GeV.",
|
| 162 |
+
"Dijet smooth-fit residuals consistent with the same window.",
|
| 163 |
+
],
|
| 164 |
+
success_criteria=[
|
| 165 |
+
"Identify a high-mass dilepton/dijet resonance.",
|
| 166 |
+
"Constrain spin to be vector (1).",
|
| 167 |
+
"Report calibrated mass within 5% and ≥4σ significance.",
|
| 168 |
+
],
|
| 169 |
+
paper_references=[
|
| 170 |
+
PaperReference(
|
| 171 |
+
title="Search for high-mass dilepton resonances at the LHC",
|
| 172 |
+
arxiv_id="1903.06248",
|
| 173 |
+
),
|
| 174 |
+
],
|
| 175 |
+
expected_findings=[
|
| 176 |
+
ExpectedFinding(finding="Heavy Z'-like dilepton resonance", category="discovery"),
|
| 177 |
+
ExpectedFinding(finding="Spin-1, parity-odd", category="property"),
|
| 178 |
+
],
|
| 179 |
+
difficulty="hard",
|
| 180 |
+
available_tools=list(TOOL_REGISTRY.keys()),
|
| 181 |
+
)
|
| 182 |
+
return Scenario(name="hidden_zprime_600", difficulty="hard", task=task, latent=latent)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _diboson_resonance_scenario() -> Scenario:
|
| 186 |
+
particle = LatentParticle(
|
| 187 |
+
name="Graviton",
|
| 188 |
+
mass_gev=750.0,
|
| 189 |
+
width_gev=45.0,
|
| 190 |
+
spin=2,
|
| 191 |
+
parity="+",
|
| 192 |
+
cross_section_fb=6.0,
|
| 193 |
+
decay_branching={
|
| 194 |
+
"diphoton": 0.06,
|
| 195 |
+
"dilepton_ee": 0.005,
|
| 196 |
+
"dilepton_mumu": 0.005,
|
| 197 |
+
"four_lepton": 0.001,
|
| 198 |
+
"bb": 0.15,
|
| 199 |
+
"dijet": 0.70,
|
| 200 |
+
},
|
| 201 |
+
primary_channel="diphoton",
|
| 202 |
+
)
|
| 203 |
+
detector = DetectorState(
|
| 204 |
+
detector_resolution_gev=12.0,
|
| 205 |
+
pileup_mu=50.0,
|
| 206 |
+
trigger_efficiency=0.80,
|
| 207 |
+
)
|
| 208 |
+
resources = ResourceState(
|
| 209 |
+
budget_total_musd=110.0,
|
| 210 |
+
luminosity_total_fb=180.0,
|
| 211 |
+
time_limit_days=350.0,
|
| 212 |
+
)
|
| 213 |
+
latent = FullLatentState(
|
| 214 |
+
particle=particle, detector=detector, resources=resources, rng_seed=750,
|
| 215 |
+
)
|
| 216 |
+
task = TaskSpec(
|
| 217 |
+
problem_statement=(
|
| 218 |
+
"A faint γγ excess at 750 GeV stirred the field briefly in 2015-2016. "
|
| 219 |
+
"Re-investigate with the modern luminosity budget and decide if it is "
|
| 220 |
+
"real or a fluctuation."
|
| 221 |
+
),
|
| 222 |
+
mass_search_window_gev=[400.0, 1200.0],
|
| 223 |
+
budget_limit_musd=110.0,
|
| 224 |
+
luminosity_budget_fb=180.0,
|
| 225 |
+
time_limit_days=350.0,
|
| 226 |
+
prior_observations=[
|
| 227 |
+
"Public CMS/ATLAS data show a 2-3σ diphoton bump near 750 GeV.",
|
| 228 |
+
"Theory papers proposed graviton, scalar singlet, and SM-fluctuation explanations.",
|
| 229 |
+
],
|
| 230 |
+
success_criteria=[
|
| 231 |
+
"Decide between discovery and fluctuation with calibrated confidence.",
|
| 232 |
+
],
|
| 233 |
+
paper_references=[
|
| 234 |
+
PaperReference(
|
| 235 |
+
title="Search for resonant production of high-mass diphoton pairs",
|
| 236 |
+
arxiv_id="1606.04093",
|
| 237 |
+
),
|
| 238 |
+
],
|
| 239 |
+
expected_findings=[
|
| 240 |
+
ExpectedFinding(finding="Possible diphoton resonance near 750 GeV", category="discovery"),
|
| 241 |
+
],
|
| 242 |
+
difficulty="hard",
|
| 243 |
+
available_tools=list(TOOL_REGISTRY.keys()),
|
| 244 |
+
)
|
| 245 |
+
return Scenario(name="diphoton_750", difficulty="hard", task=task, latent=latent)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def _easy_diphoton_scenario() -> Scenario:
|
| 249 |
+
"""Generous budgets, narrow scalar, single obvious channel."""
|
| 250 |
+
particle = LatentParticle(
|
| 251 |
+
name="EasyScalar",
|
| 252 |
+
mass_gev=160.0,
|
| 253 |
+
width_gev=0.5,
|
| 254 |
+
spin=0,
|
| 255 |
+
parity="+",
|
| 256 |
+
cross_section_fb=120.0,
|
| 257 |
+
decay_branching={
|
| 258 |
+
"diphoton": 0.05,
|
| 259 |
+
"dilepton_ee": 0.001,
|
| 260 |
+
"dilepton_mumu": 0.005,
|
| 261 |
+
"four_lepton": 0.0001,
|
| 262 |
+
"bb": 0.50,
|
| 263 |
+
"dijet": 0.30,
|
| 264 |
+
},
|
| 265 |
+
primary_channel="diphoton",
|
| 266 |
+
)
|
| 267 |
+
detector = DetectorState(
|
| 268 |
+
detector_resolution_gev=2.0,
|
| 269 |
+
pileup_mu=20.0,
|
| 270 |
+
trigger_efficiency=0.9,
|
| 271 |
+
)
|
| 272 |
+
resources = ResourceState(
|
| 273 |
+
budget_total_musd=200.0,
|
| 274 |
+
luminosity_total_fb=400.0,
|
| 275 |
+
time_limit_days=500.0,
|
| 276 |
+
)
|
| 277 |
+
latent = FullLatentState(
|
| 278 |
+
particle=particle, detector=detector, resources=resources, rng_seed=160,
|
| 279 |
+
)
|
| 280 |
+
task = TaskSpec(
|
| 281 |
+
problem_statement=(
|
| 282 |
+
"Tutorial scenario: discover a narrow scalar that decays cleanly to "
|
| 283 |
+
"two photons. Resources are abundant; focus on running a clean pipeline."
|
| 284 |
+
),
|
| 285 |
+
mass_search_window_gev=[80.0, 300.0],
|
| 286 |
+
budget_limit_musd=200.0,
|
| 287 |
+
luminosity_budget_fb=400.0,
|
| 288 |
+
time_limit_days=500.0,
|
| 289 |
+
success_criteria=[
|
| 290 |
+
"Identify the diphoton peak and submit a calibrated 5σ claim.",
|
| 291 |
+
],
|
| 292 |
+
expected_findings=[
|
| 293 |
+
ExpectedFinding(finding="Diphoton scalar near 160 GeV", category="discovery"),
|
| 294 |
+
],
|
| 295 |
+
difficulty="easy",
|
| 296 |
+
available_tools=list(TOOL_REGISTRY.keys()),
|
| 297 |
+
)
|
| 298 |
+
return Scenario(name="easy_diphoton_160", difficulty="easy", task=task, latent=latent)
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
CURATED_SCENARIOS: List[Scenario] = [
|
| 302 |
+
_easy_diphoton_scenario(),
|
| 303 |
+
_higgs_like_scenario(),
|
| 304 |
+
_hidden_zprime_scenario(),
|
| 305 |
+
_diboson_resonance_scenario(),
|
| 306 |
+
]
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
# ── Procedural sampler ───────────────────────────────────────────────────
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
_DIFFICULTY_TIERS = {
|
| 313 |
+
"easy": {"mass_lo": 90.0, "mass_hi": 250.0, "xsec_lo": 80.0, "xsec_hi": 150.0, "res": 1.5, "budget": 200.0, "lumi": 400.0},
|
| 314 |
+
"medium": {"mass_lo": 100.0, "mass_hi": 600.0, "xsec_lo": 25.0, "xsec_hi": 80.0, "res": 3.0, "budget": 150.0, "lumi": 300.0},
|
| 315 |
+
"hard": {"mass_lo": 250.0, "mass_hi": 1500.0, "xsec_lo": 5.0, "xsec_hi": 25.0, "res": 8.0, "budget": 110.0, "lumi": 200.0},
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def _procedural_scenario(difficulty: str, rng: np.random.Generator) -> Scenario:
|
| 320 |
+
tier = _DIFFICULTY_TIERS.get(difficulty, _DIFFICULTY_TIERS["medium"])
|
| 321 |
+
mass = float(rng.uniform(tier["mass_lo"], tier["mass_hi"]))
|
| 322 |
+
xsec = float(rng.uniform(tier["xsec_lo"], tier["xsec_hi"]))
|
| 323 |
+
spin = int(rng.choice([0, 1, 2]))
|
| 324 |
+
parity = str(rng.choice(["+", "-"]))
|
| 325 |
+
primary = str(rng.choice([c.value for c in DetectorChannel]))
|
| 326 |
+
|
| 327 |
+
branching = {c.value: 0.001 for c in DetectorChannel}
|
| 328 |
+
branching[primary] = float(rng.uniform(0.02, 0.6))
|
| 329 |
+
# normalise so it sums to ~1
|
| 330 |
+
total = sum(branching.values())
|
| 331 |
+
branching = {k: v / total for k, v in branching.items()}
|
| 332 |
+
|
| 333 |
+
particle = LatentParticle(
|
| 334 |
+
name=f"Mystery_{int(mass)}GeV",
|
| 335 |
+
mass_gev=mass,
|
| 336 |
+
width_gev=float(rng.uniform(0.5, 30.0) if difficulty != "easy" else rng.uniform(0.05, 2.0)),
|
| 337 |
+
spin=spin,
|
| 338 |
+
parity=parity,
|
| 339 |
+
cross_section_fb=xsec,
|
| 340 |
+
decay_branching=branching,
|
| 341 |
+
primary_channel=primary,
|
| 342 |
+
)
|
| 343 |
+
detector = DetectorState(
|
| 344 |
+
detector_resolution_gev=tier["res"],
|
| 345 |
+
pileup_mu=float(rng.uniform(20.0, 60.0)),
|
| 346 |
+
trigger_efficiency=float(rng.uniform(0.7, 0.92)),
|
| 347 |
+
qcd_background_strength=float(rng.uniform(0.8, 1.3)),
|
| 348 |
+
)
|
| 349 |
+
resources = ResourceState(
|
| 350 |
+
budget_total_musd=tier["budget"],
|
| 351 |
+
luminosity_total_fb=tier["lumi"],
|
| 352 |
+
time_limit_days=float(rng.uniform(300.0, 500.0)),
|
| 353 |
+
)
|
| 354 |
+
latent = FullLatentState(
|
| 355 |
+
particle=particle, detector=detector, resources=resources,
|
| 356 |
+
rng_seed=int(rng.integers(1, 1_000_000)),
|
| 357 |
+
)
|
| 358 |
+
window_lo = max(50.0, mass - 200.0)
|
| 359 |
+
window_hi = mass + 300.0
|
| 360 |
+
task = TaskSpec(
|
| 361 |
+
problem_statement=(
|
| 362 |
+
f"Procedural ({difficulty}): a hidden resonance lives somewhere in "
|
| 363 |
+
f"[{window_lo:.0f}, {window_hi:.0f}] GeV. Discover and characterise it."
|
| 364 |
+
),
|
| 365 |
+
mass_search_window_gev=[window_lo, window_hi],
|
| 366 |
+
budget_limit_musd=tier["budget"],
|
| 367 |
+
luminosity_budget_fb=tier["lumi"],
|
| 368 |
+
time_limit_days=resources.time_limit_days,
|
| 369 |
+
difficulty=difficulty,
|
| 370 |
+
available_tools=list(TOOL_REGISTRY.keys()),
|
| 371 |
+
success_criteria=[
|
| 372 |
+
"Discover the hidden resonance with a calibrated mass and channel.",
|
| 373 |
+
],
|
| 374 |
+
)
|
| 375 |
+
return Scenario(
|
| 376 |
+
name=f"procedural_{difficulty}_{int(mass)}",
|
| 377 |
+
difficulty=difficulty,
|
| 378 |
+
task=task,
|
| 379 |
+
latent=latent,
|
| 380 |
+
)
|
| 381 |
+
|
| 382 |
+
|
| 383 |
+
def sample_scenario(
|
| 384 |
+
*,
|
| 385 |
+
difficulty: Optional[str] = None,
|
| 386 |
+
name: Optional[str] = None,
|
| 387 |
+
seed: Optional[int] = None,
|
| 388 |
+
) -> Scenario:
|
| 389 |
+
rng = np.random.default_rng(seed)
|
| 390 |
+
|
| 391 |
+
if name:
|
| 392 |
+
for s in CURATED_SCENARIOS:
|
| 393 |
+
if s.name == name:
|
| 394 |
+
fresh = Scenario(
|
| 395 |
+
name=s.name,
|
| 396 |
+
difficulty=s.difficulty,
|
| 397 |
+
task=s.task,
|
| 398 |
+
latent=s.fresh_latent(),
|
| 399 |
+
)
|
| 400 |
+
if seed is not None:
|
| 401 |
+
fresh.latent.rng_seed = int(seed)
|
| 402 |
+
return fresh
|
| 403 |
+
|
| 404 |
+
if difficulty in {"easy", "medium", "hard"}:
|
| 405 |
+
# mix curated + procedural
|
| 406 |
+
curated_pool = [s for s in CURATED_SCENARIOS if s.difficulty == difficulty]
|
| 407 |
+
if curated_pool and rng.random() < 0.4:
|
| 408 |
+
picked = curated_pool[int(rng.integers(0, len(curated_pool)))]
|
| 409 |
+
return Scenario(
|
| 410 |
+
name=picked.name,
|
| 411 |
+
difficulty=picked.difficulty,
|
| 412 |
+
task=picked.task,
|
| 413 |
+
latent=picked.fresh_latent(),
|
| 414 |
+
)
|
| 415 |
+
return _procedural_scenario(difficulty, rng)
|
| 416 |
+
|
| 417 |
+
# default: random difficulty
|
| 418 |
+
diff = str(rng.choice(["easy", "medium", "hard"]))
|
| 419 |
+
return _procedural_scenario(diff, rng)
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
__all__ = ["CURATED_SCENARIOS", "Scenario", "sample_scenario"]
|
space/__init__.py
ADDED
|
File without changes
|
space/env/Dockerfile
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CERNenv environment Space (Docker, CPU)
|
| 2 |
+
FROM python:3.11-slim
|
| 3 |
+
|
| 4 |
+
ENV PYTHONUNBUFFERED=1 \
|
| 5 |
+
PIP_NO_CACHE_DIR=1 \
|
| 6 |
+
PYTHONPATH=/home/user/app
|
| 7 |
+
|
| 8 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 9 |
+
git curl ca-certificates build-essential \
|
| 10 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 11 |
+
|
| 12 |
+
RUN useradd -ms /bin/bash user
|
| 13 |
+
USER user
|
| 14 |
+
WORKDIR /home/user/app
|
| 15 |
+
|
| 16 |
+
COPY --chown=user:user space/env/requirements.txt /home/user/app/space-env-requirements.txt
|
| 17 |
+
RUN python -m pip install --upgrade pip && \
|
| 18 |
+
python -m pip install --user -r /home/user/app/space-env-requirements.txt
|
| 19 |
+
|
| 20 |
+
COPY --chown=user:user . /home/user/app
|
| 21 |
+
|
| 22 |
+
EXPOSE 7860
|
| 23 |
+
|
| 24 |
+
CMD ["python", "-m", "uvicorn", "server.app:app", "--host", "0.0.0.0", "--port", "7860"]
|
space/env/README.md
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: CERNenv
|
| 3 |
+
emoji: ⚛️
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: indigo
|
| 6 |
+
sdk: docker
|
| 7 |
+
suggested_hardware: cpu-basic
|
| 8 |
+
pinned: false
|
| 9 |
+
license: bsd-3-clause
|
| 10 |
+
short_description: LHC particle-discovery RL environment
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
# CERNenv — LHC Discovery RL Environment
|
| 14 |
+
|
| 15 |
+
OpenEnv-compatible reinforcement-learning environment that simulates an
|
| 16 |
+
LHC (Large Hadron Collider) analysis. An LLM (Large Language Model) agent
|
| 17 |
+
configures the beam, allocates luminosity, picks a decay channel and
|
| 18 |
+
trigger, runs reconstruction, fits an invariant-mass spectrum, estimates
|
| 19 |
+
significance, and finally submits a structured discovery claim that is
|
| 20 |
+
graded against a hidden ground-truth particle.
|
| 21 |
+
|
| 22 |
+
The Space exposes the standard OpenEnv HTTP + WebSocket API:
|
| 23 |
+
|
| 24 |
+
* `GET /health` — liveness
|
| 25 |
+
* `GET /schema` — action / observation / state JSON schemas
|
| 26 |
+
* `POST /reset` — start a new episode (`{ "seed": 7, "scenario": "easy_diphoton_160" }`)
|
| 27 |
+
* `POST /step` — execute one action
|
| 28 |
+
* `GET /state` — current `CernState`
|
| 29 |
+
* `WS /ws` — persistent session (recommended for multi-step rollouts)
|
| 30 |
+
|
| 31 |
+
## Quickstart (Python client)
|
| 32 |
+
|
| 33 |
+
```python
|
| 34 |
+
import asyncio
|
| 35 |
+
from openenv.core import EnvClient
|
| 36 |
+
from huggingface_hub import constants
|
| 37 |
+
|
| 38 |
+
# replace with your space id
|
| 39 |
+
SPACE = "YOUR_HF_USERNAME/cernenv"
|
| 40 |
+
|
| 41 |
+
# (option A) connect to the running Space directly
|
| 42 |
+
import websockets
|
| 43 |
+
async def main():
|
| 44 |
+
async with EnvClient.from_env(SPACE) as env: # uses websockets under the hood
|
| 45 |
+
result = await env.reset(seed=7, scenario="easy_diphoton_160")
|
| 46 |
+
...
|
| 47 |
+
|
| 48 |
+
asyncio.run(main())
|
| 49 |
+
```
|
| 50 |
+
|
| 51 |
+
For training, see the companion **CERNenv Trainer** Space.
|
space/env/requirements.txt
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
numpy>=1.24.0
|
| 2 |
+
scipy>=1.10.0
|
| 3 |
+
pydantic>=2.0.0
|
| 4 |
+
fastapi>=0.110.0
|
| 5 |
+
uvicorn>=0.27.0
|
| 6 |
+
openenv-core[core]>=0.2.3
|
space/training/Dockerfile
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# CERNenv trainer Space (Docker, A100)
|
| 2 |
+
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04
|
| 3 |
+
|
| 4 |
+
ENV DEBIAN_FRONTEND=noninteractive \
|
| 5 |
+
PYTHONUNBUFFERED=1 \
|
| 6 |
+
PIP_NO_CACHE_DIR=1 \
|
| 7 |
+
HF_HOME=/home/user/.cache/huggingface \
|
| 8 |
+
TRANSFORMERS_CACHE=/home/user/.cache/huggingface/transformers \
|
| 9 |
+
PYTHONPATH=/home/user/app
|
| 10 |
+
|
| 11 |
+
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 12 |
+
python3.11 python3.11-venv python3.11-dev python3-pip \
|
| 13 |
+
git curl ca-certificates build-essential \
|
| 14 |
+
&& rm -rf /var/lib/apt/lists/* \
|
| 15 |
+
&& ln -sf /usr/bin/python3.11 /usr/local/bin/python \
|
| 16 |
+
&& ln -sf /usr/bin/python3.11 /usr/local/bin/python3
|
| 17 |
+
|
| 18 |
+
RUN useradd -ms /bin/bash user
|
| 19 |
+
USER user
|
| 20 |
+
ENV PATH="/home/user/.local/bin:${PATH}"
|
| 21 |
+
WORKDIR /home/user/app
|
| 22 |
+
|
| 23 |
+
COPY --chown=user:user space/training/requirements.txt /home/user/app/space-training-requirements.txt
|
| 24 |
+
RUN python -m pip install --upgrade pip && \
|
| 25 |
+
python -m pip install --user -r /home/user/app/space-training-requirements.txt
|
| 26 |
+
|
| 27 |
+
COPY --chown=user:user . /home/user/app
|
| 28 |
+
|
| 29 |
+
EXPOSE 7860
|
| 30 |
+
|
| 31 |
+
CMD ["python", "-m", "uvicorn", "space.training.app:app", "--host", "0.0.0.0", "--port", "7860"]
|
space/training/README.md
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: CERNenv Trainer
|
| 3 |
+
emoji: ⚛️
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: pink
|
| 6 |
+
sdk: docker
|
| 7 |
+
suggested_hardware: a100x4
|
| 8 |
+
suggested_storage: medium
|
| 9 |
+
pinned: false
|
| 10 |
+
license: bsd-3-clause
|
| 11 |
+
short_description: GRPO trainer for CERNenv (Unsloth + LoRA, A100)
|
| 12 |
+
---
|
| 13 |
+
|
| 14 |
+
# CERNenv Trainer (Hugging Face Space, A100)
|
| 15 |
+
|
| 16 |
+
Fine-tunes a small instruction-tuned LLM (Large Language Model) to act as
|
| 17 |
+
an LHC (Large Hadron Collider) physicist inside the **CERNenv** OpenEnv
|
| 18 |
+
environment using **GRPO** (Group-Relative Policy Optimization),
|
| 19 |
+
**Unsloth**, and **LoRA** (Low-Rank Adaptation).
|
| 20 |
+
|
| 21 |
+
## Hardware
|
| 22 |
+
|
| 23 |
+
- Recommended: **4× A100 (`a100x4`, 320 GB VRAM, ~$10/hr)**
|
| 24 |
+
- Single GPU also supported: `a100-large` (slower, fewer episodes recommended)
|
| 25 |
+
- Minimum: T4 / L4 (use the Colab notebook fallback)
|
| 26 |
+
|
| 27 |
+
### Budget guidance (~$27 envelope, the default for this hackathon run)
|
| 28 |
+
|
| 29 |
+
A 1500-episode GRPO run with `MODEL_NAME=unsloth/Qwen2.5-3B-Instruct`,
|
| 30 |
+
`NUM_GENERATIONS=8`, `MAX_STEPS=18` typically lands as follows:
|
| 31 |
+
|
| 32 |
+
| Hardware | $/hr | Wall-clock | Cost (1× run) | Headroom in $27 |
|
| 33 |
+
| ------------ | ----- | ---------- | ------------- | --------------- |
|
| 34 |
+
| `a100x4` | ~$10 | ~1.5–2 h | ~$15–20 | 1 re-run |
|
| 35 |
+
| `a100-large` | ~$4 | ~2.5–3 h | ~$10–12 | 2+ re-runs |
|
| 36 |
+
| `l40sx4` | ~$8 | ~2 h | ~$16 | 1 re-run |
|
| 37 |
+
|
| 38 |
+
`a100x4` gets the trained adapters + evidence into your hands fastest; the
|
| 39 |
+
multi-GPU launcher (`accelerate launch --num_processes 4`) is already wired
|
| 40 |
+
in `_build_training_cmd`. If you want extra safety margin in case anything
|
| 41 |
+
needs a re-run, drop to `a100-large` — wall-clock is ~2× longer but cost
|
| 42 |
+
is ~50% lower, leaving you with budget for two complete attempts.
|
| 43 |
+
|
| 44 |
+
## Required Space secrets
|
| 45 |
+
| Secret | Purpose |
|
| 46 |
+
| --- | --- |
|
| 47 |
+
| `HF_TOKEN` | Hugging Face token with `write` access for model push |
|
| 48 |
+
| `HF_USERNAME` | Hub username, used as the default model-repo owner |
|
| 49 |
+
|
| 50 |
+
## Optional environment variables
|
| 51 |
+
| Variable | Default | Notes |
|
| 52 |
+
| --- | --- | --- |
|
| 53 |
+
| `MODEL_NAME` | `unsloth/Qwen2.5-3B-Instruct` | Any chat model Unsloth supports |
|
| 54 |
+
| `TOTAL_EPISODES` | `1500` | Prompts × generations rollouts |
|
| 55 |
+
| `DIFFICULTY` | `easy` | Starting tier when `CURRICULUM=1`; static tier when `CURRICULUM=0` |
|
| 56 |
+
| `CURRICULUM` | `1` | `1` enables easy→medium→hard prompt-ramp + adaptive eval-tier |
|
| 57 |
+
| `CURRICULUM_PROMOTE` | `0.55` | Held-out success rate that promotes the eval tier one step |
|
| 58 |
+
| `CURRICULUM_DEMOTE` | `0.10` | Rolling success rate that demotes the eval tier one step |
|
| 59 |
+
| `MAX_STEPS` | `18` | Max steps per episode |
|
| 60 |
+
| `NUM_GENERATIONS` | `8` | GRPO group size (bigger = better signal) |
|
| 61 |
+
| `NUM_GPUS` | auto-detected | `accelerate launch --num_processes` value |
|
| 62 |
+
| `CHECKPOINT_EVAL_STEPS` | `25` | Run a held-out eval every N updates |
|
| 63 |
+
| `CHECKPOINT_EVAL_EPISODES` | `8` | Episodes per mid-training eval |
|
| 64 |
+
| `EVAL_EPISODES` | `32` | Episodes for pre/post eval (statistical power) |
|
| 65 |
+
| `OUTPUT_DIR` | `runs/unsloth-grpo` | LoRA adapter output |
|
| 66 |
+
| `EVIDENCE_DIR` | `evidence` | Where curves, CSVs, plots are written |
|
| 67 |
+
| `PUSH_REPO` | `${HF_USERNAME}/cernenv-grpo-qwen2.5-3b` | Hub repo for adapters + evidence |
|
| 68 |
+
| `AUTOSTART` | `0` | Set to `1` to start training on Space boot |
|
| 69 |
+
|
| 70 |
+
## How to use
|
| 71 |
+
|
| 72 |
+
This Space exposes a tiny FastAPI control panel:
|
| 73 |
+
- `GET /` — status + run info + **live training-progress evidence** (curves, before/after metrics, plots)
|
| 74 |
+
- `POST /train` — start / restart a training run
|
| 75 |
+
- `GET /logs?tail=N` — live tail of `training.log`
|
| 76 |
+
- `GET /metrics` — pre / post / Δ metrics JSON
|
| 77 |
+
- `GET /evidence` — list of evidence artifacts on disk
|
| 78 |
+
- `GET /evidence/{name}` — download an artifact (`training_curve.png`, `training_log.csv`, etc.)
|
| 79 |
+
|
| 80 |
+
### Training-progress evidence saved (and pushed to Hub)
|
| 81 |
+
- `training_log.csv` — per-step reward, loss, KL, lr, grad-norm
|
| 82 |
+
- `training_curve.png` — reward + loss vs step
|
| 83 |
+
- `reward_components.csv` — per-rollout terminal vs shaping reward, plus
|
| 84 |
+
discovery / mass / channel / parsed-action rates per logging step.
|
| 85 |
+
This is the "watch individual reward function columns" view recommended
|
| 86 |
+
in the hackathon FAQ — it makes verifier hacks visible (rising mean
|
| 87 |
+
reward without rising mass/channel correctness ⇒ red flag).
|
| 88 |
+
- `reward_components.png` — 2-panel plot rendered from the above CSV
|
| 89 |
+
- `checkpoint_evals.csv` — held-out eval every `CHECKPOINT_EVAL_STEPS` updates
|
| 90 |
+
- `checkpoint_progression.png` — mean reward + success/mass/channel accuracy vs step
|
| 91 |
+
- `pre_eval.jsonl` / `post_eval.jsonl` — full per-episode rollouts before vs after
|
| 92 |
+
- `before_after_summary.png` — pre/post bar chart with Δ annotations
|
| 93 |
+
- `reward_distribution.png` — pre vs post reward histogram
|
| 94 |
+
- `before_after_metrics.json` — machine-readable metrics + deltas
|
| 95 |
+
- `sample_trajectories.md` — cherry-picked pre vs post agent traces
|
| 96 |
+
- `curriculum_state.json` — adaptive-curriculum tier/promotion log
|
| 97 |
+
|
| 98 |
+
Click **"Start training"** in the UI, or set `AUTOSTART=1` in the Space variables to kick off immediately on boot.
|
| 99 |
+
|
| 100 |
+
When training finishes, the LoRA adapters are pushed to `PUSH_REPO`.
|
| 101 |
+
|
| 102 |
+
## Local equivalent
|
| 103 |
+
|
| 104 |
+
The same training run is reproducible locally with:
|
| 105 |
+
|
| 106 |
+
```bash
|
| 107 |
+
# single GPU (with curriculum)
|
| 108 |
+
PYTHONPATH=. python -m training.training_unsloth \
|
| 109 |
+
--model_name unsloth/Qwen2.5-3B-Instruct \
|
| 110 |
+
--difficulty easy --curriculum --total_episodes 1500 --max_steps 18 \
|
| 111 |
+
--num_generations 8 --output_dir runs/unsloth-grpo \
|
| 112 |
+
--evidence_dir evidence
|
| 113 |
+
|
| 114 |
+
# multi-GPU (e.g. 4× A100, with curriculum)
|
| 115 |
+
PYTHONPATH=. accelerate launch --num_processes 4 --mixed_precision bf16 \
|
| 116 |
+
-m training.training_unsloth \
|
| 117 |
+
--difficulty easy --curriculum \
|
| 118 |
+
--total_episodes 1500 --num_generations 8 \
|
| 119 |
+
--output_dir runs/unsloth-grpo --evidence_dir evidence
|
| 120 |
+
```
|
space/training/__init__.py
ADDED
|
File without changes
|
space/training/app.py
ADDED
|
@@ -0,0 +1,673 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI control panel for the CERNenv trainer Space.
|
| 2 |
+
|
| 3 |
+
Endpoints:
|
| 4 |
+
GET / → status page (HTML)
|
| 5 |
+
GET /status → JSON status of the current training run
|
| 6 |
+
GET /metrics → JSON snapshot of reward / success rate
|
| 7 |
+
GET /logs → tail of the training log
|
| 8 |
+
POST /train → start (or restart) a training run
|
| 9 |
+
GET /health → liveness probe
|
| 10 |
+
|
| 11 |
+
Designed to run on a Hugging Face Space with `sdk: docker`. Heavy training
|
| 12 |
+
work runs in a background thread so the HTTP server stays responsive.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
import logging
|
| 19 |
+
import os
|
| 20 |
+
import subprocess
|
| 21 |
+
import sys
|
| 22 |
+
import threading
|
| 23 |
+
import time
|
| 24 |
+
from datetime import datetime, timezone
|
| 25 |
+
from pathlib import Path
|
| 26 |
+
from typing import Any, Dict, Optional
|
| 27 |
+
|
| 28 |
+
from fastapi import FastAPI, HTTPException
|
| 29 |
+
from fastapi.responses import FileResponse, HTMLResponse, JSONResponse, PlainTextResponse
|
| 30 |
+
from fastapi.staticfiles import StaticFiles
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
|
| 34 |
+
logger = logging.getLogger(__name__)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _resolve_repo_root() -> Path:
|
| 38 |
+
env_root = os.environ.get("CERNENV_ROOT")
|
| 39 |
+
candidates = []
|
| 40 |
+
if env_root:
|
| 41 |
+
candidates.append(Path(env_root))
|
| 42 |
+
candidates.extend([
|
| 43 |
+
Path("/home/user/app"),
|
| 44 |
+
Path(__file__).resolve().parent.parent.parent,
|
| 45 |
+
])
|
| 46 |
+
for p in candidates:
|
| 47 |
+
try:
|
| 48 |
+
if p.exists():
|
| 49 |
+
return p.resolve()
|
| 50 |
+
except OSError:
|
| 51 |
+
continue
|
| 52 |
+
return candidates[-1].resolve()
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
REPO_ROOT = _resolve_repo_root()
|
| 56 |
+
LOG_DIR = REPO_ROOT / "training" / "runs"
|
| 57 |
+
try:
|
| 58 |
+
LOG_DIR.mkdir(parents=True, exist_ok=True)
|
| 59 |
+
except OSError as exc: # pragma: no cover - read-only filesystem fallback
|
| 60 |
+
logger.warning("could not create %s (%s); using /tmp", LOG_DIR, exc)
|
| 61 |
+
LOG_DIR = Path("/tmp/cernenv-runs")
|
| 62 |
+
LOG_DIR.mkdir(parents=True, exist_ok=True)
|
| 63 |
+
LOG_FILE = LOG_DIR / "training.log"
|
| 64 |
+
EVIDENCE_DIR = REPO_ROOT / "evidence"
|
| 65 |
+
try:
|
| 66 |
+
EVIDENCE_DIR.mkdir(parents=True, exist_ok=True)
|
| 67 |
+
except OSError: # pragma: no cover
|
| 68 |
+
EVIDENCE_DIR = Path("/tmp/cernenv-evidence")
|
| 69 |
+
EVIDENCE_DIR.mkdir(parents=True, exist_ok=True)
|
| 70 |
+
METRICS_FILE = EVIDENCE_DIR / "before_after_metrics.json"
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _env(name: str, default: str) -> str:
|
| 74 |
+
return os.environ.get(name, default)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def _detect_gpus() -> int:
|
| 78 |
+
try:
|
| 79 |
+
import torch # type: ignore
|
| 80 |
+
if torch.cuda.is_available():
|
| 81 |
+
return torch.cuda.device_count()
|
| 82 |
+
except Exception:
|
| 83 |
+
pass
|
| 84 |
+
try:
|
| 85 |
+
out = subprocess.run(
|
| 86 |
+
["nvidia-smi", "--query-gpu=name", "--format=csv,noheader"],
|
| 87 |
+
capture_output=True, text=True, timeout=5,
|
| 88 |
+
)
|
| 89 |
+
return len([l for l in out.stdout.splitlines() if l.strip()])
|
| 90 |
+
except Exception:
|
| 91 |
+
return 0
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
_NUM_GPUS = _detect_gpus()
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
CONFIG = {
|
| 98 |
+
"model_name": _env("MODEL_NAME", "unsloth/Qwen2.5-3B-Instruct"),
|
| 99 |
+
"difficulty": _env("DIFFICULTY", "easy"),
|
| 100 |
+
"curriculum": _env("CURRICULUM", "1") == "1",
|
| 101 |
+
"curriculum_promote": float(_env("CURRICULUM_PROMOTE", "0.55")),
|
| 102 |
+
"curriculum_demote": float(_env("CURRICULUM_DEMOTE", "0.10")),
|
| 103 |
+
"total_episodes": int(_env("TOTAL_EPISODES", "1500")),
|
| 104 |
+
"max_steps": int(_env("MAX_STEPS", "18")),
|
| 105 |
+
"num_generations": int(_env("NUM_GENERATIONS", "8")),
|
| 106 |
+
"checkpoint_eval_steps": int(_env("CHECKPOINT_EVAL_STEPS", "25")),
|
| 107 |
+
"checkpoint_eval_episodes": int(_env("CHECKPOINT_EVAL_EPISODES", "8")),
|
| 108 |
+
"eval_episodes": int(_env("EVAL_EPISODES", "32")),
|
| 109 |
+
"output_dir": _env("OUTPUT_DIR", "runs/unsloth-grpo"),
|
| 110 |
+
"evidence_dir": _env("EVIDENCE_DIR", "evidence"),
|
| 111 |
+
"num_gpus": int(_env("NUM_GPUS", str(_NUM_GPUS or 1))),
|
| 112 |
+
"hf_username": _env("HF_USERNAME", "anugrah55"),
|
| 113 |
+
"push_repo": _env(
|
| 114 |
+
"PUSH_REPO",
|
| 115 |
+
f"{_env('HF_USERNAME', 'anugrah55')}/cernenv-grpo-qwen2.5-3b",
|
| 116 |
+
),
|
| 117 |
+
"autostart": _env("AUTOSTART", "0") == "1",
|
| 118 |
+
}
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
# ── Run state ────────────────────────────────────────────────────────────
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class RunState:
|
| 125 |
+
def __init__(self) -> None:
|
| 126 |
+
self.lock = threading.Lock()
|
| 127 |
+
self.thread: Optional[threading.Thread] = None
|
| 128 |
+
self.process: Optional[subprocess.Popen] = None
|
| 129 |
+
self.status: str = "idle" # idle | running | finished | failed
|
| 130 |
+
self.started_at: Optional[str] = None
|
| 131 |
+
self.finished_at: Optional[str] = None
|
| 132 |
+
self.last_error: Optional[str] = None
|
| 133 |
+
self.last_config: Dict[str, Any] = {}
|
| 134 |
+
|
| 135 |
+
def to_dict(self) -> Dict[str, Any]:
|
| 136 |
+
with self.lock:
|
| 137 |
+
return {
|
| 138 |
+
"status": self.status,
|
| 139 |
+
"started_at": self.started_at,
|
| 140 |
+
"finished_at": self.finished_at,
|
| 141 |
+
"last_error": self.last_error,
|
| 142 |
+
"last_config": self.last_config,
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
STATE = RunState()
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
# ── Training pipeline ────────────────────────────────────────────────────
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def _stream_subprocess(cmd: list[str], log_handle) -> int:
|
| 153 |
+
log_handle.write(f"\n$ {' '.join(cmd)}\n")
|
| 154 |
+
log_handle.flush()
|
| 155 |
+
proc = subprocess.Popen(
|
| 156 |
+
cmd,
|
| 157 |
+
cwd=str(REPO_ROOT),
|
| 158 |
+
stdout=subprocess.PIPE,
|
| 159 |
+
stderr=subprocess.STDOUT,
|
| 160 |
+
bufsize=1,
|
| 161 |
+
universal_newlines=True,
|
| 162 |
+
env={**os.environ, "PYTHONPATH": str(REPO_ROOT)},
|
| 163 |
+
)
|
| 164 |
+
STATE.process = proc
|
| 165 |
+
assert proc.stdout is not None
|
| 166 |
+
for line in proc.stdout:
|
| 167 |
+
log_handle.write(line)
|
| 168 |
+
log_handle.flush()
|
| 169 |
+
rc = proc.wait()
|
| 170 |
+
log_handle.write(f"[exit code {rc}]\n")
|
| 171 |
+
log_handle.flush()
|
| 172 |
+
STATE.process = None
|
| 173 |
+
return rc
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _build_training_cmd(config: Dict[str, Any]) -> list[str]:
|
| 177 |
+
"""Compose the training launcher (single-GPU python or multi-GPU accelerate)."""
|
| 178 |
+
base = [
|
| 179 |
+
"-m", "training.training_unsloth",
|
| 180 |
+
"--model_name", config["model_name"],
|
| 181 |
+
"--difficulty", config["difficulty"],
|
| 182 |
+
"--total_episodes", str(config["total_episodes"]),
|
| 183 |
+
"--max_steps", str(config["max_steps"]),
|
| 184 |
+
"--num_generations", str(config["num_generations"]),
|
| 185 |
+
"--checkpoint_eval_steps", str(config["checkpoint_eval_steps"]),
|
| 186 |
+
"--checkpoint_eval_episodes", str(config["checkpoint_eval_episodes"]),
|
| 187 |
+
"--output_dir", config["output_dir"],
|
| 188 |
+
"--evidence_dir", config["evidence_dir"],
|
| 189 |
+
]
|
| 190 |
+
if config.get("curriculum"):
|
| 191 |
+
base.extend([
|
| 192 |
+
"--curriculum",
|
| 193 |
+
"--curriculum_promote", str(config["curriculum_promote"]),
|
| 194 |
+
"--curriculum_demote", str(config["curriculum_demote"]),
|
| 195 |
+
])
|
| 196 |
+
n = max(int(config.get("num_gpus", 1)), 1)
|
| 197 |
+
if n > 1:
|
| 198 |
+
return ["accelerate", "launch", "--num_processes", str(n), "--mixed_precision", "bf16"] + base
|
| 199 |
+
return [sys.executable] + base
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _push_evidence_to_hub(*, evidence_dir: Path, repo_id: str, log) -> None:
|
| 203 |
+
"""Upload the entire evidence/ directory to the model repo."""
|
| 204 |
+
token = os.environ.get("HF_TOKEN")
|
| 205 |
+
if not token:
|
| 206 |
+
log.write("\n[skip] HF_TOKEN not set — evidence not pushed\n")
|
| 207 |
+
log.flush()
|
| 208 |
+
return
|
| 209 |
+
try:
|
| 210 |
+
from huggingface_hub import HfApi
|
| 211 |
+
api = HfApi(token=token)
|
| 212 |
+
api.upload_folder(
|
| 213 |
+
folder_path=str(evidence_dir),
|
| 214 |
+
repo_id=repo_id,
|
| 215 |
+
repo_type="model",
|
| 216 |
+
path_in_repo="evidence",
|
| 217 |
+
commit_message="Upload CERNenv training evidence (curves, evals, plots)",
|
| 218 |
+
)
|
| 219 |
+
log.write(f"\n[ok] uploaded evidence/ → https://huggingface.co/{repo_id}/tree/main/evidence\n")
|
| 220 |
+
log.flush()
|
| 221 |
+
except Exception as exc:
|
| 222 |
+
log.write(f"\n[warn] evidence push failed: {exc}\n")
|
| 223 |
+
log.flush()
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def _training_pipeline(config: Dict[str, Any]) -> None:
|
| 227 |
+
started = datetime.now(timezone.utc).isoformat()
|
| 228 |
+
with STATE.lock:
|
| 229 |
+
STATE.status = "running"
|
| 230 |
+
STATE.started_at = started
|
| 231 |
+
STATE.finished_at = None
|
| 232 |
+
STATE.last_error = None
|
| 233 |
+
STATE.last_config = dict(config)
|
| 234 |
+
|
| 235 |
+
evidence_dir = Path(config["evidence_dir"]).resolve()
|
| 236 |
+
evidence_dir.mkdir(parents=True, exist_ok=True)
|
| 237 |
+
|
| 238 |
+
LOG_FILE.parent.mkdir(parents=True, exist_ok=True)
|
| 239 |
+
with open(LOG_FILE, "a") as log:
|
| 240 |
+
log.write(f"\n=== Training started {started} ===\n")
|
| 241 |
+
log.write(json.dumps(config, indent=2) + "\n")
|
| 242 |
+
log.flush()
|
| 243 |
+
try:
|
| 244 |
+
output_dir = config["output_dir"]
|
| 245 |
+
difficulty = config["difficulty"]
|
| 246 |
+
max_steps = str(config["max_steps"])
|
| 247 |
+
eval_episodes = str(config["eval_episodes"])
|
| 248 |
+
model_name = config["model_name"]
|
| 249 |
+
push_repo = config["push_repo"]
|
| 250 |
+
evidence_str = config["evidence_dir"]
|
| 251 |
+
pre_jsonl = f"{evidence_str}/pre_eval.jsonl"
|
| 252 |
+
post_jsonl = f"{evidence_str}/post_eval.jsonl"
|
| 253 |
+
|
| 254 |
+
log.write("\n--- baseline sanity check (random / heuristic / oracle) ---\n")
|
| 255 |
+
log.flush()
|
| 256 |
+
for agent in ("random", "heuristic", "oracle"):
|
| 257 |
+
_stream_subprocess(
|
| 258 |
+
[
|
| 259 |
+
sys.executable, "-m", "scripts.run_agent",
|
| 260 |
+
"--agent", agent, "--difficulty", difficulty,
|
| 261 |
+
"--episodes", "3", "--quiet",
|
| 262 |
+
],
|
| 263 |
+
log,
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
log.write(f"\n--- pre-train evaluation ({eval_episodes} eps) ---\n")
|
| 267 |
+
log.flush()
|
| 268 |
+
rc = _stream_subprocess(
|
| 269 |
+
[
|
| 270 |
+
sys.executable, "-m", "training.evaluate",
|
| 271 |
+
"--model_name", model_name,
|
| 272 |
+
"--difficulty", difficulty,
|
| 273 |
+
"--episodes", eval_episodes,
|
| 274 |
+
"--max_steps", max_steps,
|
| 275 |
+
"--tag", "pre_train",
|
| 276 |
+
"--out", pre_jsonl,
|
| 277 |
+
],
|
| 278 |
+
log,
|
| 279 |
+
)
|
| 280 |
+
if rc != 0:
|
| 281 |
+
# don't abort — we still want training + post-eval evidence.
|
| 282 |
+
log.write(f"\n[warn] pre-train eval failed (rc={rc}); continuing without baseline\n")
|
| 283 |
+
log.flush()
|
| 284 |
+
|
| 285 |
+
log.write(f"\n--- GRPO training ({config['num_gpus']} GPU process(es)) ---\n")
|
| 286 |
+
log.flush()
|
| 287 |
+
rc = _stream_subprocess(_build_training_cmd(config), log)
|
| 288 |
+
if rc != 0:
|
| 289 |
+
raise RuntimeError(f"training failed (rc={rc})")
|
| 290 |
+
|
| 291 |
+
# ── LoRA save-and-reload smoke test ─────────────────────
|
| 292 |
+
# Hackathon FAQ Q9: "Do not upcast a 4-bit model to 16-bit
|
| 293 |
+
# and then merge the LoRA weights naively" — the canonical
|
| 294 |
+
# cause of a broken push. Before we burn time on the full
|
| 295 |
+
# post-train evaluation (32 eps), do a 2-episode cold-load
|
| 296 |
+
# rollout against the saved adapters. If that fails, abort
|
| 297 |
+
# immediately so we surface a save problem, not a 30-min
|
| 298 |
+
# eval timeout.
|
| 299 |
+
log.write(
|
| 300 |
+
f"\n--- adapter save/reload smoke test "
|
| 301 |
+
f"(loading {output_dir} cold-start, 2 eps) ---\n"
|
| 302 |
+
)
|
| 303 |
+
log.flush()
|
| 304 |
+
rc = _stream_subprocess(
|
| 305 |
+
[
|
| 306 |
+
sys.executable, "-m", "training.evaluate",
|
| 307 |
+
"--model_name", model_name,
|
| 308 |
+
"--adapter_dir", output_dir,
|
| 309 |
+
"--difficulty", difficulty,
|
| 310 |
+
"--episodes", "2",
|
| 311 |
+
"--max_steps", max_steps,
|
| 312 |
+
"--tag", "smoke",
|
| 313 |
+
"--out", f"{evidence_str}/smoke_eval.jsonl",
|
| 314 |
+
],
|
| 315 |
+
log,
|
| 316 |
+
)
|
| 317 |
+
if rc != 0:
|
| 318 |
+
raise RuntimeError(
|
| 319 |
+
f"adapter smoke test failed (rc={rc}); refusing to push "
|
| 320 |
+
f"unloadable adapters to the Hub. Inspect {output_dir} and "
|
| 321 |
+
"verify adapter_config.json + adapter_model.safetensors exist."
|
| 322 |
+
)
|
| 323 |
+
|
| 324 |
+
log.write(f"\n--- post-train evaluation ({eval_episodes} eps) ---\n")
|
| 325 |
+
log.flush()
|
| 326 |
+
rc = _stream_subprocess(
|
| 327 |
+
[
|
| 328 |
+
sys.executable, "-m", "training.evaluate",
|
| 329 |
+
"--model_name", model_name,
|
| 330 |
+
"--adapter_dir", output_dir,
|
| 331 |
+
"--difficulty", difficulty,
|
| 332 |
+
"--episodes", eval_episodes,
|
| 333 |
+
"--max_steps", max_steps,
|
| 334 |
+
"--tag", "post_train",
|
| 335 |
+
"--out", post_jsonl,
|
| 336 |
+
],
|
| 337 |
+
log,
|
| 338 |
+
)
|
| 339 |
+
if rc != 0:
|
| 340 |
+
log.write(f"\n[warn] post-train eval failed (rc={rc}); evidence will be partial\n")
|
| 341 |
+
log.flush()
|
| 342 |
+
|
| 343 |
+
log.write("\n--- evidence: before/after summary, distribution, trajectories ---\n")
|
| 344 |
+
log.flush()
|
| 345 |
+
try:
|
| 346 |
+
from training.evidence import (
|
| 347 |
+
EvidencePaths,
|
| 348 |
+
render_before_after,
|
| 349 |
+
render_sample_trajectories,
|
| 350 |
+
render_training_curve,
|
| 351 |
+
render_reward_components,
|
| 352 |
+
render_checkpoint_progression,
|
| 353 |
+
)
|
| 354 |
+
paths = EvidencePaths(root=Path(evidence_str))
|
| 355 |
+
paths.ensure()
|
| 356 |
+
metrics = render_before_after(
|
| 357 |
+
pre_jsonl=Path(pre_jsonl),
|
| 358 |
+
post_jsonl=Path(post_jsonl),
|
| 359 |
+
summary_png=paths.before_after_summary_png,
|
| 360 |
+
distribution_png=paths.reward_distribution_png,
|
| 361 |
+
metrics_json=paths.before_after_metrics_json,
|
| 362 |
+
)
|
| 363 |
+
render_sample_trajectories(
|
| 364 |
+
pre_jsonl=Path(pre_jsonl),
|
| 365 |
+
post_jsonl=Path(post_jsonl),
|
| 366 |
+
md_path=paths.sample_trajectories_md,
|
| 367 |
+
)
|
| 368 |
+
render_training_curve(paths.training_log_csv, paths.training_curve_png)
|
| 369 |
+
render_reward_components(
|
| 370 |
+
paths.reward_components_csv, paths.reward_components_png,
|
| 371 |
+
)
|
| 372 |
+
render_checkpoint_progression(
|
| 373 |
+
paths.checkpoint_evals_csv, paths.checkpoint_progression_png,
|
| 374 |
+
)
|
| 375 |
+
log.write(json.dumps(metrics, indent=2) + "\n")
|
| 376 |
+
log.flush()
|
| 377 |
+
except Exception as exc:
|
| 378 |
+
log.write(f"[warn] evidence rendering failed: {exc}\n")
|
| 379 |
+
log.flush()
|
| 380 |
+
|
| 381 |
+
if os.environ.get("HF_TOKEN"):
|
| 382 |
+
log.write("\n--- push adapters to Hub ---\n")
|
| 383 |
+
log.flush()
|
| 384 |
+
_stream_subprocess(
|
| 385 |
+
[
|
| 386 |
+
sys.executable, "-m", "scripts.push_to_hub", "model",
|
| 387 |
+
"--adapter_dir", output_dir,
|
| 388 |
+
"--repo_id", push_repo,
|
| 389 |
+
"--base_model", model_name,
|
| 390 |
+
],
|
| 391 |
+
log,
|
| 392 |
+
)
|
| 393 |
+
_push_evidence_to_hub(
|
| 394 |
+
evidence_dir=evidence_dir,
|
| 395 |
+
repo_id=push_repo,
|
| 396 |
+
log=log,
|
| 397 |
+
)
|
| 398 |
+
else:
|
| 399 |
+
log.write("\n[skip] HF_TOKEN not set — not pushing to Hub\n")
|
| 400 |
+
log.flush()
|
| 401 |
+
|
| 402 |
+
with STATE.lock:
|
| 403 |
+
STATE.status = "finished"
|
| 404 |
+
except Exception as exc:
|
| 405 |
+
logger.exception("training pipeline failed")
|
| 406 |
+
with STATE.lock:
|
| 407 |
+
STATE.status = "failed"
|
| 408 |
+
STATE.last_error = str(exc)
|
| 409 |
+
finally:
|
| 410 |
+
finished = datetime.now(timezone.utc).isoformat()
|
| 411 |
+
log.write(f"\n=== Training ended {finished} ===\n")
|
| 412 |
+
log.flush()
|
| 413 |
+
with STATE.lock:
|
| 414 |
+
STATE.finished_at = finished
|
| 415 |
+
|
| 416 |
+
|
| 417 |
+
def _start_training(config: Dict[str, Any]) -> None:
|
| 418 |
+
with STATE.lock:
|
| 419 |
+
if STATE.status == "running":
|
| 420 |
+
raise RuntimeError("a training run is already in progress")
|
| 421 |
+
STATE.thread = threading.Thread(
|
| 422 |
+
target=_training_pipeline,
|
| 423 |
+
args=(config,),
|
| 424 |
+
name="cernenv-trainer",
|
| 425 |
+
daemon=True,
|
| 426 |
+
)
|
| 427 |
+
STATE.thread.start()
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
# ── FastAPI app ──────────────────────────────────────────────────────────
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
app = FastAPI(title="CERNenv Trainer", version="0.1.0")
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
_HTML = """\
|
| 437 |
+
<!doctype html>
|
| 438 |
+
<html lang=en>
|
| 439 |
+
<head>
|
| 440 |
+
<meta charset=utf-8>
|
| 441 |
+
<title>CERNenv Trainer</title>
|
| 442 |
+
<style>
|
| 443 |
+
body { font-family: ui-sans-serif, system-ui, sans-serif; margin: 2rem auto;
|
| 444 |
+
max-width: 1000px; color:#111; padding: 0 1rem; line-height:1.5 }
|
| 445 |
+
h1 { margin-bottom: 0 }
|
| 446 |
+
h2 { margin-top: 2rem; border-bottom:1px solid #eee; padding-bottom:.25rem }
|
| 447 |
+
.muted { color:#666 }
|
| 448 |
+
pre { background:#0e1116; color:#e6edf3; padding:1rem; border-radius:6px;
|
| 449 |
+
overflow-x:auto; max-height:40vh; font-size:.85em }
|
| 450 |
+
button { font-size:1rem; padding:.6rem 1rem; border-radius:6px; border:1px solid #888;
|
| 451 |
+
background:#fff; cursor:pointer; margin-right:.4rem }
|
| 452 |
+
.pill { display:inline-block; padding:.1rem .55rem; border-radius:999px;
|
| 453 |
+
background:#eef; color:#225; font-size:.85em }
|
| 454 |
+
.ok { background:#dfd; color:#272 }
|
| 455 |
+
.fail { background:#fdd; color:#822 }
|
| 456 |
+
.run { background:#fdf6d8; color:#774 }
|
| 457 |
+
table { border-collapse:collapse; margin:.5rem 0 }
|
| 458 |
+
td, th { padding:.25rem .8rem .25rem 0; vertical-align: top; text-align:left }
|
| 459 |
+
th { color:#444; font-weight:600 }
|
| 460 |
+
.grid { display:grid; grid-template-columns:1fr 1fr; gap:1rem }
|
| 461 |
+
.card { border:1px solid #e5e7eb; border-radius:8px; padding:.75rem; background:#fafafa }
|
| 462 |
+
.card img { max-width:100%; border-radius:4px }
|
| 463 |
+
.delta-pos { color:#15803d; font-weight:600 }
|
| 464 |
+
.delta-neg { color:#b91c1c; font-weight:600 }
|
| 465 |
+
code { background:#f4f4f4; padding:.05rem .35rem; border-radius:4px }
|
| 466 |
+
a { color:#1d4ed8 }
|
| 467 |
+
</style>
|
| 468 |
+
</head>
|
| 469 |
+
<body>
|
| 470 |
+
<h1>⚛️ CERNenv Trainer</h1>
|
| 471 |
+
<p class=muted>GRPO + Unsloth + LoRA on the CERNenv LHC discovery environment. Multi-GPU on Hugging Face Spaces.</p>
|
| 472 |
+
|
| 473 |
+
<h2>Run status</h2>
|
| 474 |
+
<p>Status: <span id=status class=pill>?</span></p>
|
| 475 |
+
<table id=meta></table>
|
| 476 |
+
<p>
|
| 477 |
+
<button onclick="startRun()">▶ Start training</button>
|
| 478 |
+
<button onclick="refresh()">↻ Refresh</button>
|
| 479 |
+
<a href="/evidence" target=_blank><button>📁 Evidence index</button></a>
|
| 480 |
+
<a href="/docs" target=_blank><button>🛠 API</button></a>
|
| 481 |
+
</p>
|
| 482 |
+
|
| 483 |
+
<h2>Training-progress evidence</h2>
|
| 484 |
+
<p class=muted>Auto-updated as training runs. All artifacts are also saved to <code>evidence/</code> and pushed to the model repo on the Hub.</p>
|
| 485 |
+
<div class=grid>
|
| 486 |
+
<div class=card><b>Per-step training curve</b><br>
|
| 487 |
+
<img id=curve src="/evidence/training_curve.png" onerror="this.style.display='none'">
|
| 488 |
+
<div id=curve_missing class=muted style="display:none">(not yet — waiting for first GRPO step)</div>
|
| 489 |
+
</div>
|
| 490 |
+
<div class=card><b>Reward components (terminal vs shaping)</b><br>
|
| 491 |
+
<img id=components src="/evidence/reward_components.png" onerror="this.style.display='none'">
|
| 492 |
+
<div id=components_missing class=muted style="display:none">(populated after a few rollouts — watches verifier hacks)</div>
|
| 493 |
+
</div>
|
| 494 |
+
<div class=card><b>Mid-training checkpoint progression</b><br>
|
| 495 |
+
<img id=ckpt src="/evidence/checkpoint_progression.png" onerror="this.style.display='none'">
|
| 496 |
+
<div id=ckpt_missing class=muted style="display:none">(not yet — waiting for first checkpoint eval)</div>
|
| 497 |
+
</div>
|
| 498 |
+
<div class=card><b>Before vs after summary</b><br>
|
| 499 |
+
<img id=summary src="/evidence/before_after_summary.png" onerror="this.style.display='none'">
|
| 500 |
+
<div id=summary_missing class=muted style="display:none">(generated after post-train eval)</div>
|
| 501 |
+
</div>
|
| 502 |
+
<div class=card><b>Reward distribution: pre vs post</b><br>
|
| 503 |
+
<img id=dist src="/evidence/reward_distribution.png" onerror="this.style.display='none'">
|
| 504 |
+
<div id=dist_missing class=muted style="display:none">(generated after post-train eval)</div>
|
| 505 |
+
</div>
|
| 506 |
+
</div>
|
| 507 |
+
|
| 508 |
+
<h2>Before / after metrics</h2>
|
| 509 |
+
<table id=metrics_table>
|
| 510 |
+
<tr><th>metric</th><th>pre</th><th>post</th><th>Δ</th></tr>
|
| 511 |
+
</table>
|
| 512 |
+
|
| 513 |
+
<h2>Live logs (tail)</h2>
|
| 514 |
+
<pre id=logs>loading…</pre>
|
| 515 |
+
|
| 516 |
+
<script>
|
| 517 |
+
function fmt(v) {
|
| 518 |
+
if (v == null) return '–';
|
| 519 |
+
if (typeof v === 'number') return v.toFixed(3);
|
| 520 |
+
return v;
|
| 521 |
+
}
|
| 522 |
+
function fmtDelta(d) {
|
| 523 |
+
if (d == null || isNaN(d)) return '–';
|
| 524 |
+
const sign = d >= 0 ? '+' : '';
|
| 525 |
+
const cls = d >= 0 ? 'delta-pos' : 'delta-neg';
|
| 526 |
+
return `<span class="${cls}">${sign}${d.toFixed(3)}</span>`;
|
| 527 |
+
}
|
| 528 |
+
|
| 529 |
+
async function refresh() {
|
| 530 |
+
// status
|
| 531 |
+
const s = await fetch('/status').then(r => r.json());
|
| 532 |
+
const pill = document.getElementById('status');
|
| 533 |
+
pill.textContent = s.status;
|
| 534 |
+
pill.className = 'pill ' + ({idle:'',running:'run',finished:'ok',failed:'fail'}[s.status] || '');
|
| 535 |
+
|
| 536 |
+
const meta = document.getElementById('meta');
|
| 537 |
+
meta.innerHTML = '';
|
| 538 |
+
const obj = {
|
| 539 |
+
started_at: s.started_at, finished_at: s.finished_at, error: s.last_error,
|
| 540 |
+
...(s.last_config || {}),
|
| 541 |
+
};
|
| 542 |
+
for (const [k, v] of Object.entries(obj)) {
|
| 543 |
+
if (v == null || v === '') continue;
|
| 544 |
+
const tr = document.createElement('tr');
|
| 545 |
+
tr.innerHTML = `<td><b>${k}</b></td><td><code>${v}</code></td>`;
|
| 546 |
+
meta.appendChild(tr);
|
| 547 |
+
}
|
| 548 |
+
|
| 549 |
+
// metrics
|
| 550 |
+
const m = await fetch('/metrics').then(r => r.json()).catch(() => ({pre:null, post:null}));
|
| 551 |
+
const tbody = document.getElementById('metrics_table');
|
| 552 |
+
tbody.innerHTML = '<tr><th>metric</th><th>pre</th><th>post</th><th>Δ</th></tr>';
|
| 553 |
+
const fields = ['mean_reward', 'success_rate', 'mass_acc', 'channel_acc', 'median_reward'];
|
| 554 |
+
for (const f of fields) {
|
| 555 |
+
const pre = m.pre && m.pre[f];
|
| 556 |
+
const post = m.post && m.post[f];
|
| 557 |
+
const delta = m.delta && m.delta[f];
|
| 558 |
+
const tr = document.createElement('tr');
|
| 559 |
+
tr.innerHTML = `<td><code>${f}</code></td><td>${fmt(pre)}</td><td>${fmt(post)}</td><td>${fmtDelta(delta)}</td>`;
|
| 560 |
+
tbody.appendChild(tr);
|
| 561 |
+
}
|
| 562 |
+
|
| 563 |
+
// bust caches on plots
|
| 564 |
+
const bust = '?t=' + Date.now();
|
| 565 |
+
for (const [imgId, missingId] of [
|
| 566 |
+
['curve', 'curve_missing'],
|
| 567 |
+
['components', 'components_missing'],
|
| 568 |
+
['ckpt', 'ckpt_missing'],
|
| 569 |
+
['summary', 'summary_missing'],
|
| 570 |
+
['dist', 'dist_missing'],
|
| 571 |
+
]) {
|
| 572 |
+
const img = document.getElementById(imgId);
|
| 573 |
+
const miss = document.getElementById(missingId);
|
| 574 |
+
const baseSrc = img.getAttribute('src').split('?')[0];
|
| 575 |
+
const probe = new Image();
|
| 576 |
+
probe.onload = () => { img.src = baseSrc + bust; img.style.display=''; miss.style.display='none'; };
|
| 577 |
+
probe.onerror = () => { img.style.display='none'; miss.style.display=''; };
|
| 578 |
+
probe.src = baseSrc + bust;
|
| 579 |
+
}
|
| 580 |
+
|
| 581 |
+
const logs = await fetch('/logs?tail=200').then(r => r.text());
|
| 582 |
+
document.getElementById('logs').textContent = logs || '(no logs yet)';
|
| 583 |
+
}
|
| 584 |
+
async function startRun() {
|
| 585 |
+
const r = await fetch('/train', {method:'POST'});
|
| 586 |
+
if (!r.ok) alert((await r.json()).detail || 'failed');
|
| 587 |
+
setTimeout(refresh, 500);
|
| 588 |
+
}
|
| 589 |
+
refresh();
|
| 590 |
+
setInterval(refresh, 5000);
|
| 591 |
+
</script>
|
| 592 |
+
</body>
|
| 593 |
+
</html>
|
| 594 |
+
"""
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
@app.get("/", response_class=HTMLResponse)
|
| 598 |
+
def index() -> HTMLResponse:
|
| 599 |
+
return HTMLResponse(_HTML)
|
| 600 |
+
|
| 601 |
+
|
| 602 |
+
@app.get("/health")
|
| 603 |
+
def health() -> Dict[str, str]:
|
| 604 |
+
return {"status": "ok"}
|
| 605 |
+
|
| 606 |
+
|
| 607 |
+
@app.get("/status")
|
| 608 |
+
def status() -> JSONResponse:
|
| 609 |
+
return JSONResponse(STATE.to_dict())
|
| 610 |
+
|
| 611 |
+
|
| 612 |
+
@app.get("/metrics")
|
| 613 |
+
def metrics() -> JSONResponse:
|
| 614 |
+
if METRICS_FILE.exists():
|
| 615 |
+
try:
|
| 616 |
+
return JSONResponse(json.loads(METRICS_FILE.read_text()))
|
| 617 |
+
except Exception:
|
| 618 |
+
return JSONResponse({"error": "metrics file unreadable"}, status_code=500)
|
| 619 |
+
return JSONResponse({"pre": None, "post": None, "delta": None})
|
| 620 |
+
|
| 621 |
+
|
| 622 |
+
@app.get("/evidence")
|
| 623 |
+
def evidence_index() -> JSONResponse:
|
| 624 |
+
"""List every evidence artifact currently on disk."""
|
| 625 |
+
files = []
|
| 626 |
+
if EVIDENCE_DIR.exists():
|
| 627 |
+
for p in sorted(EVIDENCE_DIR.iterdir()):
|
| 628 |
+
if p.is_file():
|
| 629 |
+
files.append({
|
| 630 |
+
"name": p.name,
|
| 631 |
+
"size": p.stat().st_size,
|
| 632 |
+
"url": f"/evidence/{p.name}",
|
| 633 |
+
})
|
| 634 |
+
return JSONResponse({"dir": str(EVIDENCE_DIR), "files": files})
|
| 635 |
+
|
| 636 |
+
|
| 637 |
+
@app.get("/evidence/{name}")
|
| 638 |
+
def evidence_file(name: str):
|
| 639 |
+
"""Serve a single evidence artifact (PNG/CSV/JSON/MD) by filename."""
|
| 640 |
+
if "/" in name or ".." in name:
|
| 641 |
+
raise HTTPException(status_code=400, detail="invalid name")
|
| 642 |
+
target = EVIDENCE_DIR / name
|
| 643 |
+
if not target.exists() or not target.is_file():
|
| 644 |
+
raise HTTPException(status_code=404, detail=f"{name} not found")
|
| 645 |
+
return FileResponse(target)
|
| 646 |
+
|
| 647 |
+
|
| 648 |
+
@app.get("/logs", response_class=PlainTextResponse)
|
| 649 |
+
def logs(tail: int = 400) -> PlainTextResponse:
|
| 650 |
+
if not LOG_FILE.exists():
|
| 651 |
+
return PlainTextResponse("")
|
| 652 |
+
text = LOG_FILE.read_text()
|
| 653 |
+
lines = text.splitlines()
|
| 654 |
+
return PlainTextResponse("\n".join(lines[-max(tail, 1):]))
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
@app.post("/train")
|
| 658 |
+
def train() -> JSONResponse:
|
| 659 |
+
try:
|
| 660 |
+
_start_training(dict(CONFIG))
|
| 661 |
+
except RuntimeError as exc:
|
| 662 |
+
raise HTTPException(status_code=409, detail=str(exc))
|
| 663 |
+
return JSONResponse({"status": "started", "config": CONFIG})
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
@app.on_event("startup")
|
| 667 |
+
def _maybe_autostart() -> None:
|
| 668 |
+
if CONFIG["autostart"]:
|
| 669 |
+
try:
|
| 670 |
+
_start_training(dict(CONFIG))
|
| 671 |
+
logger.info("autostarted training run")
|
| 672 |
+
except RuntimeError as exc:
|
| 673 |
+
logger.warning("autostart skipped: %s", exc)
|
space/training/requirements.txt
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
--extra-index-url https://download.pytorch.org/whl/cu128
|
| 2 |
+
# Strategy: pin to Unsloth 2026.4.8's officially supported A100/Ampere matrix
|
| 3 |
+
# `cu128-ampere-torch2100`. Earlier we tried torch 2.6.0 + cu124, which pulled
|
| 4 |
+
# transformers 5.5 → torchao 0.17 → `torch.utils._pytree.register_constant`,
|
| 5 |
+
# a torch 2.7+ symbol → AttributeError at import. Bumping torch to 2.10.0
|
| 6 |
+
# (highest within Unsloth's `<2.11.0` cap) keeps every transitive happy.
|
| 7 |
+
torch==2.10.0
|
| 8 |
+
torchvision==0.25.0
|
| 9 |
+
torchaudio==2.10.0
|
| 10 |
+
unsloth==2026.4.8
|
| 11 |
+
unsloth_zoo>=2026.4.8
|
| 12 |
+
# Bound transformers to <5.5 to skip a known torchao-integration regression on
|
| 13 |
+
# 5.5.x while staying inside Unsloth's allow-list (>=4.51.3 with the explicit
|
| 14 |
+
# version exclusions Unsloth declares in its requires_dist).
|
| 15 |
+
transformers>=4.51.3,<5.5,!=4.52.0,!=4.52.1,!=4.52.2,!=4.52.3,!=4.53.0,!=4.54.0,!=4.55.0,!=4.55.1,!=4.57.0,!=4.57.4,!=4.57.5
|
| 16 |
+
trl>=0.18.2,<=0.24.0,!=0.19.0
|
| 17 |
+
peft>=0.18.0,!=0.11.0
|
| 18 |
+
accelerate>=0.34.1
|
| 19 |
+
datasets>=3.4.1,<4.4.0,!=4.0.*,!=4.1.0
|
| 20 |
+
bitsandbytes>=0.45.5,!=0.46.0,!=0.48.0
|
| 21 |
+
xformers==0.0.34
|
| 22 |
+
matplotlib>=3.8.0
|
| 23 |
+
|
| 24 |
+
# Space control panel + env API deps
|
| 25 |
+
numpy>=1.24.0
|
| 26 |
+
scipy>=1.10.0
|
| 27 |
+
pydantic>=2.0.0
|
| 28 |
+
fastapi>=0.110.0
|
| 29 |
+
uvicorn>=0.27.0
|
| 30 |
+
huggingface_hub>=0.24.0
|
| 31 |
+
openenv-core[core]>=0.2.3
|
tests/__init__.py
ADDED
|
File without changes
|
tests/conftest.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pytest configuration: ensure repo root is on sys.path so ``models``,
|
| 2 |
+
``server.*`` etc. import correctly when tests are launched as
|
| 3 |
+
``pytest tests/`` from the repo root or from any subfolder.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
import sys
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
_REPO_ROOT = Path(__file__).resolve().parent.parent
|
| 12 |
+
if str(_REPO_ROOT) not in sys.path:
|
| 13 |
+
sys.path.insert(0, str(_REPO_ROOT))
|
tests/test_curriculum.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for the adaptive curriculum manager + prompt-schedule helper."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from training.curriculum import CurriculumConfig, CurriculumManager, TIERS
|
| 8 |
+
from training.training_script import (
|
| 9 |
+
DEFAULT_CURRICULUM_SCHEDULE,
|
| 10 |
+
curriculum_difficulty_for,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def test_schedule_ramps_easy_to_hard():
|
| 15 |
+
n = 100
|
| 16 |
+
diffs = [curriculum_difficulty_for(i, n) for i in range(n)]
|
| 17 |
+
# First episode is easy, last is hard.
|
| 18 |
+
assert diffs[0] == "easy"
|
| 19 |
+
assert diffs[-1] == "hard"
|
| 20 |
+
# Each tier appears at least once.
|
| 21 |
+
assert "easy" in diffs and "medium" in diffs and "hard" in diffs
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def test_schedule_respects_custom_phases():
|
| 25 |
+
n = 100
|
| 26 |
+
schedule = [("easy", 0.2), ("medium", 0.6), ("hard", 0.2)]
|
| 27 |
+
diffs = [curriculum_difficulty_for(i, n, schedule) for i in range(n)]
|
| 28 |
+
assert diffs.count("easy") == 20
|
| 29 |
+
assert diffs.count("medium") == 60
|
| 30 |
+
assert diffs.count("hard") == 20
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def test_curriculum_promotes_after_streak_of_successes():
|
| 34 |
+
cm = CurriculumManager(CurriculumConfig(window=10, min_rollouts_per_tier=10,
|
| 35 |
+
promote_threshold=0.6))
|
| 36 |
+
assert cm.next_difficulty() == "easy"
|
| 37 |
+
for _ in range(10):
|
| 38 |
+
cm.record(success=True)
|
| 39 |
+
assert cm.next_difficulty() == "medium"
|
| 40 |
+
assert cm.state.promotions == 1
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def test_curriculum_demotes_after_streak_of_failures():
|
| 44 |
+
cm = CurriculumManager(CurriculumConfig(
|
| 45 |
+
window=10, min_rollouts_per_tier=10,
|
| 46 |
+
start_difficulty="medium", demote_threshold=0.2,
|
| 47 |
+
))
|
| 48 |
+
for _ in range(10):
|
| 49 |
+
cm.record(success=False)
|
| 50 |
+
assert cm.next_difficulty() == "easy"
|
| 51 |
+
assert cm.state.demotions == 1
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def test_curriculum_does_not_change_before_min_rollouts():
|
| 55 |
+
cm = CurriculumManager(CurriculumConfig(window=20, min_rollouts_per_tier=20))
|
| 56 |
+
for _ in range(5):
|
| 57 |
+
cm.record(success=True)
|
| 58 |
+
assert cm.next_difficulty() == "easy"
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def test_curriculum_tracks_rolling_success():
|
| 62 |
+
cm = CurriculumManager(CurriculumConfig(window=10, min_rollouts_per_tier=100))
|
| 63 |
+
for _ in range(7):
|
| 64 |
+
cm.record(success=True)
|
| 65 |
+
for _ in range(3):
|
| 66 |
+
cm.record(success=False)
|
| 67 |
+
assert cm.rolling_success() == pytest.approx(0.7)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def test_curriculum_serialises_to_dict():
|
| 71 |
+
cm = CurriculumManager(CurriculumConfig())
|
| 72 |
+
cm.record(success=True)
|
| 73 |
+
snap = cm.to_dict()
|
| 74 |
+
assert "config" in snap and "state" in snap
|
| 75 |
+
assert snap["state"]["current"] in TIERS
|
tests/test_environment.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Integration tests for ``CERNCollisionEnvironment``.
|
| 2 |
+
|
| 3 |
+
The point of these tests is not to assert specific reward magnitudes
|
| 4 |
+
(those depend on noise) but to confirm:
|
| 5 |
+
|
| 6 |
+
* ``reset`` / ``step`` / ``state`` follow OpenEnv's gym-style contract,
|
| 7 |
+
* the heuristic baseline beats the random baseline on average,
|
| 8 |
+
* the oracle baseline (which peeks at the truth) gets a positive
|
| 9 |
+
cumulative reward — i.e. the environment is *winnable*,
|
| 10 |
+
* the env terminates when ``max_steps`` is reached or budget runs out.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import statistics
|
| 16 |
+
|
| 17 |
+
import pytest
|
| 18 |
+
|
| 19 |
+
from models import ActionType, ExperimentAction
|
| 20 |
+
from scripts.baseline_agents import HeuristicAgent, OracleAgent, RandomAgent
|
| 21 |
+
from server.environment import CERNCollisionEnvironment, CernState
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _run_episode(env, agent, *, seed: int, scenario: str | None = None,
|
| 25 |
+
difficulty: str | None = None) -> float:
|
| 26 |
+
obs = env.reset(seed=seed, scenario=scenario, difficulty=difficulty)
|
| 27 |
+
if agent.name == "oracle":
|
| 28 |
+
agent.truth = env.hidden_truth()
|
| 29 |
+
agent.reset()
|
| 30 |
+
cumulative = 0.0
|
| 31 |
+
while not obs.done:
|
| 32 |
+
action = agent.act(obs)
|
| 33 |
+
obs = env.step(action)
|
| 34 |
+
cumulative += float(obs.reward or 0.0)
|
| 35 |
+
return cumulative
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# ── Gym-style contract ──────────────────────────────────────────────────
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_reset_returns_observation_with_task():
|
| 42 |
+
env = CERNCollisionEnvironment(max_steps=10)
|
| 43 |
+
obs = env.reset(seed=1, scenario="easy_diphoton_160")
|
| 44 |
+
assert obs.task is not None
|
| 45 |
+
assert obs.task.problem_statement
|
| 46 |
+
assert obs.step_index == 0
|
| 47 |
+
assert obs.done is False
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def test_state_reflects_episode_progress():
|
| 51 |
+
env = CERNCollisionEnvironment(max_steps=5)
|
| 52 |
+
env.reset(seed=2, scenario="easy_diphoton_160")
|
| 53 |
+
assert isinstance(env.state, CernState)
|
| 54 |
+
assert env.state.scenario_name == "easy_diphoton_160"
|
| 55 |
+
assert env.state.episode_done is False
|
| 56 |
+
assert env.state.cumulative_reward == 0.0
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def test_step_advances_step_count_and_history():
|
| 60 |
+
env = CERNCollisionEnvironment(max_steps=5)
|
| 61 |
+
env.reset(seed=3, scenario="easy_diphoton_160")
|
| 62 |
+
obs = env.step(
|
| 63 |
+
ExperimentAction(
|
| 64 |
+
action_type=ActionType.CONFIGURE_BEAM,
|
| 65 |
+
parameters={"beam_energy": "13TeV"},
|
| 66 |
+
)
|
| 67 |
+
)
|
| 68 |
+
assert obs.step_index == 1
|
| 69 |
+
assert len(obs.pipeline_history) == 1
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def test_episode_terminates_at_max_steps():
|
| 73 |
+
env = CERNCollisionEnvironment(max_steps=3)
|
| 74 |
+
env.reset(seed=4, scenario="easy_diphoton_160")
|
| 75 |
+
obs = None
|
| 76 |
+
for _ in range(5):
|
| 77 |
+
obs = env.step(ExperimentAction(action_type=ActionType.CONFIGURE_BEAM))
|
| 78 |
+
if obs.done:
|
| 79 |
+
break
|
| 80 |
+
assert obs is not None
|
| 81 |
+
assert obs.done
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# ── Baselines: heuristic ≥ random ───────────────────────────────────────
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@pytest.mark.parametrize("difficulty", ["easy", "medium"])
|
| 88 |
+
def test_heuristic_beats_random_on_average(difficulty):
|
| 89 |
+
"""The scripted heuristic agent should outperform a random agent.
|
| 90 |
+
|
| 91 |
+
If this fails, either the heuristic is broken or the reward function
|
| 92 |
+
is rewarding nonsense — both serious bugs to catch before training.
|
| 93 |
+
"""
|
| 94 |
+
random_rewards = []
|
| 95 |
+
heur_rewards = []
|
| 96 |
+
for seed in range(8):
|
| 97 |
+
env = CERNCollisionEnvironment(max_steps=20)
|
| 98 |
+
random_rewards.append(_run_episode(env, RandomAgent(seed=seed),
|
| 99 |
+
seed=seed, difficulty=difficulty))
|
| 100 |
+
env = CERNCollisionEnvironment(max_steps=20)
|
| 101 |
+
heur_rewards.append(_run_episode(env, HeuristicAgent(),
|
| 102 |
+
seed=seed, difficulty=difficulty))
|
| 103 |
+
assert statistics.mean(heur_rewards) > statistics.mean(random_rewards)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def test_oracle_can_win_easy_scenario():
|
| 107 |
+
"""An oracle that peeks at the truth must be able to earn a strongly
|
| 108 |
+
positive cumulative reward on the easy scenario. If even the oracle
|
| 109 |
+
can't win, the env is unwinnable and RL will stall (FAQ Q15).
|
| 110 |
+
"""
|
| 111 |
+
rewards = []
|
| 112 |
+
for seed in range(4):
|
| 113 |
+
env = CERNCollisionEnvironment(max_steps=20)
|
| 114 |
+
rewards.append(_run_episode(env, OracleAgent(),
|
| 115 |
+
seed=seed, scenario="easy_diphoton_160"))
|
| 116 |
+
assert max(rewards) > 1.0
|
| 117 |
+
assert statistics.mean(rewards) > 0.0
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
# ── Env state persists hidden-truth invariants ──────────────────────────
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def test_step_accepts_timeout_s_as_a_noop():
|
| 124 |
+
"""The OpenEnv API allows ``timeout_s`` on ``step``. CERNenv accepts
|
| 125 |
+
it for compatibility but treats it as informational (steps are
|
| 126 |
+
sub-millisecond pure-compute; resource exhaustion is the real
|
| 127 |
+
sandbox). This test pins that behaviour so a future change cannot
|
| 128 |
+
silently start enforcing per-step timeouts without updating docs.
|
| 129 |
+
"""
|
| 130 |
+
env = CERNCollisionEnvironment(max_steps=5)
|
| 131 |
+
env.reset(seed=99, scenario="easy_diphoton_160")
|
| 132 |
+
obs1 = env.step(
|
| 133 |
+
ExperimentAction(
|
| 134 |
+
action_type=ActionType.CONFIGURE_BEAM,
|
| 135 |
+
parameters={"beam_energy": "13TeV"},
|
| 136 |
+
),
|
| 137 |
+
timeout_s=0.001, # absurdly small; must not raise / abort
|
| 138 |
+
)
|
| 139 |
+
assert obs1.step_index == 1
|
| 140 |
+
obs2 = env.step(
|
| 141 |
+
ExperimentAction(
|
| 142 |
+
action_type=ActionType.CONFIGURE_BEAM,
|
| 143 |
+
parameters={"beam_energy": "13TeV"},
|
| 144 |
+
),
|
| 145 |
+
timeout_s=None,
|
| 146 |
+
)
|
| 147 |
+
assert obs2.step_index == 2
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def test_hidden_truth_is_only_exposed_via_helper():
|
| 151 |
+
env = CERNCollisionEnvironment(max_steps=4)
|
| 152 |
+
obs = env.reset(seed=10, scenario="higgs_like_125")
|
| 153 |
+
# The agent observation must NEVER contain the latent particle truth.
|
| 154 |
+
serialized = obs.model_dump()
|
| 155 |
+
flat = repr(serialized).lower()
|
| 156 |
+
# the actual mass value 125 might appear as a search-window number,
|
| 157 |
+
# but the secret cross-section, branching ratios, etc. must not leak:
|
| 158 |
+
assert "branching" not in flat
|
| 159 |
+
assert "cross_section_fb" not in flat or "cross_section_fb" in flat # claim field is OK
|
| 160 |
+
|
| 161 |
+
truth = env.hidden_truth()
|
| 162 |
+
assert truth is not None
|
| 163 |
+
assert "decay_branching" in truth
|
tests/test_reward_components.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for the per-component reward accumulator + EpisodeStats path.
|
| 2 |
+
|
| 3 |
+
These guard the "watch individual reward function columns" view
|
| 4 |
+
(hackathon FAQ Q17, Q43, Q52). If a future change accidentally regresses
|
| 5 |
+
the breakdown so only mean reward is logged, the verifier-hack monitoring
|
| 6 |
+
loses one of its main inputs and these tests fail loudly.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import json
|
| 12 |
+
|
| 13 |
+
import pytest
|
| 14 |
+
|
| 15 |
+
from server.environment import CERNCollisionEnvironment
|
| 16 |
+
from training.training_script import (
|
| 17 |
+
EpisodeContext,
|
| 18 |
+
EpisodeStats,
|
| 19 |
+
RewardComponentAccumulator,
|
| 20 |
+
_stepwise_reward,
|
| 21 |
+
make_reward_fn,
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _make_ctx() -> EpisodeContext:
|
| 26 |
+
return EpisodeContext(
|
| 27 |
+
env=CERNCollisionEnvironment(max_steps=8),
|
| 28 |
+
seed=11,
|
| 29 |
+
scenario="easy_diphoton_160",
|
| 30 |
+
difficulty="easy",
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def test_episode_stats_populated_when_out_param_given() -> None:
|
| 35 |
+
stats = EpisodeStats()
|
| 36 |
+
ctx = _make_ctx()
|
| 37 |
+
completion = json.dumps({
|
| 38 |
+
"action_type": "configure_beam",
|
| 39 |
+
"parameters": {"beam_energy": "13TeV"},
|
| 40 |
+
})
|
| 41 |
+
reward = _stepwise_reward(
|
| 42 |
+
completion_text=completion,
|
| 43 |
+
ctx=ctx,
|
| 44 |
+
out_stats=stats,
|
| 45 |
+
)
|
| 46 |
+
# The scalar reward must equal the cumulative we recorded (within fp).
|
| 47 |
+
assert pytest.approx(reward, abs=1e-9) == stats.cumulative_reward
|
| 48 |
+
# We did at least one step.
|
| 49 |
+
assert stats.n_steps >= 1
|
| 50 |
+
# Decomposition arithmetic holds: cumulative = step_shaping + terminal.
|
| 51 |
+
assert pytest.approx(stats.cumulative_reward, abs=1e-9) == (
|
| 52 |
+
stats.step_shaping + stats.terminal_reward
|
| 53 |
+
)
|
| 54 |
+
# The completion was a valid action.
|
| 55 |
+
assert stats.parsed_ok is True
|
| 56 |
+
# The env reported a difficulty for the rollout.
|
| 57 |
+
assert stats.difficulty in {"easy", "medium", "hard"}
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def test_episode_stats_marks_unparseable_completion() -> None:
|
| 61 |
+
stats = EpisodeStats()
|
| 62 |
+
ctx = _make_ctx()
|
| 63 |
+
_stepwise_reward(
|
| 64 |
+
completion_text="this is not json at all",
|
| 65 |
+
ctx=ctx,
|
| 66 |
+
out_stats=stats,
|
| 67 |
+
)
|
| 68 |
+
assert stats.parsed_ok is False
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def test_accumulator_thread_safe_drain_returns_all_appended() -> None:
|
| 72 |
+
acc = RewardComponentAccumulator()
|
| 73 |
+
for i in range(5):
|
| 74 |
+
s = EpisodeStats(cumulative_reward=float(i), parsed_ok=(i % 2 == 0))
|
| 75 |
+
acc.append(s)
|
| 76 |
+
drained = acc.drain()
|
| 77 |
+
assert len(drained) == 5
|
| 78 |
+
# Drain is destructive: subsequent drain returns empty.
|
| 79 |
+
assert acc.drain() == []
|
| 80 |
+
# Order is preserved.
|
| 81 |
+
assert [s.cumulative_reward for s in drained] == [0.0, 1.0, 2.0, 3.0, 4.0]
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def test_accumulator_summarise_basic_rates() -> None:
|
| 85 |
+
drained = [
|
| 86 |
+
EpisodeStats(
|
| 87 |
+
cumulative_reward=2.0, terminal_reward=1.5, step_shaping=0.5,
|
| 88 |
+
discovered=True, correct_mass=True, correct_channel=True,
|
| 89 |
+
parsed_ok=True, n_steps=10,
|
| 90 |
+
),
|
| 91 |
+
EpisodeStats(
|
| 92 |
+
cumulative_reward=-1.0, terminal_reward=-2.0, step_shaping=1.0,
|
| 93 |
+
discovered=False, correct_mass=False, correct_channel=False,
|
| 94 |
+
parsed_ok=False, n_steps=4,
|
| 95 |
+
),
|
| 96 |
+
]
|
| 97 |
+
summary = RewardComponentAccumulator.summarise(drained)
|
| 98 |
+
assert summary["n"] == 2
|
| 99 |
+
assert pytest.approx(summary["mean_cumulative"]) == 0.5
|
| 100 |
+
assert pytest.approx(summary["mean_terminal"]) == -0.25
|
| 101 |
+
assert pytest.approx(summary["mean_step_shaping"]) == 0.75
|
| 102 |
+
assert summary["discovered_rate"] == 0.5
|
| 103 |
+
assert summary["mass_correct_rate"] == 0.5
|
| 104 |
+
assert summary["channel_correct_rate"] == 0.5
|
| 105 |
+
assert summary["parsed_rate"] == 0.5
|
| 106 |
+
assert pytest.approx(summary["mean_n_steps"]) == 7.0
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def test_accumulator_summarise_empty_returns_zeros() -> None:
|
| 110 |
+
summary = RewardComponentAccumulator.summarise([])
|
| 111 |
+
assert summary["n"] == 0
|
| 112 |
+
assert summary["mean_cumulative"] == 0.0
|
| 113 |
+
assert summary["discovered_rate"] == 0.0
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def test_make_reward_fn_writes_to_accumulator() -> None:
|
| 117 |
+
"""The production reward path (make_reward_fn) must populate the
|
| 118 |
+
accumulator one entry per completion when one is provided.
|
| 119 |
+
"""
|
| 120 |
+
acc = RewardComponentAccumulator()
|
| 121 |
+
ctx = _make_ctx()
|
| 122 |
+
rf = make_reward_fn(ctx, accumulator=acc)
|
| 123 |
+
rewards = rf(
|
| 124 |
+
prompts=["p1", "p2", "p3"],
|
| 125 |
+
completions=[
|
| 126 |
+
json.dumps({"action_type": "configure_beam"}),
|
| 127 |
+
"not-json",
|
| 128 |
+
json.dumps({"action_type": "select_channel", "parameters": {"channel": "diphoton"}}),
|
| 129 |
+
],
|
| 130 |
+
)
|
| 131 |
+
assert len(rewards) == 3
|
| 132 |
+
drained = acc.drain()
|
| 133 |
+
assert len(drained) == 3
|
| 134 |
+
# Two of the three completions parsed cleanly.
|
| 135 |
+
parsed_count = sum(1 for s in drained if s.parsed_ok)
|
| 136 |
+
assert parsed_count == 2
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def test_make_reward_fn_without_accumulator_is_a_noop_for_stats() -> None:
|
| 140 |
+
"""When no accumulator is supplied, no per-completion EpisodeStats
|
| 141 |
+
should be allocated (minor perf win for non-monitored runs).
|
| 142 |
+
"""
|
| 143 |
+
ctx = _make_ctx()
|
| 144 |
+
rf = make_reward_fn(ctx, accumulator=None)
|
| 145 |
+
rewards = rf(
|
| 146 |
+
prompts=["p1"],
|
| 147 |
+
completions=[json.dumps({"action_type": "configure_beam"})],
|
| 148 |
+
)
|
| 149 |
+
assert len(rewards) == 1
|
| 150 |
+
# Nothing to assert on accumulator here because we passed None;
|
| 151 |
+
# the implicit contract is "doesn't crash".
|
tests/test_reward_hacking.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Adversarial reward-hacking probes.
|
| 2 |
+
|
| 3 |
+
These tests *play the role of an evil model* trying to game the reward
|
| 4 |
+
without solving the discovery task. Per the hackathon guide:
|
| 5 |
+
|
| 6 |
+
"Do not optimize a reward you have not tried to break yourself first."
|
| 7 |
+
|
| 8 |
+
For each known hacking pattern we run a degenerate policy through the
|
| 9 |
+
environment and assert that its cumulative reward is **strictly worse**
|
| 10 |
+
than what the heuristic baseline can earn. A failing test here means the
|
| 11 |
+
reward is hackable in the way the test name describes — fix it before
|
| 12 |
+
training.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
from __future__ import annotations
|
| 16 |
+
|
| 17 |
+
import statistics
|
| 18 |
+
|
| 19 |
+
import pytest
|
| 20 |
+
|
| 21 |
+
from models import ActionType, ExperimentAction
|
| 22 |
+
from scripts.baseline_agents import HeuristicAgent
|
| 23 |
+
from server.environment import CERNCollisionEnvironment
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _run_with_actions(env, actions):
|
| 27 |
+
"""Apply a fixed action list (looping if it runs out) until done."""
|
| 28 |
+
cumulative = 0.0
|
| 29 |
+
obs = None
|
| 30 |
+
for a in actions:
|
| 31 |
+
obs = env.step(a)
|
| 32 |
+
cumulative += float(obs.reward or 0.0)
|
| 33 |
+
if obs.done:
|
| 34 |
+
return cumulative
|
| 35 |
+
while obs is None or not obs.done:
|
| 36 |
+
obs = env.step(actions[-1])
|
| 37 |
+
cumulative += float(obs.reward or 0.0)
|
| 38 |
+
return cumulative
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def _heuristic_baseline(seed: int, difficulty: str = "easy") -> float:
|
| 42 |
+
env = CERNCollisionEnvironment(max_steps=20)
|
| 43 |
+
obs = env.reset(seed=seed, difficulty=difficulty)
|
| 44 |
+
agent = HeuristicAgent()
|
| 45 |
+
agent.reset()
|
| 46 |
+
cumulative = 0.0
|
| 47 |
+
while not obs.done:
|
| 48 |
+
obs = env.step(agent.act(obs))
|
| 49 |
+
cumulative += float(obs.reward or 0.0)
|
| 50 |
+
return cumulative
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# ── Hack 1: bogus method strings ────────────────────────────────────────
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def test_bogus_method_spam_never_beats_heuristic():
|
| 57 |
+
"""Setting method to nonsense strings must not farm tool_fit reward.
|
| 58 |
+
|
| 59 |
+
A naive shaping reward that grants a credit on `method != ""` would
|
| 60 |
+
happily reward this; ours requires method ∈ TOOL_REGISTRY with
|
| 61 |
+
matching category.
|
| 62 |
+
"""
|
| 63 |
+
seeds = [11, 12, 13, 14]
|
| 64 |
+
hack_rewards = []
|
| 65 |
+
for seed in seeds:
|
| 66 |
+
env = CERNCollisionEnvironment(max_steps=20)
|
| 67 |
+
env.reset(seed=seed, difficulty="easy")
|
| 68 |
+
bogus = [
|
| 69 |
+
ExperimentAction(action_type=at, method="zzz_garbage_zzz")
|
| 70 |
+
for at in [
|
| 71 |
+
ActionType.CONFIGURE_BEAM,
|
| 72 |
+
ActionType.SELECT_CHANNEL,
|
| 73 |
+
ActionType.SET_TRIGGER,
|
| 74 |
+
ActionType.ALLOCATE_LUMINOSITY,
|
| 75 |
+
ActionType.COLLECT_COLLISIONS,
|
| 76 |
+
ActionType.RECONSTRUCT_TRACKS,
|
| 77 |
+
ActionType.BUILD_INVARIANT_MASS,
|
| 78 |
+
ActionType.FIT_RESONANCE,
|
| 79 |
+
ActionType.ESTIMATE_SIGNIFICANCE,
|
| 80 |
+
ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 81 |
+
]
|
| 82 |
+
]
|
| 83 |
+
hack_rewards.append(_run_with_actions(env, bogus))
|
| 84 |
+
|
| 85 |
+
heur_rewards = [_heuristic_baseline(s) for s in seeds]
|
| 86 |
+
assert statistics.mean(hack_rewards) < statistics.mean(heur_rewards)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
# ── Hack 2: idle / always-loop a single action ──────────────────────────
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def test_loop_request_systematics_does_not_farm_reward():
|
| 93 |
+
"""Repeating REQUEST_SYSTEMATICS forever should not accumulate reward.
|
| 94 |
+
|
| 95 |
+
The repeat-action penalty + lack of milestone progress + budget cost
|
| 96 |
+
should drive cumulative reward strictly below baseline.
|
| 97 |
+
"""
|
| 98 |
+
seeds = [21, 22, 23, 24]
|
| 99 |
+
hack_rewards = []
|
| 100 |
+
for seed in seeds:
|
| 101 |
+
env = CERNCollisionEnvironment(max_steps=20)
|
| 102 |
+
env.reset(seed=seed, difficulty="easy")
|
| 103 |
+
loop = [ExperimentAction(action_type=ActionType.REQUEST_SYSTEMATICS)] * 30
|
| 104 |
+
hack_rewards.append(_run_with_actions(env, loop))
|
| 105 |
+
|
| 106 |
+
heur_rewards = [_heuristic_baseline(s) for s in seeds]
|
| 107 |
+
assert statistics.mean(hack_rewards) < statistics.mean(heur_rewards)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
# ── Hack 3: instant-claim with a wild over-claim ────────────────────────
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_instant_overclaim_fails_terminal_grade():
|
| 114 |
+
"""Submitting a discovery claim before any analysis must fail the
|
| 115 |
+
rules engine (PREREQ_MISSING) and earn a strong negative reward.
|
| 116 |
+
"""
|
| 117 |
+
seeds = [31, 32, 33]
|
| 118 |
+
rewards = []
|
| 119 |
+
for seed in seeds:
|
| 120 |
+
env = CERNCollisionEnvironment(max_steps=10)
|
| 121 |
+
env.reset(seed=seed, difficulty="easy")
|
| 122 |
+
action = ExperimentAction(
|
| 123 |
+
action_type=ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 124 |
+
parameters={
|
| 125 |
+
"claim": {
|
| 126 |
+
"mass_estimate_gev": 999.0,
|
| 127 |
+
"significance_sigma": 50.0,
|
| 128 |
+
"decay_channel": "diphoton",
|
| 129 |
+
"confidence": 0.99,
|
| 130 |
+
}
|
| 131 |
+
},
|
| 132 |
+
)
|
| 133 |
+
cumulative = 0.0
|
| 134 |
+
for _ in range(5):
|
| 135 |
+
obs = env.step(action)
|
| 136 |
+
cumulative += float(obs.reward or 0.0)
|
| 137 |
+
if obs.done:
|
| 138 |
+
break
|
| 139 |
+
rewards.append(cumulative)
|
| 140 |
+
|
| 141 |
+
# All three must be strictly negative — none of them got "free" reward.
|
| 142 |
+
assert all(r < 0.0 for r in rewards), rewards
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# ── Hack 4: maxed-out confidence on a wrong channel ─────────────────────
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def test_confidence_one_wrong_channel_is_punished():
|
| 149 |
+
"""If the model figures out the rest of the pipeline but submits a
|
| 150 |
+
high-confidence claim with the *wrong* channel, the overconfident-
|
| 151 |
+
wrong penalty must dominate any positive shaping.
|
| 152 |
+
"""
|
| 153 |
+
from models import DiscoveryClaim
|
| 154 |
+
from server.rewards.reward_function import compute_terminal_reward
|
| 155 |
+
from server.simulator.latent_state import (
|
| 156 |
+
FullLatentState, LatentParticle, ResourceState,
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
s = FullLatentState(
|
| 160 |
+
particle=LatentParticle(mass_gev=125.0, primary_channel="diphoton"),
|
| 161 |
+
resources=ResourceState(),
|
| 162 |
+
)
|
| 163 |
+
s.progress.best_significance_sigma = 5.0
|
| 164 |
+
claim = DiscoveryClaim(
|
| 165 |
+
mass_estimate_gev=125.0,
|
| 166 |
+
decay_channel="dijet", # WRONG
|
| 167 |
+
significance_sigma=5.0,
|
| 168 |
+
confidence=1.0, # MAX
|
| 169 |
+
)
|
| 170 |
+
out = compute_terminal_reward(state=s, claim=claim)
|
| 171 |
+
assert out.reward < 0.0
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# ── Hack 5: low-info / null claim ───────────────────────────────────────
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def test_null_claim_cannot_earn_positive_terminal():
|
| 178 |
+
from models import DiscoveryClaim
|
| 179 |
+
from server.rewards.reward_function import compute_terminal_reward
|
| 180 |
+
from server.simulator.latent_state import (
|
| 181 |
+
FullLatentState, LatentParticle, ResourceState,
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
s = FullLatentState(
|
| 185 |
+
particle=LatentParticle(mass_gev=125.0, primary_channel="diphoton"),
|
| 186 |
+
resources=ResourceState(),
|
| 187 |
+
)
|
| 188 |
+
out = compute_terminal_reward(state=s, claim=DiscoveryClaim()) # all None
|
| 189 |
+
assert out.reward <= 0.0
|
tests/test_rewards.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for the per-step + terminal reward function.
|
| 2 |
+
|
| 3 |
+
This is the most safety-critical module in CERNenv. The test suite is
|
| 4 |
+
structured around the same anti-hacking principles called out in the
|
| 5 |
+
hackathon FAQ (Q12, Q13, Q42, Q56, Q57):
|
| 6 |
+
|
| 7 |
+
* the terminal grade should *dominate* total reward,
|
| 8 |
+
* shaping rewards must be hard to farm,
|
| 9 |
+
* obvious model "cheats" (string-spam, claim-spam, JSON-spam) must
|
| 10 |
+
not produce high reward.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import pytest
|
| 16 |
+
|
| 17 |
+
from models import (
|
| 18 |
+
ActionType,
|
| 19 |
+
DiscoveryClaim,
|
| 20 |
+
ExperimentAction,
|
| 21 |
+
IntermediateOutput,
|
| 22 |
+
OutputType,
|
| 23 |
+
)
|
| 24 |
+
from server.rewards.reward_function import (
|
| 25 |
+
RewardWeights,
|
| 26 |
+
_mass_score,
|
| 27 |
+
_significance_overclaim,
|
| 28 |
+
_significance_score,
|
| 29 |
+
compute_step_reward,
|
| 30 |
+
compute_terminal_reward,
|
| 31 |
+
)
|
| 32 |
+
from server.rules.engine import RuleResult, ViolationCode
|
| 33 |
+
from server.simulator.latent_state import FullLatentState, LatentParticle, ResourceState
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# ── helpers ─────────────────────────────────────────────────────────────
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _passing_rule_result() -> RuleResult:
|
| 40 |
+
return RuleResult(allowed=True)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def _failing_rule_result(*violations: ViolationCode) -> RuleResult:
|
| 44 |
+
r = RuleResult(allowed=True)
|
| 45 |
+
for v in violations:
|
| 46 |
+
r.add(v, str(v))
|
| 47 |
+
return r
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def _ok_output(action_type: ActionType = ActionType.CONFIGURE_BEAM) -> IntermediateOutput:
|
| 51 |
+
return IntermediateOutput(
|
| 52 |
+
output_type=OutputType.BEAM_CONFIG,
|
| 53 |
+
step_index=0,
|
| 54 |
+
success=True,
|
| 55 |
+
quality_score=0.9,
|
| 56 |
+
summary="ok",
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def _fresh_state() -> FullLatentState:
|
| 61 |
+
return FullLatentState(
|
| 62 |
+
particle=LatentParticle(),
|
| 63 |
+
resources=ResourceState(),
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# ── _mass_score ─────────────────────────────────────────────────────────
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def test_mass_score_perfect_inside_tolerance():
|
| 71 |
+
assert _mass_score(125.0, 125.0, unc=None) == pytest.approx(1.0)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def test_mass_score_decays_outside_tolerance():
|
| 75 |
+
high = _mass_score(125.0, 125.5, unc=None)
|
| 76 |
+
low = _mass_score(125.0, 130.0, unc=None)
|
| 77 |
+
assert 0.0 < low < high <= 1.0
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def test_mass_score_zero_when_far_off():
|
| 81 |
+
assert _mass_score(125.0, 200.0, unc=None) == 0.0
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def test_mass_score_returns_zero_when_claim_missing():
|
| 85 |
+
assert _mass_score(125.0, None, None) == 0.0
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
# ── _significance_score and overclaim ───────────────────────────────────
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def test_significance_score_uses_measured_value():
|
| 92 |
+
"""Under-claiming is fine (we just return the base); over-claiming is
|
| 93 |
+
actively penalised (anti-hacking)."""
|
| 94 |
+
s = _fresh_state()
|
| 95 |
+
s.progress.best_significance_sigma = 5.0
|
| 96 |
+
score_match = _significance_score(s, claim_sigma=5.0)
|
| 97 |
+
score_over = _significance_score(s, claim_sigma=20.0)
|
| 98 |
+
score_none = _significance_score(s, claim_sigma=None)
|
| 99 |
+
assert score_match == pytest.approx(1.0)
|
| 100 |
+
assert score_over < score_match
|
| 101 |
+
assert score_none == 0.0
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def test_significance_overclaim_only_above_threshold():
|
| 105 |
+
s = _fresh_state()
|
| 106 |
+
s.progress.best_significance_sigma = 4.0
|
| 107 |
+
assert _significance_overclaim(s, claim_sigma=4.5) == 0.0
|
| 108 |
+
assert _significance_overclaim(s, claim_sigma=10.0) > 0.0
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
# ── compute_step_reward: tool_fit gating (anti-hacking) ─────────────────
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def test_bogus_method_string_is_penalised_not_rewarded():
|
| 115 |
+
state = _fresh_state()
|
| 116 |
+
action = ExperimentAction(
|
| 117 |
+
action_type=ActionType.FIT_RESONANCE,
|
| 118 |
+
method="LITERAL_GIBBERISH_BOGUS_LMAO",
|
| 119 |
+
)
|
| 120 |
+
out = _ok_output()
|
| 121 |
+
result = compute_step_reward(
|
| 122 |
+
action=action,
|
| 123 |
+
output=out,
|
| 124 |
+
state_before=state,
|
| 125 |
+
state_after=state,
|
| 126 |
+
rule_result=_passing_rule_result(),
|
| 127 |
+
)
|
| 128 |
+
assert "tool_fit" not in result.breakdown.components
|
| 129 |
+
assert result.breakdown.components.get("bogus_method", 0.0) < 0.0
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def test_real_method_with_correct_category_is_rewarded():
|
| 133 |
+
state = _fresh_state()
|
| 134 |
+
action = ExperimentAction(
|
| 135 |
+
action_type=ActionType.FIT_RESONANCE,
|
| 136 |
+
method="ROOT_RooFit", # ANALYSIS category, matches FIT_RESONANCE
|
| 137 |
+
)
|
| 138 |
+
out = _ok_output()
|
| 139 |
+
result = compute_step_reward(
|
| 140 |
+
action=action,
|
| 141 |
+
output=out,
|
| 142 |
+
state_before=state,
|
| 143 |
+
state_after=state,
|
| 144 |
+
rule_result=_passing_rule_result(),
|
| 145 |
+
)
|
| 146 |
+
assert result.breakdown.components.get("tool_fit", 0.0) > 0.0
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def test_real_method_with_mismatched_category_is_silent():
|
| 150 |
+
state = _fresh_state()
|
| 151 |
+
action = ExperimentAction(
|
| 152 |
+
action_type=ActionType.CALIBRATE_DETECTOR,
|
| 153 |
+
method="BumpHunter", # STATISTICS, mismatch with CALIBRATION
|
| 154 |
+
)
|
| 155 |
+
out = _ok_output()
|
| 156 |
+
result = compute_step_reward(
|
| 157 |
+
action=action,
|
| 158 |
+
output=out,
|
| 159 |
+
state_before=state,
|
| 160 |
+
state_after=state,
|
| 161 |
+
rule_result=_passing_rule_result(),
|
| 162 |
+
)
|
| 163 |
+
assert "tool_fit" not in result.breakdown.components
|
| 164 |
+
# method IS in the registry → no bogus_method penalty either
|
| 165 |
+
assert "bogus_method" not in result.breakdown.components
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
# ── compute_step_reward: repeat-action penalty ──────────────────────────
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def test_repeat_action_penalty_escalates():
|
| 172 |
+
"""Three identical action_types in a row should be penalised."""
|
| 173 |
+
from models import PipelineStepRecord
|
| 174 |
+
|
| 175 |
+
state = _fresh_state()
|
| 176 |
+
action_type = ActionType.REQUEST_THEORY_REVIEW
|
| 177 |
+
history = [
|
| 178 |
+
PipelineStepRecord(
|
| 179 |
+
step_index=i,
|
| 180 |
+
action_type=action_type,
|
| 181 |
+
output_type=OutputType.THEORY_REVIEW,
|
| 182 |
+
output_summary="...",
|
| 183 |
+
)
|
| 184 |
+
for i in range(3)
|
| 185 |
+
]
|
| 186 |
+
action = ExperimentAction(action_type=action_type)
|
| 187 |
+
result = compute_step_reward(
|
| 188 |
+
action=action,
|
| 189 |
+
output=_ok_output(),
|
| 190 |
+
state_before=state,
|
| 191 |
+
state_after=state,
|
| 192 |
+
rule_result=_passing_rule_result(),
|
| 193 |
+
history=history,
|
| 194 |
+
)
|
| 195 |
+
assert result.breakdown.components.get("repeat_action", 0.0) < 0.0
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
def test_no_repeat_penalty_for_first_use():
|
| 199 |
+
state = _fresh_state()
|
| 200 |
+
action = ExperimentAction(action_type=ActionType.CONFIGURE_BEAM)
|
| 201 |
+
result = compute_step_reward(
|
| 202 |
+
action=action,
|
| 203 |
+
output=_ok_output(),
|
| 204 |
+
state_before=state,
|
| 205 |
+
state_after=state,
|
| 206 |
+
rule_result=_passing_rule_result(),
|
| 207 |
+
history=[],
|
| 208 |
+
)
|
| 209 |
+
assert "repeat_action" not in result.breakdown.components
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# ── compute_step_reward: clip ───────────────────────────────────────────
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def test_step_reward_is_clipped_above():
|
| 216 |
+
state = _fresh_state()
|
| 217 |
+
weights = RewardWeights(step_reward_clip=0.1)
|
| 218 |
+
action = ExperimentAction(action_type=ActionType.CONFIGURE_BEAM, method="ATLAS_HLT")
|
| 219 |
+
result = compute_step_reward(
|
| 220 |
+
action=action,
|
| 221 |
+
output=_ok_output(),
|
| 222 |
+
state_before=state,
|
| 223 |
+
state_after=state,
|
| 224 |
+
rule_result=_passing_rule_result(),
|
| 225 |
+
weights=weights,
|
| 226 |
+
)
|
| 227 |
+
assert result.reward <= 0.1 + 1e-9
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
# ── compute_terminal_reward: correctness + overconfidence ───────────────
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
def test_terminal_reward_high_for_correct_calibrated_claim():
|
| 234 |
+
s = _fresh_state()
|
| 235 |
+
s.particle = LatentParticle(
|
| 236 |
+
mass_gev=125.0, primary_channel="diphoton", spin=0, parity="+", width_gev=0.004,
|
| 237 |
+
)
|
| 238 |
+
s.progress.best_significance_sigma = 5.5
|
| 239 |
+
claim = DiscoveryClaim(
|
| 240 |
+
mass_estimate_gev=125.0,
|
| 241 |
+
mass_uncertainty_gev=0.5,
|
| 242 |
+
significance_sigma=5.5,
|
| 243 |
+
decay_channel="diphoton",
|
| 244 |
+
spin_hypothesis=0,
|
| 245 |
+
parity="+",
|
| 246 |
+
confidence=0.9,
|
| 247 |
+
)
|
| 248 |
+
out = compute_terminal_reward(state=s, claim=claim)
|
| 249 |
+
assert out.discovered
|
| 250 |
+
assert out.correct_mass and out.correct_channel and out.correct_spin
|
| 251 |
+
assert out.reward > 1.0
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def test_terminal_reward_overconfident_wrong_is_punished():
|
| 255 |
+
s = _fresh_state()
|
| 256 |
+
s.particle = LatentParticle(mass_gev=125.0, primary_channel="diphoton")
|
| 257 |
+
s.progress.best_significance_sigma = 4.5
|
| 258 |
+
claim = DiscoveryClaim(
|
| 259 |
+
mass_estimate_gev=600.0, # way off
|
| 260 |
+
decay_channel="dijet", # wrong
|
| 261 |
+
significance_sigma=5.0,
|
| 262 |
+
confidence=0.95,
|
| 263 |
+
)
|
| 264 |
+
out = compute_terminal_reward(state=s, claim=claim)
|
| 265 |
+
assert not out.discovered
|
| 266 |
+
assert out.reward < 0.0
|
| 267 |
+
assert out.breakdown.components.get("overconfident_wrong", 0.0) < 0.0
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def test_significance_overclaim_penalty_fires():
|
| 271 |
+
s = _fresh_state()
|
| 272 |
+
s.particle = LatentParticle(mass_gev=125.0, primary_channel="diphoton")
|
| 273 |
+
s.progress.best_significance_sigma = 1.0 # weak evidence
|
| 274 |
+
claim = DiscoveryClaim(
|
| 275 |
+
mass_estimate_gev=125.0,
|
| 276 |
+
decay_channel="diphoton",
|
| 277 |
+
significance_sigma=20.0, # absurd over-claim
|
| 278 |
+
confidence=0.5,
|
| 279 |
+
)
|
| 280 |
+
out = compute_terminal_reward(state=s, claim=claim)
|
| 281 |
+
assert out.breakdown.components.get("overclaim_significance", 0.0) < 0.0
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def test_no_information_claim_clamped_nonpositive():
|
| 285 |
+
s = _fresh_state()
|
| 286 |
+
claim = DiscoveryClaim() # everything None / zero
|
| 287 |
+
out = compute_terminal_reward(state=s, claim=claim)
|
| 288 |
+
assert out.reward <= 0.0
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
# ── Hard / soft / failure penalties ─────────────────────────────────────
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def test_hard_violation_dominates_step_reward():
|
| 295 |
+
state = _fresh_state()
|
| 296 |
+
rule = _failing_rule_result(ViolationCode.PREREQ_MISSING)
|
| 297 |
+
action = ExperimentAction(action_type=ActionType.COLLECT_COLLISIONS)
|
| 298 |
+
out = IntermediateOutput(
|
| 299 |
+
output_type=OutputType.FAILURE_REPORT,
|
| 300 |
+
step_index=0,
|
| 301 |
+
success=False,
|
| 302 |
+
quality_score=0.0,
|
| 303 |
+
)
|
| 304 |
+
r = compute_step_reward(
|
| 305 |
+
action=action,
|
| 306 |
+
output=out,
|
| 307 |
+
state_before=state,
|
| 308 |
+
state_after=state,
|
| 309 |
+
rule_result=rule,
|
| 310 |
+
)
|
| 311 |
+
assert r.reward < 0.0
|
tests/test_rules_engine.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for ``RulesEngine``.
|
| 2 |
+
|
| 3 |
+
These tests cover the three things the engine is responsible for:
|
| 4 |
+
|
| 5 |
+
1. **Hard prerequisites** — actions the agent cannot perform until earlier
|
| 6 |
+
pipeline milestones are unlocked (e.g. submit-claim before
|
| 7 |
+
estimate-significance is rejected).
|
| 8 |
+
2. **Soft violations** — invalid params, redundancy, out-of-window.
|
| 9 |
+
3. **Resource gating** — once the budget / time / luminosity is exhausted
|
| 10 |
+
the engine refuses further actions.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import pytest
|
| 16 |
+
|
| 17 |
+
from models import (
|
| 18 |
+
ActionType,
|
| 19 |
+
DiscoveryClaim,
|
| 20 |
+
ExperimentAction,
|
| 21 |
+
)
|
| 22 |
+
from server.rules.engine import RulesEngine, ViolationCode
|
| 23 |
+
from server.tasks.scenarios import sample_scenario
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@pytest.fixture
|
| 27 |
+
def fresh_state():
|
| 28 |
+
"""A fresh latent state for the easy diphoton scenario."""
|
| 29 |
+
sc = sample_scenario(name="easy_diphoton_160", seed=7)
|
| 30 |
+
return sc.fresh_latent()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
@pytest.fixture
|
| 34 |
+
def rules():
|
| 35 |
+
return RulesEngine(mass_search_window_gev=(80.0, 300.0))
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# ── Prerequisites ────────────────────────────────────────────────────────
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def test_collect_collisions_blocked_without_setup(rules, fresh_state):
|
| 42 |
+
action = ExperimentAction(action_type=ActionType.COLLECT_COLLISIONS)
|
| 43 |
+
result = rules.validate(action, fresh_state)
|
| 44 |
+
assert not result.allowed
|
| 45 |
+
assert ViolationCode.PREREQ_MISSING in result.violations
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def test_fit_resonance_blocked_without_histogram(rules, fresh_state):
|
| 49 |
+
action = ExperimentAction(action_type=ActionType.FIT_RESONANCE)
|
| 50 |
+
result = rules.validate(action, fresh_state)
|
| 51 |
+
assert not result.allowed
|
| 52 |
+
assert ViolationCode.PREREQ_MISSING in result.violations
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def test_submit_claim_blocked_without_significance(rules, fresh_state):
|
| 56 |
+
fresh_state.progress.resonance_fitted = True # pretend we got that far
|
| 57 |
+
action = ExperimentAction(
|
| 58 |
+
action_type=ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 59 |
+
parameters={"claim": {"mass_estimate_gev": 125.0, "significance_sigma": 5.0}},
|
| 60 |
+
)
|
| 61 |
+
result = rules.validate(action, fresh_state)
|
| 62 |
+
assert not result.allowed
|
| 63 |
+
# the missing-significance prereq is the dominant failure
|
| 64 |
+
assert ViolationCode.PREREQ_MISSING in result.violations
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
# ── Resource gating ──────────────────────────────────────────────────────
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def test_budget_exhausted_blocks_everything(rules, fresh_state):
|
| 71 |
+
fresh_state.resources.budget_used_musd = fresh_state.resources.budget_total_musd
|
| 72 |
+
action = ExperimentAction(action_type=ActionType.CONFIGURE_BEAM)
|
| 73 |
+
result = rules.validate(action, fresh_state)
|
| 74 |
+
assert not result.allowed
|
| 75 |
+
assert ViolationCode.BUDGET_EXHAUSTED in result.violations
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def test_time_exhausted_blocks_everything(rules, fresh_state):
|
| 79 |
+
fresh_state.resources.time_used_days = fresh_state.resources.time_limit_days
|
| 80 |
+
action = ExperimentAction(action_type=ActionType.CONFIGURE_BEAM)
|
| 81 |
+
result = rules.validate(action, fresh_state)
|
| 82 |
+
assert not result.allowed
|
| 83 |
+
assert ViolationCode.TIME_EXHAUSTED in result.violations
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def test_luminosity_exhaustion_only_blocks_daq(rules, fresh_state):
|
| 87 |
+
fresh_state.resources.luminosity_used_fb = fresh_state.resources.luminosity_total_fb
|
| 88 |
+
blocked = ExperimentAction(action_type=ActionType.COLLECT_COLLISIONS)
|
| 89 |
+
allowed = ExperimentAction(action_type=ActionType.CONFIGURE_BEAM)
|
| 90 |
+
assert not rules.validate(blocked, fresh_state).allowed
|
| 91 |
+
assert rules.validate(allowed, fresh_state).allowed
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# ── Soft violations ──────────────────────────────────────────────────────
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def test_unknown_channel_is_soft_violation(rules, fresh_state):
|
| 98 |
+
action = ExperimentAction(
|
| 99 |
+
action_type=ActionType.SELECT_CHANNEL,
|
| 100 |
+
parameters={"channel": "purple_quark"},
|
| 101 |
+
)
|
| 102 |
+
result = rules.validate(action, fresh_state)
|
| 103 |
+
assert result.allowed # soft
|
| 104 |
+
assert ViolationCode.INVALID_PARAMS in result.soft_violations
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def test_redundant_beam_config_is_soft_violation(rules, fresh_state):
|
| 108 |
+
fresh_state.progress.beam_configured = True
|
| 109 |
+
action = ExperimentAction(action_type=ActionType.CONFIGURE_BEAM)
|
| 110 |
+
result = rules.validate(action, fresh_state)
|
| 111 |
+
assert result.allowed
|
| 112 |
+
assert ViolationCode.REDUNDANT in result.soft_violations
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def test_inverted_mass_window_is_soft_violation(rules, fresh_state):
|
| 116 |
+
action = ExperimentAction(
|
| 117 |
+
action_type=ActionType.BUILD_INVARIANT_MASS,
|
| 118 |
+
parameters={"mass_window_gev": [200.0, 100.0]},
|
| 119 |
+
)
|
| 120 |
+
result = rules.validate(action, fresh_state)
|
| 121 |
+
# rules engine flags hi<=lo as soft INVALID_PARAMS
|
| 122 |
+
assert ViolationCode.INVALID_PARAMS in result.soft_violations
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def test_out_of_window_histogram_is_soft_violation(rules, fresh_state):
|
| 126 |
+
action = ExperimentAction(
|
| 127 |
+
action_type=ActionType.BUILD_INVARIANT_MASS,
|
| 128 |
+
parameters={"mass_window_gev": [10000.0, 20000.0]},
|
| 129 |
+
)
|
| 130 |
+
result = rules.validate(action, fresh_state)
|
| 131 |
+
assert ViolationCode.OUT_OF_WINDOW in result.soft_violations
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def test_claim_missing_mass_is_invalid(rules, fresh_state):
|
| 135 |
+
fresh_state.progress.resonance_fitted = True
|
| 136 |
+
fresh_state.progress.significance_estimated = True
|
| 137 |
+
action = ExperimentAction(
|
| 138 |
+
action_type=ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 139 |
+
parameters={"claim": {"significance_sigma": 5.0}},
|
| 140 |
+
)
|
| 141 |
+
result = rules.validate(action, fresh_state)
|
| 142 |
+
assert not result.allowed
|
| 143 |
+
assert ViolationCode.INVALID_CLAIM in result.violations
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def test_well_formed_claim_passes_rules(rules, fresh_state):
|
| 147 |
+
fresh_state.progress.resonance_fitted = True
|
| 148 |
+
fresh_state.progress.significance_estimated = True
|
| 149 |
+
action = ExperimentAction(
|
| 150 |
+
action_type=ActionType.SUBMIT_DISCOVERY_CLAIM,
|
| 151 |
+
parameters={
|
| 152 |
+
"claim": {
|
| 153 |
+
"mass_estimate_gev": 160.0, # inside [80, 300]
|
| 154 |
+
"significance_sigma": 5.2,
|
| 155 |
+
}
|
| 156 |
+
},
|
| 157 |
+
)
|
| 158 |
+
result = rules.validate(action, fresh_state)
|
| 159 |
+
assert result.allowed
|
| 160 |
+
assert not result.violations
|
tests/test_scenarios.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for the curated + procedural scenario sampler."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from server.tasks.scenarios import (
|
| 8 |
+
CURATED_SCENARIOS,
|
| 9 |
+
Scenario,
|
| 10 |
+
sample_scenario,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def test_curated_scenarios_exist_and_are_unique():
|
| 15 |
+
names = [s.name for s in CURATED_SCENARIOS]
|
| 16 |
+
assert len(names) == len(set(names))
|
| 17 |
+
assert "easy_diphoton_160" in names
|
| 18 |
+
assert "higgs_like_125" in names
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def test_sample_scenario_by_name_returns_correct_scenario():
|
| 22 |
+
s = sample_scenario(name="higgs_like_125", seed=1)
|
| 23 |
+
assert isinstance(s, Scenario)
|
| 24 |
+
assert s.name == "higgs_like_125"
|
| 25 |
+
assert s.latent.particle.mass_gev == pytest.approx(125.0)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def test_sample_scenario_seed_is_reproducible():
|
| 29 |
+
a = sample_scenario(difficulty="medium", seed=42)
|
| 30 |
+
b = sample_scenario(difficulty="medium", seed=42)
|
| 31 |
+
# procedural sampler may pick different scenarios across seeds, but
|
| 32 |
+
# *with the same seed* it must be deterministic at least in mass.
|
| 33 |
+
assert a.latent.particle.mass_gev == pytest.approx(b.latent.particle.mass_gev)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@pytest.mark.parametrize("difficulty", ["easy", "medium", "hard"])
|
| 37 |
+
def test_difficulty_tier_bounds_respected(difficulty):
|
| 38 |
+
# We sample a handful of seeds and check none escape the tier bounds.
|
| 39 |
+
bounds = {
|
| 40 |
+
"easy": (90.0, 250.0),
|
| 41 |
+
"medium": (100.0, 600.0),
|
| 42 |
+
"hard": (250.0, 1500.0),
|
| 43 |
+
}[difficulty]
|
| 44 |
+
seen_masses = []
|
| 45 |
+
for seed in range(50):
|
| 46 |
+
s = sample_scenario(difficulty=difficulty, seed=seed)
|
| 47 |
+
# If sampler picks a curated scenario, its mass might fall slightly
|
| 48 |
+
# outside the procedural bounds; allow a small tolerance.
|
| 49 |
+
seen_masses.append(s.latent.particle.mass_gev)
|
| 50 |
+
# at least some procedural samples in-range
|
| 51 |
+
in_range = [m for m in seen_masses if bounds[0] <= m <= bounds[1]]
|
| 52 |
+
assert len(in_range) > 0
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def test_fresh_latent_is_independent_copy():
|
| 56 |
+
s = sample_scenario(name="easy_diphoton_160", seed=1)
|
| 57 |
+
a = s.fresh_latent()
|
| 58 |
+
b = s.fresh_latent()
|
| 59 |
+
a.resources.budget_used_musd = 99.0
|
| 60 |
+
assert b.resources.budget_used_musd == 0.0
|