athurlow commited on
Commit
90b7596
·
unverified ·
2 Parent(s): ed745252091813

Merge pull request #1 from athurlow/claude/qcal-copilot-mvp-OZ9wj

Browse files
.github/workflows/release.yml ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Release to PyPI
2
+
3
+ # Tag-triggered release for `qcal-copilot`. Pipeline:
4
+ #
5
+ # build → publish-testpypi → smoke-test-testpypi → publish-pypi → github-release
6
+ #
7
+ # Uses PyPI Trusted Publishing (OIDC), so no long-lived tokens in secrets.
8
+ # BEFORE this workflow can publish, configure the trusted publisher once on
9
+ # each index:
10
+ #
11
+ # TestPyPI: https://test.pypi.org/manage/account/publishing/
12
+ # PyPI: https://pypi.org/manage/account/publishing/
13
+ #
14
+ # For both, register:
15
+ # repo owner: athurlow
16
+ # repo name: qcal
17
+ # workflow: release.yml
18
+ # environment: pypi (for PyPI)
19
+ # testpypi (for TestPyPI)
20
+ #
21
+ # Matching GitHub environments `pypi` and `testpypi` must exist with
22
+ # `id-token: write` permitted — that's the only way OIDC tokens get minted.
23
+ #
24
+ # Manual dry-run: Actions tab → Run workflow → set `testpypi_only: true`
25
+ # to publish a pre-release to TestPyPI without touching PyPI. The tag push
26
+ # path always publishes to both.
27
+
28
+ on:
29
+ push:
30
+ tags:
31
+ - 'v*.*.*'
32
+ workflow_dispatch:
33
+ inputs:
34
+ testpypi_only:
35
+ description: "Publish to TestPyPI only (skip PyPI + GitHub release)"
36
+ type: boolean
37
+ default: true
38
+
39
+ env:
40
+ FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
41
+
42
+ jobs:
43
+ # -------------------------------------------------------------------------
44
+ # 1. Build sdist + wheel once. Downstream jobs consume the artifact so the
45
+ # exact bytes published to TestPyPI are the same ones published to PyPI.
46
+ # -------------------------------------------------------------------------
47
+ build:
48
+ name: Build sdist + wheel
49
+ runs-on: ubuntu-latest
50
+ steps:
51
+ - uses: actions/checkout@v4
52
+ with:
53
+ fetch-depth: 0
54
+
55
+ - uses: actions/setup-python@v5
56
+ with:
57
+ python-version: "3.12"
58
+
59
+ - name: Install build tooling
60
+ run: python -m pip install --upgrade pip build
61
+
62
+ # Fail the release if pyproject.toml's version doesn't match the tag,
63
+ # so we never ship a 0.1.0 wheel tagged as v0.2.0 (or vice versa).
64
+ # Skipped on workflow_dispatch since there's no tag to compare against.
65
+ - name: Verify pyproject version matches tag
66
+ if: github.event_name == 'push'
67
+ run: |
68
+ tag_version="${GITHUB_REF_NAME#v}"
69
+ pkg_version=$(python -c "
70
+ import tomllib, pathlib
71
+ print(tomllib.loads(pathlib.Path('pyproject.toml').read_text())['project']['version'])
72
+ ")
73
+ if [ "${tag_version}" != "${pkg_version}" ]; then
74
+ echo "::error::Tag v${tag_version} does not match pyproject.toml version ${pkg_version}."
75
+ echo "Bump pyproject.toml or retag before pushing."
76
+ exit 1
77
+ fi
78
+ echo "Version check OK: ${pkg_version}"
79
+
80
+ - name: Build distributions
81
+ run: python -m build
82
+
83
+ - name: List built artifacts
84
+ run: ls -la dist/
85
+
86
+ - uses: actions/upload-artifact@v4
87
+ with:
88
+ name: dist
89
+ path: dist/
90
+ if-no-files-found: error
91
+
92
+ # -------------------------------------------------------------------------
93
+ # 2. Publish to TestPyPI first, always.
94
+ # -------------------------------------------------------------------------
95
+ publish-testpypi:
96
+ name: Publish to TestPyPI
97
+ needs: build
98
+ runs-on: ubuntu-latest
99
+ environment:
100
+ name: testpypi
101
+ url: https://test.pypi.org/project/qcal-copilot/
102
+ permissions:
103
+ id-token: write # required for Trusted Publishing OIDC
104
+ steps:
105
+ - uses: actions/download-artifact@v4
106
+ with:
107
+ name: dist
108
+ path: dist/
109
+
110
+ - uses: pypa/gh-action-pypi-publish@release/v1
111
+ with:
112
+ repository-url: https://test.pypi.org/legacy/
113
+ # TestPyPI sometimes has leftover versions from prior dry runs;
114
+ # don't fail the release over it.
115
+ skip-existing: true
116
+
117
+ # -------------------------------------------------------------------------
118
+ # 3. Smoke-test: install from TestPyPI in a clean runner and assert that
119
+ # the CLI + Python imports work. If this fails, do NOT publish to PyPI.
120
+ # -------------------------------------------------------------------------
121
+ smoke-test-testpypi:
122
+ name: Smoke-test TestPyPI install
123
+ needs: publish-testpypi
124
+ runs-on: ubuntu-latest
125
+ steps:
126
+ - uses: actions/setup-python@v5
127
+ with:
128
+ python-version: "3.12"
129
+
130
+ - name: Install from TestPyPI
131
+ run: |
132
+ # TestPyPI doesn't mirror transitive deps — fall back to PyPI for those.
133
+ # Retry a few times because TestPyPI's CDN takes 30-60s to propagate
134
+ # a fresh upload before the new version becomes resolvable.
135
+ for attempt in 1 2 3 4 5; do
136
+ if python -m pip install \
137
+ --index-url https://test.pypi.org/simple/ \
138
+ --extra-index-url https://pypi.org/simple/ \
139
+ qcal-copilot; then
140
+ exit 0
141
+ fi
142
+ echo "Attempt ${attempt} failed, sleeping before retry..."
143
+ sleep 15
144
+ done
145
+ echo "::error::pip install from TestPyPI failed after 5 attempts."
146
+ exit 1
147
+
148
+ - name: Verify CLI + imports
149
+ run: |
150
+ qcal version
151
+ python -c "
152
+ import qcal
153
+ from qcal import analyzer, codegen, data, decoder, fit, simulator, config, cli
154
+ assert qcal.__version__, 'missing __version__'
155
+ print('smoke test OK, version =', qcal.__version__)
156
+ "
157
+
158
+ # -------------------------------------------------------------------------
159
+ # 4. Publish to real PyPI. Skipped on workflow_dispatch dry runs.
160
+ # -------------------------------------------------------------------------
161
+ publish-pypi:
162
+ name: Publish to PyPI
163
+ needs: smoke-test-testpypi
164
+ if: github.event_name == 'push' || github.event.inputs.testpypi_only != 'true'
165
+ runs-on: ubuntu-latest
166
+ environment:
167
+ name: pypi
168
+ url: https://pypi.org/project/qcal-copilot/
169
+ permissions:
170
+ id-token: write
171
+ steps:
172
+ - uses: actions/download-artifact@v4
173
+ with:
174
+ name: dist
175
+ path: dist/
176
+
177
+ - uses: pypa/gh-action-pypi-publish@release/v1
178
+
179
+ # -------------------------------------------------------------------------
180
+ # 5. Attach the built wheel + sdist to a GitHub Release, with auto-generated
181
+ # notes. Tag-push only.
182
+ # -------------------------------------------------------------------------
183
+ github-release:
184
+ name: Create GitHub Release
185
+ needs: publish-pypi
186
+ if: github.event_name == 'push'
187
+ runs-on: ubuntu-latest
188
+ permissions:
189
+ contents: write
190
+ steps:
191
+ - uses: actions/download-artifact@v4
192
+ with:
193
+ name: dist
194
+ path: dist/
195
+
196
+ - uses: softprops/action-gh-release@v2
197
+ with:
198
+ files: dist/*
199
+ generate_release_notes: true
.github/workflows/sync-to-hf-space.yml ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync to Hugging Face Space
2
+
3
+ # Mirrors this repo to https://huggingface.co/spaces/athurlow/qcal so the
4
+ # Space rebuilds whenever you push here. The Space itself is a git repo;
5
+ # this workflow just force-pushes the latest commit to its `main` branch.
6
+
7
+ on:
8
+ push:
9
+ branches:
10
+ - main
11
+ - claude/qcal-copilot-mvp-OZ9wj
12
+ workflow_dispatch: # allow manual sync from the Actions tab
13
+
14
+ env:
15
+ # Opt in to Node 24 now so JS actions (checkout@v4) don't emit the Node 20
16
+ # deprecation warning. Safe to remove once Node 24 is the runner default.
17
+ FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
18
+
19
+ jobs:
20
+ sync:
21
+ runs-on: ubuntu-latest
22
+ steps:
23
+ - name: Checkout repo (full history — HF rejects shallow clones)
24
+ uses: actions/checkout@v4
25
+ with:
26
+ fetch-depth: 0
27
+ lfs: true
28
+
29
+ # Fail fast with a human-readable message if HF_TOKEN is missing or
30
+ # obviously invalid, rather than letting `git push` die with a cryptic
31
+ # "Authentication required" 40 lines into the log.
32
+ - name: Preflight — HF_TOKEN sanity check
33
+ env:
34
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
35
+ run: |
36
+ if [ -z "${HF_TOKEN}" ]; then
37
+ echo "::error::HF_TOKEN secret is not set on this repository."
38
+ echo "Create a Write-scope token at https://huggingface.co/settings/tokens"
39
+ echo "and add it as a repo secret named HF_TOKEN (Settings → Secrets → Actions)."
40
+ exit 1
41
+ fi
42
+ # Real HF user tokens start with `hf_` and are >=20 chars. This
43
+ # catches pasted-in empties/quotes without hitting the API.
44
+ case "${HF_TOKEN}" in
45
+ hf_*) : ;;
46
+ *)
47
+ echo "::error::HF_TOKEN does not look like a Hugging Face user token (expected prefix 'hf_')."
48
+ echo "Regenerate at https://huggingface.co/settings/tokens with Write scope."
49
+ exit 1
50
+ ;;
51
+ esac
52
+ if [ "${#HF_TOKEN}" -lt 20 ]; then
53
+ echo "::error::HF_TOKEN is suspiciously short (${#HF_TOKEN} chars) — likely truncated."
54
+ exit 1
55
+ fi
56
+ # Verify the token is actually accepted by HF's whoami endpoint
57
+ # before we try to push. Saves wading through git's auth output.
58
+ http_code=$(curl -s -o /tmp/hf_whoami.json -w "%{http_code}" \
59
+ -H "Authorization: Bearer ${HF_TOKEN}" \
60
+ https://huggingface.co/api/whoami-v2)
61
+ if [ "${http_code}" != "200" ]; then
62
+ echo "::error::HF whoami returned ${http_code}. Token is invalid or revoked."
63
+ echo "Regenerate at https://huggingface.co/settings/tokens with Write scope."
64
+ exit 1
65
+ fi
66
+ echo "HF_TOKEN looks good (whoami returned 200)."
67
+
68
+ - name: Push to Hugging Face Space
69
+ env:
70
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
71
+ HF_USER: athurlow
72
+ HF_SPACE: qcal
73
+ run: |
74
+ git config user.name "github-actions[bot]"
75
+ git config user.email "github-actions[bot]@users.noreply.github.com"
76
+ git remote add space "https://${HF_USER}:${HF_TOKEN}@huggingface.co/spaces/${HF_USER}/${HF_SPACE}"
77
+ # Force-push the current branch onto the Space's main branch.
78
+ git push --force space "HEAD:main"
.gitignore CHANGED
@@ -1,6 +1,8 @@
1
  __pycache__/
2
  *.py[cod]
3
  *.egg-info/
 
 
4
  .venv/
5
  venv/
6
  .env
 
1
  __pycache__/
2
  *.py[cod]
3
  *.egg-info/
4
+ build/
5
+ dist/
6
  .venv/
7
  venv/
8
  .env
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2026 QCal Copilot contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -1,32 +1,117 @@
1
- # QCal Copilot — MVP
2
-
3
- AI-assisted quantum calibration. Upload a calibration plot or CSV, get an
4
- analysis from NVIDIA's Ising Calibration vision-language model, receive a
5
- ready-to-run CUDA-Q Python script with the suggested tuning, and execute it
6
- on the local `cudaq` simulator.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  ```
9
  ┌─────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐
10
  │ Upload │──▶│ Analyzer │──▶│ Code gen │──▶│ Simulator │
11
  │ (img/csv│ │ (Ising VLM) │ │ (CUDA-Q) │ │ (cudaq.sample│
12
- └─────────┘ └─────────────┘ └──────────────┘ └──────────────┘
 
 
 
 
 
 
13
  ```
14
 
15
  ## Layout
16
 
17
  ```
18
  app.py # Gradio UI + pipeline wiring
19
- qcal/
20
- data.py # image/CSV preprocessing
 
 
21
  analyzer.py # Ising VLM (local HF or NIM)
22
  codegen.py # CUDA-Q script generator
23
  simulator.py # executes the generated script
 
 
 
 
24
  requirements.txt
25
  ```
26
 
27
  The analyzer and simulator are decoupled, so adding a later-stage 3D CNN
28
  decoder or swapping in a different model is a one-file change.
29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  ## Quick start
31
 
32
  ### 1. System requirements
@@ -99,20 +184,99 @@ Open <http://localhost:7860>. Upload a calibration plot, click
99
  **Analyze calibration**, inspect the generated CUDA-Q script, then click
100
  **Run simulation** to execute it on the `cudaq` simulator.
101
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
102
  ## Environment variables
103
 
104
- | Variable | Purpose |
105
- | ------------------- | ----------------------------------------------------- |
106
- | `NVIDIA_API_KEY` | API key for the NIM endpoint (backend = `nim`) |
107
- | `QCAL_MODEL_ID` | Override local HF model id |
108
- | `QCAL_NIM_MODEL` | Override NIM model name |
109
- | `QCAL_NIM_ENDPOINT` | Override NIM base URL |
110
- | `QCAL_HOST` | Gradio bind host (default `0.0.0.0`) |
111
- | `QCAL_PORT` | Gradio port (default `7860`) |
112
- | `QCAL_SHARE` | Set to `1` to enable Gradio public share link |
 
 
 
 
113
 
114
  ## Input formats
115
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
  - **Images** — `.png`, `.jpg`, `.jpeg`, `.bmp`, `.tiff`, `.webp`. Any
117
  calibration artifact the VLM understands: Rabi chevrons, T1/T2 decays,
118
  Ramsey fringes, readout histograms, resonator spectroscopy, oscilloscope
@@ -134,6 +298,28 @@ Module boundaries to keep the MVP clean:
134
 
135
  - `qcal.data` — file I/O and normalization only.
136
  - `qcal.analyzer` — model calls; returns a strict JSON dict.
137
- - `qcal.codegen` — pure function: analysis dict → script text.
138
  - `qcal.simulator` — executes script text; never imports `cudaq` itself.
 
139
  - `app.py` — UI glue only; no ML logic.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: QCal Copilot
3
+ emoji: ⚛️
4
+ colorFrom: blue
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 4.44.0
8
+ python_version: "3.12"
9
+ app_file: app.py
10
+ pinned: false
11
+ license: mit
12
+ short_description: AI-assisted quantum calibration + CUDA-Q + Ising decoder
13
+ ---
14
+
15
+ # QCal Copilot
16
+
17
+ AI-assisted quantum calibration. Point it at a raw `.npy` trace (or image, or
18
+ CSV) and it renders a plot, auto-fits the standard calibration model (Rabi,
19
+ Ramsey, T1, T2-echo), hands both to NVIDIA's Ising Calibration VLM, and emits a
20
+ ready-to-run CUDA-Q script seeded with the recommended tuning.
21
+
22
+ Ships three ways:
23
+
24
+ - **`pip install qcal-copilot`** — CLI + Python API (`qcal analyze`, `from qcal.data import from_npy`).
25
+ - **Gradio web app** — `qcal serve` or [the hosted Space](https://huggingface.co/spaces/athurlow/qcal).
26
+ - **Jupyter** — see `examples/` for Rabi, Ramsey-drift, and readout-IQ notebooks.
27
 
28
  ```
29
  ┌─────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐
30
  │ Upload │──▶│ Analyzer │──▶│ Code gen │──▶│ Simulator │
31
  │ (img/csv│ │ (Ising VLM) │ │ (CUDA-Q) │ │ (cudaq.sample│
32
+ └─────────┘ └─────────────┘ └──────────────┘ └──────────────┘
33
+
34
+
35
+ ┌───────────────────────────────┐
36
+ │ Decoder (optional) │
37
+ │ Ising 3D CNN → PyMatching │
38
+ └───────────────────────────────┘
39
  ```
40
 
41
  ## Layout
42
 
43
  ```
44
  app.py # Gradio UI + pipeline wiring
45
+ pyproject.toml # package metadata + CLI entry point
46
+ src/qcal/
47
+ data.py # image/CSV/.npy/.npz preprocessing + plot rendering
48
+ fit.py # scipy-backed curve fits (Rabi/Ramsey/T1/T2)
49
  analyzer.py # Ising VLM (local HF or NIM)
50
  codegen.py # CUDA-Q script generator
51
  simulator.py # executes the generated script
52
+ decoder.py # Ising 3D CNN pre-decoder + MWPM
53
+ config.py # persists NIM API key to ~/.config/qcal/config.toml
54
+ cli.py # `qcal ...` Typer commands
55
+ examples/ # Rabi / Ramsey-drift / readout-IQ notebooks
56
  requirements.txt
57
  ```
58
 
59
  The analyzer and simulator are decoupled, so adding a later-stage 3D CNN
60
  decoder or swapping in a different model is a one-file change.
61
 
62
+ ## Install (pip)
63
+
64
+ ```bash
65
+ pip install qcal-copilot # CLI + NIM backend
66
+ pip install "qcal-copilot[decoder]" # + PyMatching
67
+ pip install "qcal-copilot[gui]" # + Gradio (for `qcal serve`)
68
+ pip install "qcal-copilot[ml]" # + torch + transformers (local 35B VLM)
69
+ pip install "qcal-copilot[all]" # everything
70
+ ```
71
+
72
+ Store your NIM API key (or set `NVIDIA_API_KEY` in your shell):
73
+
74
+ ```bash
75
+ qcal login # prompts for the key, saves to ~/.config/qcal/config.toml (0600)
76
+ ```
77
+
78
+ ### CLI
79
+
80
+ ```bash
81
+ # Rabi trace stored as a 1-D .npy with a matching time axis
82
+ qcal analyze rabi.npy --experiment rabi --out report.md --script rabi.py
83
+
84
+ # .npz archive with x, y arrays
85
+ qcal analyze ramsey.npz --experiment ramsey --json out.json
86
+
87
+ # Regenerate the CUDA-Q script from a saved analysis
88
+ qcal generate out.json --out rabi.py
89
+
90
+ # Run the Ising 3D CNN decoder on a synthetic syndrome volume
91
+ qcal decode --variant fast --distance 5 --rounds 5 --p 0.005 --shots 128
92
+
93
+ # Launch the Gradio UI locally (needs [gui])
94
+ qcal serve
95
+ ```
96
+
97
+ ### Python
98
+
99
+ ```python
100
+ from qcal.data import from_npy
101
+ from qcal.analyzer import analyze_payload
102
+ from qcal.codegen import generate_script
103
+
104
+ payload = from_npy("rabi.npy", experiment_type="rabi",
105
+ x_path="rabi_time.npy", x_unit="s")
106
+ payload.fit # FitResult: {amplitude, freq_per_s, tau_s, offset, phase_rad, R^2}
107
+
108
+ result = analyze_payload(payload, backend="auto") # "nim" if key present, else local
109
+ print(result.markdown())
110
+ print(generate_script(result.parsed))
111
+ ```
112
+
113
+ Both `payload.fit` and `result` render as rich markdown in Jupyter.
114
+
115
  ## Quick start
116
 
117
  ### 1. System requirements
 
184
  **Analyze calibration**, inspect the generated CUDA-Q script, then click
185
  **Run simulation** to execute it on the `cudaq` simulator.
186
 
187
+ ## Deploy to Hugging Face Spaces
188
+
189
+ This repo is ready to deploy as a Gradio Space (e.g. `athurlow/qcal`). The
190
+ YAML frontmatter at the top of this README tells Spaces which SDK to use and
191
+ which file to run.
192
+
193
+ 1. Push the repo to the Space:
194
+
195
+ ```bash
196
+ git remote add space https://huggingface.co/spaces/athurlow/qcal
197
+ git push space claude/qcal-copilot-mvp-OZ9wj:main
198
+ ```
199
+ 2. In the Space **Settings → Variables and secrets**, add:
200
+ - `NVIDIA_API_KEY` — required; the hosted Space can't download the 35B
201
+ VLM locally, so the app should call the NIM endpoint.
202
+ 3. (Optional) Override model ids via Space secrets if you have custom
203
+ deployments: `QCAL_NIM_MODEL`, `QCAL_DECODER_FAST_ID`,
204
+ `QCAL_DECODER_ACCURATE_ID`.
205
+ 4. **Hardware:** a free CPU Space runs the decoder's small CNN (~1.8M params)
206
+ and the NIM-backed analyzer fine. A GPU Space (T4 or better) is only
207
+ needed if you want to host the calibration VLM locally; `cudaq` requires
208
+ an NVIDIA GPU Space to run the simulation stage.
209
+
210
+ The app falls back gracefully when dependencies are missing: no
211
+ `NVIDIA_API_KEY` → analyzer reports the missing key; no `cudaq` → simulator
212
+ button surfaces the install hint; no `pymatching` → decoder shows density
213
+ metrics without MWPM timing.
214
+
215
+ ## Error-correction decoder (optional stage)
216
+
217
+ After a successful calibration analysis, expand the
218
+ **"Error-correction decoder (Ising 3D CNN)"** panel to run an NVIDIA Ising
219
+ surface-code pre-decoder on a synthetic syndrome volume. The panel lets you:
220
+
221
+ - Pick the `fast` (~912k params) or `accurate` (~1.79M params) variant.
222
+ - Set code distance (d), syndrome rounds (T), physical error rate (p),
223
+ and shot count.
224
+ - See before/after metrics: syndrome density, CNN inference time, MWPM
225
+ decode time on raw vs denoised syndromes (via PyMatching), and a
226
+ logical-error-rate proxy.
227
+ - View a side-by-side plot of the raw and denoised syndrome slices plus a
228
+ before/after bar chart.
229
+
230
+ The `p` slider auto-populates from the calibration analysis (larger drive
231
+ amplitude mismatch → larger `p`). Running the decoder regenerates the
232
+ CUDA-Q script with a header block documenting the decoder variant and
233
+ improvement metrics.
234
+
235
+ **What's synthetic and what's real:** syndromes are sampled from Bernoulli(p)
236
+ with a few injected correlated chains, the matching graph is a toy 6-nearest-
237
+ neighbor graph (not a stim-generated DEM), and "LER" is a syndrome-weight
238
+ proxy. If the Ising decoder weights aren't reachable, the module falls back
239
+ to a 3D neighbor-support sparsifier so the pipeline still demos end-to-end.
240
+
241
+ **Swapping in real data:** call
242
+ `qcal.decoder.run_decoder(...)` directly with your own `numpy` volume in place
243
+ of the generated one, or replace `generate_syndromes()` with a stim-backed
244
+ sampler.
245
+
246
  ## Environment variables
247
 
248
+ | Variable | Purpose |
249
+ | --------------------------- | ------------------------------------------------ |
250
+ | `NVIDIA_API_KEY` | API key for the NIM endpoint (backend = `nim`) |
251
+ | `QCAL_MODEL_ID` | Override local HF calibration VLM id |
252
+ | `QCAL_NIM_MODEL` | Override NIM model name |
253
+ | `QCAL_NIM_ENDPOINT` | Override NIM base URL |
254
+ | `QCAL_DECODER_FAST_ID` | Override HF id for the fast decoder variant |
255
+ | `QCAL_DECODER_ACCURATE_ID` | Override HF id for the accurate decoder variant |
256
+ | `QCAL_HOST` | Gradio bind host (default `0.0.0.0`) |
257
+ | `QCAL_PORT` | Gradio port (default `7860`) |
258
+ | `QCAL_SHARE` | Set to `1` to enable Gradio public share link |
259
+ | `QCAL_CONFIG_PATH` | Override config file path (default `~/.config/qcal/config.toml`) |
260
+ | `NIM_API_KEY` | Alias for `NVIDIA_API_KEY` |
261
 
262
  ## Input formats
263
 
264
+ - **`.npy` / `.npz`** (preferred for real-hardware workflows) — Raw measurement
265
+ arrays from your control stack. Pass `--experiment` (or `experiment_type=`)
266
+ so `qcal` knows the expected shape and fit model:
267
+
268
+ | `experiment_type` | Array shape | Auto-fit model |
269
+ |------------------------|---------------|----------------------------------------------|
270
+ | `rabi` | `(N,)` | damped sine → `{amplitude, freq, tau, offset, phase}` |
271
+ | `ramsey` | `(N,)` | damped cosine → `{amplitude, detuning, t2star, offset, phase}` |
272
+ | `t1`, `t2_echo` | `(N,)` | exponential decay → `{amplitude, tau, offset}` |
273
+ | `rabi_chevron` | `(F, A)` | heatmap (no fit) |
274
+ | `readout_iq` | `(N, 2)` | scatter (no fit) |
275
+ | `iq_trace`, `resonator_spec`, `unknown` | `(N,)` | plot only |
276
+
277
+ `.npz` should contain at least a `y` key and optionally an `x` key. Disable
278
+ fitting for air-gapped installs without `scipy` via `--no-fit` (CLI) or
279
+ `from_npy(..., fit=False)` (Python).
280
  - **Images** — `.png`, `.jpg`, `.jpeg`, `.bmp`, `.tiff`, `.webp`. Any
281
  calibration artifact the VLM understands: Rabi chevrons, T1/T2 decays,
282
  Ramsey fringes, readout histograms, resonator spectroscopy, oscilloscope
 
298
 
299
  - `qcal.data` — file I/O and normalization only.
300
  - `qcal.analyzer` — model calls; returns a strict JSON dict.
301
+ - `qcal.codegen` — pure function: analysis dict (+ optional decoder info) → script text.
302
  - `qcal.simulator` — executes script text; never imports `cudaq` itself.
303
+ - `qcal.decoder` — Ising 3D CNN sparsifier + optional PyMatching MWPM.
304
  - `app.py` — UI glue only; no ML logic.
305
+
306
+ ### Testing the decoder stage
307
+
308
+ Without launching the UI:
309
+
310
+ ```python
311
+ from qcal.decoder import run_decoder
312
+
313
+ r = run_decoder(variant="fast", distance=5, rounds=5, error_rate=0.01, n_shots=64)
314
+ print(r.markdown())
315
+ print("density reduction:", round(r.density_reduction * 100, 1), "%")
316
+ ```
317
+
318
+ Through the UI:
319
+
320
+ 1. Upload any calibration plot and click **Analyze calibration**.
321
+ 2. Expand **Error-correction decoder (Ising 3D CNN)**.
322
+ 3. Pick `fast` or `accurate`, adjust `d` / `T` / `p`, click **Run decoder**.
323
+ 4. Inspect the metrics panel (density reduction, MWPM timing, LER proxy
324
+ improvement) and the side-by-side plot.
325
+ 5. The CUDA-Q script auto-refreshes with a decoder header block.
app.py CHANGED
@@ -1,26 +1,66 @@
1
  """QCal Copilot — Gradio MVP.
2
 
3
  Upload a calibration plot (image) or CSV, get an AI analysis from the
4
- Ising Calibration VLM, generate a runnable CUDA-Q script, and optionally
5
- execute it on the local cudaq simulator.
 
6
 
7
  Run:
8
  python app.py
9
 
10
  Environment (optional):
11
- NVIDIA_API_KEY API key for build.nvidia.com NIM endpoint
12
- QCAL_MODEL_ID HF model id (default: nvidia/Ising-Calibration-1-35B-A3B)
13
- QCAL_NIM_MODEL NIM model name
14
- QCAL_NIM_ENDPOINT NIM base URL
 
 
15
  """
16
 
17
  from __future__ import annotations
18
 
19
  import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
  import gradio as gr
22
 
23
- from qcal import analyzer, codegen, data, simulator
24
 
25
 
26
  # ---------------------------------------------------------------------------
@@ -35,12 +75,17 @@ def step_analyze(file_obj, backend_choice: str):
35
  gr.update(value=""),
36
  gr.update(value=""),
37
  None,
 
38
  )
39
 
40
  try:
41
  payload = data.load_payload(file_obj.name if hasattr(file_obj, "name") else file_obj)
42
  except Exception as exc: # noqa: BLE001
43
- return gr.update(value=f"**Input error:** {exc}"), "", "", None
 
 
 
 
44
 
45
  summary = payload.summary()
46
  table_md = payload.table_preview_markdown() if payload.kind == "csv" else None
@@ -56,9 +101,10 @@ def step_analyze(file_obj, backend_choice: str):
56
  analysis_md = header + result.markdown()
57
 
58
  script = codegen.generate_script(result.parsed) if result.ok else ""
59
- script_md_hint = "" if result.ok else "_(no script generated — fix the analysis error first)_"
 
60
 
61
- return analysis_md, script, script_md_hint, result.parsed
62
 
63
 
64
  def step_run_simulation(script_text: str):
@@ -68,6 +114,48 @@ def step_run_simulation(script_text: str):
68
  return simulator.format_result_markdown(result)
69
 
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  # ---------------------------------------------------------------------------
72
  # UI
73
  # ---------------------------------------------------------------------------
@@ -88,11 +176,16 @@ def build_ui() -> gr.Blocks:
88
  """
89
  <div id="qcal-header">
90
  <h1>QCal Copilot</h1>
91
- <p>AI-assisted quantum calibration · Ising Calibration VLM + CUDA-Q</p>
92
  </div>
93
  """
94
  )
95
 
 
 
 
 
 
96
  with gr.Row():
97
  with gr.Column(scale=1):
98
  file_in = gr.File(
@@ -129,20 +222,72 @@ def build_ui() -> gr.Blocks:
129
  script_hint = gr.Markdown()
130
  sim_out = gr.Markdown(label="Simulation result")
131
 
132
- # State holds the parsed analysis dict so we could extend the flow
133
- # later (e.g. decoder stage) without re-calling the VLM.
134
- analysis_state = gr.State(value=None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
135
 
136
  analyze_btn.click(
137
  fn=step_analyze,
138
  inputs=[file_in, backend_choice],
139
- outputs=[analysis_out, script_out, script_hint, analysis_state],
 
 
 
140
  )
141
  run_btn.click(
142
  fn=step_run_simulation,
143
  inputs=[script_out],
144
  outputs=[sim_out],
145
  )
 
 
 
 
 
 
 
 
146
 
147
  return demo
148
 
@@ -153,6 +298,11 @@ def main() -> None:
153
  server_name=os.getenv("QCAL_HOST", "0.0.0.0"),
154
  server_port=int(os.getenv("QCAL_PORT", "7860")),
155
  share=os.getenv("QCAL_SHARE", "").lower() in {"1", "true", "yes"},
 
 
 
 
 
156
  )
157
 
158
 
 
1
  """QCal Copilot — Gradio MVP.
2
 
3
  Upload a calibration plot (image) or CSV, get an AI analysis from the
4
+ Ising Calibration VLM, generate a runnable CUDA-Q script, execute it on
5
+ the local cudaq simulator, and optionally run the Ising 3D CNN decoder
6
+ stage on a synthetic surface-code syndrome volume.
7
 
8
  Run:
9
  python app.py
10
 
11
  Environment (optional):
12
+ NVIDIA_API_KEY API key for build.nvidia.com NIM endpoint
13
+ QCAL_MODEL_ID HF model id for the calibration VLM
14
+ QCAL_NIM_MODEL NIM model name
15
+ QCAL_NIM_ENDPOINT NIM base URL
16
+ QCAL_DECODER_FAST_ID Override fast decoder HF id
17
+ QCAL_DECODER_ACCURATE_ID Override accurate decoder HF id
18
  """
19
 
20
  from __future__ import annotations
21
 
22
  import os
23
+ import sys
24
+ from pathlib import Path
25
+
26
+ # On HF Spaces the build step only mounts requirements.txt, so `pip install -e .`
27
+ # can't reach pyproject.toml. Put src/ on sys.path so `from qcal import ...`
28
+ # resolves against the src-layout package without needing it installed.
29
+ _SRC = Path(__file__).resolve().parent / "src"
30
+ if _SRC.is_dir() and str(_SRC) not in sys.path:
31
+ sys.path.insert(0, str(_SRC))
32
+
33
+ # gradio 4.44.0 crashes on every page load when a JSON sub-schema is a bool
34
+ # (JSON Schema draft 2020-12 allows `additionalProperties: True|False`, and
35
+ # pydantic emits it). Fixed in 4.44.1, but HF Spaces pins 4.44.0 in its
36
+ # build bootstrap. Wrap both the schema walker and the type dispatcher so
37
+ # bool schemas degrade to "Any" instead of raising APIInfoParseError.
38
+ # Recursion inside gradio_client.utils resolves these names via module
39
+ # globals, so patching the module attributes intercepts every call site.
40
+ try:
41
+ from gradio_client import utils as _gc_utils
42
+
43
+ _orig_walk = _gc_utils._json_schema_to_python_type
44
+ _orig_get_type = _gc_utils.get_type
45
+
46
+ def _safe_walk(schema, defs=None):
47
+ if isinstance(schema, bool):
48
+ return "Any"
49
+ return _orig_walk(schema, defs)
50
+
51
+ def _safe_get_type(schema):
52
+ if not isinstance(schema, dict):
53
+ return "Any"
54
+ return _orig_get_type(schema)
55
+
56
+ _gc_utils._json_schema_to_python_type = _safe_walk
57
+ _gc_utils.get_type = _safe_get_type
58
+ except Exception: # noqa: BLE001 — best-effort; don't block startup
59
+ pass
60
 
61
  import gradio as gr
62
 
63
+ from qcal import analyzer, codegen, data, decoder, simulator
64
 
65
 
66
  # ---------------------------------------------------------------------------
 
75
  gr.update(value=""),
76
  gr.update(value=""),
77
  None,
78
+ gr.update(value=decoder.suggest_error_rate(None)),
79
  )
80
 
81
  try:
82
  payload = data.load_payload(file_obj.name if hasattr(file_obj, "name") else file_obj)
83
  except Exception as exc: # noqa: BLE001
84
+ return (
85
+ gr.update(value=f"**Input error:** {exc}"),
86
+ "", "", None,
87
+ gr.update(value=decoder.suggest_error_rate(None)),
88
+ )
89
 
90
  summary = payload.summary()
91
  table_md = payload.table_preview_markdown() if payload.kind == "csv" else None
 
101
  analysis_md = header + result.markdown()
102
 
103
  script = codegen.generate_script(result.parsed) if result.ok else ""
104
+ script_hint = "" if result.ok else "_(no script generated — fix the analysis error first)_"
105
+ suggested_p = decoder.suggest_error_rate(result.parsed)
106
 
107
+ return analysis_md, script, script_hint, result.parsed, gr.update(value=suggested_p)
108
 
109
 
110
  def step_run_simulation(script_text: str):
 
114
  return simulator.format_result_markdown(result)
115
 
116
 
117
+ def step_run_decoder(
118
+ variant: str,
119
+ distance: int,
120
+ rounds: int,
121
+ error_rate: float,
122
+ n_shots: int,
123
+ analysis: dict | None,
124
+ script_text: str,
125
+ ):
126
+ """Run the Ising 3D CNN decoder and refresh metrics/plots/script."""
127
+ result = decoder.run_decoder(
128
+ variant=variant,
129
+ distance=int(distance),
130
+ rounds=int(rounds),
131
+ error_rate=float(error_rate),
132
+ n_shots=int(n_shots),
133
+ )
134
+ metrics_md = result.markdown()
135
+
136
+ try:
137
+ fig = decoder.plot_comparison(result) if result.ok else None
138
+ except Exception as exc: # noqa: BLE001
139
+ fig = None
140
+ metrics_md += f"\n\n_(plot unavailable: {exc})_"
141
+
142
+ # Re-generate the CUDA-Q script with a decoder header block, so the user
143
+ # can copy a script that documents which decoder ran upstream.
144
+ new_script = script_text
145
+ if result.ok and analysis:
146
+ decoder_info = {
147
+ "variant": result.variant,
148
+ "model_id": result.model_id,
149
+ "distance": result.distance,
150
+ "rounds": result.rounds,
151
+ "density_reduction": result.density_reduction,
152
+ "ler_improvement": result.ler_improvement,
153
+ }
154
+ new_script = codegen.generate_script(analysis, decoder_info=decoder_info)
155
+
156
+ return metrics_md, fig, new_script
157
+
158
+
159
  # ---------------------------------------------------------------------------
160
  # UI
161
  # ---------------------------------------------------------------------------
 
176
  """
177
  <div id="qcal-header">
178
  <h1>QCal Copilot</h1>
179
+ <p>AI-assisted quantum calibration · Ising VLM + 3D CNN decoder + CUDA-Q</p>
180
  </div>
181
  """
182
  )
183
 
184
+ # State holds the parsed analysis dict so downstream stages (decoder,
185
+ # future 3D CNN tile, etc.) can read it without re-calling the VLM.
186
+ analysis_state = gr.State(value=None)
187
+
188
+ # ---- Stage 1: calibration analysis ---------------------------------
189
  with gr.Row():
190
  with gr.Column(scale=1):
191
  file_in = gr.File(
 
222
  script_hint = gr.Markdown()
223
  sim_out = gr.Markdown(label="Simulation result")
224
 
225
+ # ---- Stage 2: error-correction decoder -----------------------------
226
+ with gr.Accordion("Error-correction decoder (Ising 3D CNN)", open=False):
227
+ gr.Markdown(
228
+ "Sparsify a synthetic surface-code syndrome volume with one of "
229
+ "the Ising pre-decoders, then hand off to MWPM (PyMatching). "
230
+ "Error rate defaults are suggested from your calibration analysis."
231
+ )
232
+ with gr.Row():
233
+ with gr.Column(scale=1):
234
+ variant_choice = gr.Radio(
235
+ label="Decoder variant",
236
+ choices=[decoder.VARIANT_FAST, decoder.VARIANT_ACCURATE],
237
+ value=decoder.VARIANT_FAST,
238
+ info=(
239
+ "`fast` ≈ 912k params — lower latency. "
240
+ "`accurate` ≈ 1.79M params — better LER."
241
+ ),
242
+ )
243
+ distance_slider = gr.Slider(
244
+ 3, 11, value=5, step=2,
245
+ label="Code distance (d)",
246
+ )
247
+ rounds_slider = gr.Slider(
248
+ 1, 17, value=5, step=1,
249
+ label="Syndrome rounds (T)",
250
+ )
251
+ error_rate_slider = gr.Slider(
252
+ 0.0, 0.05, value=0.005, step=0.001,
253
+ label="Physical error rate (p)",
254
+ )
255
+ shots_slider = gr.Slider(
256
+ 16, 1024, value=128, step=16,
257
+ label="Shots",
258
+ )
259
+ run_decoder_btn = gr.Button(
260
+ "Run decoder", variant="primary"
261
+ )
262
+
263
+ with gr.Column(scale=2):
264
+ decoder_metrics = gr.Markdown(
265
+ "_Run the decoder to see density reduction, MWPM timing, "
266
+ "and LER-proxy improvement here._"
267
+ )
268
+ decoder_plot = gr.Plot(label="Raw vs denoised syndromes")
269
 
270
  analyze_btn.click(
271
  fn=step_analyze,
272
  inputs=[file_in, backend_choice],
273
+ outputs=[
274
+ analysis_out, script_out, script_hint, analysis_state,
275
+ error_rate_slider,
276
+ ],
277
  )
278
  run_btn.click(
279
  fn=step_run_simulation,
280
  inputs=[script_out],
281
  outputs=[sim_out],
282
  )
283
+ run_decoder_btn.click(
284
+ fn=step_run_decoder,
285
+ inputs=[
286
+ variant_choice, distance_slider, rounds_slider,
287
+ error_rate_slider, shots_slider, analysis_state, script_out,
288
+ ],
289
+ outputs=[decoder_metrics, decoder_plot, script_out],
290
+ )
291
 
292
  return demo
293
 
 
298
  server_name=os.getenv("QCAL_HOST", "0.0.0.0"),
299
  server_port=int(os.getenv("QCAL_PORT", "7860")),
300
  share=os.getenv("QCAL_SHARE", "").lower() in {"1", "true", "yes"},
301
+ # Gradio 4.44.0's client-side schema serializer crashes on
302
+ # `additionalProperties: False` ("argument of type 'bool' is not
303
+ # iterable"); fixed in 4.44.1, but HF Spaces pins 4.44.0. Disabling
304
+ # the OpenAPI endpoint avoids the crash — the UI is unaffected.
305
+ show_api=False,
306
  )
307
 
308
 
examples/01_rabi.ipynb ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Rabi calibration with QCal Copilot\n",
8
+ "\n",
9
+ "Load a raw `.npy` Rabi trace, auto-fit a damped sine, hand the plot + fit numbers to\n",
10
+ "the Ising Calibration VLM, and emit a CUDA-Q script seeded with the recommended\n",
11
+ "pulse amplitude.\n",
12
+ "\n",
13
+ "Inputs expected: a 1-D numpy array of readout populations vs drive amplitude (or time)\n",
14
+ "and an optional matching `x` array. Works offline — the local VLM is optional."
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {},
21
+ "outputs": [],
22
+ "source": [
23
+ "import numpy as np\n",
24
+ "\n",
25
+ "# Synthesize a Rabi sweep (replace with `np.load('your_trace.npy')`).\n",
26
+ "t = np.linspace(0, 500e-9, 201) # seconds\n",
27
+ "y = 0.5 * np.exp(-t/300e-9) * np.sin(2*np.pi*10e6*t) + 0.5\n",
28
+ "np.save('rabi_trace.npy', y)\n",
29
+ "np.save('rabi_time.npy', t)"
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": null,
35
+ "metadata": {},
36
+ "outputs": [],
37
+ "source": [
38
+ "from qcal.data import from_npy\n",
39
+ "\n",
40
+ "payload = from_npy(\n",
41
+ " 'rabi_trace.npy',\n",
42
+ " experiment_type='rabi',\n",
43
+ " x_path='rabi_time.npy',\n",
44
+ " x_unit='s',\n",
45
+ ")\n",
46
+ "payload.fit # FitResult — rich-displays in Jupyter as a table"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": null,
52
+ "metadata": {},
53
+ "outputs": [],
54
+ "source": [
55
+ "payload.image # the rendered plot the VLM will see"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "code",
60
+ "execution_count": null,
61
+ "metadata": {},
62
+ "outputs": [],
63
+ "source": [
64
+ "from qcal.analyzer import analyze_payload\n",
65
+ "\n",
66
+ "# backend='auto' uses NIM when NVIDIA_API_KEY is set, else the local HF weights.\n",
67
+ "result = analyze_payload(payload, backend='auto')\n",
68
+ "result # _repr_markdown_ — shows experiment, issues, recommended params"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": null,
74
+ "metadata": {},
75
+ "outputs": [],
76
+ "source": [
77
+ "from qcal.codegen import generate_script\n",
78
+ "\n",
79
+ "print(generate_script(result.parsed))"
80
+ ]
81
+ }
82
+ ],
83
+ "metadata": {
84
+ "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"},
85
+ "language_info": {"name": "python", "version": "3.11"}
86
+ },
87
+ "nbformat": 4,
88
+ "nbformat_minor": 5
89
+ }
examples/02_ramsey_drift.ipynb ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Ramsey detuning drift over a shift\n",
8
+ "\n",
9
+ "Track how the detuning (qubit frequency - drive frequency) drifts across a series\n",
10
+ "of Ramsey experiments run over several hours — the signature of a TLS hop, a thermal\n",
11
+ "excursion, or flux-line crosstalk that the VLM is good at flagging.\n",
12
+ "\n",
13
+ "Typical workflow: your control stack dumps one `.npz` per Ramsey run into a\n",
14
+ "watched directory; this notebook walks that directory, auto-fits each trace, and\n",
15
+ "plots the extracted detuning vs wall-clock time."
16
+ ]
17
+ },
18
+ {
19
+ "cell_type": "code",
20
+ "execution_count": null,
21
+ "metadata": {},
22
+ "outputs": [],
23
+ "source": [
24
+ "import numpy as np\n",
25
+ "import matplotlib.pyplot as plt\n",
26
+ "\n",
27
+ "rng = np.random.default_rng(1)\n",
28
+ "tau = np.linspace(0, 10e-6, 201) # 10 µs delay sweep\n",
29
+ "wall_times = np.arange(8) * 30 * 60 # every 30 min, 4 hours total\n",
30
+ "\n",
31
+ "# Simulate detuning drifting from 120 kHz → 180 kHz with a TLS-like jump halfway.\n",
32
+ "detunings = np.array([120e3, 125e3, 132e3, 140e3, 170e3, 172e3, 175e3, 180e3])\n",
33
+ "\n",
34
+ "for i, det in enumerate(detunings):\n",
35
+ " phase = rng.uniform(0, 2*np.pi)\n",
36
+ " y = 0.5 * np.exp(-tau/4e-6) * np.cos(2*np.pi*det*tau + phase) + 0.5\n",
37
+ " y += rng.normal(0, 0.01, tau.shape) # readout noise\n",
38
+ " np.savez(f'ramsey_{i:02d}.npz', x=tau, y=y, wall_time=wall_times[i])"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": null,
44
+ "metadata": {},
45
+ "outputs": [],
46
+ "source": [
47
+ "from pathlib import Path\n",
48
+ "from qcal.data import from_npz\n",
49
+ "\n",
50
+ "records = []\n",
51
+ "for path in sorted(Path('.').glob('ramsey_*.npz')):\n",
52
+ " p = from_npz(path, experiment_type='ramsey', x_unit='s')\n",
53
+ " if p.fit and p.fit.ok:\n",
54
+ " det = next(v for k, v in p.fit.params.items() if k.startswith('detuning_per_'))\n",
55
+ " t2s = next(v for k, v in p.fit.params.items() if k.startswith('t2star_'))\n",
56
+ " records.append({'file': path.name, 'detuning_hz': det, 't2star_s': t2s,\n",
57
+ " 'fit_quality': p.fit.fit_quality})\n",
58
+ "\n",
59
+ "import pandas as pd\n",
60
+ "df = pd.DataFrame(records)\n",
61
+ "df"
62
+ ]
63
+ },
64
+ {
65
+ "cell_type": "code",
66
+ "execution_count": null,
67
+ "metadata": {},
68
+ "outputs": [],
69
+ "source": [
70
+ "fig, ax = plt.subplots(figsize=(6.5, 3.8))\n",
71
+ "ax.plot(wall_times/3600, df['detuning_hz']/1e3, marker='o')\n",
72
+ "ax.set_xlabel('Wall time (hours)')\n",
73
+ "ax.set_ylabel('Fitted detuning (kHz)')\n",
74
+ "ax.set_title('Qubit detuning drift')\n",
75
+ "ax.grid(True, alpha=0.3)"
76
+ ]
77
+ },
78
+ {
79
+ "cell_type": "markdown",
80
+ "metadata": {},
81
+ "source": [
82
+ "Hand the **most recent** Ramsey trace to the VLM. It sees both the plot and the\n",
83
+ "fitted numbers in the prompt, so `notes` / `drift_prediction` usually flag the\n",
84
+ "step change in detuning."
85
+ ]
86
+ },
87
+ {
88
+ "cell_type": "code",
89
+ "execution_count": null,
90
+ "metadata": {},
91
+ "outputs": [],
92
+ "source": [
93
+ "from qcal.analyzer import analyze_payload\n",
94
+ "\n",
95
+ "latest = from_npz('ramsey_07.npz', experiment_type='ramsey', x_unit='s')\n",
96
+ "analyze_payload(latest, backend='auto')"
97
+ ]
98
+ }
99
+ ],
100
+ "metadata": {
101
+ "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"},
102
+ "language_info": {"name": "python", "version": "3.11"}
103
+ },
104
+ "nbformat": 4,
105
+ "nbformat_minor": 5
106
+ }
examples/03_readout_iq.ipynb ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# Readout IQ blob classification\n",
8
+ "\n",
9
+ "Feed a dense cloud of single-shot IQ values to the VLM. The model recognizes\n",
10
+ "under-separated blobs, leakage to `|2⟩`, and skewed populations — all things\n",
11
+ "that degrade readout fidelity. We also sketch a cheap linear threshold for\n",
12
+ "reference."
13
+ ]
14
+ },
15
+ {
16
+ "cell_type": "code",
17
+ "execution_count": null,
18
+ "metadata": {},
19
+ "outputs": [],
20
+ "source": [
21
+ "import numpy as np\n",
22
+ "\n",
23
+ "rng = np.random.default_rng(7)\n",
24
+ "n = 4000\n",
25
+ "ground = rng.normal(loc=(-0.9, 0.1), scale=(0.35, 0.35), size=(n//2, 2))\n",
26
+ "excited = rng.normal(loc=(+0.8, 0.4), scale=(0.40, 0.40), size=(n//2, 2))\n",
27
+ "iq = np.vstack([ground, excited])\n",
28
+ "np.save('readout_iq.npy', iq)\n",
29
+ "iq.shape"
30
+ ]
31
+ },
32
+ {
33
+ "cell_type": "code",
34
+ "execution_count": null,
35
+ "metadata": {},
36
+ "outputs": [],
37
+ "source": [
38
+ "from qcal.data import from_npy\n",
39
+ "\n",
40
+ "payload = from_npy('readout_iq.npy', experiment_type='readout_iq')\n",
41
+ "payload.image"
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": null,
47
+ "metadata": {},
48
+ "outputs": [],
49
+ "source": [
50
+ "from qcal.analyzer import analyze_payload\n",
51
+ "\n",
52
+ "result = analyze_payload(payload, backend='auto')\n",
53
+ "result"
54
+ ]
55
+ },
56
+ {
57
+ "cell_type": "markdown",
58
+ "metadata": {},
59
+ "source": [
60
+ "Once the VLM returns a `recommended_parameters.readout_threshold`, wire it into\n",
61
+ "your control stack's classifier. Below: a crude linear threshold along the I-axis\n",
62
+ "for reference."
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": null,
68
+ "metadata": {},
69
+ "outputs": [],
70
+ "source": [
71
+ "import matplotlib.pyplot as plt\n",
72
+ "\n",
73
+ "thresh = result.parsed.get('recommended_parameters', {}).get('readout_threshold', 0.0)\n",
74
+ "fig, ax = plt.subplots(figsize=(5, 5))\n",
75
+ "ax.scatter(iq[:, 0], iq[:, 1], s=3, alpha=0.4)\n",
76
+ "ax.axvline(thresh, color='r', linewidth=1.0, label=f'threshold = {thresh}')\n",
77
+ "ax.set_xlabel('I'); ax.set_ylabel('Q'); ax.legend(); ax.grid(alpha=0.3)"
78
+ ]
79
+ }
80
+ ],
81
+ "metadata": {
82
+ "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"},
83
+ "language_info": {"name": "python", "version": "3.11"}
84
+ },
85
+ "nbformat": 4,
86
+ "nbformat_minor": 5
87
+ }
pyproject.toml ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [build-system]
2
+ requires = ["setuptools>=68", "wheel"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "qcal-copilot"
7
+ version = "0.1.0"
8
+ description = "AI-assisted quantum calibration: Ising VLM analysis, surface-code decoder, and CUDA-Q codegen for calibration engineers."
9
+ readme = "README.md"
10
+ requires-python = ">=3.10"
11
+ license = { text = "MIT" }
12
+ authors = [{ name = "QCal Copilot contributors" }]
13
+ keywords = ["quantum", "calibration", "cudaq", "ising", "nvidia", "surface-code"]
14
+ classifiers = [
15
+ "Development Status :: 3 - Alpha",
16
+ "Intended Audience :: Science/Research",
17
+ "License :: OSI Approved :: MIT License",
18
+ "Programming Language :: Python :: 3",
19
+ "Programming Language :: Python :: 3.10",
20
+ "Programming Language :: Python :: 3.11",
21
+ "Programming Language :: Python :: 3.12",
22
+ "Topic :: Scientific/Engineering :: Physics",
23
+ ]
24
+
25
+ dependencies = [
26
+ "numpy>=1.26",
27
+ "pandas>=2.2",
28
+ "matplotlib>=3.8",
29
+ "pillow>=10.0",
30
+ "requests>=2.32",
31
+ "scipy>=1.11", # curve_fit for rabi/ramsey/t1/t2 auto-fitting
32
+ "typer>=0.12", # CLI
33
+ "platformdirs>=4.0", # ~/.config/qcal lookup
34
+ "tomli>=2.0; python_version<'3.11'",
35
+ ]
36
+
37
+ [project.optional-dependencies]
38
+ # Vision-language model path — needed for local inference of the Ising VLM.
39
+ # Users who only want NIM can skip this and save ~2 GB of installs.
40
+ ml = [
41
+ "torch>=2.3",
42
+ "torchvision>=0.18",
43
+ "torchaudio>=2.3",
44
+ "transformers>=4.45",
45
+ "accelerate>=0.33",
46
+ ]
47
+
48
+ # Surface-code decoder MWPM stage.
49
+ decoder = [
50
+ "pymatching>=2.2",
51
+ ]
52
+
53
+ # Gradio web UI (app.py). `qcal serve` needs this.
54
+ gui = [
55
+ "gradio>=4.44",
56
+ ]
57
+
58
+ # Integrations with common quantum frameworks. Phase 2 territory, but we
59
+ # register the extra now so the import path is stable.
60
+ integrations = []
61
+
62
+ # Everything.
63
+ all = [
64
+ "qcal-copilot[ml,decoder,gui,integrations]",
65
+ ]
66
+
67
+ [project.scripts]
68
+ qcal = "qcal.cli:app"
69
+
70
+ [project.urls]
71
+ Homepage = "https://github.com/athurlow/qcal"
72
+ Issues = "https://github.com/athurlow/qcal/issues"
73
+
74
+ [tool.setuptools]
75
+ package-dir = { "" = "src" }
76
+
77
+ [tool.setuptools.packages.find]
78
+ where = ["src"]
qcal/data.py DELETED
@@ -1,102 +0,0 @@
1
- """Input preprocessing for QCal Copilot.
2
-
3
- Normalizes user uploads (image file or CSV) into a structured payload the
4
- analyzer module can send to the vision-language model.
5
- """
6
-
7
- from __future__ import annotations
8
-
9
- import io
10
- from dataclasses import dataclass
11
- from pathlib import Path
12
- from typing import Optional
13
-
14
- import pandas as pd
15
- from PIL import Image
16
-
17
-
18
- SUPPORTED_IMAGE_EXTS = {".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".webp"}
19
- SUPPORTED_TABLE_EXTS = {".csv", ".tsv"}
20
-
21
-
22
- @dataclass
23
- class CalibrationPayload:
24
- """Container holding normalized calibration data for downstream analysis."""
25
-
26
- image: Optional[Image.Image] = None
27
- table: Optional[pd.DataFrame] = None
28
- source_name: str = ""
29
- kind: str = "unknown" # "image" | "csv" | "unknown"
30
-
31
- def summary(self) -> str:
32
- if self.kind == "image" and self.image is not None:
33
- w, h = self.image.size
34
- return f"Image `{self.source_name}` ({w}x{h}, mode={self.image.mode})"
35
- if self.kind == "csv" and self.table is not None:
36
- rows, cols = self.table.shape
37
- col_list = ", ".join(map(str, self.table.columns[:8]))
38
- more = " …" if self.table.shape[1] > 8 else ""
39
- return (
40
- f"Table `{self.source_name}` ({rows} rows × {cols} cols). "
41
- f"Columns: {col_list}{more}"
42
- )
43
- return "No data provided."
44
-
45
- def table_preview_markdown(self, max_rows: int = 10) -> str:
46
- if self.table is None:
47
- return ""
48
- return self.table.head(max_rows).to_markdown(index=False)
49
-
50
-
51
- def _render_table_as_image(df: pd.DataFrame) -> Image.Image:
52
- """Render a small preview image of a table so the VLM can still see it.
53
-
54
- We only render a capped view — enough for the model to reason about shape
55
- and rough values without blowing up token budgets.
56
- """
57
- import matplotlib.pyplot as plt
58
-
59
- preview = df.head(25)
60
- fig, ax = plt.subplots(
61
- figsize=(min(2 + 1.1 * len(preview.columns), 14), min(1 + 0.3 * len(preview), 10))
62
- )
63
- ax.axis("off")
64
- tbl = ax.table(
65
- cellText=preview.round(4).astype(str).values,
66
- colLabels=[str(c) for c in preview.columns],
67
- loc="center",
68
- cellLoc="center",
69
- )
70
- tbl.auto_set_font_size(False)
71
- tbl.set_fontsize(8)
72
- tbl.scale(1, 1.2)
73
- buf = io.BytesIO()
74
- fig.savefig(buf, format="png", bbox_inches="tight", dpi=120)
75
- plt.close(fig)
76
- buf.seek(0)
77
- return Image.open(buf).convert("RGB")
78
-
79
-
80
- def load_payload(file_path: str | Path) -> CalibrationPayload:
81
- """Load an uploaded file into a CalibrationPayload."""
82
- if file_path is None:
83
- return CalibrationPayload()
84
-
85
- path = Path(file_path)
86
- ext = path.suffix.lower()
87
- name = path.name
88
-
89
- if ext in SUPPORTED_IMAGE_EXTS:
90
- img = Image.open(path).convert("RGB")
91
- return CalibrationPayload(image=img, source_name=name, kind="image")
92
-
93
- if ext in SUPPORTED_TABLE_EXTS:
94
- sep = "," if ext == ".csv" else "\t"
95
- df = pd.read_csv(path, sep=sep)
96
- img = _render_table_as_image(df)
97
- return CalibrationPayload(image=img, table=df, source_name=name, kind="csv")
98
-
99
- raise ValueError(
100
- f"Unsupported file type '{ext}'. Accepted: "
101
- f"{sorted(SUPPORTED_IMAGE_EXTS | SUPPORTED_TABLE_EXTS)}"
102
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,14 +1,49 @@
1
- gradio>=4.44.0
2
- transformers>=4.45.0
3
- torch>=2.3.0
4
- torchvision>=0.18.0
5
- torchaudio>=2.3.0
6
- accelerate>=0.33.0
7
- pillow>=10.0.0
8
- numpy>=1.26.0
9
- pandas>=2.2.0
10
- matplotlib>=3.8.0
11
- requests>=2.32.0
12
- python-dotenv>=1.0.1
13
- # CUDA-Q is installed separately (see README). Pin here if wheels are available:
14
- # cudaq>=0.8.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # HF Space / quick-start requirements.
2
+ #
3
+ # We DON'T use `-e .[...]` here because HF Spaces only mounts requirements.txt
4
+ # during the pip-install build step — pyproject.toml isn't available yet — and
5
+ # the editable install fails with "file:///app does not appear to be a Python
6
+ # project". Instead we list the deps directly; `app.py` puts `src/` on sys.path
7
+ # at import time so `from qcal import ...` still resolves.
8
+ #
9
+ # For a proper pip install (CLI + Python API), use:
10
+ # pip install qcal-copilot # core + NIM
11
+ # pip install "qcal-copilot[decoder]" # + PyMatching
12
+ # pip install "qcal-copilot[gui]" # + Gradio
13
+ # pip install "qcal-copilot[ml]" # + torch + transformers
14
+ # pip install "qcal-copilot[all]" # everything
15
+
16
+ # Core (kept in sync with pyproject.toml [dependencies])
17
+ numpy>=1.26
18
+ pandas>=2.2
19
+ matplotlib>=3.8
20
+ pillow>=10.0
21
+ requests>=2.32
22
+ scipy>=1.11
23
+ typer>=0.12
24
+ platformdirs>=4.0
25
+ tabulate>=0.9 # pandas.DataFrame.to_markdown
26
+
27
+ # [decoder]
28
+ pymatching>=2.2
29
+
30
+ # [gui] — HF Space's bootstrap pins gradio==4.44.0, so don't override it here.
31
+ # Python 3.13 removed the stdlib `audioop` module (PEP 594), which pydub
32
+ # (pulled in by gradio) still imports at load time. Install the backport
33
+ # on 3.13+ so the app starts. Harmless on 3.10–3.12.
34
+ audioop-lts>=0.2.1; python_version >= "3.13"
35
+
36
+ # Gradio 4.44's oauth module imports `HfFolder`, which was removed in
37
+ # huggingface-hub 0.30. Pin below that until we can upgrade Gradio.
38
+ huggingface-hub>=0.23,<0.30
39
+
40
+ # Starlette 0.40 made `request` the first positional arg of
41
+ # `TemplateResponse`, but gradio 4.44.0 still calls it as
42
+ # `TemplateResponse(name, context, ...)`. With a newer starlette the
43
+ # context dict lands in the `name` slot and Jinja2 crashes with
44
+ # "unhashable type: 'dict'" the moment the UI is requested. Pin to the
45
+ # pre-break line; fastapi capped to match.
46
+ starlette<0.40
47
+ fastapi<0.113
48
+
49
+ # CUDA-Q is installed separately on the Space (see README).
{qcal → src/qcal}/__init__.py RENAMED
File without changes
{qcal → src/qcal}/analyzer.py RENAMED
@@ -20,6 +20,9 @@ from typing import Any, Optional
20
 
21
  from PIL import Image
22
 
 
 
 
23
 
24
  DEFAULT_MODEL_ID = os.getenv("QCAL_MODEL_ID", "nvidia/Ising-Calibration-1-35B-A3B")
25
  NIM_ENDPOINT = os.getenv(
@@ -65,6 +68,9 @@ class AnalysisResult:
65
  parsed: dict = field(default_factory=dict)
66
  backend: str = "unknown"
67
  error: Optional[str] = None
 
 
 
68
 
69
  @property
70
  def ok(self) -> bool:
@@ -92,6 +98,12 @@ class AnalysisResult:
92
  for k, v in (p.get("recommended_parameters") or {}).items():
93
  lines.append(f"- `{k}` = {v}")
94
  lines.append("")
 
 
 
 
 
 
95
  lines.append(f"**Drift prediction:** {p.get('drift_prediction', 'n/a')}")
96
  lines.append("")
97
  lines.append(f"**Notes:** {p.get('notes', '')}")
@@ -99,6 +111,9 @@ class AnalysisResult:
99
  lines.append(f"_Backend: {self.backend}_")
100
  return "\n".join(lines)
101
 
 
 
 
102
 
103
  # ---------------------------------------------------------------------------
104
  # Backend: NVIDIA NIM (HTTP)
@@ -242,28 +257,101 @@ def _safe_json(text: str) -> dict[str, Any]:
242
  return {}
243
 
244
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
245
  def analyze(
246
  image: Image.Image,
247
  source: str = "uploaded file",
248
  table_preview: Optional[str] = None,
249
  backend: str = "auto",
 
 
250
  ) -> AnalysisResult:
251
  """Run the Ising Calibration VLM on a calibration image.
252
 
253
- backend:
254
- "auto" — NIM if NVIDIA_API_KEY is set, else local HF
255
- "nim" — force NIM
256
- "local" — force local HF
 
 
 
 
 
 
 
 
 
 
 
 
257
  """
258
  if image is None:
259
  return AnalysisResult(raw_text="", backend=backend, error="No image provided.")
260
 
261
- extra = f"\n\nAccompanying table preview (markdown):\n{table_preview}" if table_preview else ""
262
-
263
- choice = backend
264
- if choice == "auto":
265
- choice = "nim" if os.getenv("NVIDIA_API_KEY") or os.getenv("NIM_API_KEY") else "local"
 
 
 
266
 
 
267
  if choice == "nim":
268
- return _analyze_via_nim(image, extra, source)
269
- return _analyze_via_local(image, extra, source)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
  from PIL import Image
22
 
23
+ from .data import CalibrationPayload
24
+ from .fit import FitResult
25
+
26
 
27
  DEFAULT_MODEL_ID = os.getenv("QCAL_MODEL_ID", "nvidia/Ising-Calibration-1-35B-A3B")
28
  NIM_ENDPOINT = os.getenv(
 
68
  parsed: dict = field(default_factory=dict)
69
  backend: str = "unknown"
70
  error: Optional[str] = None
71
+ fit_params: dict = field(default_factory=dict) # auto-fit params, if any
72
+ fit: Optional[FitResult] = None # full FitResult for tooling
73
+ source: str = ""
74
 
75
  @property
76
  def ok(self) -> bool:
 
98
  for k, v in (p.get("recommended_parameters") or {}).items():
99
  lines.append(f"- `{k}` = {v}")
100
  lines.append("")
101
+ if self.fit is not None and self.fit.ok:
102
+ lines.append("**Auto-fit (numerical):**")
103
+ for k, v in self.fit.params.items():
104
+ lines.append(f"- `{k}` = {self.fit._fmt(v)}")
105
+ lines.append(f"- R² = {self.fit.fit_quality:.4f}")
106
+ lines.append("")
107
  lines.append(f"**Drift prediction:** {p.get('drift_prediction', 'n/a')}")
108
  lines.append("")
109
  lines.append(f"**Notes:** {p.get('notes', '')}")
 
111
  lines.append(f"_Backend: {self.backend}_")
112
  return "\n".join(lines)
113
 
114
+ def _repr_markdown_(self) -> str: # Jupyter renders this directly
115
+ return self.markdown()
116
+
117
 
118
  # ---------------------------------------------------------------------------
119
  # Backend: NVIDIA NIM (HTTP)
 
257
  return {}
258
 
259
 
260
+ def _resolve_backend(choice: str) -> str:
261
+ if choice == "auto":
262
+ return "nim" if _resolve_api_key() else "local"
263
+ return choice
264
+
265
+
266
+ def _resolve_api_key() -> Optional[str]:
267
+ """Look up the NIM API key. Env var wins; config file is a fallback.
268
+
269
+ Kept as a thin indirection so :mod:`qcal.config` can install itself later
270
+ without touching callers.
271
+ """
272
+ env = os.getenv("NVIDIA_API_KEY") or os.getenv("NIM_API_KEY")
273
+ if env:
274
+ return env
275
+ try:
276
+ from .config import get_api_key # local import to avoid cycle at import-time
277
+
278
+ return get_api_key()
279
+ except Exception: # noqa: BLE001 — config is optional, never block analysis on it
280
+ return None
281
+
282
+
283
  def analyze(
284
  image: Image.Image,
285
  source: str = "uploaded file",
286
  table_preview: Optional[str] = None,
287
  backend: str = "auto",
288
+ fit: Optional[FitResult] = None,
289
+ extra_context: Optional[str] = None,
290
  ) -> AnalysisResult:
291
  """Run the Ising Calibration VLM on a calibration image.
292
 
293
+ Parameters
294
+ ----------
295
+ image
296
+ PIL image of the calibration artifact.
297
+ source
298
+ Short label for the input — shown in logs and the VLM prompt.
299
+ table_preview
300
+ Optional markdown table to append to the prompt (used for CSV input).
301
+ backend
302
+ ``"auto"`` — NIM if an API key is available, else local HF.
303
+ ``"nim"`` — force NIM. ``"local"`` — force local HF weights.
304
+ fit
305
+ Optional :class:`~qcal.fit.FitResult`; its summary is appended to the
306
+ prompt and stored on the returned :class:`AnalysisResult`.
307
+ extra_context
308
+ Any additional text to weave into the prompt (stats, metadata, …).
309
  """
310
  if image is None:
311
  return AnalysisResult(raw_text="", backend=backend, error="No image provided.")
312
 
313
+ bits: list[str] = []
314
+ if table_preview:
315
+ bits.append(f"Accompanying table preview (markdown):\n{table_preview}")
316
+ if fit is not None and fit.ok:
317
+ bits.append(fit.summary_text())
318
+ if extra_context:
319
+ bits.append(extra_context)
320
+ extra = ("\n\n" + "\n\n".join(bits)) if bits else ""
321
 
322
+ choice = _resolve_backend(backend)
323
  if choice == "nim":
324
+ result = _analyze_via_nim(image, extra, source)
325
+ else:
326
+ result = _analyze_via_local(image, extra, source)
327
+
328
+ result.source = source
329
+ if fit is not None:
330
+ result.fit = fit
331
+ if fit.ok:
332
+ result.fit_params = dict(fit.params)
333
+ return result
334
+
335
+
336
+ def analyze_payload(
337
+ payload: CalibrationPayload,
338
+ backend: str = "auto",
339
+ ) -> AnalysisResult:
340
+ """Convenience wrapper: analyze a :class:`CalibrationPayload` directly.
341
+
342
+ Pulls the image, fit, and numeric/metadata context from the payload and
343
+ hands them to :func:`analyze`. This is the entrypoint the CLI and the
344
+ notebook examples use.
345
+ """
346
+ if payload is None or payload.image is None:
347
+ return AnalysisResult(
348
+ raw_text="", backend=backend, error="No image in payload."
349
+ )
350
+ return analyze(
351
+ image=payload.image,
352
+ source=payload.source_name or "uploaded file",
353
+ table_preview=payload.table_preview_markdown() if payload.table is not None else None,
354
+ backend=backend,
355
+ fit=payload.fit,
356
+ extra_context=payload.prompt_context(),
357
+ )
src/qcal/cli.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Command-line interface for QCal Copilot.
2
+
3
+ Installed as the ``qcal`` console script via :mod:`pyproject.toml`.
4
+
5
+ Commands
6
+ --------
7
+ * ``qcal analyze FILE`` — run the Ising VLM on a calibration artifact
8
+ * ``qcal decode`` — run the Ising 3D CNN decoder on synthetic syndromes
9
+ * ``qcal generate FILE`` — write a CUDA-Q script from a saved analysis JSON
10
+ * ``qcal serve`` — launch the Gradio UI (needs ``qcal[gui]``)
11
+ * ``qcal login`` — store an NVIDIA NIM API key in the user config
12
+ * ``qcal logout`` — clear the stored API key
13
+ * ``qcal config`` — print resolved config
14
+ * ``qcal version`` — print installed version
15
+
16
+ Design notes
17
+ ------------
18
+ All commands honor ``--json`` for machine-readable output so labs can wire
19
+ ``qcal`` into CI or drift-monitoring cron jobs. Exit codes: 0 on success,
20
+ 1 on analyzer/decoder error, 2 on user error, 3 on missing optional dep.
21
+ """
22
+
23
+ from __future__ import annotations
24
+
25
+ import json
26
+ import sys
27
+ from pathlib import Path
28
+ from typing import Optional
29
+
30
+ import typer
31
+
32
+ from . import __version__, config
33
+ from .data import load_payload
34
+
35
+
36
+ app = typer.Typer(
37
+ add_completion=False,
38
+ no_args_is_help=True,
39
+ help="AI-assisted quantum calibration (Ising VLM + 3D CNN decoder + CUDA-Q).",
40
+ context_settings={"help_option_names": ["-h", "--help"]},
41
+ )
42
+
43
+
44
+ # ---------------------------------------------------------------------------
45
+ # analyze
46
+ # ---------------------------------------------------------------------------
47
+
48
@app.command()
def analyze(
    file: Path = typer.Argument(..., exists=True, readable=True, help="Input file (.npy, .npz, .csv, .png, .jpg, …)."),
    experiment: str = typer.Option(
        "unknown",
        "--experiment", "-e",
        help="Experiment type for .npy inputs: rabi|ramsey|t1|t2_echo|readout_iq|rabi_chevron|iq_trace|resonator_spec|unknown.",
    ),
    backend: str = typer.Option("auto", "--backend", "-b", help="auto | nim | local"),
    no_fit: bool = typer.Option(False, "--no-fit", help="Skip scipy curve-fitting for .npy inputs."),
    out: Optional[Path] = typer.Option(None, "--out", "-o", help="Write the markdown report to this path."),
    json_out: Optional[Path] = typer.Option(None, "--json", help="Write the raw analyzer JSON to this path."),
    script_out: Optional[Path] = typer.Option(None, "--script", help="Write the generated CUDA-Q script to this path."),
    quiet: bool = typer.Option(False, "--quiet", "-q", help="Suppress stdout; only write files + exit code."),
) -> None:
    """Analyze a calibration artifact with the Ising VLM.

    Exit codes: 0 on success, 1 on analyzer error, 2 when the input file
    cannot be loaded.
    """
    from . import analyzer, codegen  # local imports so `qcal --help` stays fast

    try:
        payload = load_payload(file, experiment_type=experiment, fit=not no_fit)
    except Exception as exc:  # noqa: BLE001
        typer.secho(f"Failed to load {file}: {exc}", err=True, fg=typer.colors.RED)
        # Chain the cause so tracebacks preserve the original loader error.
        raise typer.Exit(code=2) from exc

    result = analyzer.analyze_payload(payload, backend=backend)

    if not result.ok:
        typer.secho(
            f"Analyzer error ({result.backend}): {result.error or 'empty response'}",
            err=True, fg=typer.colors.RED,
        )
        # Show whatever raw model text we got — often useful for debugging.
        if result.raw_text and not quiet:
            typer.echo(result.raw_text)
        raise typer.Exit(code=1)

    md = result.markdown()
    if out:
        out.write_text(md, encoding="utf-8")
    if json_out:
        # Machine-readable sidecar mirroring the markdown report.
        json_out.write_text(
            json.dumps(
                {
                    "analysis": result.parsed,
                    "fit": result.fit_params,
                    "backend": result.backend,
                    "source": result.source,
                    "qcal_version": __version__,
                },
                indent=2,
            ),
            encoding="utf-8",
        )
    if script_out:
        script_out.write_text(codegen.generate_script(result.parsed), encoding="utf-8")

    if not quiet:
        typer.echo(md)
106
+
107
+
108
+ # ---------------------------------------------------------------------------
109
+ # decode
110
+ # ---------------------------------------------------------------------------
111
+
112
@app.command()
def decode(
    variant: str = typer.Option("fast", "--variant", "-v", help="fast | accurate"),
    distance: int = typer.Option(5, "--distance", "-d", min=3, max=15),
    rounds: int = typer.Option(5, "--rounds", "-r", min=1, max=25),
    error_rate: float = typer.Option(0.005, "--p", help="Physical error rate."),
    shots: int = typer.Option(128, "--shots", "-n", min=1),
    seed: int = typer.Option(42, "--seed"),
    json_out: Optional[Path] = typer.Option(None, "--json"),
    quiet: bool = typer.Option(False, "--quiet", "-q"),
) -> None:
    """Run the Ising 3D CNN pre-decoder on a synthetic syndrome volume."""
    from . import decoder

    result = decoder.run_decoder(
        variant=variant,
        distance=distance,
        rounds=rounds,
        error_rate=error_rate,
        n_shots=shots,
        seed=seed,
    )
    if not result.ok:
        typer.secho(f"Decoder error: {result.error}", err=True, fg=typer.colors.RED)
        raise typer.Exit(code=1)

    if json_out:
        # Export the result's public metrics field-for-field, preserving order.
        exported_fields = (
            "variant", "model_id", "distance", "rounds", "error_rate",
            "n_shots", "density_before", "density_after", "density_reduction",
            "inference_ms", "mwpm_ms_before", "mwpm_ms_after",
            "ler_proxy_before", "ler_proxy_after", "ler_improvement",
            "backend_note",
        )
        payload = {name: getattr(result, name) for name in exported_fields}
        json_out.write_text(json.dumps(payload, indent=2), encoding="utf-8")
    if not quiet:
        typer.echo(result.markdown())
160
+
161
+
162
+ # ---------------------------------------------------------------------------
163
+ # generate
164
+ # ---------------------------------------------------------------------------
165
+
166
@app.command()
def generate(
    analysis_json: Path = typer.Argument(..., exists=True, readable=True, help="Analyzer JSON produced by `qcal analyze --json`."),
    out: Optional[Path] = typer.Option(None, "--out", "-o"),
) -> None:
    """Generate a CUDA-Q script from a saved analyzer JSON.

    Accepts either the wrapped ``{"analysis": {...}, ...}`` document written
    by ``qcal analyze --json`` or a bare analysis object. Exit code 2 on
    malformed JSON.
    """
    from . import codegen

    try:
        blob = json.loads(analysis_json.read_text(encoding="utf-8"))
    except json.JSONDecodeError as exc:
        typer.secho(f"Invalid JSON in {analysis_json}: {exc}", err=True, fg=typer.colors.RED)
        # Chain the cause so the parse position (line/col) isn't lost.
        raise typer.Exit(code=2) from exc

    # Unwrap the "analysis" envelope when present; otherwise treat the whole
    # document as the analysis object.
    analysis = blob.get("analysis") if isinstance(blob, dict) and "analysis" in blob else blob
    script = codegen.generate_script(analysis)
    if out:
        out.write_text(script, encoding="utf-8")
    else:
        typer.echo(script)
186
+
187
+
188
+ # ---------------------------------------------------------------------------
189
+ # serve
190
+ # ---------------------------------------------------------------------------
191
+
192
@app.command()
def serve(
    host: str = typer.Option("0.0.0.0", envvar="QCAL_HOST"),
    port: int = typer.Option(7860, envvar="QCAL_PORT"),
    share: bool = typer.Option(False, "--share", envvar="QCAL_SHARE"),
) -> None:
    """Launch the Gradio UI locally (requires ``qcal[gui]``).

    Exit code 3 when the ``gradio`` optional dependency is missing.
    Host/port/share can also be supplied via the ``QCAL_HOST`` /
    ``QCAL_PORT`` / ``QCAL_SHARE`` environment variables.
    """
    # Probe for the optional GUI dependency before doing anything else so the
    # user gets an actionable install hint rather than a traceback.
    try:
        import gradio  # noqa: F401
    except ImportError:
        typer.secho(
            "Gradio isn't installed. Run: pip install 'qcal-copilot[gui]'",
            err=True, fg=typer.colors.RED,
        )
        raise typer.Exit(code=3)

    # Import the app defined at repo root; when running from an installed
    # wheel we fall back to a minimal in-package launcher.
    # NOTE(review): the cwd stays on sys.path for the rest of the process,
    # and the broad except also hides real errors inside a repo-root
    # ``app.py`` (not just its absence) — confirm that's intended.
    try:
        sys.path.insert(0, str(Path.cwd()))
        import app as gradio_app  # type: ignore[import-not-found]

        demo = gradio_app.build_ui()
    except Exception:  # noqa: BLE001
        from . import app_inproc

        demo = app_inproc.build_ui()

    # Bounded queue keeps concurrent analysis requests from piling up.
    demo.queue(max_size=8).launch(server_name=host, server_port=port, share=share)
221
+
222
+
223
+ # ---------------------------------------------------------------------------
224
+ # login / logout / config
225
+ # ---------------------------------------------------------------------------
226
+
227
@app.command()
def login(
    api_key: Optional[str] = typer.Option(
        None, "--api-key",
        help="NIM API key. If omitted, you'll be prompted (hidden input).",
    ),
) -> None:
    """Store an NVIDIA NIM API key under ``~/.config/qcal/config.toml``."""
    entered = api_key if api_key else typer.prompt("NVIDIA NIM API key", hide_input=True)
    cleaned = entered.strip()
    if not cleaned:
        typer.secho("Empty key — nothing saved.", err=True, fg=typer.colors.RED)
        raise typer.Exit(code=2)
    saved_to = config.set_api_key(cleaned)
    typer.secho(f"Saved API key to {saved_to} (mode 0600).", fg=typer.colors.GREEN)
241
+
242
+
243
@app.command()
def logout() -> None:
    """Remove the stored NIM API key."""
    cleared_from = config.clear_api_key()
    if cleared_from is None:
        typer.echo("No stored API key to clear.")
    else:
        typer.echo(f"Cleared API key from {cleared_from}.")
251
+
252
+
253
@app.command(name="config")
def config_cmd(show_key: bool = typer.Option(False, "--show-key", help="Print the stored API key in full.")) -> None:
    """Print the resolved config (key masked unless ``--show-key``)."""
    data = config.load()
    key = config.get_api_key()
    if key:
        # Mask the middle of long keys; short keys are fully redacted.
        if show_key:
            shown = key
        elif len(key) > 12:
            shown = key[:6] + "…" + key[-4:]
        else:
            shown = "***"
        data.setdefault("nvidia", {})["api_key"] = shown
    typer.echo(json.dumps(data, indent=2))
    typer.echo(f"\nconfig path: {config.config_path()}")
265
+
266
+
267
+ # ---------------------------------------------------------------------------
268
+ # version
269
+ # ---------------------------------------------------------------------------
270
+
271
@app.command()
def version() -> None:
    """Print the installed qcal-copilot version string."""
    typer.echo(f"{__version__}")
275
+
276
+
277
def main() -> None:  # entry point target for scripts
    """Console-script entry point: dispatch to the Typer application."""
    app()
279
+
280
+
281
+ if __name__ == "__main__":
282
+ main()
{qcal → src/qcal}/codegen.py RENAMED
@@ -17,6 +17,7 @@ Auto-generated CUDA-Q calibration script from QCal Copilot.
17
  Experiment: {experiment}
18
  Qubit: {qubit_id}
19
  Notes: {notes}
 
20
  """
21
 
22
  import cudaq
@@ -89,8 +90,31 @@ def _params_repr(params: dict[str, Any]) -> str:
89
  return dumped
90
 
91
 
92
- def generate_script(analysis: dict[str, Any]) -> str:
93
- """Build the CUDA-Q script text from analyzer output."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  experiment = analysis.get("experiment") or "unspecified"
95
  qubit_id = analysis.get("qubit_id") or "q0"
96
  notes = (analysis.get("notes") or "").replace('"""', "'''")
@@ -101,5 +125,6 @@ def generate_script(analysis: dict[str, Any]) -> str:
101
  qubit_id=qubit_id,
102
  notes=textwrap.shorten(notes, width=240, placeholder="…"),
103
  params_repr=_params_repr(params),
 
104
  )
105
  return script
 
17
  Experiment: {experiment}
18
  Qubit: {qubit_id}
19
  Notes: {notes}
20
+ {decoder_header}
21
  """
22
 
23
  import cudaq
 
90
  return dumped
91
 
92
 
93
+ def _decoder_header(decoder_info: dict[str, Any] | None) -> str:
94
+ """Format a header block summarizing the decoder stage, if present."""
95
+ if not decoder_info:
96
+ return ""
97
+ variant = decoder_info.get("variant", "n/a")
98
+ model_id = decoder_info.get("model_id", "n/a")
99
+ distance = decoder_info.get("distance", "n/a")
100
+ rounds = decoder_info.get("rounds", "n/a")
101
+ density_reduction = decoder_info.get("density_reduction", 0.0)
102
+ ler_improvement = decoder_info.get("ler_improvement", 1.0)
103
+ return (
104
+ "\nError-correction decoder (applied upstream):\n"
105
+ f" variant: {variant}\n"
106
+ f" model_id: {model_id}\n"
107
+ f" surface code: d={distance}, rounds={rounds}\n"
108
+ f" syndrome density: {density_reduction*100:.1f}% reduction\n"
109
+ f" LER proxy: {ler_improvement:.2f}x improvement"
110
+ )
111
+
112
+
113
+ def generate_script(
114
+ analysis: dict[str, Any],
115
+ decoder_info: dict[str, Any] | None = None,
116
+ ) -> str:
117
+ """Build the CUDA-Q script text from analyzer (+ optional decoder) output."""
118
  experiment = analysis.get("experiment") or "unspecified"
119
  qubit_id = analysis.get("qubit_id") or "q0"
120
  notes = (analysis.get("notes") or "").replace('"""', "'''")
 
125
  qubit_id=qubit_id,
126
  notes=textwrap.shorten(notes, width=240, placeholder="…"),
127
  params_repr=_params_repr(params),
128
+ decoder_header=_decoder_header(decoder_info),
129
  )
130
  return script
src/qcal/config.py ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Persistent config for QCal Copilot.
2
+
3
+ Stores user-level settings (API keys, endpoint overrides) in a TOML file
4
+ under ``~/.config/qcal/config.toml`` (XDG-compliant via ``platformdirs``).
5
+ Callers should always prefer environment variables when present; the config
6
+ file is a convenience for long-lived interactive use, never a security
7
+ boundary.
8
+
9
+ Schema (TOML)::
10
+
11
+ [nvidia]
12
+ api_key = "nvapi-..."
13
+ nim_endpoint = "https://integrate.api.nvidia.com/v1/chat/completions"
14
+ nim_model = "nvidia/ising-calibration-1-35b-a3b"
15
+ vlm_model_id = "nvidia/Ising-Calibration-1-35B-A3B"
16
+ decoder_fast = "nvidia/Ising-Decoder-SurfaceCode-1-Fast"
17
+ decoder_accurate = "nvidia/Ising-Decoder-SurfaceCode-1-Accurate"
18
+ """
19
+
20
+ from __future__ import annotations
21
+
22
+ import os
23
+ import sys
24
+ from pathlib import Path
25
+ from typing import Any, Optional
26
+
27
+ from platformdirs import user_config_dir
28
+
29
+ if sys.version_info >= (3, 11):
30
+ import tomllib # type: ignore[attr-defined]
31
+ else: # pragma: no cover — 3.10 fallback declared in pyproject
32
+ import tomli as tomllib # type: ignore[no-redef]
33
+
34
+
35
+ APP_NAME = "qcal"
36
+ CONFIG_FILENAME = "config.toml"
37
+
38
+
39
def config_path() -> Path:
    """Absolute path to the config file. Directory is created on first write.

    ``QCAL_CONFIG_PATH`` overrides the XDG default when set and non-empty.
    """
    env_override = os.getenv("QCAL_CONFIG_PATH")
    if env_override:
        return Path(env_override)
    return Path(user_config_dir(APP_NAME)) / CONFIG_FILENAME
45
+
46
+
47
def load() -> dict[str, Any]:
    """Return the parsed config dict; empty when absent or unparseable."""
    cfg_file = config_path()
    if not cfg_file.exists():
        return {}
    try:
        with cfg_file.open("rb") as fh:
            return tomllib.load(fh)
    except Exception:  # noqa: BLE001 — bad config should never crash analysis
        return {}
57
+
58
+
59
def save(data: dict[str, Any]) -> Path:
    """Write ``data`` to the config file, creating parent dirs if needed.

    Uses a hand-rolled TOML writer to avoid pulling in another dep. Only
    supports the flat [section]-of-scalars shape this app needs.

    The file is created with mode 0600 *before* any bytes are written, so a
    stored API key is never world-readable, even transiently.
    """
    path = config_path()
    path.parent.mkdir(parents=True, exist_ok=True)

    lines: list[str] = []
    for section, values in sorted(data.items()):
        # Only [section] tables of scalars are representable; skip anything else.
        if not isinstance(values, dict):
            continue
        lines.append(f"[{section}]")
        for key, value in sorted(values.items()):
            lines.append(f"{key} = {_toml_value(value)}")
        lines.append("")

    text = "\n".join(lines).rstrip() + "\n"
    # Create/truncate with restrictive perms up front: the previous
    # write-then-chmod order left a window where the secret was readable
    # under the default umask.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w", encoding="utf-8") as fh:
        fh.write(text)
    # O_CREAT's mode only applies on creation — tighten pre-existing files too.
    try:
        path.chmod(0o600)
    except OSError:
        pass
    return path
84
+
85
+
86
+ def _toml_value(v: Any) -> str:
87
+ if isinstance(v, bool):
88
+ return "true" if v else "false"
89
+ if isinstance(v, (int, float)):
90
+ return repr(v)
91
+ # default: quoted string, TOML basic-string escaping
92
+ s = str(v).replace("\\", "\\\\").replace('"', '\\"').replace("\n", "\\n")
93
+ return f'"{s}"'
94
+
95
+
96
+ # ---------------------------------------------------------------------------
97
+ # Accessors
98
+ # ---------------------------------------------------------------------------
99
+
100
def get(section: str, key: str, default: Optional[Any] = None) -> Any:
    """Look up ``[section] key`` from the config file, else *default*."""
    table = load().get(section) or {}
    return table.get(key, default)
102
+
103
+
104
def set_value(section: str, key: str, value: Any) -> Path:
    """Persist a single ``[section] key = value`` entry; return the file path."""
    cfg = load()
    table = cfg.setdefault(section, {})
    table[key] = value
    return save(cfg)
108
+
109
+
110
def get_api_key() -> Optional[str]:
    """NIM API key: env wins, then config file."""
    for var in ("NVIDIA_API_KEY", "NIM_API_KEY"):
        value = os.getenv(var)
        if value:
            return value
    return get("nvidia", "api_key")
116
+
117
+
118
def set_api_key(value: str) -> Path:
    """Store the NIM API key under ``[nvidia] api_key``; return the file path."""
    return set_value(section="nvidia", key="api_key", value=value)
120
+
121
+
122
def clear_api_key() -> Optional[Path]:
    """Delete the stored key; return the config path, or None if none stored."""
    data = load()
    nvidia = data.get("nvidia") or {}
    if "api_key" not in nvidia:
        return None
    del nvidia["api_key"]
    data["nvidia"] = nvidia
    return save(data)
src/qcal/data.py ADDED
@@ -0,0 +1,463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Input preprocessing for QCal Copilot.
2
+
3
+ Normalizes user inputs — image, CSV/TSV, or raw numpy arrays from control
4
+ hardware — into a :class:`CalibrationPayload` the analyzer can send to the
5
+ vision-language model. For numpy input we also auto-fit standard calibration
6
+ models (Rabi, Ramsey, T1, T2-echo) and attach the fit parameters as text
7
+ context; Ising cross-references those numbers against the rendered plot,
8
+ which is the single biggest quality lever in the pipeline.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import io
14
+ from dataclasses import dataclass, field
15
+ from pathlib import Path
16
+ from typing import Optional, Union
17
+
18
+ import numpy as np
19
+ import pandas as pd
20
+ from PIL import Image
21
+
22
+ from .fit import FitResult, autofit
23
+
24
+
25
+ SUPPORTED_IMAGE_EXTS = {".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".webp"}
26
+ SUPPORTED_TABLE_EXTS = {".csv", ".tsv"}
27
+ SUPPORTED_ARRAY_EXTS = {".npy", ".npz"}
28
+
29
+ # Experiment types this module knows how to render.
30
+ EXPERIMENT_RENDERERS: dict[str, str] = {
31
+ "rabi": "line",
32
+ "ramsey": "line",
33
+ "t1": "line",
34
+ "t2": "line",
35
+ "t2_echo": "line",
36
+ "resonator_spec": "line",
37
+ "iq_trace": "line",
38
+ "rabi_chevron": "heatmap",
39
+ "readout_iq": "scatter",
40
+ "unknown": "line",
41
+ }
42
+
43
+
44
+ ArrayLike = Union[np.ndarray, "list[float]", "tuple[float, ...]"]
45
+
46
+
47
@dataclass
class CalibrationPayload:
    """Container holding normalized calibration data for downstream analysis."""

    image: Optional[Image.Image] = None
    table: Optional[pd.DataFrame] = None
    source_name: str = ""
    kind: str = "unknown"  # "image" | "csv" | "array" | "unknown"
    experiment_type: Optional[str] = None
    fit: Optional[FitResult] = None
    metadata: dict = field(default_factory=dict)
    numeric_summary: str = ""

    def summary(self) -> str:
        """One-line human-readable description of the payload contents."""
        if self.kind == "image" and self.image is not None:
            w, h = self.image.size
            return f"Image `{self.source_name}` ({w}x{h}, mode={self.image.mode})"
        if self.kind == "csv" and self.table is not None:
            rows, cols = self.table.shape
            shown = [str(c) for c in self.table.columns[:8]]
            suffix = " …" if cols > 8 else ""
            return (
                f"Table `{self.source_name}` ({rows} rows × {cols} cols). "
                f"Columns: {', '.join(shown)}{suffix}"
            )
        if self.kind == "array":
            return (
                f"Array `{self.source_name}` "
                f"(experiment={self.experiment_type or 'unknown'})"
            )
        return "No data provided."

    def table_preview_markdown(self, max_rows: int = 10) -> str:
        """First *max_rows* table rows as markdown; empty string if no table."""
        if self.table is None:
            return ""
        return self.table.head(max_rows).to_markdown(index=False)

    def prompt_context(self) -> Optional[str]:
        """Text appended to the VLM user prompt, if any."""
        pieces: list[str] = []
        if self.numeric_summary:
            pieces.append(self.numeric_summary)
        if self.fit is not None and self.fit.ok:
            pieces.append(self.fit.summary_text())
        if self.metadata:
            rendered = ", ".join(f"{k}={v}" for k, v in self.metadata.items())
            pieces.append("Metadata: " + rendered)
        if self.table is not None:
            pieces.append(
                "Table preview (markdown):\n" + self.table_preview_markdown()
            )
        return "\n".join(pieces) if pieces else None
99
+
100
+
101
+ # ---------------------------------------------------------------------------
102
+ # Plot rendering helpers
103
+ # ---------------------------------------------------------------------------
104
+
105
def _render_table_as_image(df: pd.DataFrame) -> Image.Image:
    """Render a small preview image of a table so the VLM can still see it.

    Only the first 25 rows are drawn; values are rounded to 4 decimals and
    stringified. Returns an RGB PIL image.
    """
    # Local import keeps matplotlib off the critical import path.
    import matplotlib.pyplot as plt

    preview = df.head(25)
    # Figure size grows with the table but is capped at 14x10 inches so a
    # wide/long CSV can't produce an enormous image.
    fig, ax = plt.subplots(
        figsize=(min(2 + 1.1 * len(preview.columns), 14), min(1 + 0.3 * len(preview), 10))
    )
    ax.axis("off")
    tbl = ax.table(
        cellText=preview.round(4).astype(str).values,
        colLabels=[str(c) for c in preview.columns],
        loc="center",
        cellLoc="center",
    )
    tbl.auto_set_font_size(False)
    tbl.set_fontsize(8)
    tbl.scale(1, 1.2)
    # NOTE(review): saves at dpi=120 rather than reusing _fig_to_pil (dpi=140)
    # — presumably intentional for table previews; confirm.
    buf = io.BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight", dpi=120)
    plt.close(fig)
    buf.seek(0)
    return Image.open(buf).convert("RGB")
128
+
129
+
130
def _fig_to_pil(fig) -> Image.Image:
    """Rasterize a matplotlib figure to an RGB PIL image and close it.

    The figure is always closed after saving so repeated calls don't leak
    matplotlib figure state.
    """
    import matplotlib.pyplot as plt

    buf = io.BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight", dpi=140)
    plt.close(fig)
    buf.seek(0)
    return Image.open(buf).convert("RGB")
138
+
139
+
140
def _render_line(
    y: np.ndarray,
    x: Optional[np.ndarray],
    *,
    experiment: str,
    x_label: str,
    y_label: str,
    title: Optional[str],
    fit: Optional[FitResult],
) -> Image.Image:
    """Plot a 1-D sweep (data points + optional fit curve) as a PIL image.

    When ``x`` is None the sample index is used as the x-axis. A successful
    fit is re-evaluated on a dense grid and overlaid in red.
    """
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(6.5, 4.0))
    xs = x if x is not None else np.arange(y.size)
    ax.plot(xs, y, marker="o", markersize=3, linewidth=1.0, color="#1f77b4", label="data")

    # Overlay the fit curve when available.
    if fit is not None and fit.ok:
        try:
            # 400 points gives a smooth curve regardless of the data density.
            x_dense = np.linspace(float(xs.min()), float(xs.max()), 400)
            y_fit = _evaluate_fit(fit, x_dense)
            if y_fit is not None:
                ax.plot(x_dense, y_fit, color="#d62728", linewidth=1.5, label="fit")
        except Exception:  # noqa: BLE001 — fit overlay is best-effort
            pass

    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    ax.set_title(title or f"{experiment} sweep")
    ax.grid(True, alpha=0.3)
    ax.legend(loc="best", frameon=False)
    return _fig_to_pil(fig)
172
+
173
+
174
def _render_heatmap(
    z: np.ndarray,
    *,
    experiment: str,
    x_label: str,
    y_label: str,
    title: Optional[str],
    extent: Optional[tuple[float, float, float, float]] = None,
) -> Image.Image:
    """Render a 2-D sweep (e.g. a Rabi chevron) as a heatmap PIL image.

    ``extent`` maps the array axes onto physical units (left, right, bottom,
    top); when None, pixel indices are used.
    """
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(6.5, 4.2))
    im = ax.imshow(
        z,
        aspect="auto",
        origin="lower",  # row 0 at the bottom, matching sweep conventions
        cmap="viridis",
        extent=extent,
        interpolation="nearest",  # no smoothing — show the raw measurement grid
    )
    fig.colorbar(im, ax=ax, label="signal")
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    ax.set_title(title or f"{experiment}")
    return _fig_to_pil(fig)
199
+
200
+
201
def _render_scatter(
    iq: np.ndarray,
    *,
    title: Optional[str],
) -> Image.Image:
    """Scatter plot of readout IQ shots (shape (N, 2)) as a PIL image.

    Equal axis aspect keeps the I/Q blobs circular rather than stretched.
    """
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots(figsize=(5.5, 5.2))
    # Small, translucent markers so overlapping shot clouds stay readable.
    ax.scatter(iq[:, 0], iq[:, 1], s=4, alpha=0.5, color="#1f77b4")
    ax.set_xlabel("I (a.u.)")
    ax.set_ylabel("Q (a.u.)")
    ax.set_title(title or "Readout IQ histogram")
    ax.grid(True, alpha=0.3)
    ax.set_aspect("equal", adjustable="datalim")
    return _fig_to_pil(fig)
217
+
218
+
219
+ def _evaluate_fit(fit: FitResult, x: np.ndarray) -> Optional[np.ndarray]:
220
+ """Recreate the fitted curve from stored parameters for overlay plotting."""
221
+ p = fit.params
222
+ if fit.model == "damped_sine":
223
+ A = p["amplitude"]
224
+ freq = next((v for k, v in p.items() if k.startswith("freq_per_")), 0.0)
225
+ tau = next((v for k, v in p.items() if k.startswith("tau_")), 1.0)
226
+ return (
227
+ A * np.exp(-x / tau) * np.sin(2 * np.pi * freq * x + p["phase_rad"])
228
+ + p["offset"]
229
+ )
230
+ if fit.model == "damped_cosine":
231
+ A = p["amplitude"]
232
+ freq = next((v for k, v in p.items() if k.startswith("detuning_per_")), 0.0)
233
+ tau = next((v for k, v in p.items() if k.startswith("t2star_")), 1.0)
234
+ return (
235
+ A * np.exp(-x / tau) * np.cos(2 * np.pi * freq * x + p["phase_rad"])
236
+ + p["offset"]
237
+ )
238
+ if fit.model == "exp_decay":
239
+ A = p["amplitude"]
240
+ tau = next((v for k, v in p.items() if k.startswith("tau_")), 1.0)
241
+ return A * np.exp(-x / tau) + p["offset"]
242
+ return None
243
+
244
+
245
+ # ---------------------------------------------------------------------------
246
+ # Numeric summary
247
+ # ---------------------------------------------------------------------------
248
+
249
+ def _numeric_summary(arr: np.ndarray, experiment: str) -> str:
250
+ arr = np.asarray(arr)
251
+ shape = arr.shape
252
+ finite = arr[np.isfinite(arr)] if arr.dtype.kind in "fc" else arr.ravel()
253
+ if finite.size == 0:
254
+ return f"Numeric summary: shape={shape}, no finite values"
255
+ parts = [
256
+ f"Numeric summary for experiment `{experiment}`:",
257
+ f"- shape: {shape}, dtype: {arr.dtype}",
258
+ f"- range: [{float(finite.min()):.4g}, {float(finite.max()):.4g}]",
259
+ f"- mean: {float(finite.mean()):.4g}, std: {float(finite.std()):.4g}",
260
+ ]
261
+ if arr.ndim == 1:
262
+ parts.append(
263
+ f"- argmax index: {int(np.nanargmax(arr))}, argmin index: {int(np.nanargmin(arr))}"
264
+ )
265
+ return "\n".join(parts)
266
+
267
+
268
+ # ---------------------------------------------------------------------------
269
+ # Public: from_array
270
+ # ---------------------------------------------------------------------------
271
+
272
def from_array(
    array: ArrayLike,
    experiment_type: str = "unknown",
    *,
    x: Optional[ArrayLike] = None,
    x_unit: str = "us",
    x_label: Optional[str] = None,
    y_label: Optional[str] = None,
    title: Optional[str] = None,
    metadata: Optional[dict] = None,
    fit: bool = True,
    source_name: str = "array",
) -> CalibrationPayload:
    """Build a :class:`CalibrationPayload` from raw numpy data.

    Parameters
    ----------
    array
        The measurement. Shape depends on ``experiment_type``:
        * 1-D sweep (``rabi``, ``ramsey``, ``t1``, ``t2``, ``t2_echo``,
          ``resonator_spec``, ``iq_trace``)
        * 2-D heatmap (``rabi_chevron``) — shape ``(len(y_axis), len(x_axis))``
        * 2-D scatter (``readout_iq``) — shape ``(N, 2)`` for I/Q shots
    experiment_type
        One of :data:`EXPERIMENT_RENDERERS`; drives plot shape and fit model.
    x
        Independent variable for 1-D sweeps (times, amplitudes, frequencies).
    x_unit
        Unit label used in fit parameter keys and axis labels. Ignored for 2-D.
    x_label, y_label, title
        Optional axis overrides; sensible defaults per ``experiment_type``.
    metadata
        Free-form key/value pairs (qubit id, temperature, run id…) — appended
        verbatim to the VLM prompt.
    fit
        If ``True``, auto-fit the curve for supported 1-D experiments and
        attach :class:`~qcal.fit.FitResult` to the payload. Set ``False`` for
        air-gapped installs without ``scipy`` or when the data isn't fittable.
    source_name
        Display name threaded into the summary and the VLM prompt.

    Returns
    -------
    CalibrationPayload
        Ready to pass to :func:`qcal.analyzer.analyze`.

    Raises
    ------
    ValueError
        When ``array``'s shape does not match what ``experiment_type``
        expects, or ``experiment_type`` maps to an unknown renderer.
    """
    arr = np.asarray(array)
    # Copy metadata so the caller's dict is never mutated downstream.
    md = dict(metadata or {})
    # Unknown experiment types fall back to the 1-D line renderer.
    kind_hint = EXPERIMENT_RENDERERS.get(experiment_type, "line")

    # ------ render + optional fit ------
    fit_result: Optional[FitResult] = None
    if kind_hint == "line":
        if arr.ndim != 1:
            raise ValueError(
                f"experiment_type '{experiment_type}' expects a 1-D array, "
                f"got shape {arr.shape}"
            )
        x_arr = np.asarray(x, dtype=float) if x is not None else None
        # Fit first so the rendered plot can overlay the fitted curve.
        if fit:
            fit_result = autofit(experiment_type, y=arr, x=x_arr, x_unit=x_unit)
        img = _render_line(
            arr,
            x_arr,
            experiment=experiment_type,
            x_label=x_label or f"sweep ({x_unit})",
            y_label=y_label or "signal (a.u.)",
            title=title,
            fit=fit_result,
        )
    elif kind_hint == "heatmap":
        if arr.ndim != 2:
            raise ValueError(
                f"experiment_type '{experiment_type}' expects a 2-D array, "
                f"got shape {arr.shape}"
            )
        img = _render_heatmap(
            arr,
            experiment=experiment_type,
            x_label=x_label or "drive amp (a.u.)",
            y_label=y_label or "drive freq (MHz)",
            title=title,
        )
    elif kind_hint == "scatter":
        if arr.ndim != 2 or arr.shape[1] != 2:
            raise ValueError(
                f"experiment_type '{experiment_type}' expects shape (N, 2), "
                f"got {arr.shape}"
            )
        img = _render_scatter(arr, title=title)
    else:
        # Unreachable for the current EXPERIMENT_RENDERERS values; guards
        # against a new renderer kind being added without a branch here.
        raise ValueError(f"Unknown experiment_type '{experiment_type}'")

    return CalibrationPayload(
        image=img,
        source_name=source_name,
        kind="array",
        experiment_type=experiment_type,
        fit=fit_result,
        metadata=md,
        numeric_summary=_numeric_summary(arr, experiment_type),
    )
374
+
375
+
376
def from_npy(
    path: str | Path,
    experiment_type: str = "unknown",
    *,
    x_path: Optional[str | Path] = None,
    **kwargs,
) -> CalibrationPayload:
    """Load a ``.npy`` file and wrap it via :func:`from_array`.

    For ``.npz``, use :func:`from_npz` — it understands multi-array archives.
    """
    src = Path(path)
    y_data = np.load(src, allow_pickle=False)
    x_data = np.load(x_path, allow_pickle=False) if x_path else None
    return from_array(
        y_data,
        experiment_type=experiment_type,
        x=x_data,
        source_name=src.name,
        **kwargs,
    )
397
+
398
+
399
def from_npz(
    path: str | Path,
    experiment_type: str = "unknown",
    *,
    y_key: str = "y",
    x_key: Optional[str] = "x",
    **kwargs,
) -> CalibrationPayload:
    """Load a ``.npz`` archive. Expects ``y`` (required) and ``x`` (optional)."""
    src = Path(path)
    with np.load(src, allow_pickle=False) as archive:
        if y_key not in archive:
            raise KeyError(
                f"{src.name}: missing required key '{y_key}'. Keys present: {list(archive.keys())}"
            )
        y = np.asarray(archive[y_key])
        x = np.asarray(archive[x_key]) if x_key and x_key in archive else None
    return from_array(
        y,
        experiment_type=experiment_type,
        x=x,
        source_name=src.name,
        **kwargs,
    )
423
+
424
+
425
+ # ---------------------------------------------------------------------------
426
+ # File loader (existing callers)
427
+ # ---------------------------------------------------------------------------
428
+
429
def load_payload(
    file_path: str | Path,
    experiment_type: str = "unknown",
    fit: bool = True,
) -> CalibrationPayload:
    """Load an uploaded file into a :class:`CalibrationPayload`.

    Auto-dispatches by extension across image, CSV/TSV, and ``.npy``/``.npz``.
    """
    if file_path is None:
        return CalibrationPayload()

    path = Path(file_path)
    ext = path.suffix.lower()

    if ext in SUPPORTED_IMAGE_EXTS:
        picture = Image.open(path).convert("RGB")
        return CalibrationPayload(image=picture, source_name=path.name, kind="image")

    if ext in SUPPORTED_TABLE_EXTS:
        frame = pd.read_csv(path, sep="," if ext == ".csv" else "\t")
        return CalibrationPayload(
            image=_render_table_as_image(frame),
            table=frame,
            source_name=path.name,
            kind="csv",
        )

    if ext == ".npy":
        return from_npy(path, experiment_type=experiment_type, fit=fit)
    if ext == ".npz":
        return from_npz(path, experiment_type=experiment_type, fit=fit)

    raise ValueError(
        f"Unsupported file type '{ext}'. Accepted: "
        f"{sorted(SUPPORTED_IMAGE_EXTS | SUPPORTED_TABLE_EXTS | SUPPORTED_ARRAY_EXTS)}"
    )
src/qcal/decoder.py ADDED
@@ -0,0 +1,499 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Ising 3D CNN surface-code pre-decoder stage.
2
+
3
+ Runs one of the NVIDIA Ising Decoder 3D CNN pre-decoders on a noisy syndrome
4
+ volume to *sparsify* it, then hands the cleaned volume to PyMatching for final
5
+ MWPM correction. This is a drop-in stage that sits after the calibration
6
+ analyzer — it never mutates calibration state.
7
+
8
+ Two model variants are supported:
9
+
10
+ * "fast" — nvidia/Ising-Decoder-SurfaceCode-1-Fast (~912k params)
11
+ * "accurate" — nvidia/Ising-Decoder-SurfaceCode-1-Accurate (~1.79M params)
12
+
13
+ When the Hugging Face weights can't be loaded (no access, no internet, no
14
+ GPU), we fall back to a deterministic "neighbor-support" denoiser so the demo
15
+ still runs and the architecture is exercised end-to-end.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import os
21
+ import time
22
+ from dataclasses import dataclass, field
23
+ from typing import Any, Optional
24
+
25
+ import numpy as np
26
+
27
+
28
+ # ---------------------------------------------------------------------------
29
+ # Configuration
30
+ # ---------------------------------------------------------------------------
31
+
32
# Model variant keys used throughout this module.
VARIANT_FAST = "fast"
VARIANT_ACCURATE = "accurate"
VARIANTS = (VARIANT_FAST, VARIANT_ACCURATE)

# Hugging Face model ids per variant. Overridable via environment variables
# (QCAL_DECODER_FAST_ID / QCAL_DECODER_ACCURATE_ID), e.g. to point at a
# mirror or a private fork.
MODEL_IDS: dict[str, str] = {
    VARIANT_FAST: os.getenv(
        "QCAL_DECODER_FAST_ID", "nvidia/Ising-Decoder-SurfaceCode-1-Fast"
    ),
    VARIANT_ACCURATE: os.getenv(
        "QCAL_DECODER_ACCURATE_ID", "nvidia/Ising-Decoder-SurfaceCode-1-Accurate"
    ),
}
# Approximate parameter counts, display-only (used by DecoderResult.markdown).
APPROX_PARAMS: dict[str, int] = {
    VARIANT_FAST: 912_000,
    VARIANT_ACCURATE: 1_790_000,
}
48
+
49
+
50
+ # ---------------------------------------------------------------------------
51
+ # Result container
52
+ # ---------------------------------------------------------------------------
53
+
54
@dataclass
class DecoderResult:
    """Structured output of a full decoder run (CNN + optional MWPM)."""

    # Run configuration, echoed back for display.
    variant: str
    distance: int
    rounds: int
    n_shots: int
    error_rate: float

    # Mean fraction of "on" syndrome cells before/after the CNN stage.
    density_before: float = 0.0
    density_after: float = 0.0

    # Wall-clock timings in milliseconds. The MWPM pair stays None when
    # pymatching is unavailable or the MWPM stage failed.
    inference_ms: float = 0.0
    mwpm_ms_before: Optional[float] = None
    mwpm_ms_after: Optional[float] = None

    # Syndrome-weight-threshold proxy (see _ler_proxy) — not a real LER.
    ler_proxy_before: float = 0.0
    ler_proxy_after: float = 0.0

    # One 2-D slice each (shot 0, middle round) kept for plotting.
    raw_example: np.ndarray = field(default_factory=lambda: np.zeros((0, 0)))
    denoised_example: np.ndarray = field(default_factory=lambda: np.zeros((0, 0)))

    # Human-readable backend status / model provenance / fatal error.
    backend_note: str = ""
    model_id: str = ""
    error: Optional[str] = None

    @property
    def ok(self) -> bool:
        """True when the run completed without a fatal pipeline error."""
        return self.error is None

    @property
    def density_reduction(self) -> float:
        """Fractional drop in syndrome density; 0.0 when input had no events."""
        if self.density_before <= 0:
            return 0.0
        return 1.0 - (self.density_after / self.density_before)

    @property
    def ler_improvement(self) -> float:
        """Before/after LER-proxy ratio; inf when denoising reached zero."""
        if self.ler_proxy_after <= 0:
            return float("inf") if self.ler_proxy_before > 0 else 1.0
        return self.ler_proxy_before / self.ler_proxy_after

    @property
    def mwpm_speedup(self) -> Optional[float]:
        """MWPM wall-time ratio, or None when either timing is missing/zero."""
        if not self.mwpm_ms_before or not self.mwpm_ms_after:
            return None
        # Clamp the denominator so a ~0 ms timing can't divide by zero.
        return self.mwpm_ms_before / max(self.mwpm_ms_after, 1e-6)

    def markdown(self) -> str:
        """Render the run report as Markdown for the UI."""
        if self.error:
            return f"**Decoder error:** {self.error}"
        approx = APPROX_PARAMS.get(self.variant, 0)
        lines = [
            f"**Variant:** `{self.variant}` (~{approx/1e6:.2f}M params)",
            f"**Model id:** `{self.model_id}`",
            f"**Surface code:** distance = {self.distance}, rounds = {self.rounds}",
            f"**Shots:** {self.n_shots}",
            f"**Physical error rate (p):** {self.error_rate:.4f}",
            "",
            "**Syndrome density**",
            f"- before CNN: {self.density_before:.4f}",
            f"- after CNN: {self.density_after:.4f}",
            f"- reduction: **{self.density_reduction*100:.1f}%**",
            "",
            "**Timing**",
            f"- CNN inference: {self.inference_ms:.2f} ms total"
            f" ({self.inference_ms/max(self.n_shots,1):.3f} ms/shot)",
        ]
        if self.mwpm_ms_before is not None:
            lines += [
                f"- MWPM on raw syndromes: {self.mwpm_ms_before:.2f} ms",
                f"- MWPM on denoised syndromes: {self.mwpm_ms_after:.2f} ms",
            ]
            speedup = self.mwpm_speedup
            if speedup:
                lines.append(f"- MWPM speedup: **{speedup:.2f}×**")
        else:
            lines.append("- MWPM stage: _skipped (pymatching not installed)_")
        lines += [
            "",
            "**Logical-error-rate proxy** _(syndrome-weight threshold — demo only)_",
            f"- before CNN: {self.ler_proxy_before:.4f}",
            f"- after CNN: {self.ler_proxy_after:.4f}",
            f"- improvement: **{self.ler_improvement:.2f}×**",
            "",
            f"_{self.backend_note}_",
        ]
        return "\n".join(lines)

    def _repr_markdown_(self) -> str:  # Jupyter renders this directly
        return self.markdown()
146
+
147
+
148
+ # ---------------------------------------------------------------------------
149
+ # Synthetic syndrome generation
150
+ # ---------------------------------------------------------------------------
151
+
152
def generate_syndromes(
    distance: int,
    rounds: int,
    error_rate: float,
    n_shots: int,
    seed: Optional[int] = 42,
) -> np.ndarray:
    """Generate synthetic syndrome volumes.

    Shape: ``(n_shots, distance, distance, rounds)`` of uint8.

    Each space-time cell is sampled independently from Bernoulli(p); a small
    number of correlated 2-cell "chains" is then injected per batch so the
    denoiser has structure to exploit. This mimics what a detector error
    model produces for a depolarizing noise channel at low p.

    Raises
    ------
    ValueError
        If any argument is outside its valid range.
    """
    if distance < 3:
        raise ValueError("distance must be >= 3 for a meaningful demo")
    if rounds < 1:
        raise ValueError("rounds must be >= 1")
    if n_shots < 1:
        # Previously n_shots == 0 crashed inside rng.integers(0, 0) with an
        # opaque "low >= high" error during chain injection.
        raise ValueError("n_shots must be >= 1")
    if not 0.0 <= error_rate <= 0.5:
        raise ValueError("error_rate must be in [0, 0.5]")

    rng = np.random.default_rng(seed)
    shape = (n_shots, distance, distance, rounds)
    volumes = (rng.random(shape) < error_rate).astype(np.uint8)

    # Inject correlated 2-cell chains — realistic time-like errors.
    n_chains = max(1, int(n_shots * rounds * error_rate * 0.5))
    for _ in range(n_chains):
        s = rng.integers(0, n_shots)
        x = rng.integers(0, distance)
        y = rng.integers(0, distance)
        t = rng.integers(0, max(rounds - 1, 1))
        volumes[s, x, y, t] = 1
        if rounds > 1:
            volumes[s, x, y, t + 1] = 1
    return volumes
190
+
191
+
192
+ # ---------------------------------------------------------------------------
193
+ # Model loading (HF) and fallback
194
+ # ---------------------------------------------------------------------------
195
+
196
+ _MODEL_CACHE: dict[str, Any] = {}
197
+
198
+
199
def _try_load_hf_decoder(variant: str) -> tuple[Any, str]:
    """Attempt to load an Ising decoder via Hugging Face.

    Returns (callable_or_None, backend_note).
    """
    model_id = MODEL_IDS[variant]
    cached = _MODEL_CACHE.get(model_id)
    if cached is not None:
        return cached, f"Loaded `{model_id}` from cache."

    # Both imports are optional heavy dependencies; any failure means we
    # signal the caller to use the built-in fallback denoiser.
    try:
        import torch
        from transformers import AutoModel
    except Exception as exc:  # noqa: BLE001
        return None, f"PyTorch/transformers unavailable ({exc}); using fallback denoiser."

    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModel.from_pretrained(
            model_id, trust_remote_code=True, torch_dtype=torch.float32
        ).to(device)
        model.eval()
        loaded = (model, device)
        _MODEL_CACHE[model_id] = loaded
        return loaded, f"Loaded `{model_id}` on {device}."
    except Exception as exc:  # noqa: BLE001
        note = (
            f"Could not load `{model_id}` ({type(exc).__name__}: {exc}). "
            "Falling back to built-in neighbor-support denoiser."
        )
        return None, note
229
+
230
+
231
+ def _fallback_denoise(volumes: np.ndarray) -> np.ndarray:
232
+ """Drop any detection event with no 3D neighbor — a cheap sparsifier.
233
+
234
+ Isolated single-cell detections are almost always measurement noise at the
235
+ p-regimes the Ising CNNs target; neighborhood support indicates a real
236
+ error chain. This is a useful first-order stand-in when the HF weights
237
+ aren't available.
238
+ """
239
+ v = volumes.astype(np.int16)
240
+ neigh = np.zeros_like(v)
241
+ for dx in (-1, 0, 1):
242
+ for dy in (-1, 0, 1):
243
+ for dt in (-1, 0, 1):
244
+ if dx == dy == dt == 0:
245
+ continue
246
+ shifted = np.roll(v, shift=(dx, dy, dt), axis=(1, 2, 3))
247
+ # zero out the wrap-around planes so boundaries don't leak
248
+ if dx == 1:
249
+ shifted[:, 0, :, :] = 0
250
+ elif dx == -1:
251
+ shifted[:, -1, :, :] = 0
252
+ if dy == 1:
253
+ shifted[:, :, 0, :] = 0
254
+ elif dy == -1:
255
+ shifted[:, :, -1, :] = 0
256
+ if dt == 1:
257
+ shifted[:, :, :, 0] = 0
258
+ elif dt == -1:
259
+ shifted[:, :, :, -1] = 0
260
+ neigh += shifted
261
+ return (v * (neigh >= 1)).astype(np.uint8)
262
+
263
+
264
+ def _run_hf_inference(loaded: tuple[Any, str], volumes: np.ndarray) -> np.ndarray:
265
+ """Run the Ising decoder CNN on a stack of syndrome volumes.
266
+
267
+ We try the common Vision-style entrypoints; model authors expose different
268
+ call conventions, so we guard each attempt. On failure, raise so the
269
+ caller can fall back.
270
+ """
271
+ import torch
272
+
273
+ model, device = loaded
274
+ x = torch.from_numpy(volumes).float().unsqueeze(1).to(device) # (N, 1, D, D, T)
275
+ with torch.no_grad():
276
+ try:
277
+ out = model(x)
278
+ except TypeError:
279
+ out = model(pixel_values=x)
280
+
281
+ if hasattr(out, "logits"):
282
+ out = out.logits
283
+ if isinstance(out, (tuple, list)):
284
+ out = out[0]
285
+
286
+ # The pre-decoder produces a per-cell "keep" probability; threshold it.
287
+ out = torch.sigmoid(out) if out.dtype.is_floating_point else out.float()
288
+ out = out.squeeze(1) # (N, D, D, T)
289
+ mask = (out >= 0.5).to(torch.uint8)
290
+ # The CNN is a sparsifier, not a generator — never turn on new bits.
291
+ mask = mask * torch.from_numpy(volumes).to(mask.device)
292
+ return mask.cpu().numpy().astype(np.uint8)
293
+
294
+
295
+ # ---------------------------------------------------------------------------
296
+ # Optional MWPM stage
297
+ # ---------------------------------------------------------------------------
298
+
299
def _build_demo_matching(distance: int, rounds: int):
    """Build a small PyMatching graph over the space-time volume.

    NOTE: this is **not** a real surface-code detector error model. We connect
    each cell to its 6-neighborhood so PyMatching has something MWPM-able;
    real deployments should feed a `stim`-generated DEM.

    Node ids follow the C-order flattening of a ``(distance, distance,
    rounds)`` volume — ``(x * distance + y) * rounds + t`` — so they line up
    with the rows that :func:`_time_mwpm` produces via ``reshape(N, -1)``.
    (The previous ``t*d*d + y*d + x`` numbering scrambled syndrome bits
    across detector nodes.)
    """
    import pymatching  # local import so the module works without pymatching

    m = pymatching.Matching()

    def node(x: int, y: int, t: int) -> int:
        # Must match np.reshape's C-order flattening of (x, y, t) axes.
        return (x * distance + y) * rounds + t

    # Each edge gets a unique fault id so corrections stay distinguishable.
    bit_id = 0
    for t in range(rounds):
        for y in range(distance):
            for x in range(distance):
                if x + 1 < distance:
                    m.add_edge(node(x, y, t), node(x + 1, y, t), fault_ids={bit_id}, weight=1.0)
                    bit_id += 1
                if y + 1 < distance:
                    m.add_edge(node(x, y, t), node(x, y + 1, t), fault_ids={bit_id}, weight=1.0)
                    bit_id += 1
                if t + 1 < rounds:
                    # Time-like links are costlier than space-like ones.
                    m.add_edge(node(x, y, t), node(x, y, t + 1), fault_ids={bit_id}, weight=1.5)
                    bit_id += 1
        # Boundary edges along the x=0 / x=d-1 columns let isolated detectors terminate.
        for y in range(distance):
            m.add_boundary_edge(node(0, y, t), fault_ids=set(), weight=2.0)
            m.add_boundary_edge(node(distance - 1, y, t), fault_ids=set(), weight=2.0)
    return m
331
+
332
+
333
+ def _time_mwpm(volumes: np.ndarray, matching) -> float:
334
+ """Decode every shot and return total wall time in ms."""
335
+ flat = volumes.reshape(volumes.shape[0], -1)
336
+ t0 = time.perf_counter()
337
+ for row in flat:
338
+ matching.decode(row)
339
+ return (time.perf_counter() - t0) * 1000.0
340
+
341
+
342
+ # ---------------------------------------------------------------------------
343
+ # LER proxy
344
+ # ---------------------------------------------------------------------------
345
+
346
+ def _ler_proxy(volumes: np.ndarray, distance: int) -> float:
347
+ """Fraction of shots with syndrome weight above a distance-scaled threshold.
348
+
349
+ This isn't a real logical error rate — it's a first-order proxy used to
350
+ show the relative improvement from denoising in the UI.
351
+ """
352
+ if volumes.size == 0:
353
+ return 0.0
354
+ per_shot = volumes.reshape(volumes.shape[0], -1).sum(axis=1)
355
+ threshold = max(1, distance)
356
+ return float((per_shot > threshold).mean())
357
+
358
+
359
+ # ---------------------------------------------------------------------------
360
+ # Public entrypoint
361
+ # ---------------------------------------------------------------------------
362
+
363
def run_decoder(
    variant: str = VARIANT_FAST,
    distance: int = 5,
    rounds: int = 5,
    error_rate: float = 0.005,
    n_shots: int = 128,
    seed: Optional[int] = 42,
) -> DecoderResult:
    """Full pipeline: synthesize syndromes → CNN sparsify → (MWPM) → metrics.

    Never raises: fatal failures are folded into ``DecoderResult.error``,
    degraded modes (no HF weights, no pymatching) into ``backend_note``,
    so the UI always has something to render.
    """
    if variant not in VARIANTS:
        return DecoderResult(
            variant=variant,
            distance=distance,
            rounds=rounds,
            n_shots=n_shots,
            error_rate=error_rate,
            error=f"Unknown variant '{variant}'. Choose one of {VARIANTS}.",
        )

    try:
        raw = generate_syndromes(distance, rounds, error_rate, n_shots, seed=seed)
    except Exception as exc:  # noqa: BLE001
        return DecoderResult(
            variant=variant, distance=distance, rounds=rounds,
            n_shots=n_shots, error_rate=error_rate,
            error=f"Syndrome generation failed: {exc}",
        )

    # `loaded` is None when HF weights can't be used; the note explains why.
    loaded, backend_note = _try_load_hf_decoder(variant)

    t0 = time.perf_counter()
    if loaded is not None:
        try:
            denoised = _run_hf_inference(loaded, raw)
        except Exception as exc:  # noqa: BLE001
            # Load succeeded but the call convention didn't match — degrade
            # gracefully to the deterministic fallback.
            backend_note = (
                f"HF model load succeeded but inference failed ({exc}); "
                "falling back to neighbor-support denoiser."
            )
            denoised = _fallback_denoise(raw)
    else:
        denoised = _fallback_denoise(raw)
    inference_ms = (time.perf_counter() - t0) * 1000.0

    density_before = float(raw.mean())
    density_after = float(denoised.mean())

    # Optional MWPM stage — both timings stay None unless pymatching works.
    mwpm_before = mwpm_after = None
    try:
        import pymatching  # noqa: F401

        matching = _build_demo_matching(distance, rounds)
        mwpm_before = _time_mwpm(raw, matching)
        mwpm_after = _time_mwpm(denoised, matching)
    except ImportError:
        backend_note += " (pymatching not installed — MWPM skipped)"
    except Exception as exc:  # noqa: BLE001
        backend_note += f" (MWPM stage failed: {exc})"

    # Keep one middle-round slice of shot 0 for the comparison plot.
    mid_t = rounds // 2
    result = DecoderResult(
        variant=variant,
        distance=distance,
        rounds=rounds,
        n_shots=n_shots,
        error_rate=error_rate,
        density_before=density_before,
        density_after=density_after,
        inference_ms=inference_ms,
        mwpm_ms_before=mwpm_before,
        mwpm_ms_after=mwpm_after,
        ler_proxy_before=_ler_proxy(raw, distance),
        ler_proxy_after=_ler_proxy(denoised, distance),
        raw_example=raw[0, :, :, mid_t].copy(),
        denoised_example=denoised[0, :, :, mid_t].copy(),
        backend_note=backend_note,
        model_id=MODEL_IDS[variant],
    )
    return result
443
+
444
+
445
+ # ---------------------------------------------------------------------------
446
+ # Visualization helper
447
+ # ---------------------------------------------------------------------------
448
+
449
def plot_comparison(result: DecoderResult):
    """Return a matplotlib Figure comparing raw vs denoised syndrome slices.

    Left/middle panels show the stored 2-D slice (shot 0, middle round)
    before and after the CNN stage; the right panel bar-charts density and
    the LER proxy before vs after.
    """
    import matplotlib.pyplot as plt  # local import keeps matplotlib optional

    fig, axes = plt.subplots(1, 3, figsize=(11, 3.6))
    axes[0].imshow(result.raw_example, cmap="Reds", vmin=0, vmax=1, interpolation="nearest")
    axes[0].set_title(f"Raw syndrome\n(shot 0, t={result.rounds // 2})")
    axes[0].set_xticks([]); axes[0].set_yticks([])

    axes[1].imshow(
        result.denoised_example, cmap="Greens", vmin=0, vmax=1, interpolation="nearest"
    )
    axes[1].set_title(f"After {result.variant} CNN\n(sparsified)")
    axes[1].set_xticks([]); axes[1].set_yticks([])

    # Grouped bars: raw (red) vs denoised (green) for each metric.
    categories = ["density", "LER proxy"]
    before = [result.density_before, result.ler_proxy_before]
    after = [result.density_after, result.ler_proxy_after]
    x = np.arange(len(categories))
    width = 0.38
    axes[2].bar(x - width / 2, before, width, label="raw", color="#d62728")
    axes[2].bar(x + width / 2, after, width, label="denoised", color="#2ca02c")
    axes[2].set_xticks(x)
    axes[2].set_xticklabels(categories)
    axes[2].set_title("Before vs After")
    axes[2].legend()
    axes[2].grid(True, axis="y", alpha=0.3)

    fig.tight_layout()
    return fig
479
+
480
+
481
+ # ---------------------------------------------------------------------------
482
+ # Helper: derive a sensible error rate from calibration analysis
483
+ # ---------------------------------------------------------------------------
484
+
485
def suggest_error_rate(analysis: Optional[dict]) -> float:
    """Map a calibration analysis dict to a plausible physical error rate.

    Heuristic: start near 1e-3 (a good transmon) and inflate with the
    deviation of the recommended drive amplitude from a nominal 1.0,
    capped at 3e-2. Missing or unparsable analysis falls back to defaults.
    """
    if not analysis:
        return 0.005
    recommended = analysis.get("recommended_parameters") or {}
    try:
        amplitude = float(recommended.get("drive_amplitude", 1.0))
    except (TypeError, ValueError):
        amplitude = 1.0
    rate = 0.001 + 0.01 * abs(1.0 - amplitude)
    return float(min(0.03, rate))
src/qcal/fit.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Auto-fitting for common calibration experiments.
2
+
3
+ Each fit returns a :class:`FitResult` with human-readable, unit-bearing
4
+ parameters plus a normalized quality metric in ``[0, 1]``. These get woven
5
+ into the VLM prompt so Ising can cross-check its vision analysis against
6
+ hard numerical fits — the single biggest quality lever for the whole
7
+ pipeline.
8
+
9
+ Every fit is defensive: bad data returns ``FitResult.failed(reason=...)``
10
+ instead of raising. Callers should always check ``result.ok``.
11
+
12
+ Public entrypoints
13
+ ------------------
14
+ * :func:`fit_rabi` — damped sine (amplitude/duration sweep)
15
+ * :func:`fit_ramsey` — damped cosine with phase
16
+ * :func:`fit_t1` — exponential decay
17
+ * :func:`fit_t2_echo` — exponential decay (alias for T1 shape)
18
+ * :func:`autofit` — dispatch by experiment type
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ import math
24
+ from dataclasses import dataclass, field
25
+ from typing import Any, Callable, Optional
26
+
27
+ import numpy as np
28
+
29
+
30
@dataclass
class FitResult:
    """Structured fit output.

    ``params`` uses named, unit-bearing keys (e.g. ``"tau_us"``,
    ``"freq_mhz"``) so downstream code and the VLM prompt never have to
    guess what each number means.
    """

    experiment: str
    model: str
    params: dict[str, float] = field(default_factory=dict)
    fit_quality: float = 0.0  # R^2 in [0, 1]; 1 is perfect
    residual_rms: float = 0.0
    n_points: int = 0
    ok: bool = True
    reason: Optional[str] = None

    @classmethod
    def failed(cls, experiment: str, model: str, reason: str) -> "FitResult":
        """Alternate constructor for an unsuccessful fit."""
        return cls(experiment=experiment, model=model, ok=False, reason=reason)

    def summary_text(self) -> str:
        """Compact one-line summary for injecting into the VLM prompt."""
        if not self.ok:
            return f"Fit ({self.experiment}, {self.model}) failed: {self.reason}"
        bits = ", ".join(
            f"{name}={self._fmt(value)}" for name, value in self.params.items()
        )
        return (
            f"Fit ({self.experiment} → {self.model}): {bits} "
            f"| R²={self.fit_quality:.3f}, n={self.n_points}"
        )

    def markdown(self) -> str:
        """Multi-line Markdown report with a parameter table."""
        if not self.ok:
            return f"**Fit failed** ({self.experiment}): {self.reason}"
        header = [
            f"**Fit:** `{self.experiment}` → `{self.model}`",
            f"- R² = {self.fit_quality:.4f} (RMS residual {self.residual_rms:.4g})",
            f"- n = {self.n_points} points",
            "",
            "| param | value |",
            "|---|---|",
        ]
        rows = [
            f"| `{name}` | {self._fmt(value)} |" for name, value in self.params.items()
        ]
        return "\n".join(header + rows)

    @staticmethod
    def _fmt(v: float) -> str:
        """Render a float compactly; scientific notation for extreme magnitudes."""
        if not np.isfinite(v):
            return "n/a"
        magnitude = abs(v)
        use_sci = magnitude >= 1e4 or (0 < magnitude < 1e-3)
        return f"{v:.3e}" if use_sci else f"{v:.4g}"
87
+
88
+
89
+ # ---------------------------------------------------------------------------
90
+ # Helpers
91
+ # ---------------------------------------------------------------------------
92
+
93
+ def _curve_fit(
94
+ model: Callable[..., np.ndarray],
95
+ x: np.ndarray,
96
+ y: np.ndarray,
97
+ p0: list[float],
98
+ bounds: Optional[tuple[list[float], list[float]]] = None,
99
+ maxfev: int = 5000,
100
+ ):
101
+ """Thin wrapper around ``scipy.optimize.curve_fit`` with kwargs filled."""
102
+ from scipy.optimize import curve_fit # local import — scipy is optional-heavy
103
+
104
+ kwargs: dict[str, Any] = {"p0": p0, "maxfev": maxfev}
105
+ if bounds is not None:
106
+ kwargs["bounds"] = bounds
107
+ return curve_fit(model, x, y, **kwargs)
108
+
109
+
110
+ def _prep(x: Optional[np.ndarray], y: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
111
+ """Validate and coerce inputs to 1-D float arrays."""
112
+ y = np.asarray(y, dtype=float).ravel()
113
+ if y.size < 4:
114
+ raise ValueError("need at least 4 points to fit")
115
+ if x is None:
116
+ x = np.arange(y.size, dtype=float)
117
+ else:
118
+ x = np.asarray(x, dtype=float).ravel()
119
+ if x.shape != y.shape:
120
+ raise ValueError(f"x and y shape mismatch: {x.shape} vs {y.shape}")
121
+ mask = np.isfinite(x) & np.isfinite(y)
122
+ return x[mask], y[mask]
123
+
124
+
125
+ def _r_squared(y: np.ndarray, y_hat: np.ndarray) -> float:
126
+ ss_res = float(np.sum((y - y_hat) ** 2))
127
+ ss_tot = float(np.sum((y - y.mean()) ** 2))
128
+ if ss_tot <= 0:
129
+ return 0.0
130
+ return max(0.0, 1.0 - ss_res / ss_tot)
131
+
132
+
133
+ def _dominant_frequency(x: np.ndarray, y: np.ndarray) -> float:
134
+ """FFT-based seed for sinusoidal fits. Returns cycles per x-unit."""
135
+ if x.size < 4:
136
+ return 1.0
137
+ dx = float(np.median(np.diff(x)))
138
+ if dx <= 0:
139
+ return 1.0
140
+ y_centered = y - y.mean()
141
+ fft = np.fft.rfft(y_centered)
142
+ freqs = np.fft.rfftfreq(y.size, d=dx)
143
+ if freqs.size <= 1:
144
+ return 1.0
145
+ idx = int(np.argmax(np.abs(fft[1:])) + 1) # skip DC
146
+ return float(freqs[idx])
147
+
148
+
149
+ # ---------------------------------------------------------------------------
150
+ # Rabi — damped sine
151
+ # ---------------------------------------------------------------------------
152
+
153
+ def _rabi_model(t, A, freq, tau, offset, phase):
154
+ return A * np.exp(-t / tau) * np.sin(2 * np.pi * freq * t + phase) + offset
155
+
156
+
157
def fit_rabi(
    y: np.ndarray,
    x: Optional[np.ndarray] = None,
    x_unit: str = "a.u.",
) -> FitResult:
    """Fit a Rabi oscillation: A·exp(-t/τ)·sin(2π·f·t + φ) + c.

    Returned params keyed for readability:
    ``amplitude``, ``freq`` (cycles per ``x_unit``), ``tau``
    (in ``x_unit``), ``offset``, ``phase_rad``.
    """
    try:
        xs, ys = _prep(x, y)
    except ValueError as exc:
        return FitResult.failed("rabi", "damped_sine", str(exc))

    try:
        # Seed from the data envelope and an FFT frequency estimate.
        half_range = 0.5 * (ys.max() - ys.min())
        midline = 0.5 * (ys.max() + ys.min())
        freq_seed = _dominant_frequency(xs, ys)
        tau_seed = max(float(xs.max() - xs.min()), 1e-9)
        seeds = [half_range, freq_seed, tau_seed, midline, 0.0]
        limits = (
            [0.0, 0.0, 1e-12, -np.inf, -2 * np.pi],
            [np.inf, np.inf, np.inf, np.inf, 2 * np.pi],
        )
        popt, _ = _curve_fit(_rabi_model, xs, ys, p0=seeds, bounds=limits)
        predicted = _rabi_model(xs, *popt)
        amplitude, freq, tau, offset, phase = (float(p) for p in popt)
        return FitResult(
            experiment="rabi",
            model="damped_sine",
            params={
                "amplitude": amplitude,
                f"freq_per_{x_unit}": freq,
                f"tau_{x_unit}": tau,
                "offset": offset,
                "phase_rad": phase,
            },
            fit_quality=_r_squared(ys, predicted),
            residual_rms=float(np.sqrt(np.mean((ys - predicted) ** 2))),
            n_points=int(ys.size),
        )
    except Exception as exc:  # noqa: BLE001
        return FitResult.failed("rabi", "damped_sine", f"{type(exc).__name__}: {exc}")
201
+
202
+
203
+ # ---------------------------------------------------------------------------
204
+ # Ramsey — damped cosine with phase
205
+ # ---------------------------------------------------------------------------
206
+
207
+ def _ramsey_model(t, A, freq, tau, offset, phase):
208
+ return A * np.exp(-t / tau) * np.cos(2 * np.pi * freq * t + phase) + offset
209
+
210
+
211
def fit_ramsey(
    y: np.ndarray,
    x: Optional[np.ndarray] = None,
    x_unit: str = "us",
) -> FitResult:
    """Fit a Ramsey fringe: A·exp(-t/τ)·cos(2π·δf·t + φ) + c.

    ``freq`` here is the detuning between drive and qubit frequency in
    cycles per ``x_unit``.
    """
    try:
        xs, ys = _prep(x, y)
    except ValueError as exc:
        return FitResult.failed("ramsey", "damped_cosine", str(exc))

    try:
        half_range = 0.5 * (ys.max() - ys.min())
        midline = 0.5 * (ys.max() + ys.min())
        # Keep the detuning seed strictly positive for the solver.
        detuning_seed = max(_dominant_frequency(xs, ys), 1e-6)
        tau_seed = max(float(xs.max() - xs.min()), 1e-9)
        seeds = [half_range, detuning_seed, tau_seed, midline, 0.0]
        limits = (
            [0.0, 0.0, 1e-12, -np.inf, -2 * np.pi],
            [np.inf, np.inf, np.inf, np.inf, 2 * np.pi],
        )
        popt, _ = _curve_fit(_ramsey_model, xs, ys, p0=seeds, bounds=limits)
        predicted = _ramsey_model(xs, *popt)
        amplitude, detuning, t2star, offset, phase = (float(p) for p in popt)
        return FitResult(
            experiment="ramsey",
            model="damped_cosine",
            params={
                "amplitude": amplitude,
                f"detuning_per_{x_unit}": detuning,
                f"t2star_{x_unit}": t2star,
                "offset": offset,
                "phase_rad": phase,
            },
            fit_quality=_r_squared(ys, predicted),
            residual_rms=float(np.sqrt(np.mean((ys - predicted) ** 2))),
            n_points=int(ys.size),
        )
    except Exception as exc:  # noqa: BLE001
        return FitResult.failed("ramsey", "damped_cosine", f"{type(exc).__name__}: {exc}")
254
+
255
+
256
+ # ---------------------------------------------------------------------------
257
+ # T1 / T2 — exponential decay
258
+ # ---------------------------------------------------------------------------
259
+
260
+ def _exp_decay(t, A, tau, offset):
261
+ return A * np.exp(-t / tau) + offset
262
+
263
+
264
def fit_t1(
    y: np.ndarray,
    x: Optional[np.ndarray] = None,
    x_unit: str = "us",
) -> FitResult:
    """Fit T1 relaxation: A·exp(-t/τ) + c."""
    try:
        xs, ys = _prep(x, y)
    except ValueError as exc:
        return FitResult.failed("t1", "exp_decay", str(exc))
    return _fit_exp_decay(xs, ys, experiment="t1", x_unit=x_unit)
275
+
276
+
277
def fit_t2_echo(
    y: np.ndarray,
    x: Optional[np.ndarray] = None,
    x_unit: str = "us",
) -> FitResult:
    """Fit T2 (echo / Hahn) decay: A·exp(-t/τ) + c."""
    try:
        xs, ys = _prep(x, y)
    except ValueError as exc:
        return FitResult.failed("t2_echo", "exp_decay", str(exc))
    return _fit_exp_decay(xs, ys, experiment="t2_echo", x_unit=x_unit)
288
+
289
+
290
def _fit_exp_decay(
    x: np.ndarray, y: np.ndarray, experiment: str, x_unit: str
) -> FitResult:
    """Shared exponential-decay fitter behind :func:`fit_t1` / :func:`fit_t2_echo`.

    Seeds the solver from the data end-points, then delegates to
    ``scipy.optimize.curve_fit`` via :func:`_curve_fit`. Never raises —
    any failure is returned as ``FitResult.failed``.
    """
    try:
        # Seed the amplitude with the *signed* end-point difference: rising
        # data (y[-1] > y[0]) needs a negative A in A·exp(-t/τ) + c. The old
        # `sign * (y[0] - y[-1])` form forced the seed non-negative either
        # way, which could steer curve_fit into a poor local minimum for
        # inverted (rising) traces.
        amp_seed = float(y[0] - y[-1])
        off_seed = float(y[-1])
        tau_seed = max(float(x.max() - x.min()) / 3.0, 1e-9)
        p0 = [amp_seed, tau_seed, off_seed]
        # tau must be positive; offset unconstrained; amplitude signed
        bounds = (
            [-np.inf, 1e-12, -np.inf],
            [np.inf, np.inf, np.inf],
        )
        popt, _ = _curve_fit(_exp_decay, x, y, p0=p0, bounds=bounds)
        y_hat = _exp_decay(x, *popt)
        return FitResult(
            experiment=experiment,
            model="exp_decay",
            params={
                "amplitude": float(popt[0]),
                f"tau_{x_unit}": float(popt[1]),
                "offset": float(popt[2]),
            },
            fit_quality=_r_squared(y, y_hat),
            residual_rms=float(np.sqrt(np.mean((y - y_hat) ** 2))),
            n_points=int(y.size),
        )
    except Exception as exc:  # noqa: BLE001
        return FitResult.failed(experiment, "exp_decay", f"{type(exc).__name__}: {exc}")
320
+
321
+
322
+ # ---------------------------------------------------------------------------
323
+ # Dispatch
324
+ # ---------------------------------------------------------------------------
325
+
326
# Maps a lowercase experiment name to its fitting function; consulted by
# ``autofit``. "t2" and "t2_echo" deliberately share one fitter.
_FIT_DISPATCH: dict[str, Callable[..., FitResult]] = {
    "rabi": fit_rabi,
    "ramsey": fit_ramsey,
    "t1": fit_t1,
    "t2": fit_t2_echo,
    "t2_echo": fit_t2_echo,
}
333
+
334
+
335
def autofit(
    experiment_type: str,
    y: np.ndarray,
    x: Optional[np.ndarray] = None,
    **kwargs: Any,
) -> Optional[FitResult]:
    """Run the fit matching ``experiment_type``.

    Returns ``None`` when no fitter is registered for that experiment
    (e.g. 2D chevron data).
    """
    fitter = _FIT_DISPATCH.get(experiment_type.lower())
    return None if fitter is None else fitter(y=y, x=x, **kwargs)
347
+
348
+
349
def supported_experiments() -> list[str]:
    """List the experiment types ``autofit`` can handle, alphabetically."""
    return sorted(_FIT_DISPATCH)
{qcal → src/qcal}/simulator.py RENAMED
File without changes