gregjanik commited on
Commit
55cd3fe
·
verified ·
1 Parent(s): 20145da

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. server/app.py +1574 -0
server/app.py ADDED
@@ -0,0 +1,1574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import json
3
+ import os
4
+ import re
5
+ import shutil
6
+ import signal
7
+ import subprocess
8
+ import threading
9
+ import time
10
+ import urllib.request
11
+ from datetime import datetime, timezone
12
+ from pathlib import Path
13
+ from typing import Any
14
+
15
+ from fastapi import Depends, FastAPI, File, Header, HTTPException, UploadFile
16
+ from pydantic import BaseModel, Field, model_validator
17
+
18
# FastAPI application object exposing the fine-tuning control API.
app = FastAPI(title="Qwen 3.5 SFT Fine-Tuning API", version="2.0.0")

# Shared bearer-token secret; empty string means auth is unconfigured
# (verify_secret then rejects every request with a 500).
API_SECRET = os.environ.get("API_SECRET", "")
WORKSPACE = Path("/workspace")
DATA_DIR = WORKSPACE / "data"  # uploaded training data
OUTPUT_DIR = WORKSPACE / "output"  # checkpoints written by the trainer
CONFIG_DIR = WORKSPACE / "config"  # generated training configs
LOG_FILE = WORKSPACE / "training.log"

# Create the workspace layout up front so handlers can assume it exists.
DATA_DIR.mkdir(parents=True, exist_ok=True)
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
CONFIG_DIR.mkdir(parents=True, exist_ok=True)

# Global event: set when training + HF push are fully done.
# The SIGTERM handler waits on this before allowing the process to exit,
# so the container is never killed while a push is in flight.
_training_done = threading.Event()
_training_done.set()  # starts "done" (no training in progress)
_training_thread: threading.Thread | None = None
37
+
38
+
39
+ # ---------------------------------------------------------------------------
40
+ # Multi-node readiness barrier
41
+ # ---------------------------------------------------------------------------
42
+ # In a cluster, every node pre-downloads model/dataset independently.
43
+ # Once done, workers POST to the master's /barrier/ready. The master
44
+ # counts itself + all workers, then every node polls /barrier/wait until
45
+ # the count reaches num_nodes. Only then does any node start torchrun.
46
+
47
+
48
+
49
+ # ---------------------------------------------------------------------------
50
+ # HuggingFace Hub push helpers
51
+ # ---------------------------------------------------------------------------
52
+
53
+
54
+ def _ensure_readme_metadata(checkpoint_path: Path, model_id: str):
55
+ """Ensure the checkpoint has a README.md with correct base_model metadata.
56
+
57
+ ms-swift either writes a local cache path as base_model (which HF rejects)
58
+ or the README may be missing entirely. This function fixes existing READMEs
59
+ or creates a minimal one so HF Hub always has the base_model field set to
60
+ the canonical model ID (e.g. ``Qwen/Qwen3.5-4B``).
61
+ """
62
+ readme = checkpoint_path / "README.md"
63
+
64
+ if readme.exists():
65
+ text = readme.read_text(encoding="utf-8")
66
+ fm_match = re.match(r"^---\n(.*?\n)---", text, re.DOTALL)
67
+
68
+ if fm_match:
69
+ front_matter = fm_match.group(1)
70
+ original = front_matter
71
+
72
+ # Fix base_model values that are local paths
73
+ front_matter = re.sub(
74
+ r'(base_model\s*:\s*)["\']?(/[^\s"\']+)["\']?',
75
+ rf"\1{model_id}",
76
+ front_matter,
77
+ )
78
+
79
+ # If base_model is missing entirely, add it
80
+ if "base_model" not in front_matter:
81
+ front_matter = f"base_model: {model_id}\n{front_matter}"
82
+
83
+ if front_matter != original:
84
+ text = f"---\n{front_matter}---" + text[fm_match.end() :]
85
+ readme.write_text(text, encoding="utf-8")
86
+ print(
87
+ f"[HF Push] Fixed README metadata in {checkpoint_path.name} → base_model: {model_id}",
88
+ flush=True,
89
+ )
90
+ return
91
+
92
+ # No README or no front-matter — create a minimal one
93
+ readme.write_text(
94
+ f"---\nbase_model: {model_id}\ntags:\n- fine-tuned\n- ms-swift\nlibrary_name: transformers\n---\n\n"
95
+ f"# {checkpoint_path.name}\n\nFine-tuned from [{model_id}](https://huggingface.co/{model_id}) using [ms-swift](https://github.com/modelscope/ms-swift).\n",
96
+ encoding="utf-8",
97
+ )
98
+ print(
99
+ f"[HF Push] Created README.md for {checkpoint_path.name} with base_model: {model_id}",
100
+ flush=True,
101
+ )
102
+
103
+
104
+ def _fix_adapter_config_base_model(checkpoint_path: Path, model_id: str):
105
+ """Rewrite base_model_name_or_path in adapter_config.json from a local
106
+ cache path (e.g. /workspace/hf-cache/hub/models--Qwen--Qwen3.5-9B/...)
107
+ to the canonical HF model ID (e.g. Qwen/Qwen3.5-9B)."""
108
+ adapter_cfg = checkpoint_path / "adapter_config.json"
109
+ if not adapter_cfg.exists():
110
+ return
111
+ try:
112
+ cfg = json.loads(adapter_cfg.read_text(encoding="utf-8"))
113
+ base = cfg.get("base_model_name_or_path", "")
114
+ if base.startswith("/") or "--" in base:
115
+ cfg["base_model_name_or_path"] = model_id
116
+ adapter_cfg.write_text(
117
+ json.dumps(cfg, indent=2, ensure_ascii=False) + "\n", encoding="utf-8"
118
+ )
119
+ print(
120
+ f"[HF Push] Fixed adapter_config.json base_model_name_or_path: {base} → {model_id}",
121
+ flush=True,
122
+ )
123
+ except Exception as e:
124
+ print(f"[HF Push] Warning: could not fix adapter_config.json: {e}", flush=True)
125
+
126
+
127
# Trainer-state files excluded from every Hub upload: optimizer/scheduler
# state, RNG state, and DeepSpeed shards are large and useless for inference.
CHECKPOINT_IGNORE_PATTERNS = [
    "optimizer.pt",
    "optim_states.pt",
    "scheduler.pt",
    "rng_state*.pth",
    "global_step*",
    "zero_to_fp32.py",
    "*.distcp",
]
136
+
137
+
138
+ def _checkpoint_is_ready(checkpoint_path: Path, settle_seconds: float = 30) -> bool:
139
+ """Return True only when no file in the checkpoint has been modified recently.
140
+
141
+ This prevents uploading a half-written checkpoint while the trainer is
142
+ still flushing large .safetensors / optimizer files to disk.
143
+ """
144
+ cutoff = time.time() - settle_seconds
145
+ try:
146
+ for f in checkpoint_path.rglob("*"):
147
+ if f.is_file() and f.stat().st_mtime > cutoff:
148
+ return False
149
+ except OSError:
150
+ return False
151
+ return True
152
+
153
+
154
def _hf_push_checkpoint(
    checkpoint_path: Path, repo_id: str, hf_token: str, commit_message: str, model_id: str = ""
):
    """Push a single checkpoint directory to HuggingFace Hub."""
    try:
        if model_id:
            # Normalize model-card and adapter metadata before uploading.
            _ensure_readme_metadata(checkpoint_path, model_id)
            _fix_adapter_config_base_model(checkpoint_path, model_id)

        from huggingface_hub import HfApi

        hub = HfApi(token=hf_token)
        hub.create_repo(repo_id, exist_ok=True, private=True)
        hub.upload_folder(
            folder_path=str(checkpoint_path),
            repo_id=repo_id,
            commit_message=commit_message,
            path_in_repo=checkpoint_path.name,
            ignore_patterns=CHECKPOINT_IGNORE_PATTERNS,
        )
        print(f"[HF Push] Pushed {checkpoint_path.name} to {repo_id}")
    except Exception as e:
        # Best-effort: a failed intermediate push must not kill training.
        print(f"[HF Push] Failed to push {checkpoint_path.name}: {e}")
177
+
178
+
179
# Persisted record of an in-flight final-model push, used for crash recovery.
_HF_PUSH_STATE_FILE = OUTPUT_DIR / ".hf_push_state.json"
180
+
181
+
182
def _save_push_state(repo_id: str, hf_token: str, model_id: str, checkpoint_name: str):
    """Persist push intent so it can be recovered after a crash/restart."""
    record = {
        "repo_id": repo_id,
        "hf_token": hf_token,
        "model_id": model_id,
        "checkpoint_name": checkpoint_name,
        "created_at": datetime.now(timezone.utc).isoformat(),
    }
    try:
        _HF_PUSH_STATE_FILE.write_text(json.dumps(record))
    except Exception as e:
        # Non-fatal: losing the state file only disables crash recovery.
        print(f"[HF Push] Warning: could not save push state: {e}", flush=True)
198
+
199
+
200
def _clear_push_state():
    """Best-effort removal of the persisted push-state file."""
    try:
        _HF_PUSH_STATE_FILE.unlink(missing_ok=True)
    except Exception:
        pass
203
+
204
+
205
def _verify_hf_push(repo_id: str, hf_token: str, checkpoint_name: str | None = None) -> bool:
    """Verify that the model was actually pushed to HuggingFace by checking for key files."""
    try:
        from huggingface_hub import HfApi

        hub = HfApi(token=hf_token)
        # list_repo_files returns strings (file paths relative to repo root)
        repo_files = list(hub.list_repo_files(repo_id))
    except Exception as e:
        print(f"[HF Push] Verification failed: {e}", flush=True)
        return False

    prefix = f"{checkpoint_name}/" if checkpoint_name else ""
    if f"{prefix}config.json" not in repo_files:
        return False
    # Require at least one weights file under the same prefix.
    return any(
        name.startswith(prefix) and name.endswith((".safetensors", ".bin"))
        for name in repo_files
    )
224
+
225
+
226
def _hf_push_final_model(
    output_dir: Path,
    repo_id: str,
    hf_token: str,
    model_id: str = "",
    tuner_type: str = "full",
    max_retries: int = 3,
) -> bool:
    """Push the final trained model to HuggingFace Hub as the repo root.

    Args:
        output_dir: Directory scanned for checkpoints.
        repo_id: Target HF repo (created private if it does not exist).
        hf_token: HuggingFace auth token.
        model_id: Canonical base-model ID used to fix checkpoint metadata.
        tuner_type: Tuning strategy; kept for interface compatibility.
        max_retries: Upload attempts before giving up.

    Returns True if the push succeeded and was verified, False otherwise.
    Retries on transient failures.
    """
    best = _find_best_checkpoint(output_dir)
    if not best:
        print("[HF Push] No checkpoint found for final push", flush=True)
        return False

    # Record intent first so an interrupted push can be recovered on restart.
    _save_push_state(repo_id, hf_token, model_id, best.name)

    upload_dir = best
    if model_id:
        # Keep metadata fixes consistent with _hf_push_checkpoint: both the
        # README and adapter_config.json may contain local cache paths that
        # HF Hub rejects or that break from_pretrained on other machines.
        _ensure_readme_metadata(upload_dir, model_id)
        _fix_adapter_config_base_model(upload_dir, model_id)

    from huggingface_hub import HfApi

    for attempt in range(1, max_retries + 1):
        try:
            print(
                f"[HF Push] Final push attempt {attempt}/{max_retries}: {best.name} -> {repo_id}",
                flush=True,
            )
            api = HfApi(token=hf_token)
            api.create_repo(repo_id, exist_ok=True, private=True)
            api.upload_folder(
                folder_path=str(upload_dir),
                repo_id=repo_id,
                commit_message=f"Final model from {best.name}",
                ignore_patterns=CHECKPOINT_IGNORE_PATTERNS,
            )
            print("[HF Push] Upload complete, verifying...", flush=True)

            if _verify_hf_push(repo_id, hf_token):
                print(
                    f"[HF Push] Verified: final model from {best.name} is on {repo_id}", flush=True
                )
                _clear_push_state()
                return True
            else:
                print(f"[HF Push] Verification failed after upload (attempt {attempt})", flush=True)
        except Exception as e:
            print(f"[HF Push] Attempt {attempt} failed: {e}", flush=True)

        if attempt < max_retries:
            wait = 15 * attempt  # linear backoff: 15s, 30s, ...
            print(f"[HF Push] Retrying in {wait}s...", flush=True)
            time.sleep(wait)

    print(f"[HF Push] CRITICAL: All {max_retries} attempts to push final model failed!", flush=True)
    return False
286
+
287
+
288
def _recover_pending_push():
    """On startup, check if a previous push was interrupted and retry it."""
    if not _HF_PUSH_STATE_FILE.exists():
        return

    try:
        push_state = json.loads(_HF_PUSH_STATE_FILE.read_text())
    except Exception:
        # Corrupt state file — nothing recoverable, drop it.
        _clear_push_state()
        return

    repo_id = push_state.get("repo_id")
    hf_token = push_state.get("hf_token")
    model_id = push_state.get("model_id", "")
    checkpoint_name = push_state.get("checkpoint_name", "")

    if not repo_id or not hf_token:
        # State file is missing the essentials; a retry is impossible.
        _clear_push_state()
        return

    print(f"[HF Push] Recovering interrupted push: {checkpoint_name} -> {repo_id}", flush=True)

    # Case 1: the final model already reached the repo root — the push
    # finished but the state file was not cleared before the crash.
    if _verify_hf_push(repo_id, hf_token):
        print("[HF Push] Recovery: push already completed (verified on HF)", flush=True)
        _clear_push_state()
        return

    # Case 2: the checkpoint was uploaded under its own subdirectory.
    if _verify_hf_push(repo_id, hf_token, checkpoint_name):
        print(
            f"[HF Push] Recovery: checkpoint {checkpoint_name} already on HF (verified)", flush=True
        )
        _clear_push_state()
        return

    # Neither verified — re-run the full final-model push from local output.
    success = _hf_push_final_model(OUTPUT_DIR, repo_id, hf_token, model_id=model_id)
    if success:
        print("[HF Push] Recovery push succeeded", flush=True)
    else:
        print("[HF Push] Recovery push FAILED — manual intervention needed", flush=True)
327
+
328
+
329
def _find_best_checkpoint(output_dir: Path) -> Path | None:
    """Find the best checkpoint in the output directory.

    Prefers the checkpoint marked as best_model_checkpoint in trainer_state.json
    (written when load_best_model_at_end is enabled). Falls back to the most
    recent checkpoint by modification time.
    """
    from_state = _best_checkpoint_from_trainer_state(output_dir)
    if from_state and from_state.exists():
        print(f"[Checkpoint] Using best by eval metric: {from_state}", flush=True)
        return from_state

    # Fall back to the newest checkpoint dir: nested run dirs first,
    # then top-level checkpoints.
    for pattern in ("*/checkpoint-*", "checkpoint-*"):
        found = sorted(output_dir.glob(pattern), key=lambda p: p.stat().st_mtime)
        if found:
            return found[-1]
    return None
346
+
347
+
348
+ def _best_checkpoint_from_trainer_state(output_dir: Path) -> Path | None:
349
+ """Read trainer_state.json to find the checkpoint with the best eval metric."""
350
+ for state_file in output_dir.rglob("trainer_state.json"):
351
+ try:
352
+ ts = json.loads(state_file.read_text(encoding="utf-8"))
353
+ best = ts.get("best_model_checkpoint")
354
+ if best:
355
+ best_path = Path(best)
356
+ if best_path.exists():
357
+ return best_path
358
+ relative = output_dir / best_path.name
359
+ if relative.exists():
360
+ return relative
361
+ except Exception:
362
+ continue
363
+ return None
364
+
365
+
366
+ def _find_latest_checkpoint(output_dir: Path) -> Path | None:
367
+ """Find the most recent checkpoint by step number across all run directories."""
368
+ all_ckpts: list[tuple[int, Path]] = []
369
+ for pattern in ("*/checkpoint-*", "checkpoint-*"):
370
+ for p in output_dir.glob(pattern):
371
+ m = re.search(r"checkpoint-(\d+)$", p.name)
372
+ if m and p.is_dir():
373
+ all_ckpts.append((int(m.group(1)), p))
374
+ if not all_ckpts:
375
+ return None
376
+ all_ckpts.sort(key=lambda t: t[0])
377
+ return all_ckpts[-1][1]
378
+
379
+
380
class _HFPushQueue:
    """Watches the output directory for new checkpoints and pushes them to HF.

    Uploads are serialized through a single worker thread to prevent concurrent
    commits to the same repo (which cause commit-race failures on HF Hub).

    Checkpoints are enqueued as soon as they are discovered so they cannot be
    deleted by the trainer (save_total_limit rotation) before the upload starts.
    The upload worker waits for the checkpoint to settle before pushing.
    """

    def __init__(
        self,
        repo_id: str,
        hf_token: str,
        model_id: str = "",
        output_dir: Path = OUTPUT_DIR,
        poll_interval: float = 15,
    ):
        self.repo_id = repo_id
        self.hf_token = hf_token
        self.model_id = model_id
        self._output_dir = output_dir
        self._poll_interval = poll_interval
        self._pushed: set[str] = set()  # checkpoint names already enqueued
        self._queue: list[Path] = []  # FIFO of checkpoints awaiting upload
        self._queue_lock = threading.Lock()
        self._stop = threading.Event()
        self._has_work = threading.Event()

        # daemon=False so the interpreter waits for in-flight uploads on exit.
        self._watcher = threading.Thread(target=self._watch, daemon=False, name="hf-push-watcher")
        self._worker = threading.Thread(
            target=self._upload_worker, daemon=False, name="hf-push-worker"
        )
        self._watcher.start()
        self._worker.start()

    def _discover_checkpoints(self) -> list[Path]:
        """Find all checkpoint-* dirs under output_dir (any nesting depth)."""
        found = []
        for pattern in ("*/checkpoint-*", "checkpoint-*"):
            for p in self._output_dir.glob(pattern):
                if p.is_dir() and p.name not in self._pushed:
                    found.append(p)
        # Oldest first, so uploads happen in training order.
        return sorted(found, key=lambda p: p.stat().st_mtime)

    def _watch(self):
        """Poll for new checkpoints and enqueue immediately on discovery."""
        while not self._stop.is_set():
            self._stop.wait(self._poll_interval)
            if self._stop.is_set():
                break
            for ckpt in self._discover_checkpoints():
                self._pushed.add(ckpt.name)
                self._enqueue(ckpt)

    def _enqueue(self, checkpoint_path: Path):
        # Append under the lock, then wake the worker.
        with self._queue_lock:
            self._queue.append(checkpoint_path)
        self._has_work.set()
        print(f"[HF Push] Queued upload for {checkpoint_path.name}", flush=True)

    def _upload_worker(self):
        """Sequentially process queued uploads — one commit at a time.

        Waits for each checkpoint to settle (files stop changing) before
        uploading. If the checkpoint directory is deleted before it settles
        (e.g. by save_total_limit rotation), the upload is skipped.
        """
        while True:
            self._has_work.wait(timeout=5)
            self._has_work.clear()

            while True:
                with self._queue_lock:
                    if not self._queue:
                        break
                    ckpt = self._queue.pop(0)

                if not ckpt.exists():
                    print(f"[HF Push] {ckpt.name} was deleted before upload, skipping", flush=True)
                    continue

                # Wait up to 60s (12 × 5s) for the checkpoint to settle.
                for _ in range(12):
                    if _checkpoint_is_ready(ckpt):
                        break
                    if not ckpt.exists():
                        break
                    time.sleep(5)

                if not ckpt.exists():
                    print(f"[HF Push] {ckpt.name} was deleted before upload, skipping", flush=True)
                    continue

                _hf_push_checkpoint(
                    ckpt, self.repo_id, self.hf_token, f"Checkpoint {ckpt.name}", self.model_id
                )

            # Exit only once stop was requested AND the queue is drained.
            if self._stop.is_set():
                with self._queue_lock:
                    if not self._queue:
                        break

    def stop_and_wait(self, timeout: float = 600):
        """Stop watching and drain any remaining uploads."""
        self._stop.set()
        self._watcher.join(timeout=10)

        # Final sweep for checkpoints created after the watcher stopped.
        for ckpt in self._discover_checkpoints():
            self._pushed.add(ckpt.name)
            self._enqueue(ckpt)

        self._has_work.set()
        self._worker.join(timeout=timeout)
        print("[HF Push] All uploads done.", flush=True)
495
+
496
+
497
+ # ---------------------------------------------------------------------------
498
+ # Webhook helper
499
+ # ---------------------------------------------------------------------------
500
+
501
+
502
+ class _PostRedirectHandler(urllib.request.HTTPRedirectHandler):
503
+ """Preserve POST method and body through 301/302/307/308 redirects."""
504
+
505
+ def redirect_request(self, req, fp, code, msg, headers, newurl):
506
+ new_req = urllib.request.Request(
507
+ newurl,
508
+ data=req.data,
509
+ headers=dict(req.headers),
510
+ method="POST",
511
+ )
512
+ return new_req
513
+
514
+
515
def _fire_webhook(url: str, secret: str, payload: dict):
    """POST JSON to the dashboard webhook."""
    # Upgrade non-local targets to HTTPS so the secret header is never sent
    # in cleartext over the public internet.
    is_local = "localhost" in url or "127.0.0.1" in url
    if url.startswith("http://") and not is_local:
        url = url.replace("http://", "https://", 1)
    try:
        body = json.dumps(payload).encode()
        request = urllib.request.Request(
            url,
            data=body,
            headers={
                "Content-Type": "application/json",
                "X-Webhook-Secret": secret,
            },
            method="POST",
        )
        opener = urllib.request.build_opener(_PostRedirectHandler)
        with opener.open(request, timeout=15) as resp:
            print(f"[Webhook] Fired to {url} — status {resp.status}")
    except Exception as e:
        # Webhook delivery is best-effort; never crash the caller.
        print(f"[Webhook] Failed to fire to {url}: {e}")
535
+
536
+
537
def _fire_completion_webhook(config: dict, hf_push_ok: bool = False):
    """Notify the dashboard that training completed successfully.

    Only called after a real successful training run (returncode 0)
    AND after the final HF push has been attempted.
    """
    url = config.get("webhook_url")
    job_id = config.get("training_job_id")
    # Both the destination and the job identity are required to notify.
    if not url or not job_id:
        return
    payload = {
        "jobId": job_id,
        "status": "completed",
        "hfPushOk": hf_push_ok,
    }
    _fire_webhook(url, config.get("webhook_secret") or "", payload)
557
+
558
+
559
+ # ---------------------------------------------------------------------------
560
+ # Training state
561
+ # ---------------------------------------------------------------------------
562
+
563
+
564
class TrainingState:
    """In-memory snapshot of the current training run, serialized by the API."""

    def __init__(self):
        self.status = "idle"
        self.started_at: str | None = None
        self.finished_at: str | None = None
        self.error: str | None = None
        self.pid: int | None = None
        self.config: dict = {}
        self.hf_push_status: str | None = None  # None, "pushing", "success", "failed"

    def to_dict(self):
        # Include the last 50 log lines for quick diagnostics in API responses.
        log_tail = ""
        if LOG_FILE.exists():
            try:
                log_tail = "\n".join(LOG_FILE.read_text().splitlines()[-50:])
            except Exception:
                pass
        return {
            "status": self.status,
            "started_at": self.started_at,
            "finished_at": self.finished_at,
            "error": self.error,
            "pid": self.pid,
            "config": self.config,
            "log_tail": log_tail,
            "hf_push_status": self.hf_push_status,
            "push_in_progress": not _training_done.is_set(),
        }
593
+
594
+
595
# Singleton training state shared by all request handlers.
state = TrainingState()
596
+
597
+
598
def verify_secret(authorization: str = Header(...)):
    """FastAPI dependency: validate the Authorization header against API_SECRET.

    Raises:
        HTTPException: 500 when the server has no API_SECRET configured;
            401 when the bearer token does not match.
    """
    if not API_SECRET:
        raise HTTPException(500, "API_SECRET env var not set on server")
    import hmac

    expected = f"Bearer {API_SECRET}"
    # Constant-time comparison prevents timing attacks on the shared secret.
    if not hmac.compare_digest(authorization.encode(), expected.encode()):
        raise HTTPException(401, "Invalid or missing API secret")
604
+
605
+
606
+ def _to_snake_case(key: str) -> str:
607
+ """Convert camelCase, PascalCase, UPPER_CASE, or kebab-case to snake_case."""
608
+ key = key.replace("-", "_")
609
+ key = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", key)
610
+ key = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", key)
611
+ return key.lower()
612
+
613
+
614
+ class TrainRequest(BaseModel):
615
+ model: str = Field("Qwen/Qwen3.5-4B", description="HuggingFace model ID or local path")
616
+ dataset: str | None = Field(
617
+ None,
618
+ description="HuggingFace dataset ID (e.g. 'tatsu-lab/alpaca') or leave empty to use uploaded JSONL",
619
+ )
620
+ dataset_subset: str | None = Field(None, description="Dataset subset/config name")
621
+ val_split_ratio: float | None = Field(
622
+ None,
623
+ ge=0.0,
624
+ le=0.5,
625
+ description="Fraction of training data to hold out for validation (0.0-0.5). Ignored when val_dataset is set",
626
+ )
627
+ val_dataset: str | None = Field(
628
+ None, description="HuggingFace dataset ID for validation, or leave empty"
629
+ )
630
+ num_epochs: int = Field(3, ge=1, le=100, description="Number of training epochs")
631
+ batch_size: int = Field(1, ge=1, description="Per-device training batch size")
632
+ grad_accum: int = Field(
633
+ 4,
634
+ ge=1,
635
+ description="Gradient accumulation steps (effective batch = batch_size * grad_accum * num_gpus)",
636
+ )
637
+ learning_rate: float = Field(2e-5, gt=0, description="Peak learning rate")
638
+ max_length: int = Field(
639
+ 2048,
640
+ ge=128,
641
+ description="Max sequence length in tokens. Rows exceeding this are dropped. Qwen 3.5 supports up to 32768",
642
+ )
643
+ save_steps: int = Field(10, ge=1, description="Save a checkpoint every N steps")
644
+ eval_steps: int | None = Field(
645
+ None, ge=1, description="Run evaluation every N steps. Defaults to save_steps if not set"
646
+ )
647
+ save_total_limit: int = Field(
648
+ 2, ge=1, description="Max number of checkpoints to keep on disk (oldest are deleted)"
649
+ )
650
+ logging_steps: int = Field(5, ge=1, description="Log metrics every N steps")
651
+ tuner_type: str = Field("full", description="Tuning strategy (only 'full' is supported)")
652
+ warmup_ratio: float = Field(
653
+ 0.1,
654
+ ge=0.0,
655
+ le=1.0,
656
+ description="Fraction of total steps used for linear LR warmup (0.0-1.0)",
657
+ )
658
+ lr_scheduler_type: str = Field(
659
+ "cosine",
660
+ description="LR scheduler: cosine, linear, cosine_with_restarts, polynomial, constant, constant_with_warmup, inverse_sqrt",
661
+ )
662
+ weight_decay: float = Field(0.1, ge=0.0, description="L2 weight decay coefficient")
663
+ max_grad_norm: float = Field(
664
+ 1.0, ge=0.0, description="Max gradient norm for clipping (0 = no clipping)"
665
+ )
666
+ optimizer: str = Field(
667
+ "adamw_torch",
668
+ description="Optimizer: adamw_torch, adamw_torch_fused, adamw_8bit, paged_adamw_8bit, paged_adamw_32bit, adafactor, sgd",
669
+ )
670
+ seed: int = Field(42, ge=0, description="Random seed for reproducibility")
671
+ neftune_alpha: float | None = Field(
672
+ None,
673
+ ge=0.0,
674
+ description="NEFTune noise alpha for embedding regularization (null/0 = off, try 5-15)",
675
+ )
676
+ packing: bool = Field(
677
+ False,
678
+ description="Pack multiple samples into uniform-length sequences to reduce padding waste",
679
+ )
680
+ shuffle_dataset: bool = Field(
681
+ False,
682
+ description="Explicitly shuffle the dataset before training. The dataloader already uses a random sampler by default, so this adds an extra pre-shuffle pass",
683
+ )
684
+ lazy_tokenize: bool = Field(
685
+ True,
686
+ description="Tokenize samples on-the-fly during training instead of pre-tokenizing the entire dataset into memory. Prevents OOM on large datasets",
687
+ )
688
+ dataset_num_proc: int = Field(
689
+ 4,
690
+ ge=1,
691
+ le=128,
692
+ description="Number of processes for dataset preprocessing. Higher values speed up tokenization but use more CPU/RAM",
693
+ )
694
+ attn_impl: str = Field(
695
+ "flash_attn",
696
+ description="Attention implementation: 'flash_attn' (recommended, O(n) memory), 'sdpa' (PyTorch native), or 'eager' (naive, O(n^2) memory). Qwen3.5 has full-attention layers that OOM without flash_attn at long sequences",
697
+ )
698
+ deepspeed: str | None = Field(
699
+ None,
700
+ description="DeepSpeed config: 'zero2', 'zero3', or null. Auto-set to 'zero2' when num_gpus > 1",
701
+ )
702
+ num_gpus: int | None = Field(
703
+ None, description="Number of GPUs to use. null = auto-detect all available GPUs"
704
+ )
705
+ num_nodes: int | None = Field(
706
+ None,
707
+ ge=1,
708
+ le=64,
709
+ description="Number of nodes for multi-node training. null = auto-detect from NUM_NODES env var (set by RunPod Instant Clusters). 1 = single-node",
710
+ )
711
+ node_rank: int | None = Field(
712
+ None,
713
+ ge=0,
714
+ description="This node's rank in the cluster. null = auto-detect from NODE_RANK env var. 0 = primary node",
715
+ )
716
+ master_addr: str | None = Field(
717
+ None,
718
+ description="Primary node address for distributed training. null = auto-detect from MASTER_ADDR env var",
719
+ )
720
+ master_port: str | None = Field(
721
+ None,
722
+ description="Primary node port for distributed training. null = auto-detect from MASTER_PORT env var",
723
+ )
724
+ gradient_checkpointing: bool = Field(
725
+ True,
726
+ description="Enable gradient checkpointing to reduce VRAM at the cost of ~20% slower training",
727
+ )
728
+ use_flash_ckpt: bool = Field(
729
+ False,
730
+ description="Use flash checkpointing (experimental, requires dlrover). Disabled by default due to dlrover/ms-swift compatibility issues",
731
+ )
732
    # --- Checkpointing / resume ---------------------------------------------
    resume_from_checkpoint: str | None = Field(
        None,
        description=(
            "Resume training from a checkpoint. Values: "
            "'auto' = find the latest local checkpoint automatically; "
            "a local path like '/workspace/output/v0-.../checkpoint-100'; "
            "or null to start fresh"
        ),
    )
    # --- HuggingFace Hub credentials / push target --------------------------
    hf_token: str | None = Field(None, description="HuggingFace token (overrides HF_TOKEN env var)")
    hf_repo_id: str | None = Field(
        None, description="HuggingFace repo to push checkpoints/final model (e.g. 'org/model-name')"
    )
    # --- Weights & Biases logging -------------------------------------------
    wandb_project: str | None = Field(None, description="W&B project name (enables wandb logging)")
    wandb_entity: str | None = Field(None, description="W&B entity/team name")
    wandb_run_name: str | None = Field(None, description="W&B run name")
    wandb_api_key: str | None = Field(
        None, description="W&B API key (overrides WANDB_API_KEY env var)"
    )
    # --- Completion webhook ---------------------------------------------------
    webhook_url: str | None = Field(None, description="URL to POST when training completes")
    webhook_secret: str | None = Field(None, description="Secret sent in X-Webhook-Secret header")
    training_job_id: str | None = Field(None, description="Dashboard job ID passed back in webhook")
    # --- Multimodal image-resolution caps (VRAM control) ---------------------
    max_pixels: int | None = Field(
        None,
        ge=1024,
        description="Max pixels per image for multimodal training (controls image resolution/VRAM). e.g. 1003520 for ~1M pixels. null = model default",
    )
    min_pixels: int | None = Field(
        None,
        ge=256,
        description="Min pixels per image for multimodal training. null = model default",
    )
    # --- Early stopping (only active when validation data exists) ------------
    early_stopping_patience: int | None = Field(
        None,
        ge=1,
        description="Stop training when eval loss hasn't improved for this many eval rounds. Requires validation data (val_dataset or val_split_ratio). null = disabled",
    )
    early_stopping_threshold: float = Field(
        0.0,
        ge=0.0,
        description="Minimum eval loss improvement to count as 'better' (0.0 = any improvement counts)",
    )
    # --- Escape hatch: raw flags forwarded verbatim to `swift sft` -----------
    extra_args: dict | None = Field(
        None,
        description='Extra args passed directly to swift sft (e.g. {"truncation_strategy": "truncation_left"})',
    )

    # Allowed values for the enum-like string fields; enforced by the
    # mode="after" model validator on this class.
    _VALID_LR_SCHEDULERS = frozenset(
        {
            "cosine",
            "linear",
            "cosine_with_restarts",
            "polynomial",
            "constant",
            "constant_with_warmup",
            "inverse_sqrt",
        }
    )
    _VALID_OPTIMIZERS = frozenset(
        {
            "adamw_torch",
            "adamw_torch_fused",
            "adamw_8bit",
            "adamw_bnb_8bit",
            "paged_adamw_8bit",
            "paged_adamw_32bit",
            "adafactor",
            "sgd",
        }
    )
    _VALID_ATTN_IMPLS = frozenset(
        {
            "flash_attn",
            "flash_attention_2",
            "sdpa",
            "eager",
        }
    )
810
+
811
+ @model_validator(mode="before")
812
+ @classmethod
813
+ def _normalize_keys(cls, data: Any) -> Any:
814
+ if not isinstance(data, dict):
815
+ return data
816
+ known = set(cls.model_fields.keys())
817
+ normalized: dict[str, Any] = {}
818
+ for key, value in data.items():
819
+ snake = _to_snake_case(key)
820
+ if snake in known:
821
+ normalized[snake] = value
822
+ else:
823
+ normalized[key] = value
824
+ return normalized
825
+
826
+ @model_validator(mode="after")
827
+ def _validate_enums(self):
828
+ if self.lr_scheduler_type not in self._VALID_LR_SCHEDULERS:
829
+ raise ValueError(
830
+ f"lr_scheduler_type must be one of {sorted(self._VALID_LR_SCHEDULERS)}, got '{self.lr_scheduler_type}'"
831
+ )
832
+ if self.optimizer not in self._VALID_OPTIMIZERS:
833
+ raise ValueError(
834
+ f"optimizer must be one of {sorted(self._VALID_OPTIMIZERS)}, got '{self.optimizer}'"
835
+ )
836
+ if self.attn_impl not in self._VALID_ATTN_IMPLS:
837
+ raise ValueError(
838
+ f"attn_impl must be one of {sorted(self._VALID_ATTN_IMPLS)}, got '{self.attn_impl}'"
839
+ )
840
+ return self
841
+
842
+
843
+ # ---------------------------------------------------------------------------
844
+ # Pre-flight checks
845
+ # ---------------------------------------------------------------------------
846
+
847
+
848
+ def _preflight_fla_check():
849
+ """Verify flash-linear-attention is importable before training starts.
850
+
851
+ Qwen 3.5 models use GatedDeltaNet for ~75% of their attention layers.
852
+ Without the `fla` package these layers silently fall back to a naive
853
+ O(n²) recurrence that uses 2-3x the VRAM — no warning, no error.
854
+ This check fails fast so we don't burn GPU hours on a doomed run.
855
+ """
856
+ try:
857
+ import fla # noqa: F401
858
+ from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule # noqa: F401
859
+
860
+ print(
861
+ "[Pre-flight] flash-linear-attention OK — GatedDeltaNet layers will use FLA kernels",
862
+ flush=True,
863
+ )
864
+ except ImportError as e:
865
+ print(
866
+ f"[Pre-flight] WARNING: flash-linear-attention not importable: {e}\n"
867
+ " GatedDeltaNet layers will fall back to naive O(n²) recurrence.\n"
868
+ " This will use 2-3x more VRAM and likely OOM on sequences >4k.\n"
869
+ " Install: pip install git+https://github.com/fla-org/flash-linear-attention",
870
+ flush=True,
871
+ )
872
+ except Exception as e:
873
+ print(f"[Pre-flight] flash-linear-attention import issue (non-fatal): {e}", flush=True)
874
+
875
+ try:
876
+ import causal_conv1d # noqa: F401
877
+
878
+ print("[Pre-flight] causal-conv1d OK", flush=True)
879
+ except ImportError:
880
+ print(
881
+ "[Pre-flight] WARNING: causal-conv1d not available — some FLA ops may be slower",
882
+ flush=True,
883
+ )
884
+
885
+
886
+ # ---------------------------------------------------------------------------
887
+ # Training runner
888
+ # ---------------------------------------------------------------------------
889
+
890
+
891
+ def _log_cluster_diagnostics(
892
+ num_nodes: int,
893
+ node_rank: int,
894
+ master_addr: str,
895
+ master_port: str | int,
896
+ ):
897
+ """Log network interfaces and cluster env vars for debugging."""
898
+ import subprocess as _sp
899
+
900
+ try:
901
+ ifaces = _sp.check_output(["ip", "-4", "addr", "show"], timeout=5).decode()
902
+ print(f"[Cluster] Network interfaces:\n{ifaces}", flush=True)
903
+ except Exception as e:
904
+ print(f"[Cluster] Could not list interfaces: {e}", flush=True)
905
+
906
+ node_addr = os.environ.get("NODE_ADDR", "")
907
+ primary_addr = os.environ.get("PRIMARY_ADDR", "")
908
+ print(
909
+ f"[Cluster] rank={node_rank}/{num_nodes}, "
910
+ f"MASTER_ADDR={master_addr}, MASTER_PORT={master_port}, "
911
+ f"NODE_ADDR={node_addr}, PRIMARY_ADDR={primary_addr}",
912
+ flush=True,
913
+ )
914
+
915
+
916
def _prepare_hf_dataset(snap_path: str, dataset_name: str) -> str | None:
    """Copy JSONL from a HF snapshot to /workspace/data/ with absolute image paths.

    ms-swift resolves relative image paths from the CWD, not from the JSONL
    location. When a dataset is pulled from HuggingFace Hub the images live
    inside the snapshot cache dir, so relative paths like ``images/foo.jpg``
    break. This function rewrites them to absolute paths and writes a local
    copy that ``swift sft`` can consume directly.

    Returns the path to the local train JSONL, or None on failure.
    """
    import json as _json

    snap = Path(snap_path)
    candidates = sorted(snap.rglob("*.jsonl"))
    if not candidates:
        print(f"[Dataset] No .jsonl files found in {snap_path}", flush=True)
        return None

    # First file whose stem mentions test/val becomes the eval split;
    # first remaining file becomes the train split.
    train_src: Path | None = None
    test_src: Path | None = None
    for candidate in candidates:
        stem = candidate.stem.lower()
        if "test" in stem or "val" in stem:
            if test_src is None:
                test_src = candidate
        elif train_src is None:
            train_src = candidate
    if train_src is None:
        train_src = candidates[0]

    DATA_DIR.mkdir(parents=True, exist_ok=True)

    def _rewrite_jsonl(src: Path, dst: Path):
        # Rewrite relative media paths to absolute paths under the snapshot;
        # URLs, data: URIs and already-absolute paths pass through unchanged.
        written = 0
        with open(src) as fin, open(dst, "w") as fout:
            for raw in fin:
                raw = raw.strip()
                if not raw:
                    continue
                sample = _json.loads(raw)
                for media_key in ("images", "videos", "audios"):
                    media = sample.get(media_key)
                    if not media:
                        continue
                    fixed = []
                    for item in media:
                        if item.startswith(("http://", "https://", "data:", "/")):
                            fixed.append(item)
                            continue
                        abs_candidate = str(snap / item)
                        fixed.append(abs_candidate if Path(abs_candidate).exists() else item)
                    sample[media_key] = fixed
                fout.write(_json.dumps(sample, ensure_ascii=False) + "\n")
                written += 1
        return written

    local_train = DATA_DIR / "train.jsonl"
    n = _rewrite_jsonl(train_src, local_train)
    print(f"[Dataset] Prepared {n} samples: {train_src} -> {local_train}", flush=True)

    if test_src:
        local_test = DATA_DIR / "test.jsonl"
        n_test = _rewrite_jsonl(test_src, local_test)
        print(f"[Dataset] Prepared {n_test} val samples: {test_src} -> {local_test}", flush=True)

    return str(local_train)
986
+
987
+
988
def _run_training(config: dict):
    """Run ``swift sft`` as a subprocess and manage the full training lifecycle.

    Performs pre-flight checks, dataset/model pre-download, command assembly,
    multi-GPU / multi-node environment setup, and log streaming; on the
    primary node it also drives the HF checkpoint push queue, the final model
    push, and the completion webhook. Mutates the module-level ``state``
    throughout and sets ``_training_done`` when it is safe for the container
    to exit.
    """
    global state
    push_queue: _HFPushQueue | None = None
    is_primary = True
    # FIX: both names are read in the `finally` block below. They used to be
    # assigned inside the `try`, so any exception raised before those
    # assignments (e.g. during state setup or pre-flight) caused a NameError
    # inside `finally`, masking the original error. Bind them up front.
    hf_token = config.get("hf_token") or os.environ.get("HF_TOKEN", "")
    hf_repo_id = config.get("hf_repo_id")

    _training_done.clear()
    try:
        state.status = "running"
        state.started_at = datetime.now(timezone.utc).isoformat()
        state.finished_at = None
        state.error = None
        state.config = config

        print(
            f"[Training] Thread started, dataset={config.get('dataset')}, model={config.get('model')}",
            flush=True,
        )

        # Pre-flight: verify flash-linear-attention is usable.
        # Qwen 3.5 has ~75% GatedDeltaNet layers that silently fall back to
        # a naive O(n²) implementation if `fla` isn't importable, causing
        # 2-3x VRAM usage with zero warning.
        _preflight_fla_check()

        env = os.environ.copy()

        env["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
        env["HF_HUB_DISABLE_PROGRESS_BARS"] = "0"
        env["TRANSFORMERS_VERBOSITY"] = "info"
        env["TORCHELASTIC_LOG_LEVEL"] = "INFO"

        num_gpus = config.get("num_gpus") or _detect_gpu_count()

        if num_gpus > 1:
            gpu_ids = ",".join(str(i) for i in range(num_gpus))
            env["CUDA_VISIBLE_DEVICES"] = gpu_ids
            env["NPROC_PER_NODE"] = str(num_gpus)

        # Multi-node: detect from config or RunPod Instant Cluster env vars
        num_nodes = config.get("num_nodes") or int(os.environ.get("NUM_NODES", "1"))
        node_rank = config.get("node_rank")
        if node_rank is None:
            node_rank = int(os.environ.get("NODE_RANK", "0"))
        master_addr = config.get("master_addr") or os.environ.get("MASTER_ADDR", "")
        master_port = config.get("master_port") or os.environ.get("MASTER_PORT", "29500")

        if num_nodes > 1:
            if not master_addr:
                print(
                    "[Training] WARNING: num_nodes > 1 but MASTER_ADDR is empty. "
                    "RunPod Instant Clusters should set this automatically. "
                    "Distributed training will likely fail without it.",
                    flush=True,
                )
            env["NNODES"] = str(num_nodes)
            env["NODE_RANK"] = str(node_rank)
            env["MASTER_ADDR"] = master_addr
            env["MASTER_PORT"] = str(master_port)

            # RunPod clusters expose the cluster NIC as ens1; only pin it
            # when PRIMARY_ADDR indicates we're actually on such a cluster.
            if not env.get("NCCL_SOCKET_IFNAME") and os.environ.get("PRIMARY_ADDR"):
                env["NCCL_SOCKET_IFNAME"] = "ens1"

            env.setdefault("NCCL_DEBUG", "INFO")
            env.setdefault("NCCL_TIMEOUT", "1800000")
            env.setdefault("TORCHELASTIC_MAX_RESTARTS", "0")

            print(
                f"[Training] Multi-node: {num_nodes} nodes, rank={node_rank}, "
                f"master={master_addr}:{master_port}, NCCL_IFNAME={env.get('NCCL_SOCKET_IFNAME', 'default')}",
                flush=True,
            )

        # Only rank 0 pushes to HF and fires the webhook.
        is_primary = node_rank == 0

        if (num_gpus > 1 or num_nodes > 1) and not config.get("deepspeed"):
            config["deepspeed"] = "zero3"

        if hf_token:
            env["HF_TOKEN"] = hf_token

        wandb_key = config.get("wandb_api_key") or os.environ.get("WANDB_API_KEY", "")
        if wandb_key:
            env["WANDB_API_KEY"] = wandb_key
        if config.get("wandb_project"):
            env["WANDB_PROJECT"] = config["wandb_project"]
        if config.get("wandb_entity"):
            env["WANDB_ENTITY"] = config["wandb_entity"]
        if config.get("wandb_run_name"):
            env["WANDB_NAME"] = config["wandb_run_name"]

        if hf_repo_id and hf_token and is_primary:
            push_queue = _HFPushQueue(
                hf_repo_id, hf_token, model_id=config["model"], output_dir=OUTPUT_DIR
            )

        dataset_arg = config.get("dataset") or "/workspace/data/train.jsonl"

        # For HF datasets with multimodal data: download, rewrite relative
        # image/video/audio paths to absolute, and use the local copy.
        if dataset_arg and not dataset_arg.startswith("/"):
            try:
                from huggingface_hub import snapshot_download
                ds_repo = dataset_arg.split(":")[0]
                print(f"[Training] Pre-downloading dataset: {ds_repo}", flush=True)
                snap_path = snapshot_download(ds_repo, repo_type="dataset", token=hf_token or None)
                print(f"[Training] Dataset cached: {ds_repo} -> {snap_path}", flush=True)

                local_jsonl = _prepare_hf_dataset(snap_path, dataset_arg)
                if local_jsonl:
                    dataset_arg = local_jsonl
            except Exception as e:
                print(f"[Training] Dataset pre-download note: {e} (swift will retry)", flush=True)

        if config.get("dataset_subset"):
            dataset_arg = f"{dataset_arg}:{config['dataset_subset']}"

        cmd = [
            "swift",
            "sft",
            "--model",
            config["model"],
            "--dataset",
            dataset_arg,
            "--tuner_type",
            config["tuner_type"],
            "--torch_dtype",
            "bfloat16",
            "--num_train_epochs",
            str(config["num_epochs"]),
            "--per_device_train_batch_size",
            str(config["batch_size"]),
            "--per_device_eval_batch_size",
            str(config["batch_size"]),
            "--learning_rate",
            str(config["learning_rate"]),
            "--gradient_accumulation_steps",
            str(config["grad_accum"]),
            "--eval_strategy",
            "steps",
            "--eval_steps",
            str(config.get("eval_steps") or config["save_steps"]),
            "--save_steps",
            str(config["save_steps"]),
            "--save_total_limit",
            str(config.get("save_total_limit", 2)),
            "--logging_steps",
            str(config["logging_steps"]),
            "--max_length",
            str(config["max_length"]),
            "--output_dir",
            str(OUTPUT_DIR),
            "--warmup_ratio",
            str(config.get("warmup_ratio", 0.1)),
            "--lr_scheduler_type",
            config.get("lr_scheduler_type", "cosine"),
            "--weight_decay",
            str(config.get("weight_decay", 0.1)),
            "--max_grad_norm",
            str(config.get("max_grad_norm", 1.0)),
            "--optim",
            config.get("optimizer", "adamw_torch"),
            "--seed",
            str(config.get("seed", 42)),
            "--dataloader_num_workers",
            "4",
            "--lazy_tokenize",
            str(config.get("lazy_tokenize", True)),
            "--dataset_num_proc",
            str(config.get("dataset_num_proc", 4)),
            "--attn_impl",
            config.get("attn_impl", "flash_attn"),
            "--use_hf",
            "true",
        ]

        if config.get("max_pixels") is not None:
            cmd += ["--max_pixels", str(config["max_pixels"])]
        if config.get("min_pixels") is not None:
            cmd += ["--min_pixels", str(config["min_pixels"])]

        if config.get("neftune_alpha") and config["neftune_alpha"] > 0:
            cmd += ["--neftune_noise_alpha", str(config["neftune_alpha"])]

        if config.get("packing"):
            cmd += ["--packing", "true"]

        if config.get("shuffle_dataset"):
            cmd += ["--dataset_shuffle", "true"]

        if config.get("gradient_checkpointing"):
            cmd += ["--gradient_checkpointing", "true"]

        if config.get("use_flash_ckpt", False):
            cmd += ["--use_flash_ckpt", "true"]

        if config.get("deepspeed"):
            cmd += ["--deepspeed", config["deepspeed"]]

        if is_primary and (config.get("wandb_project") or wandb_key):
            cmd += ["--report_to", "wandb"]

        val = config.get("val_dataset")
        if val:
            cmd += ["--val_dataset", val]
        elif Path("/workspace/data/test.jsonl").exists():
            cmd += ["--val_dataset", "/workspace/data/test.jsonl"]

        val_split = config.get("val_split_ratio")
        if val_split and val_split > 0 and not val:
            cmd += ["--split_dataset_ratio", str(val_split)]

        has_val = (
            bool(val)
            or (val_split and val_split > 0)
            or Path("/workspace/data/test.jsonl").exists()
        )
        patience = config.get("early_stopping_patience")
        if patience and has_val:
            cmd += [
                "--load_best_model_at_end",
                "true",
                "--metric_for_best_model",
                "eval_loss",
                "--greater_is_better",
                "false",
                "--early_stopping_patience",
                str(patience),
            ]
            threshold = config.get("early_stopping_threshold", 0.0)
            if threshold > 0:
                cmd += ["--early_stopping_threshold", str(threshold)]
            save_limit = config.get("save_total_limit", 2)
            if save_limit < 2:
                cmd += ["--save_total_limit", "2"]
                print(
                    "[Training] Bumped save_total_limit to 2 (required for load_best_model_at_end)",
                    flush=True,
                )
        elif patience and not has_val:
            print(
                "[Training] WARNING: early_stopping_patience ignored — no validation data configured",
                flush=True,
            )

        resume = config.get("resume_from_checkpoint")
        if resume:
            if resume == "auto":
                ckpt = _find_latest_checkpoint(OUTPUT_DIR)
                if ckpt:
                    print(f"[Training] Auto-resume: found {ckpt}", flush=True)
                    cmd += ["--resume_from_checkpoint", str(ckpt)]
                else:
                    print("[Training] Auto-resume: no checkpoint found, starting fresh", flush=True)
            else:
                cmd += ["--resume_from_checkpoint", resume]

        if config.get("extra_args"):
            for k, v in config["extra_args"].items():
                flag = f"--{k}" if not k.startswith("--") else k
                cmd += [flag, str(v)]

        # Pre-download model so torchrun doesn't do it silently
        model_name = config["model"]
        print(f"[Training] Pre-downloading model: {model_name}", flush=True)
        try:
            from huggingface_hub import snapshot_download
            snapshot_download(model_name, token=hf_token or None)
            print(f"[Training] Model cached: {model_name}", flush=True)
        except Exception as e:
            print(f"[Training] Model pre-download note: {e} (torchrun will retry)", flush=True)

        if num_nodes > 1 and master_addr:
            _log_cluster_diagnostics(num_nodes, node_rank, master_addr, master_port)

        print(f"[Training] Running: {' '.join(cmd)}", flush=True)

        # Stream subprocess output to both the log file and stdout.
        with open(LOG_FILE, "w") as log:
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
            state.pid = proc.pid
            for line in iter(proc.stdout.readline, b""):
                decoded = line.decode("utf-8", errors="replace")
                log.write(decoded)
                log.flush()
                print(decoded, end="", flush=True)

            proc.wait()

        if proc.returncode != 0:
            state.status = "failed"
            state.error = f"Training exited with code {proc.returncode}"
            print(f"[Training] FAILED with exit code {proc.returncode}", flush=True)
        else:
            state.status = "completed"
            print("[Training] Completed successfully", flush=True)

    except Exception as e:
        state.status = "failed"
        state.error = str(e)
        print(f"[Training] Exception: {e}", flush=True)
    finally:
        state.finished_at = datetime.now(timezone.utc).isoformat()
        state.pid = None

        if push_queue is not None:
            push_queue.stop_and_wait()

        hf_push_ok = False
        if is_primary and state.status == "completed" and hf_repo_id and hf_token:
            state.hf_push_status = "pushing"
            print(
                "[HF Push] Starting final model push — container MUST stay alive until this completes",
                flush=True,
            )
            hf_push_ok = _hf_push_final_model(
                OUTPUT_DIR,
                hf_repo_id,
                hf_token,
                model_id=config["model"],
                tuner_type=config["tuner_type"],
            )
            state.hf_push_status = "success" if hf_push_ok else "failed"
            if not hf_push_ok:
                state.error = "Training completed but final HF push failed after all retries"
            print(f"[HF Push] Final push finished (success={hf_push_ok})", flush=True)

        if is_primary and state.status == "completed":
            _fire_completion_webhook(config, hf_push_ok=hf_push_ok)
        elif not is_primary:
            print(f"[Training] Worker node (rank {config.get('node_rank', '?')}) — skipping HF push and webhook", flush=True)

        _training_done.set()
        print("[Training] Training thread fully done (push complete, safe to exit)", flush=True)
1322
+
1323
+
1324
+ # ---------------------------------------------------------------------------
1325
+ # Graceful shutdown: wait for training + HF push before allowing exit
1326
+ # ---------------------------------------------------------------------------
1327
+
1328
# Snapshot whatever handlers were installed before ours (typically uvicorn's)
# so _graceful_shutdown can chain to them after draining the HF push.
_original_sigterm = signal.getsignal(signal.SIGTERM)
_original_sigint = signal.getsignal(signal.SIGINT)
1330
+
1331
+
1332
def _graceful_shutdown(signum, frame):
    """Block container exit until the HF push is finished."""
    sig_name = "SIGTERM" if signum == signal.SIGTERM else "SIGINT"

    if _training_done.is_set():
        print(
            f"[Shutdown] {sig_name} received, no training in progress — shutting down immediately",
            flush=True,
        )
    else:
        print(
            f"[Shutdown] {sig_name} received but training/push still in progress — waiting up to 30 min...",
            flush=True,
        )
        if _training_done.wait(timeout=1800):
            print("[Shutdown] Training + push completed, proceeding with shutdown", flush=True)
        else:
            print(
                "[Shutdown] CRITICAL: Timed out waiting for push after 30 min, forcing exit",
                flush=True,
            )

    # Re-raise to the original handler (uvicorn's) so the server actually stops
    prior = _original_sigterm if signum == signal.SIGTERM else _original_sigint
    if callable(prior):
        prior(signum, frame)
    else:
        raise SystemExit(0)
1360
+
1361
+
1362
+ # ---------------------------------------------------------------------------
1363
+ # Startup: recover interrupted HF pushes + install signal handlers
1364
+ # ---------------------------------------------------------------------------
1365
+
1366
+
1367
@app.on_event("startup")
def _on_startup():
    """Install graceful-shutdown signal handlers and start HF-push recovery."""
    # Wrap uvicorn's SIGTERM/SIGINT handlers so the process waits for any
    # in-flight HF push before exiting.
    # NOTE(review): @app.on_event is deprecated in newer FastAPI versions in
    # favor of lifespan handlers — works today, but worth migrating.
    signal.signal(signal.SIGTERM, _graceful_shutdown)
    signal.signal(signal.SIGINT, _graceful_shutdown)
    # Background daemon: resumes any HF push interrupted by a previous crash.
    threading.Thread(target=_recover_pending_push, daemon=True, name="hf-push-recovery").start()
1372
+
1373
+
1374
+ # ---------------------------------------------------------------------------
1375
+ # API endpoints
1376
+ # ---------------------------------------------------------------------------
1377
+
1378
+
1379
@app.get("/health")
def health():
    """Unauthenticated liveness probe with GPU and optional cluster info."""
    gpus = _gpu_info()
    nodes_total = int(os.environ.get("NUM_NODES", "1"))
    rank = int(os.environ.get("NODE_RANK", "0"))

    payload: dict[str, Any] = {"status": "ok", "num_gpus": len(gpus), "gpus": gpus}
    if nodes_total > 1:
        payload["cluster"] = {
            "num_nodes": nodes_total,
            "node_rank": rank,
            "master_addr": os.environ.get("MASTER_ADDR", ""),
            "master_port": os.environ.get("MASTER_PORT", "29500"),
        }
    return payload
1393
+
1394
+
1395
@app.get("/status", dependencies=[Depends(verify_secret)])
def get_status():
    """Return the current training state (status, timestamps, config, errors)."""
    return state.to_dict()
1398
+
1399
+
1400
@app.post("/train", dependencies=[Depends(verify_secret)])
def start_training(req: TrainRequest):
    """Validate the request and launch training in a background thread.

    Returns 409 when a run is already active and 400 when no dataset is
    available (neither specified in the request nor previously uploaded).
    NOTE(review): the status check and thread start are not atomic — two
    near-simultaneous requests could both pass the check; confirm whether
    a lock is warranted.
    """
    if state.status == "running":
        raise HTTPException(409, "Training already in progress")

    config = req.model_dump()
    if not config.get("dataset") and not Path("/workspace/data/train.jsonl").exists():
        raise HTTPException(
            400,
            "No dataset specified and no train.jsonl found. "
            "Either set 'dataset' to a HuggingFace ID or upload a JSONL file first.",
        )

    global _training_thread
    # daemon=False: the interpreter must wait for the HF push to finish.
    thread = threading.Thread(
        target=_run_training, args=(config,), daemon=False, name="training-main"
    )
    _training_thread = thread
    thread.start()
    return {"message": "Training started", "config": config}
1420
+
1421
+
1422
@app.get("/train/config")
def get_train_config():
    """Return the full training configuration schema with defaults, types, and descriptions.

    No auth required so dashboards can populate forms before the user enters a secret.
    """
    schema = TrainRequest.model_json_schema()

    fields: list[dict] = []
    for name, info in schema.get("properties", {}).items():
        field_type = info.get("type")
        nullable = False
        variants = info.get("anyOf")
        # Optional fields come through as anyOf [T, null]; pick the concrete
        # type and record nullability.
        if variants and not field_type:
            concrete = [
                v.get("type") for v in variants if v.get("type") and v.get("type") != "null"
            ]
            field_type = concrete[0] if concrete else "string"
            nullable = any(v.get("type") == "null" for v in variants)

        entry: dict[str, Any] = {
            "name": name,
            "type": field_type or "string",
            "nullable": nullable,
            "default": info.get("default"),
            "description": info.get("description", ""),
        }
        for json_key, out_key in (
            ("minimum", "min"),
            ("exclusiveMinimum", "exclusive_min"),
            ("maximum", "max"),
            ("enum", "options"),
        ):
            if json_key in info:
                entry[out_key] = info[json_key]

        fields.append(entry)

    return {"fields": fields, "defaults": TrainRequest().model_dump()}
1462
+
1463
+
1464
@app.post("/stop", dependencies=[Depends(verify_secret)])
def stop_training():
    """SIGTERM the running training process; report push-in-progress state."""
    if state.status != "running" or not state.pid:
        # Training already ended, but a post-training HF push may still be
        # draining — tell the caller the container must not be killed yet.
        if not _training_done.is_set():
            return {
                "message": "Training finished but HF push still in progress — container will stay alive"
            }
        raise HTTPException(400, "No training in progress")

    try:
        os.kill(state.pid, signal.SIGTERM)
    except ProcessLookupError:
        state.status = "idle"
        return {"message": "Process already exited"}

    state.status = "stopped"
    state.finished_at = datetime.now(timezone.utc).isoformat()
    return {
        "message": "Training stop signal sent (HF push will still complete before container exits)"
    }
1482
+
1483
+
1484
@app.post("/upload/dataset", dependencies=[Depends(verify_secret)])
async def upload_dataset(
    train_file: UploadFile = File(...),
    test_file: UploadFile | None = File(None),
):
    """Persist uploaded train (and optional test) JSONL files under DATA_DIR."""

    def _save(upload: UploadFile, dest: Path) -> int:
        # Stream the upload to disk and report its final size.
        with open(dest, "wb") as out:
            shutil.copyfileobj(upload.file, out)
        return dest.stat().st_size

    train_path = DATA_DIR / "train.jsonl"
    result = {"train_file": str(train_path), "train_size": _save(train_file, train_path)}

    if test_file:
        test_path = DATA_DIR / "test.jsonl"
        result["test_file"] = str(test_path)
        result["test_size"] = _save(test_file, test_path)

    return result
1502
+
1503
+
1504
@app.post("/upload/config", dependencies=[Depends(verify_secret)])
async def upload_config(config_file: UploadFile = File(...)):
    """Save an uploaded config file into CONFIG_DIR.

    SECURITY FIX: the client-supplied filename is reduced to its basename so
    a crafted name like '../../etc/cron.d/evil' cannot escape CONFIG_DIR
    (path traversal). Normal filenames are unaffected.
    """
    safe_name = Path(config_file.filename or "").name
    if not safe_name or safe_name in (".", ".."):
        raise HTTPException(400, "Invalid config filename")
    dest = CONFIG_DIR / safe_name
    with open(dest, "wb") as f:
        shutil.copyfileobj(config_file.file, f)
    return {"config_file": str(dest), "size": dest.stat().st_size}
1510
+
1511
+
1512
@app.get("/logs", dependencies=[Depends(verify_secret)])
def get_logs(lines: int = 100):
    """Return the last `lines` lines of the training log as one string."""
    if not LOG_FILE.exists():
        return {"logs": ""}
    tail = LOG_FILE.read_text().splitlines()[-lines:]
    return {"logs": "\n".join(tail)}
1518
+
1519
+
1520
@app.get("/checkpoints", dependencies=[Depends(verify_secret)])
def list_checkpoints():
    """List checkpoint dirs: per-run subdirectories first, then root-level."""
    found = [
        {"path": str(d), "name": d.name, "run": d.parent.name}
        for d in sorted(OUTPUT_DIR.glob("*/checkpoint-*"))
    ]
    found += [
        {"path": str(d), "name": d.name, "run": "root"}
        for d in sorted(OUTPUT_DIR.glob("checkpoint-*"))
    ]
    return {"checkpoints": found}
1540
+
1541
+
1542
+ def _detect_gpu_count() -> int:
1543
+ try:
1544
+ out = subprocess.check_output(["nvidia-smi", "-L"], text=True)
1545
+ count = len([l for l in out.strip().splitlines() if l.strip()])
1546
+ return max(count, 1)
1547
+ except Exception:
1548
+ return 1
1549
+
1550
+
1551
+ def _gpu_info():
1552
+ try:
1553
+ out = subprocess.check_output(
1554
+ [
1555
+ "nvidia-smi",
1556
+ "--query-gpu=name,memory.total,memory.used,memory.free",
1557
+ "--format=csv,noheader,nounits",
1558
+ ],
1559
+ text=True,
1560
+ )
1561
+ gpus = []
1562
+ for line in out.strip().splitlines():
1563
+ parts = [p.strip() for p in line.split(",")]
1564
+ gpus.append(
1565
+ {
1566
+ "name": parts[0],
1567
+ "memory_total_mb": int(parts[1]),
1568
+ "memory_used_mb": int(parts[2]),
1569
+ "memory_free_mb": int(parts[3]),
1570
+ }
1571
+ )
1572
+ return gpus
1573
+ except Exception:
1574
+ return []