# gregjanik's picture
# Upload folder using huggingface_hub
# 55cd3fe verified
import contextlib
import json
import os
import re
import shutil
import signal
import subprocess
import threading
import time
import urllib.request
from datetime import datetime, timezone
from pathlib import Path
from typing import Any
from fastapi import Depends, FastAPI, File, Header, HTTPException, UploadFile
from pydantic import BaseModel, Field, model_validator
# FastAPI application exposing the fine-tuning control endpoints.
app = FastAPI(title="Qwen 3.5 SFT Fine-Tuning API", version="2.0.0")
# Shared secret expected as "Bearer <secret>" on authenticated endpoints.
API_SECRET = os.environ.get("API_SECRET", "")
# Well-known container paths for uploads, checkpoints, configs and the log file.
WORKSPACE = Path("/workspace")
DATA_DIR = WORKSPACE / "data"
OUTPUT_DIR = WORKSPACE / "output"
CONFIG_DIR = WORKSPACE / "config"
LOG_FILE = WORKSPACE / "training.log"
DATA_DIR.mkdir(parents=True, exist_ok=True)
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
# Global event: set when training + HF push are fully done.
# The SIGTERM handler waits on this before allowing the process to exit,
# so the container is never killed while a push is in flight.
_training_done = threading.Event()
_training_done.set()  # starts "done" (no training in progress)
# Handle of the currently running training thread, if any.
_training_thread: threading.Thread | None = None
# ---------------------------------------------------------------------------
# Multi-node readiness barrier
# ---------------------------------------------------------------------------
# In a cluster, every node pre-downloads model/dataset independently.
# Once done, workers POST to the master's /barrier/ready. The master
# counts itself + all workers, then every node polls /barrier/wait until
# the count reaches num_nodes. Only then does any node start torchrun.
# ---------------------------------------------------------------------------
# HuggingFace Hub push helpers
# ---------------------------------------------------------------------------
def _ensure_readme_metadata(checkpoint_path: Path, model_id: str):
"""Ensure the checkpoint has a README.md with correct base_model metadata.
ms-swift either writes a local cache path as base_model (which HF rejects)
or the README may be missing entirely. This function fixes existing READMEs
or creates a minimal one so HF Hub always has the base_model field set to
the canonical model ID (e.g. ``Qwen/Qwen3.5-4B``).
"""
readme = checkpoint_path / "README.md"
if readme.exists():
text = readme.read_text(encoding="utf-8")
fm_match = re.match(r"^---\n(.*?\n)---", text, re.DOTALL)
if fm_match:
front_matter = fm_match.group(1)
original = front_matter
# Fix base_model values that are local paths
front_matter = re.sub(
r'(base_model\s*:\s*)["\']?(/[^\s"\']+)["\']?',
rf"\1{model_id}",
front_matter,
)
# If base_model is missing entirely, add it
if "base_model" not in front_matter:
front_matter = f"base_model: {model_id}\n{front_matter}"
if front_matter != original:
text = f"---\n{front_matter}---" + text[fm_match.end() :]
readme.write_text(text, encoding="utf-8")
print(
f"[HF Push] Fixed README metadata in {checkpoint_path.name} → base_model: {model_id}",
flush=True,
)
return
# No README or no front-matter — create a minimal one
readme.write_text(
f"---\nbase_model: {model_id}\ntags:\n- fine-tuned\n- ms-swift\nlibrary_name: transformers\n---\n\n"
f"# {checkpoint_path.name}\n\nFine-tuned from [{model_id}](https://huggingface.co/{model_id}) using [ms-swift](https://github.com/modelscope/ms-swift).\n",
encoding="utf-8",
)
print(
f"[HF Push] Created README.md for {checkpoint_path.name} with base_model: {model_id}",
flush=True,
)
def _fix_adapter_config_base_model(checkpoint_path: Path, model_id: str):
"""Rewrite base_model_name_or_path in adapter_config.json from a local
cache path (e.g. /workspace/hf-cache/hub/models--Qwen--Qwen3.5-9B/...)
to the canonical HF model ID (e.g. Qwen/Qwen3.5-9B)."""
adapter_cfg = checkpoint_path / "adapter_config.json"
if not adapter_cfg.exists():
return
try:
cfg = json.loads(adapter_cfg.read_text(encoding="utf-8"))
base = cfg.get("base_model_name_or_path", "")
if base.startswith("/") or "--" in base:
cfg["base_model_name_or_path"] = model_id
adapter_cfg.write_text(
json.dumps(cfg, indent=2, ensure_ascii=False) + "\n", encoding="utf-8"
)
print(
f"[HF Push] Fixed adapter_config.json base_model_name_or_path: {base}{model_id}",
flush=True,
)
except Exception as e:
print(f"[HF Push] Warning: could not fix adapter_config.json: {e}", flush=True)
# Files excluded from every Hub upload: optimizer/scheduler/RNG state and
# DeepSpeed shards are only needed to resume training, not for inference,
# and they dominate checkpoint size.
CHECKPOINT_IGNORE_PATTERNS = [
    "optimizer.pt",
    "optim_states.pt",
    "scheduler.pt",
    "rng_state*.pth",
    "global_step*",
    "zero_to_fp32.py",
    "*.distcp",
]
def _checkpoint_is_ready(checkpoint_path: Path, settle_seconds: float = 30) -> bool:
"""Return True only when no file in the checkpoint has been modified recently.
This prevents uploading a half-written checkpoint while the trainer is
still flushing large .safetensors / optimizer files to disk.
"""
cutoff = time.time() - settle_seconds
try:
for f in checkpoint_path.rglob("*"):
if f.is_file() and f.stat().st_mtime > cutoff:
return False
except OSError:
return False
return True
def _hf_push_checkpoint(
    checkpoint_path: Path, repo_id: str, hf_token: str, commit_message: str, model_id: str = ""
):
    """Push a single checkpoint directory to HuggingFace Hub."""
    try:
        if model_id:
            # Sanitize metadata first so the Hub never sees local cache paths.
            _ensure_readme_metadata(checkpoint_path, model_id)
            _fix_adapter_config_base_model(checkpoint_path, model_id)
        from huggingface_hub import HfApi

        hub = HfApi(token=hf_token)
        hub.create_repo(repo_id, exist_ok=True, private=True)
        hub.upload_folder(
            repo_id=repo_id,
            folder_path=str(checkpoint_path),
            path_in_repo=checkpoint_path.name,
            commit_message=commit_message,
            ignore_patterns=CHECKPOINT_IGNORE_PATTERNS,
        )
        print(f"[HF Push] Pushed {checkpoint_path.name} to {repo_id}")
    except Exception as e:
        print(f"[HF Push] Failed to push {checkpoint_path.name}: {e}")
# Persisted record of an in-flight/interrupted final push, consumed by
# _recover_pending_push() after a container restart.
_HF_PUSH_STATE_FILE = OUTPUT_DIR / ".hf_push_state.json"
def _save_push_state(repo_id: str, hf_token: str, model_id: str, checkpoint_name: str):
    """Persist push intent so it can be recovered after a crash/restart."""
    record = {
        "repo_id": repo_id,
        "hf_token": hf_token,
        "model_id": model_id,
        "checkpoint_name": checkpoint_name,
        "created_at": datetime.now(timezone.utc).isoformat(),
    }
    try:
        _HF_PUSH_STATE_FILE.write_text(json.dumps(record))
    except Exception as e:
        # Best-effort: a failed save only degrades crash recovery.
        print(f"[HF Push] Warning: could not save push state: {e}", flush=True)
def _clear_push_state():
    """Delete the persisted push-state file, swallowing every failure."""
    try:
        _HF_PUSH_STATE_FILE.unlink(missing_ok=True)
    except Exception:
        pass
def _verify_hf_push(repo_id: str, hf_token: str, checkpoint_name: str | None = None) -> bool:
"""Verify that the model was actually pushed to HuggingFace by checking for key files."""
try:
from huggingface_hub import HfApi
api = HfApi(token=hf_token)
# list_repo_files returns strings (file paths relative to repo root)
files = list(api.list_repo_files(repo_id))
prefix = f"{checkpoint_name}/" if checkpoint_name else ""
has_config = f"{prefix}config.json" in files
has_weights = any(
f.startswith(prefix) and (f.endswith(".safetensors") or f.endswith(".bin"))
for f in files
)
return has_config and has_weights
except Exception as e:
print(f"[HF Push] Verification failed: {e}", flush=True)
return False
def _hf_push_final_model(
    output_dir: Path,
    repo_id: str,
    hf_token: str,
    model_id: str = "",
    tuner_type: str = "full",
    max_retries: int = 3,
) -> bool:
    """Push the final trained model to HuggingFace Hub as the repo root.

    Returns True if the push succeeded and was verified, False otherwise.
    Retries on transient failures.

    Args:
        output_dir: Training output root scanned for checkpoints.
        repo_id: Target HF repo (e.g. "org/model-name").
        hf_token: Token used for repo creation, upload, and verification.
        model_id: Canonical base-model ID used to fix README metadata.
        tuner_type: Currently unused in this function; kept for call-site
            compatibility.
        max_retries: Number of upload attempts before giving up.
    """
    best = _find_best_checkpoint(output_dir)
    if not best:
        print("[HF Push] No checkpoint found for final push", flush=True)
        return False
    # Record intent up front so an interrupted push can be resumed on restart.
    _save_push_state(repo_id, hf_token, model_id, best.name)
    upload_dir = best
    if model_id:
        _ensure_readme_metadata(upload_dir, model_id)
    from huggingface_hub import HfApi
    for attempt in range(1, max_retries + 1):
        try:
            print(
                f"[HF Push] Final push attempt {attempt}/{max_retries}: {best.name} -> {repo_id}",
                flush=True,
            )
            api = HfApi(token=hf_token)
            api.create_repo(repo_id, exist_ok=True, private=True)
            api.upload_folder(
                folder_path=str(upload_dir),
                repo_id=repo_id,
                commit_message=f"Final model from {best.name}",
                ignore_patterns=CHECKPOINT_IGNORE_PATTERNS,
            )
            print("[HF Push] Upload complete, verifying...", flush=True)
            # The upload call returning is not enough — confirm the files are
            # actually visible on the Hub before clearing the saved state.
            if _verify_hf_push(repo_id, hf_token):
                print(
                    f"[HF Push] Verified: final model from {best.name} is on {repo_id}", flush=True
                )
                _clear_push_state()
                return True
            else:
                print(f"[HF Push] Verification failed after upload (attempt {attempt})", flush=True)
        except Exception as e:
            print(f"[HF Push] Attempt {attempt} failed: {e}", flush=True)
        # Linear backoff between attempts (upload error or failed verification).
        if attempt < max_retries:
            wait = 15 * attempt
            print(f"[HF Push] Retrying in {wait}s...", flush=True)
            time.sleep(wait)
    print(f"[HF Push] CRITICAL: All {max_retries} attempts to push final model failed!", flush=True)
    return False
def _recover_pending_push():
    """On startup, check if a previous push was interrupted and retry it."""
    if not _HF_PUSH_STATE_FILE.exists():
        return
    try:
        saved = json.loads(_HF_PUSH_STATE_FILE.read_text())
    except Exception:
        _clear_push_state()
        return
    repo_id = saved.get("repo_id")
    hf_token = saved.get("hf_token")
    model_id = saved.get("model_id", "")
    checkpoint_name = saved.get("checkpoint_name", "")
    if not (repo_id and hf_token):
        # Unusable state record — drop it.
        _clear_push_state()
        return
    print(f"[HF Push] Recovering interrupted push: {checkpoint_name} -> {repo_id}", flush=True)
    # If the push actually finished (root or checkpoint subdir), nothing to do.
    if _verify_hf_push(repo_id, hf_token):
        print("[HF Push] Recovery: push already completed (verified on HF)", flush=True)
        _clear_push_state()
        return
    if _verify_hf_push(repo_id, hf_token, checkpoint_name):
        print(
            f"[HF Push] Recovery: checkpoint {checkpoint_name} already on HF (verified)", flush=True
        )
        _clear_push_state()
        return
    if _hf_push_final_model(OUTPUT_DIR, repo_id, hf_token, model_id=model_id):
        print("[HF Push] Recovery push succeeded", flush=True)
    else:
        print("[HF Push] Recovery push FAILED — manual intervention needed", flush=True)
def _find_best_checkpoint(output_dir: Path) -> Path | None:
    """Pick the checkpoint to publish.

    Prefers the checkpoint recorded as best_model_checkpoint in
    trainer_state.json (written when load_best_model_at_end is enabled);
    otherwise falls back to the most recently modified checkpoint directory,
    checking nested run dirs before top-level ones.
    """
    by_metric = _best_checkpoint_from_trainer_state(output_dir)
    if by_metric and by_metric.exists():
        print(f"[Checkpoint] Using best by eval metric: {by_metric}", flush=True)
        return by_metric
    for pattern in ("*/checkpoint-*", "checkpoint-*"):
        candidates = sorted(output_dir.glob(pattern), key=lambda p: p.stat().st_mtime)
        if candidates:
            return candidates[-1]
    return None
def _best_checkpoint_from_trainer_state(output_dir: Path) -> Path | None:
"""Read trainer_state.json to find the checkpoint with the best eval metric."""
for state_file in output_dir.rglob("trainer_state.json"):
try:
ts = json.loads(state_file.read_text(encoding="utf-8"))
best = ts.get("best_model_checkpoint")
if best:
best_path = Path(best)
if best_path.exists():
return best_path
relative = output_dir / best_path.name
if relative.exists():
return relative
except Exception:
continue
return None
def _find_latest_checkpoint(output_dir: Path) -> Path | None:
"""Find the most recent checkpoint by step number across all run directories."""
all_ckpts: list[tuple[int, Path]] = []
for pattern in ("*/checkpoint-*", "checkpoint-*"):
for p in output_dir.glob(pattern):
m = re.search(r"checkpoint-(\d+)$", p.name)
if m and p.is_dir():
all_ckpts.append((int(m.group(1)), p))
if not all_ckpts:
return None
all_ckpts.sort(key=lambda t: t[0])
return all_ckpts[-1][1]
class _HFPushQueue:
    """Watches the output directory for new checkpoints and pushes them to HF.

    Uploads are serialized through a single worker thread to prevent concurrent
    commits to the same repo (which cause commit-race failures on HF Hub).
    Checkpoints are enqueued as soon as they are discovered so they cannot be
    deleted by the trainer (save_total_limit rotation) before the upload starts.
    The upload worker waits for the checkpoint to settle before pushing.
    """

    def __init__(
        self,
        repo_id: str,
        hf_token: str,
        model_id: str = "",
        output_dir: Path = OUTPUT_DIR,
        poll_interval: float = 15,
    ):
        self.repo_id = repo_id
        self.hf_token = hf_token
        self.model_id = model_id
        self._output_dir = output_dir
        self._poll_interval = poll_interval
        # Checkpoints already enqueued, keyed by directory NAME (not full path)
        # — NOTE(review): a nested and a top-level checkpoint with the same
        # name would deduplicate against each other; confirm intended.
        self._pushed: set[str] = set()
        self._queue: list[Path] = []
        self._queue_lock = threading.Lock()
        self._stop = threading.Event()
        self._has_work = threading.Event()
        # Non-daemon threads: interpreter exit waits for in-flight uploads.
        self._watcher = threading.Thread(target=self._watch, daemon=False, name="hf-push-watcher")
        self._worker = threading.Thread(
            target=self._upload_worker, daemon=False, name="hf-push-worker"
        )
        self._watcher.start()
        self._worker.start()

    def _discover_checkpoints(self) -> list[Path]:
        """Find all checkpoint-* dirs under output_dir (any nesting depth)."""
        found = []
        for pattern in ("*/checkpoint-*", "checkpoint-*"):
            for p in self._output_dir.glob(pattern):
                if p.is_dir() and p.name not in self._pushed:
                    found.append(p)
        # Oldest-first so uploads roughly follow training order.
        return sorted(found, key=lambda p: p.stat().st_mtime)

    def _watch(self):
        """Poll for new checkpoints and enqueue immediately on discovery."""
        while not self._stop.is_set():
            self._stop.wait(self._poll_interval)
            if self._stop.is_set():
                break
            for ckpt in self._discover_checkpoints():
                self._pushed.add(ckpt.name)
                self._enqueue(ckpt)

    def _enqueue(self, checkpoint_path: Path):
        # Append under the lock; the worker drains the list FIFO.
        with self._queue_lock:
            self._queue.append(checkpoint_path)
        self._has_work.set()
        print(f"[HF Push] Queued upload for {checkpoint_path.name}", flush=True)

    def _upload_worker(self):
        """Sequentially process queued uploads — one commit at a time.

        Waits for each checkpoint to settle (files stop changing) before
        uploading. If the checkpoint directory is deleted before it settles
        (e.g. by save_total_limit rotation), the upload is skipped.
        """
        while True:
            # Wake on new work or every 5s to re-check the stop condition.
            self._has_work.wait(timeout=5)
            self._has_work.clear()
            while True:
                with self._queue_lock:
                    if not self._queue:
                        break
                    ckpt = self._queue.pop(0)
                if not ckpt.exists():
                    print(f"[HF Push] {ckpt.name} was deleted before upload, skipping", flush=True)
                    continue
                # Wait up to 12 * 5s for writes to settle (or the dir to vanish).
                for _ in range(12):
                    if _checkpoint_is_ready(ckpt):
                        break
                    if not ckpt.exists():
                        break
                    time.sleep(5)
                if not ckpt.exists():
                    print(f"[HF Push] {ckpt.name} was deleted before upload, skipping", flush=True)
                    continue
                _hf_push_checkpoint(
                    ckpt, self.repo_id, self.hf_token, f"Checkpoint {ckpt.name}", self.model_id
                )
            # Exit only once stopped AND the queue is fully drained.
            if self._stop.is_set():
                with self._queue_lock:
                    if not self._queue:
                        break

    def stop_and_wait(self, timeout: float = 600):
        """Stop watching and drain any remaining uploads."""
        self._stop.set()
        self._watcher.join(timeout=10)
        # Final sweep: enqueue anything the watcher had not seen yet.
        for ckpt in self._discover_checkpoints():
            self._pushed.add(ckpt.name)
            self._enqueue(ckpt)
        self._has_work.set()
        self._worker.join(timeout=timeout)
        print("[HF Push] All uploads done.", flush=True)
# ---------------------------------------------------------------------------
# Webhook helper
# ---------------------------------------------------------------------------
class _PostRedirectHandler(urllib.request.HTTPRedirectHandler):
"""Preserve POST method and body through 301/302/307/308 redirects."""
def redirect_request(self, req, fp, code, msg, headers, newurl):
new_req = urllib.request.Request(
newurl,
data=req.data,
headers=dict(req.headers),
method="POST",
)
return new_req
def _fire_webhook(url: str, secret: str, payload: dict):
    """POST JSON to the dashboard webhook."""
    # Upgrade plain-http targets to https, except local development hosts.
    is_local = "localhost" in url or "127.0.0.1" in url
    if url.startswith("http://") and not is_local:
        url = url.replace("http://", "https://", 1)
    try:
        request = urllib.request.Request(
            url,
            data=json.dumps(payload).encode(),
            headers={
                "Content-Type": "application/json",
                "X-Webhook-Secret": secret,
            },
            method="POST",
        )
        # Custom opener keeps the POST body across redirects.
        opener = urllib.request.build_opener(_PostRedirectHandler)
        with opener.open(request, timeout=15) as resp:
            print(f"[Webhook] Fired to {url} — status {resp.status}")
    except Exception as e:
        # Webhooks are best-effort; never let a delivery failure propagate.
        print(f"[Webhook] Failed to fire to {url}: {e}")
def _fire_completion_webhook(config: dict, hf_push_ok: bool = False):
    """Notify the dashboard that training completed successfully.

    Only called after a real successful training run (returncode 0)
    AND after the final HF push has been attempted.
    """
    url = config.get("webhook_url")
    job_id = config.get("training_job_id")
    if not (url and job_id):
        # Without both a target and a job ID there is nothing to report.
        return
    payload = {
        "jobId": job_id,
        "status": "completed",
        "hfPushOk": hf_push_ok,
    }
    _fire_webhook(url, config.get("webhook_secret") or "", payload)
# ---------------------------------------------------------------------------
# Training state
# ---------------------------------------------------------------------------
class TrainingState:
    """Mutable, process-wide record of the current (or most recent) run."""

    def __init__(self):
        # Lifecycle starts idle; the training thread updates these fields.
        self.status = "idle"
        self.started_at: str | None = None
        self.finished_at: str | None = None
        self.error: str | None = None
        self.pid: int | None = None
        self.config: dict = {}
        self.hf_push_status: str | None = None  # None, "pushing", "success", "failed"

    def to_dict(self):
        """Serialize state for the status endpoint, plus a 50-line log tail."""
        log_tail = ""
        if LOG_FILE.exists():
            try:
                log_tail = "\n".join(LOG_FILE.read_text().splitlines()[-50:])
            except Exception:
                # Log tail is best-effort; serve the rest of the state anyway.
                log_tail = ""
        return {
            "status": self.status,
            "started_at": self.started_at,
            "finished_at": self.finished_at,
            "error": self.error,
            "pid": self.pid,
            "config": self.config,
            "log_tail": log_tail,
            "hf_push_status": self.hf_push_status,
            "push_in_progress": not _training_done.is_set(),
        }
# Singleton run-state shared by the API endpoints and the training thread.
state = TrainingState()
def verify_secret(authorization: str = Header(...)):
    """FastAPI dependency: reject requests lacking the 'Bearer <API_SECRET>' header."""
    if not API_SECRET:
        # Misconfiguration on our side, not the caller's — surface as 500.
        raise HTTPException(500, "API_SECRET env var not set on server")
    if authorization != f"Bearer {API_SECRET}":
        raise HTTPException(401, "Invalid or missing API secret")
def _to_snake_case(key: str) -> str:
"""Convert camelCase, PascalCase, UPPER_CASE, or kebab-case to snake_case."""
key = key.replace("-", "_")
key = re.sub(r"([A-Z]+)([A-Z][a-z])", r"\1_\2", key)
key = re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", key)
return key.lower()
class TrainRequest(BaseModel):
    """Validated request body for launching an SFT fine-tuning run.

    Keys may arrive in camelCase, PascalCase, UPPER_CASE, or kebab-case; the
    ``_normalize_keys`` pre-validator maps them onto these snake_case fields.
    Scheduler, optimizer, and attention choices are enum-checked after parsing
    by ``_validate_enums``.
    """

    # --- Model & data -----------------------------------------------------
    model: str = Field("Qwen/Qwen3.5-4B", description="HuggingFace model ID or local path")
    dataset: str | None = Field(
        None,
        description="HuggingFace dataset ID (e.g. 'tatsu-lab/alpaca') or leave empty to use uploaded JSONL",
    )
    dataset_subset: str | None = Field(None, description="Dataset subset/config name")
    val_split_ratio: float | None = Field(
        None,
        ge=0.0,
        le=0.5,
        description="Fraction of training data to hold out for validation (0.0-0.5). Ignored when val_dataset is set",
    )
    val_dataset: str | None = Field(
        None, description="HuggingFace dataset ID for validation, or leave empty"
    )
    # --- Core training hyperparameters ------------------------------------
    num_epochs: int = Field(3, ge=1, le=100, description="Number of training epochs")
    batch_size: int = Field(1, ge=1, description="Per-device training batch size")
    grad_accum: int = Field(
        4,
        ge=1,
        description="Gradient accumulation steps (effective batch = batch_size * grad_accum * num_gpus)",
    )
    learning_rate: float = Field(2e-5, gt=0, description="Peak learning rate")
    max_length: int = Field(
        2048,
        ge=128,
        description="Max sequence length in tokens. Rows exceeding this are dropped. Qwen 3.5 supports up to 32768",
    )
    save_steps: int = Field(10, ge=1, description="Save a checkpoint every N steps")
    eval_steps: int | None = Field(
        None, ge=1, description="Run evaluation every N steps. Defaults to save_steps if not set"
    )
    save_total_limit: int = Field(
        2, ge=1, description="Max number of checkpoints to keep on disk (oldest are deleted)"
    )
    logging_steps: int = Field(5, ge=1, description="Log metrics every N steps")
    tuner_type: str = Field("full", description="Tuning strategy (only 'full' is supported)")
    warmup_ratio: float = Field(
        0.1,
        ge=0.0,
        le=1.0,
        description="Fraction of total steps used for linear LR warmup (0.0-1.0)",
    )
    lr_scheduler_type: str = Field(
        "cosine",
        description="LR scheduler: cosine, linear, cosine_with_restarts, polynomial, constant, constant_with_warmup, inverse_sqrt",
    )
    weight_decay: float = Field(0.1, ge=0.0, description="L2 weight decay coefficient")
    max_grad_norm: float = Field(
        1.0, ge=0.0, description="Max gradient norm for clipping (0 = no clipping)"
    )
    optimizer: str = Field(
        "adamw_torch",
        description="Optimizer: adamw_torch, adamw_torch_fused, adamw_8bit, paged_adamw_8bit, paged_adamw_32bit, adafactor, sgd",
    )
    seed: int = Field(42, ge=0, description="Random seed for reproducibility")
    neftune_alpha: float | None = Field(
        None,
        ge=0.0,
        description="NEFTune noise alpha for embedding regularization (null/0 = off, try 5-15)",
    )
    packing: bool = Field(
        False,
        description="Pack multiple samples into uniform-length sequences to reduce padding waste",
    )
    shuffle_dataset: bool = Field(
        False,
        description="Explicitly shuffle the dataset before training. The dataloader already uses a random sampler by default, so this adds an extra pre-shuffle pass",
    )
    lazy_tokenize: bool = Field(
        True,
        description="Tokenize samples on-the-fly during training instead of pre-tokenizing the entire dataset into memory. Prevents OOM on large datasets",
    )
    dataset_num_proc: int = Field(
        4,
        ge=1,
        le=128,
        description="Number of processes for dataset preprocessing. Higher values speed up tokenization but use more CPU/RAM",
    )
    attn_impl: str = Field(
        "flash_attn",
        description="Attention implementation: 'flash_attn' (recommended, O(n) memory), 'sdpa' (PyTorch native), or 'eager' (naive, O(n^2) memory). Qwen3.5 has full-attention layers that OOM without flash_attn at long sequences",
    )
    # --- Distributed training ----------------------------------------------
    deepspeed: str | None = Field(
        None,
        description="DeepSpeed config: 'zero2', 'zero3', or null. Auto-set to 'zero2' when num_gpus > 1",
    )
    num_gpus: int | None = Field(
        None, description="Number of GPUs to use. null = auto-detect all available GPUs"
    )
    num_nodes: int | None = Field(
        None,
        ge=1,
        le=64,
        description="Number of nodes for multi-node training. null = auto-detect from NUM_NODES env var (set by RunPod Instant Clusters). 1 = single-node",
    )
    node_rank: int | None = Field(
        None,
        ge=0,
        description="This node's rank in the cluster. null = auto-detect from NODE_RANK env var. 0 = primary node",
    )
    master_addr: str | None = Field(
        None,
        description="Primary node address for distributed training. null = auto-detect from MASTER_ADDR env var",
    )
    master_port: str | None = Field(
        None,
        description="Primary node port for distributed training. null = auto-detect from MASTER_PORT env var",
    )
    gradient_checkpointing: bool = Field(
        True,
        description="Enable gradient checkpointing to reduce VRAM at the cost of ~20% slower training",
    )
    use_flash_ckpt: bool = Field(
        False,
        description="Use flash checkpointing (experimental, requires dlrover). Disabled by default due to dlrover/ms-swift compatibility issues",
    )
    resume_from_checkpoint: str | None = Field(
        None,
        description=(
            "Resume training from a checkpoint. Values: "
            "'auto' = find the latest local checkpoint automatically; "
            "a local path like '/workspace/output/v0-.../checkpoint-100'; "
            "or null to start fresh"
        ),
    )
    # --- Integrations: HF Hub, W&B, webhooks --------------------------------
    hf_token: str | None = Field(None, description="HuggingFace token (overrides HF_TOKEN env var)")
    hf_repo_id: str | None = Field(
        None, description="HuggingFace repo to push checkpoints/final model (e.g. 'org/model-name')"
    )
    wandb_project: str | None = Field(None, description="W&B project name (enables wandb logging)")
    wandb_entity: str | None = Field(None, description="W&B entity/team name")
    wandb_run_name: str | None = Field(None, description="W&B run name")
    wandb_api_key: str | None = Field(
        None, description="W&B API key (overrides WANDB_API_KEY env var)"
    )
    webhook_url: str | None = Field(None, description="URL to POST when training completes")
    webhook_secret: str | None = Field(None, description="Secret sent in X-Webhook-Secret header")
    training_job_id: str | None = Field(None, description="Dashboard job ID passed back in webhook")
    # --- Multimodal & early stopping ----------------------------------------
    max_pixels: int | None = Field(
        None,
        ge=1024,
        description="Max pixels per image for multimodal training (controls image resolution/VRAM). e.g. 1003520 for ~1M pixels. null = model default",
    )
    min_pixels: int | None = Field(
        None,
        ge=256,
        description="Min pixels per image for multimodal training. null = model default",
    )
    early_stopping_patience: int | None = Field(
        None,
        ge=1,
        description="Stop training when eval loss hasn't improved for this many eval rounds. Requires validation data (val_dataset or val_split_ratio). null = disabled",
    )
    early_stopping_threshold: float = Field(
        0.0,
        ge=0.0,
        description="Minimum eval loss improvement to count as 'better' (0.0 = any improvement counts)",
    )
    extra_args: dict | None = Field(
        None,
        description='Extra args passed directly to swift sft (e.g. {"truncation_strategy": "truncation_left"})',
    )
    # Allowed enum values, enforced by _validate_enums after parsing.
    _VALID_LR_SCHEDULERS = frozenset(
        {
            "cosine",
            "linear",
            "cosine_with_restarts",
            "polynomial",
            "constant",
            "constant_with_warmup",
            "inverse_sqrt",
        }
    )
    _VALID_OPTIMIZERS = frozenset(
        {
            "adamw_torch",
            "adamw_torch_fused",
            "adamw_8bit",
            "adamw_bnb_8bit",
            "paged_adamw_8bit",
            "paged_adamw_32bit",
            "adafactor",
            "sgd",
        }
    )
    _VALID_ATTN_IMPLS = frozenset(
        {
            "flash_attn",
            "flash_attention_2",
            "sdpa",
            "eager",
        }
    )

    @model_validator(mode="before")
    @classmethod
    def _normalize_keys(cls, data: Any) -> Any:
        # Convert any common key casing to snake_case; unknown keys pass
        # through untouched so pydantic can report them as errors.
        if not isinstance(data, dict):
            return data
        known = set(cls.model_fields.keys())
        normalized: dict[str, Any] = {}
        for key, value in data.items():
            snake = _to_snake_case(key)
            if snake in known:
                normalized[snake] = value
            else:
                normalized[key] = value
        return normalized

    @model_validator(mode="after")
    def _validate_enums(self):
        # Enum-style membership checks that Field ge/le constraints can't express.
        if self.lr_scheduler_type not in self._VALID_LR_SCHEDULERS:
            raise ValueError(
                f"lr_scheduler_type must be one of {sorted(self._VALID_LR_SCHEDULERS)}, got '{self.lr_scheduler_type}'"
            )
        if self.optimizer not in self._VALID_OPTIMIZERS:
            raise ValueError(
                f"optimizer must be one of {sorted(self._VALID_OPTIMIZERS)}, got '{self.optimizer}'"
            )
        if self.attn_impl not in self._VALID_ATTN_IMPLS:
            raise ValueError(
                f"attn_impl must be one of {sorted(self._VALID_ATTN_IMPLS)}, got '{self.attn_impl}'"
            )
        return self
# ---------------------------------------------------------------------------
# Pre-flight checks
# ---------------------------------------------------------------------------
def _preflight_fla_check():
"""Verify flash-linear-attention is importable before training starts.
Qwen 3.5 models use GatedDeltaNet for ~75% of their attention layers.
Without the `fla` package these layers silently fall back to a naive
O(n²) recurrence that uses 2-3x the VRAM — no warning, no error.
This check fails fast so we don't burn GPU hours on a doomed run.
"""
try:
import fla # noqa: F401
from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule # noqa: F401
print(
"[Pre-flight] flash-linear-attention OK — GatedDeltaNet layers will use FLA kernels",
flush=True,
)
except ImportError as e:
print(
f"[Pre-flight] WARNING: flash-linear-attention not importable: {e}\n"
" GatedDeltaNet layers will fall back to naive O(n²) recurrence.\n"
" This will use 2-3x more VRAM and likely OOM on sequences >4k.\n"
" Install: pip install git+https://github.com/fla-org/flash-linear-attention",
flush=True,
)
except Exception as e:
print(f"[Pre-flight] flash-linear-attention import issue (non-fatal): {e}", flush=True)
try:
import causal_conv1d # noqa: F401
print("[Pre-flight] causal-conv1d OK", flush=True)
except ImportError:
print(
"[Pre-flight] WARNING: causal-conv1d not available — some FLA ops may be slower",
flush=True,
)
# ---------------------------------------------------------------------------
# Training runner
# ---------------------------------------------------------------------------
def _log_cluster_diagnostics(
num_nodes: int,
node_rank: int,
master_addr: str,
master_port: str | int,
):
"""Log network interfaces and cluster env vars for debugging."""
import subprocess as _sp
try:
ifaces = _sp.check_output(["ip", "-4", "addr", "show"], timeout=5).decode()
print(f"[Cluster] Network interfaces:\n{ifaces}", flush=True)
except Exception as e:
print(f"[Cluster] Could not list interfaces: {e}", flush=True)
node_addr = os.environ.get("NODE_ADDR", "")
primary_addr = os.environ.get("PRIMARY_ADDR", "")
print(
f"[Cluster] rank={node_rank}/{num_nodes}, "
f"MASTER_ADDR={master_addr}, MASTER_PORT={master_port}, "
f"NODE_ADDR={node_addr}, PRIMARY_ADDR={primary_addr}",
flush=True,
)
def _prepare_hf_dataset(snap_path: str, dataset_name: str) -> str | None:
    """Copy JSONL from a HF snapshot to /workspace/data/ with absolute image paths.

    ms-swift resolves relative image paths from the CWD, not from the JSONL
    location. When a dataset is pulled from HuggingFace Hub the images live
    inside the snapshot cache dir, so relative paths like ``images/foo.jpg``
    break. This function rewrites them to absolute paths and writes a local
    copy that ``swift sft`` can consume directly.

    Returns the path to the local train JSONL, or None on failure.

    NOTE(review): ``dataset_name`` is currently unused in this body — kept,
    presumably, for call-site compatibility; confirm before removing.
    """
    import json as _json

    snap = Path(snap_path)
    jsonl_files = sorted(snap.rglob("*.jsonl"))
    if not jsonl_files:
        print(f"[Dataset] No .jsonl files found in {snap_path}", flush=True)
        return None
    # Heuristic split detection: files whose stem contains "test" or "val"
    # are validation data, everything else is training; first match wins.
    train_src = None
    test_src = None
    for jf in jsonl_files:
        name = jf.stem.lower()
        if "test" in name or "val" in name:
            test_src = test_src or jf
        else:
            train_src = train_src or jf
    if not train_src:
        # Only val-ish files found — fall back to the first file for training.
        train_src = jsonl_files[0]
    DATA_DIR.mkdir(parents=True, exist_ok=True)

    def _rewrite_jsonl(src: Path, dst: Path):
        # Stream-rewrite one JSONL file, resolving relative media paths
        # against the snapshot root; returns the number of rows written.
        count = 0
        with open(src) as fin, open(dst, "w") as fout:
            for line in fin:
                line = line.strip()
                if not line:
                    continue
                row = _json.loads(line)
                for key in ("images", "videos", "audios"):
                    paths = row.get(key)
                    if not paths:
                        continue
                    # NOTE(review): assumes each entry is a str path/URL — a
                    # non-str entry would raise on .startswith; confirm schema.
                    resolved = []
                    for p in paths:
                        if p.startswith(("http://", "https://", "data:", "/")):
                            # Already absolute or remote — leave untouched.
                            resolved.append(p)
                        else:
                            abs_p = str(snap / p)
                            if Path(abs_p).exists():
                                resolved.append(abs_p)
                            else:
                                # Not found in the snapshot; keep the relative
                                # path and let swift try to resolve it.
                                resolved.append(p)
                    row[key] = resolved
                fout.write(_json.dumps(row, ensure_ascii=False) + "\n")
                count += 1
        return count

    local_train = DATA_DIR / "train.jsonl"
    n = _rewrite_jsonl(train_src, local_train)
    print(f"[Dataset] Prepared {n} samples: {train_src} -> {local_train}", flush=True)
    if test_src:
        local_test = DATA_DIR / "test.jsonl"
        n_test = _rewrite_jsonl(test_src, local_test)
        print(f"[Dataset] Prepared {n_test} val samples: {test_src} -> {local_test}", flush=True)
    return str(local_train)
def _run_training(config: dict):
global state
push_queue: _HFPushQueue | None = None
is_primary = True
_training_done.clear()
try:
state.status = "running"
state.started_at = datetime.now(timezone.utc).isoformat()
state.finished_at = None
state.error = None
state.config = config
print(
f"[Training] Thread started, dataset={config.get('dataset')}, model={config.get('model')}",
flush=True,
)
# Pre-flight: verify flash-linear-attention is usable.
# Qwen 3.5 has ~75% GatedDeltaNet layers that silently fall back to
# a naive O(n²) implementation if `fla` isn't importable, causing
# 2-3x VRAM usage with zero warning.
_preflight_fla_check()
env = os.environ.copy()
env["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
env["HF_HUB_DISABLE_PROGRESS_BARS"] = "0"
env["TRANSFORMERS_VERBOSITY"] = "info"
env["TORCHELASTIC_LOG_LEVEL"] = "INFO"
num_gpus = config.get("num_gpus") or _detect_gpu_count()
if num_gpus > 1:
gpu_ids = ",".join(str(i) for i in range(num_gpus))
env["CUDA_VISIBLE_DEVICES"] = gpu_ids
env["NPROC_PER_NODE"] = str(num_gpus)
# Multi-node: detect from config or RunPod Instant Cluster env vars
num_nodes = config.get("num_nodes") or int(os.environ.get("NUM_NODES", "1"))
node_rank = config.get("node_rank")
if node_rank is None:
node_rank = int(os.environ.get("NODE_RANK", "0"))
master_addr = config.get("master_addr") or os.environ.get("MASTER_ADDR", "")
master_port = config.get("master_port") or os.environ.get("MASTER_PORT", "29500")
if num_nodes > 1:
if not master_addr:
print(
"[Training] WARNING: num_nodes > 1 but MASTER_ADDR is empty. "
"RunPod Instant Clusters should set this automatically. "
"Distributed training will likely fail without it.",
flush=True,
)
env["NNODES"] = str(num_nodes)
env["NODE_RANK"] = str(node_rank)
env["MASTER_ADDR"] = master_addr
env["MASTER_PORT"] = str(master_port)
if not env.get("NCCL_SOCKET_IFNAME") and os.environ.get("PRIMARY_ADDR"):
env["NCCL_SOCKET_IFNAME"] = "ens1"
env.setdefault("NCCL_DEBUG", "INFO")
env.setdefault("NCCL_TIMEOUT", "1800000")
env.setdefault("TORCHELASTIC_MAX_RESTARTS", "0")
print(
f"[Training] Multi-node: {num_nodes} nodes, rank={node_rank}, "
f"master={master_addr}:{master_port}, NCCL_IFNAME={env.get('NCCL_SOCKET_IFNAME', 'default')}",
flush=True,
)
is_primary = node_rank == 0
if (num_gpus > 1 or num_nodes > 1) and not config.get("deepspeed"):
config["deepspeed"] = "zero3"
hf_token = config.get("hf_token") or os.environ.get("HF_TOKEN", "")
if hf_token:
env["HF_TOKEN"] = hf_token
wandb_key = config.get("wandb_api_key") or os.environ.get("WANDB_API_KEY", "")
if wandb_key:
env["WANDB_API_KEY"] = wandb_key
if config.get("wandb_project"):
env["WANDB_PROJECT"] = config["wandb_project"]
if config.get("wandb_entity"):
env["WANDB_ENTITY"] = config["wandb_entity"]
if config.get("wandb_run_name"):
env["WANDB_NAME"] = config["wandb_run_name"]
hf_repo_id = config.get("hf_repo_id")
if hf_repo_id and hf_token and is_primary:
push_queue = _HFPushQueue(
hf_repo_id, hf_token, model_id=config["model"], output_dir=OUTPUT_DIR
)
dataset_arg = config.get("dataset") or "/workspace/data/train.jsonl"
# For HF datasets with multimodal data: download, rewrite relative
# image/video/audio paths to absolute, and use the local copy.
if dataset_arg and not dataset_arg.startswith("/"):
try:
from huggingface_hub import snapshot_download
ds_repo = dataset_arg.split(":")[0]
print(f"[Training] Pre-downloading dataset: {ds_repo}", flush=True)
snap_path = snapshot_download(ds_repo, repo_type="dataset", token=hf_token or None)
print(f"[Training] Dataset cached: {ds_repo} -> {snap_path}", flush=True)
local_jsonl = _prepare_hf_dataset(snap_path, dataset_arg)
if local_jsonl:
dataset_arg = local_jsonl
except Exception as e:
print(f"[Training] Dataset pre-download note: {e} (swift will retry)", flush=True)
if config.get("dataset_subset"):
dataset_arg = f"{dataset_arg}:{config['dataset_subset']}"
cmd = [
"swift",
"sft",
"--model",
config["model"],
"--dataset",
dataset_arg,
"--tuner_type",
config["tuner_type"],
"--torch_dtype",
"bfloat16",
"--num_train_epochs",
str(config["num_epochs"]),
"--per_device_train_batch_size",
str(config["batch_size"]),
"--per_device_eval_batch_size",
str(config["batch_size"]),
"--learning_rate",
str(config["learning_rate"]),
"--gradient_accumulation_steps",
str(config["grad_accum"]),
"--eval_strategy",
"steps",
"--eval_steps",
str(config.get("eval_steps") or config["save_steps"]),
"--save_steps",
str(config["save_steps"]),
"--save_total_limit",
str(config.get("save_total_limit", 2)),
"--logging_steps",
str(config["logging_steps"]),
"--max_length",
str(config["max_length"]),
"--output_dir",
str(OUTPUT_DIR),
"--warmup_ratio",
str(config.get("warmup_ratio", 0.1)),
"--lr_scheduler_type",
config.get("lr_scheduler_type", "cosine"),
"--weight_decay",
str(config.get("weight_decay", 0.1)),
"--max_grad_norm",
str(config.get("max_grad_norm", 1.0)),
"--optim",
config.get("optimizer", "adamw_torch"),
"--seed",
str(config.get("seed", 42)),
"--dataloader_num_workers",
"4",
"--lazy_tokenize",
str(config.get("lazy_tokenize", True)),
"--dataset_num_proc",
str(config.get("dataset_num_proc", 4)),
"--attn_impl",
config.get("attn_impl", "flash_attn"),
"--use_hf",
"true",
]
if config.get("max_pixels") is not None:
cmd += ["--max_pixels", str(config["max_pixels"])]
if config.get("min_pixels") is not None:
cmd += ["--min_pixels", str(config["min_pixels"])]
if config.get("neftune_alpha") and config["neftune_alpha"] > 0:
cmd += ["--neftune_noise_alpha", str(config["neftune_alpha"])]
if config.get("packing"):
cmd += ["--packing", "true"]
if config.get("shuffle_dataset"):
cmd += ["--dataset_shuffle", "true"]
if config.get("gradient_checkpointing"):
cmd += ["--gradient_checkpointing", "true"]
if config.get("use_flash_ckpt", False):
cmd += ["--use_flash_ckpt", "true"]
if config.get("deepspeed"):
cmd += ["--deepspeed", config["deepspeed"]]
if is_primary and (config.get("wandb_project") or wandb_key):
cmd += ["--report_to", "wandb"]
val = config.get("val_dataset")
if val:
cmd += ["--val_dataset", val]
elif Path("/workspace/data/test.jsonl").exists():
cmd += ["--val_dataset", "/workspace/data/test.jsonl"]
val_split = config.get("val_split_ratio")
if val_split and val_split > 0 and not val:
cmd += ["--split_dataset_ratio", str(val_split)]
has_val = (
bool(val)
or (val_split and val_split > 0)
or Path("/workspace/data/test.jsonl").exists()
)
patience = config.get("early_stopping_patience")
if patience and has_val:
cmd += [
"--load_best_model_at_end",
"true",
"--metric_for_best_model",
"eval_loss",
"--greater_is_better",
"false",
"--early_stopping_patience",
str(patience),
]
threshold = config.get("early_stopping_threshold", 0.0)
if threshold > 0:
cmd += ["--early_stopping_threshold", str(threshold)]
save_limit = config.get("save_total_limit", 2)
if save_limit < 2:
cmd += ["--save_total_limit", "2"]
print(
"[Training] Bumped save_total_limit to 2 (required for load_best_model_at_end)",
flush=True,
)
elif patience and not has_val:
print(
"[Training] WARNING: early_stopping_patience ignored — no validation data configured",
flush=True,
)
resume = config.get("resume_from_checkpoint")
if resume:
if resume == "auto":
ckpt = _find_latest_checkpoint(OUTPUT_DIR)
if ckpt:
print(f"[Training] Auto-resume: found {ckpt}", flush=True)
cmd += ["--resume_from_checkpoint", str(ckpt)]
else:
print("[Training] Auto-resume: no checkpoint found, starting fresh", flush=True)
else:
cmd += ["--resume_from_checkpoint", resume]
if config.get("extra_args"):
for k, v in config["extra_args"].items():
flag = f"--{k}" if not k.startswith("--") else k
cmd += [flag, str(v)]
# Pre-download model so torchrun doesn't do it silently
model_name = config["model"]
print(f"[Training] Pre-downloading model: {model_name}", flush=True)
try:
from huggingface_hub import snapshot_download
snapshot_download(model_name, token=hf_token or None)
print(f"[Training] Model cached: {model_name}", flush=True)
except Exception as e:
print(f"[Training] Model pre-download note: {e} (torchrun will retry)", flush=True)
if num_nodes > 1 and master_addr:
_log_cluster_diagnostics(num_nodes, node_rank, master_addr, master_port)
print(f"[Training] Running: {' '.join(cmd)}", flush=True)
with open(LOG_FILE, "w") as log:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
state.pid = proc.pid
for line in iter(proc.stdout.readline, b""):
decoded = line.decode("utf-8", errors="replace")
log.write(decoded)
log.flush()
print(decoded, end="", flush=True)
proc.wait()
if proc.returncode != 0:
state.status = "failed"
state.error = f"Training exited with code {proc.returncode}"
print(f"[Training] FAILED with exit code {proc.returncode}", flush=True)
else:
state.status = "completed"
print("[Training] Completed successfully", flush=True)
except Exception as e:
state.status = "failed"
state.error = str(e)
print(f"[Training] Exception: {e}", flush=True)
finally:
state.finished_at = datetime.now(timezone.utc).isoformat()
state.pid = None
if push_queue is not None:
push_queue.stop_and_wait()
hf_push_ok = False
if is_primary and state.status == "completed" and hf_repo_id and hf_token:
state.hf_push_status = "pushing"
print(
"[HF Push] Starting final model push — container MUST stay alive until this completes",
flush=True,
)
hf_push_ok = _hf_push_final_model(
OUTPUT_DIR,
hf_repo_id,
hf_token,
model_id=config["model"],
tuner_type=config["tuner_type"],
)
state.hf_push_status = "success" if hf_push_ok else "failed"
if not hf_push_ok:
state.error = "Training completed but final HF push failed after all retries"
print(f"[HF Push] Final push finished (success={hf_push_ok})", flush=True)
if is_primary and state.status == "completed":
_fire_completion_webhook(config, hf_push_ok=hf_push_ok)
elif not is_primary:
print(f"[Training] Worker node (rank {config.get('node_rank', '?')}) — skipping HF push and webhook", flush=True)
_training_done.set()
print("[Training] Training thread fully done (push complete, safe to exit)", flush=True)
# ---------------------------------------------------------------------------
# Graceful shutdown: wait for training + HF push before allowing exit
# ---------------------------------------------------------------------------
# Snapshot the handlers installed before ours (typically uvicorn's), so the
# graceful-shutdown handler below can chain to them once waiting is done.
_original_sigterm = signal.getsignal(signal.SIGTERM)
_original_sigint = signal.getsignal(signal.SIGINT)
def _graceful_shutdown(signum, frame):
    """Hold off process exit until any in-flight training/HF push finishes.

    Installed for SIGTERM and SIGINT. If the training thread has not yet set
    ``_training_done``, wait on it (bounded at 30 minutes) before handing
    control to the previously installed handler so uvicorn actually stops.
    """
    sig_name = "SIGTERM" if signum == signal.SIGTERM else "SIGINT"
    if _training_done.is_set():
        print(
            f"[Shutdown] {sig_name} received, no training in progress — shutting down immediately",
            flush=True,
        )
    else:
        print(
            f"[Shutdown] {sig_name} received but training/push still in progress — waiting up to 30 min...",
            flush=True,
        )
        if _training_done.wait(timeout=1800):
            print("[Shutdown] Training + push completed, proceeding with shutdown", flush=True)
        else:
            print(
                "[Shutdown] CRITICAL: Timed out waiting for push after 30 min, forcing exit",
                flush=True,
            )
    # Chain to the handler that was installed before ours (uvicorn's) so the
    # server actually shuts down; fall back to a plain exit otherwise.
    previous = _original_sigterm if signum == signal.SIGTERM else _original_sigint
    if callable(previous):
        previous(signum, frame)
    else:
        raise SystemExit(0)
# ---------------------------------------------------------------------------
# Startup: recover interrupted HF pushes + install signal handlers
# ---------------------------------------------------------------------------
@app.on_event("startup")
def _on_startup():
    """Install graceful-shutdown handlers and start HF push recovery."""
    for sig in (signal.SIGTERM, signal.SIGINT):
        signal.signal(sig, _graceful_shutdown)
    recovery = threading.Thread(
        target=_recover_pending_push, daemon=True, name="hf-push-recovery"
    )
    recovery.start()
# ---------------------------------------------------------------------------
# API endpoints
# ---------------------------------------------------------------------------
@app.get("/health")
def health():
    """Unauthenticated liveness probe: GPU info plus cluster topology if any."""
    gpus = _gpu_info()
    num_nodes = int(os.environ.get("NUM_NODES", "1"))
    node_rank = int(os.environ.get("NODE_RANK", "0"))
    payload: dict[str, Any] = {"status": "ok", "num_gpus": len(gpus), "gpus": gpus}
    if num_nodes <= 1:
        return payload
    # Multi-node deployments also report where the torchrun master lives.
    payload["cluster"] = {
        "num_nodes": num_nodes,
        "node_rank": node_rank,
        "master_addr": os.environ.get("MASTER_ADDR", ""),
        "master_port": os.environ.get("MASTER_PORT", "29500"),
    }
    return payload
@app.get("/status", dependencies=[Depends(verify_secret)])
def get_status():
    """Return the current training state as a plain dict (status, pid, error, push state)."""
    return state.to_dict()
@app.post("/train", dependencies=[Depends(verify_secret)])
def start_training(req: TrainRequest):
    """Validate the request and launch training on a non-daemon background thread."""
    global _training_thread
    if state.status == "running":
        raise HTTPException(409, "Training already in progress")
    config = req.model_dump()
    # A dataset must come from either the request or a previously uploaded file.
    if not config.get("dataset") and not Path("/workspace/data/train.jsonl").exists():
        raise HTTPException(
            400,
            "No dataset specified and no train.jsonl found. "
            "Either set 'dataset' to a HuggingFace ID or upload a JSONL file first.",
        )
    # Non-daemon so the interpreter cannot exit mid-training/push.
    worker = threading.Thread(
        target=_run_training, args=(config,), daemon=False, name="training-main"
    )
    _training_thread = worker
    worker.start()
    return {"message": "Training started", "config": config}
@app.get("/train/config")
def get_train_config():
    """Return the full training configuration schema with defaults, types, and descriptions.
    No auth required so dashboards can populate forms before the user enters a secret.
    """
    schema = TrainRequest.model_json_schema()
    fields: list[dict] = []
    for name, info in schema.get("properties", {}).items():
        field_type = info.get("type")
        nullable = False
        any_of = info.get("anyOf")
        # Optional fields come through as anyOf [<type>, null]; pick the
        # first non-null member as the effective type.
        if any_of and not field_type:
            concrete = [
                t.get("type")
                for t in any_of
                if t.get("type") and t.get("type") != "null"
            ]
            field_type = concrete[0] if concrete else "string"
            nullable = any(t.get("type") == "null" for t in any_of)
        entry: dict[str, Any] = {
            "name": name,
            "type": field_type or "string",
            "nullable": nullable,
            "default": info.get("default"),
            "description": info.get("description", ""),
        }
        # Copy over numeric bounds under dashboard-friendly key names.
        for schema_key, out_key in (
            ("minimum", "min"),
            ("exclusiveMinimum", "exclusive_min"),
            ("maximum", "max"),
        ):
            if schema_key in info:
                entry[out_key] = info[schema_key]
        if "enum" in info:
            entry["options"] = info["enum"]
        fields.append(entry)
    return {"fields": fields, "defaults": TrainRequest().model_dump()}
@app.post("/stop", dependencies=[Depends(verify_secret)])
def stop_training():
    """Send SIGTERM to the training process; never interrupts a pending HF push."""
    if state.status != "running" or not state.pid:
        # Training may be over while the final HF push is still running.
        if not _training_done.is_set():
            return {
                "message": "Training finished but HF push still in progress — container will stay alive"
            }
        raise HTTPException(400, "No training in progress")
    try:
        os.kill(state.pid, signal.SIGTERM)
    except ProcessLookupError:
        state.status = "idle"
        return {"message": "Process already exited"}
    state.status = "stopped"
    state.finished_at = datetime.now(timezone.utc).isoformat()
    return {
        "message": "Training stop signal sent (HF push will still complete before container exits)"
    }
@app.post("/upload/dataset", dependencies=[Depends(verify_secret)])
async def upload_dataset(
    train_file: UploadFile = File(...),
    test_file: UploadFile | None = File(None),
):
    """Persist an uploaded train JSONL (and optional test JSONL) into DATA_DIR."""

    def _save(upload: UploadFile, dest: Path) -> int:
        # Stream the upload to disk and report the written size.
        with open(dest, "wb") as out:
            shutil.copyfileobj(upload.file, out)
        return dest.stat().st_size

    train_path = DATA_DIR / "train.jsonl"
    train_size = _save(train_file, train_path)
    result: dict[str, Any] = {"train_file": str(train_path), "train_size": train_size}
    if test_file:
        test_path = DATA_DIR / "test.jsonl"
        result["test_file"] = str(test_path)
        result["test_size"] = _save(test_file, test_path)
    return result
@app.post("/upload/config", dependencies=[Depends(verify_secret)])
async def upload_config(config_file: UploadFile = File(...)):
    """Save an uploaded config file into CONFIG_DIR.

    Security fix: the client-supplied filename was previously joined into
    CONFIG_DIR verbatim, so a name like "../../etc/cron.d/x" could write
    outside the config directory. Only the basename is used now, and empty
    or dot-only names are rejected.

    Returns:
        dict with the saved path and its size in bytes.

    Raises:
        HTTPException(400): if the filename reduces to an unusable basename.
    """
    safe_name = Path(config_file.filename or "").name
    if not safe_name or safe_name in (".", ".."):
        raise HTTPException(400, "Invalid config filename")
    dest = CONFIG_DIR / safe_name
    with open(dest, "wb") as f:
        shutil.copyfileobj(config_file.file, f)
    return {"config_file": str(dest), "size": dest.stat().st_size}
@app.get("/logs", dependencies=[Depends(verify_secret)])
def get_logs(lines: int = 100):
    """Return the last `lines` lines of the training log.

    Bug fix: with ``lines <= 0`` the old slice ``all_lines[-0:]`` returned
    the ENTIRE log file; non-positive values now yield an empty log.

    Args:
        lines: number of trailing log lines to return (default 100).
    """
    if not LOG_FILE.exists():
        return {"logs": ""}
    all_lines = LOG_FILE.read_text().splitlines()
    tail = all_lines[-lines:] if lines > 0 else []
    return {"logs": "\n".join(tail)}
@app.get("/checkpoints", dependencies=[Depends(verify_secret)])
def list_checkpoints():
    """List checkpoint directories under OUTPUT_DIR: per-run subdirs, then root-level."""

    def _collect(pattern: str, fixed_run: str | None) -> list[dict]:
        # "run" is the parent dir name for nested checkpoints, or a fixed label.
        return [
            {
                "path": str(ckpt),
                "name": ckpt.name,
                "run": fixed_run if fixed_run is not None else ckpt.parent.name,
            }
            for ckpt in sorted(OUTPUT_DIR.glob(pattern))
        ]

    checkpoints = _collect("*/checkpoint-*", None) + _collect("checkpoint-*", "root")
    return {"checkpoints": checkpoints}
def _detect_gpu_count() -> int:
try:
out = subprocess.check_output(["nvidia-smi", "-L"], text=True)
count = len([l for l in out.strip().splitlines() if l.strip()])
return max(count, 1)
except Exception:
return 1
def _gpu_info():
try:
out = subprocess.check_output(
[
"nvidia-smi",
"--query-gpu=name,memory.total,memory.used,memory.free",
"--format=csv,noheader,nounits",
],
text=True,
)
gpus = []
for line in out.strip().splitlines():
parts = [p.strip() for p in line.split(",")]
gpus.append(
{
"name": parts[0],
"memory_total_mb": int(parts[1]),
"memory_used_mb": int(parts[2]),
"memory_free_mb": int(parts[3]),
}
)
return gpus
except Exception:
return []