Fix VRAM property access in training script to use total_memory
Browse files — code/train_production.py (+1, −1)
code/train_production.py
CHANGED
|
@@ -436,7 +436,7 @@ def train(args):
|
|
| 436 |
print(f"Device: {device}")
|
| 437 |
if torch.cuda.is_available():
|
| 438 |
print(f"GPU: {torch.cuda.get_device_name()}")
|
| 439 |
-
print(f"VRAM: {torch.cuda.get_device_properties(0).
|
| 440 |
|
| 441 |
# Download dLLM model
|
| 442 |
print("Downloading dLLM Qwen3-0.6B diffusion model...")
|
|
|
|
| 436 |
print(f"Device: {device}")
|
| 437 |
if torch.cuda.is_available():
|
| 438 |
print(f"GPU: {torch.cuda.get_device_name()}")
|
| 439 |
+
print(f"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB")
|
| 440 |
|
| 441 |
# Download dLLM model
|
| 442 |
print("Downloading dLLM Qwen3-0.6B diffusion model...")
|