# FunctionGemma SFT LoRA dependencies
|
|
# PyTorch (pick the build that matches your CUDA)
torch>=2.0.0
torchvision
torchaudio
|
|
# Hugging Face stack
transformers>=4.40.0
datasets>=2.18.0
accelerate>=0.27.0
tokenizers>=0.15.0
|
|
# TRL (Transformer Reinforcement Learning)
trl>=0.8.0
|
|
# PEFT (Parameter-Efficient Fine-Tuning)
peft>=0.10.0
|
|
# Quantization support (QLoRA)
bitsandbytes>=0.43.0
|
|
# Logging & monitoring
tensorboard>=2.15.0
wandb>=0.16.0
|
|
# Utilities
sentencepiece>=0.2.0
protobuf>=4.25.0
tqdm>=4.66.0
|
|
# Flash Attention (optional; install separately)
# pip install flash-attn --no-build-isolation
|
|
# Evaluation
evaluate>=0.4.0
|
|