fastapi==0.115.12
uvicorn[standard]==0.34.0
httpx==0.28.1
pydantic==2.11.3
huggingface_hub[hf_xet]==0.30.2
# llama-cpp-python is installed separately in the Dockerfile via a pre-built wheel index
# (avoids slow C++ compilation; see Dockerfile).
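# For reference, the install the Dockerfile performs looks roughly like the line
# below. The exact index URL is an assumption based on the upstream
# llama-cpp-python docs (CPU wheels; CUDA variants use e.g. /whl/cu121);
# the Dockerfile is authoritative:
#   pip install llama-cpp-python --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu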