# syntax=docker/dockerfile:1
# Base: llama.cpp "full" image (server + CLI tools bundled).
# NOTE(review): ":full" is a moving tag that tracks master — pin a release
# tag or a digest (e.g. @sha256:...) for reproducible builds.
FROM ghcr.io/ggml-org/llama.cpp:full

# Model files are downloaded into /app at build time (see the RUN below);
# presumably this is also where the base image keeps its tooling — verify
# before adding files with clashing names.
WORKDIR /app
|
|
# Python is only needed at build time, to fetch the model weights.
# Use apt-get (the `apt` CLI warns it has no stable scripting interface),
# skip recommended packages, and purge the apt lists in the SAME layer —
# removing them later would not shrink the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        python3 \
        python3-pip \
        python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Virtualenv isolates pip packages from the distro Python
# (avoids PEP 668 "externally managed environment" failures).
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# --no-cache-dir: otherwise pip's download cache is baked into the layer.
# NOTE(review): consider pinning huggingface_hub==x.y.z for reproducibility.
RUN pip install --no-cache-dir -U pip huggingface_hub
|
|
# Bake the model + multimodal projector into the image at build time so the
# container serves immediately with no runtime download. NOTE: this makes
# the image very large; a runtime download/volume may be preferable.
# (Dockerfile `\` continuations are joined by the parser before the shell
# sees the command, so this stays a single-line `python3 -c` invocation.)
RUN python3 -c 'from huggingface_hub import hf_hub_download; \
repo = "gijl/gemma-4-31B-it-GGUF"; \
files = ("gemma-4-31B-it-UD-Q8_K_XL.gguf", "mmproj-BF16.gguf"); \
[hf_hub_download(repo_id=repo, filename=f, local_dir="/app") for f in files]'
|
|
# Documentation-only: the server listens on 7860 (common Hugging Face
# Spaces port); publishing still requires -p/--publish at run time.
EXPOSE 7860

# Exec-form CMD: these are arguments to the base image's ENTRYPOINT, whose
# first argument ("--server") presumably selects llama-server — confirm
# against the base image's entrypoint script.
# -t 2            : two CPU threads (sized for a small/shared host)
# -c 128000       : context window; -n 38912 caps tokens per generation
# cache-type-k/v  : quantized KV cache to fit the large context in RAM.
# NOTE(review): llama.cpp requires flash attention to be enabled for a
# quantized V cache ("--cache-type-v", "iq4_nl"); recent builds default
# flash-attn to auto, but verify on this image or add an explicit
# "--flash-attn", "on" — otherwise the V-cache setting is rejected/ignored.
CMD ["--server", \
     "-m", "/app/gemma-4-31B-it-UD-Q8_K_XL.gguf", \
     "--mmproj", "/app/mmproj-BF16.gguf", \
     "--host", "0.0.0.0", \
     "--port", "7860", \
     "-t", "2", \
     "--cache-type-k", "q8_0", \
     "--cache-type-v", "iq4_nl", \
     "-c", "128000", \
     "-n", "38912"]