# syntax=docker/dockerfile:1
# Image: llama.cpp server (CUDA build) pre-baked with a GGUF model + its
# multimodal projector, both fetched from Hugging Face at build time.
# NOTE: the model download (~several GB) is baked into an image layer on
# purpose so the container starts without network access at runtime.
FROM ghcr.io/ggml-org/llama.cpp:full-cuda

WORKDIR /app

# python3 + venv are only needed to run huggingface_hub for the download.
# Use apt-get (stable CLI), skip recommends, and clean the apt lists in the
# same layer so they don't persist in the image (hadolint DL3009/DL3015/DL3027).
RUN apt-get update && apt-get install -y --no-install-recommends \
      python3 \
      python3-pip \
      python3-venv \
    && rm -rf /var/lib/apt/lists/*

# Isolated venv so we don't touch the base image's system Python.
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# --no-cache-dir keeps pip's wheel cache out of the layer (hadolint DL3042).
RUN pip install --no-cache-dir -U pip huggingface_hub

# Fetch the quantized model and the mmproj (vision projector) into /app.
# File names must match the repo exactly — do not normalize them.
RUN python3 -c 'from huggingface_hub import hf_hub_download; \
repo="HauhauCS/Gemma-4-E4B-Uncensored-HauhauCS-Aggressive"; \
hf_hub_download(repo_id=repo, filename="Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-Q5_K_P.gguf", local_dir="/app"); \
hf_hub_download(repo_id=repo, filename="mmproj-Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-f16.gguf", local_dir="/app")'

# Documentation only — the server below binds 0.0.0.0:7860.
EXPOSE 7860

# NOTE(review): container runs as root (base image provides no non-root
# user); acceptable for hosted-space use, but consider adding USER for
# production deployments.
#
# The base image's ENTRYPOINT is a dispatcher script; "--server" selects
# llama-server mode and the remaining args are passed through to it:
#   -c 128000  context window, -n 38912 max tokens to predict,
#   --n-gpu-layers 99 offloads all layers to the GPU.
CMD ["--server", \
     "-m", "/app/Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-Q5_K_P.gguf", \
     "--mmproj", "/app/mmproj-Gemma-4-E4B-Uncensored-HauhauCS-Aggressive-f16.gguf", \
     "--host", "0.0.0.0", \
     "--port", "7860", \
     "-t", "2", \
     "-c", "128000", \
     "-n", "38912", \
     "--n-gpu-layers", "99"]