#!/bin/bash
set -euo pipefail  # fail fast on errors and unset variables

# --- Configuration: persistent paths and model identifiers ---
MODELS_DIR="/data/models"
MODEL_FILE="gemma-4-E2B-it-UD-Q5_K_XL.gguf"
OLLAMA_MODEL_NAME="my-gemma"

# --- Create persistent directories for the model files, Ollama state, and WebUI data ---
mkdir -p "$MODELS_DIR"
mkdir -p /data/ollama
mkdir -p /data/webui

# --- Start the Ollama server in the background, keeping its model store on the persistent volume ---
echo "Starting Ollama server..."
OLLAMA_MODELS=/data/ollama ollama serve &
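# Optional alternative (sketch): redirect the server output to a log file for
# easier debugging. The log path is an arbitrary choice on the persistent
# volume, not an Ollama convention.
# OLLAMA_MODELS=/data/ollama ollama serve > /data/ollama/serve.log 2>&1 &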

# Give the server a moment to come up before we talk to it.
sleep 4
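
# More robust readiness check (sketch): poll the Ollama HTTP API until it
# answers instead of trusting a fixed delay. Assumes curl is available in the
# image and that Ollama listens on its default port 11434.
until curl -sf http://127.0.0.1:11434/ > /dev/null; do
    echo "Waiting for Ollama API..."
    sleep 1
done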

# --- Download the GGUF weights from Hugging Face on first run only ---
if [ ! -f "$MODELS_DIR/$MODEL_FILE" ]; then
    echo "Model not found in storage. Downloading..."

    # Main model weights.
    huggingface-cli download gijl/gemma-4-E2B-it-GGUF "$MODEL_FILE" \
        --local-dir "$MODELS_DIR" \
        --local-dir-use-symlinks False

    # Multimodal projector shipped in the same repo. (Recent huggingface_hub
    # versions ignore --local-dir-use-symlinks, but the flag is harmless.)
    huggingface-cli download gijl/gemma-4-E2B-it-GGUF mmproj-BF16.gguf \
        --local-dir "$MODELS_DIR" \
        --local-dir-use-symlinks False
else
    echo "Model already exists in storage. Skipping download."
fi
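
# Optional sanity check (sketch): abort early if the weights are missing or
# empty, e.g. after an interrupted download, rather than failing later at
# 'ollama create'.
if [ ! -s "$MODELS_DIR/$MODEL_FILE" ]; then
    echo "ERROR: $MODELS_DIR/$MODEL_FILE is missing or empty." >&2
    exit 1
fi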

# --- Write the Modelfile that tells Ollama how to load the GGUF ---
# Note: the mmproj projector downloaded above is not referenced here.
echo "Creating Modelfile..."
cat <<EOF > "$MODELS_DIR/Modelfile"
FROM $MODELS_DIR/$MODEL_FILE

# Settings similar to what was requested for llama.cpp
PARAMETER num_ctx 128000
PARAMETER num_thread 2
EOF
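
# Further PARAMETER lines can be appended the same way; temperature and top_p
# are standard Ollama Modelfile parameters, but the values below are only
# illustrative, not tuned for this model.
# cat <<EOF >> "$MODELS_DIR/Modelfile"
# PARAMETER temperature 0.7
# PARAMETER top_p 0.9
# EOF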

# --- Register the model with Ollama under a friendly name ---
echo "Registering model with Ollama..."
ollama create "$OLLAMA_MODEL_NAME" -f "$MODELS_DIR/Modelfile"
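
# Optional smoke test (sketch): uncomment to verify the model answers before
# the UI starts; note this loads the model and adds startup time.
# ollama run "$OLLAMA_MODEL_NAME" "Reply with OK" || echo "WARN: smoke test failed" >&2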
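
# Point Open WebUI at the local Ollama API and at the persistent data dir
# created above. OLLAMA_BASE_URL and DATA_DIR are standard Open WebUI settings;
# exporting them explicitly here is an assumption about this image's defaults.
export OLLAMA_BASE_URL="http://127.0.0.1:11434"
export DATA_DIR="/data/webui"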
| echo "Starting Open WebUI..." |
| cd /app/backend |
| exec bash start.sh |