#!/usr/bin/env sh
# Container entrypoint: install the EO toolchain on first boot, start the
# Ollama server, ensure the granite models are present, pre-warm the 8b
# model into VRAM, then exec the web app under uvicorn.

set -e
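
# --- EO toolchain -----------------------------------------------------------
# terratorch and its model dependencies are installed once into $EO_DIR; a
# marker file records a verified install so later container starts can skip
# the ~2 min pip step whenever $HOME persists across restarts.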
EO_DIR="$HOME/.eo-pkgs"
EO_MARKER="$EO_DIR/.installed"
if [ ! -f "$EO_MARKER" ]; then
  echo "[entrypoint] EO toolchain not yet installed; running pip install (~2 min)..."
  mkdir -p "$EO_DIR"
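
  # --no-deps keeps the install lean: it assumes the heavy dependencies
  # (torch and friends) are already provided by the image. --target isolates
  # the packages in $EO_DIR so they are picked up via PYTHONPATH rather than
  # written into the system site-packages.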
  if pip install --no-cache-dir --no-deps --target="$EO_DIR" \
      terratorch==1.1rc6 \
      einops \
      diffusers \
      timm \
      torchvision; then
    echo "[entrypoint] pip install OK; verifying import..."
    if PYTHONPATH="$EO_DIR:$PYTHONPATH" python -c "
import terratorch
from terratorch.registry import FULL_MODEL_REGISTRY
import terratorch.models.backbones.terramind.model.terramind_register
n = len([k for k in FULL_MODEL_REGISTRY if 'terramind' in k.lower()])
assert n > 0, 'no terramind register entries'
print(f'[entrypoint] terratorch ok, terramind register: {n} entries')
"; then
      touch "$EO_MARKER"
      echo "[entrypoint] EO toolchain READY at $EO_DIR"
    else
      echo "[entrypoint] EO verify FAILED -> TerraMind/Prithvi-live will skip"
    fi
  else
    echo "[entrypoint] pip install FAILED -> TerraMind/Prithvi-live will skip"
  fi
else
  echo "[entrypoint] EO toolchain already installed at $EO_DIR (cached)"
fi

# Make the EO packages importable for this shell and every child process.
# ${PYTHONPATH:+...} avoids a dangling ':' when PYTHONPATH is unset (an empty
# entry would put the current directory on sys.path).
export PYTHONPATH="$EO_DIR${PYTHONPATH:+:$PYTHONPATH}"
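
# --- Ollama server ------------------------------------------------------------
# Start the server in the background, logging to a file we can dump on
# failure. NOTE: with `ollama serve 2>&1 | tee "$LOG_FILE" &`, $! would be
# tee's PID rather than the server's, so log to the file directly instead.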
LOG_FILE="$HOME/ollama.log"
ollama serve > "$LOG_FILE" 2>&1 &
OLLAMA_PID=$!
tail -f "$LOG_FILE" &  # mirror server output to container stdout, as tee did
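
# Poll the HTTP endpoint for up to 60s, bailing out early with the log tail
# if the server process dies first.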
for i in $(seq 1 60); do
  if curl -sf http://127.0.0.1:11434/ > /dev/null 2>&1; then
    echo "[entrypoint] ollama up (pid $OLLAMA_PID) after ${i}s"
    break
  fi
  if ! kill -0 "$OLLAMA_PID" 2>/dev/null; then
    echo "[entrypoint] FATAL: ollama serve died. Last 40 lines of $LOG_FILE:"
    tail -40 "$LOG_FILE" || true
    exit 1
  fi
  sleep 1
done

if ! curl -sf http://127.0.0.1:11434/ > /dev/null 2>&1; then
  echo "[entrypoint] FATAL: ollama did not become ready within 60s"
  tail -40 "$LOG_FILE" || true
  exit 1
fi
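
# --- Models -------------------------------------------------------------------
# granite4.1:8b is required (the reconciler depends on it); granite4.1:3b is
# optional because the planner alias can be remapped to the 8b tag via
# RIPRAP_OLLAMA_3B_TAG.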
for model in "granite4.1:8b" "granite4.1:3b"; do
  # -F: match the tag literally (the '.' would otherwise be a regex wildcard)
  if ! ollama list | grep -qF "$model"; then
    if [ "$model" = "granite4.1:8b" ]; then
      echo "[entrypoint] $model not found; pulling now (~5GB, ~2 min over the wire)..."
      ollama pull "$model" || {
        echo "[entrypoint] FATAL: pull failed for $model -> reconciler will not work"
        exit 1
      }
    else
      echo "[entrypoint] $model not found (optional; planner alias remapped to 8b via RIPRAP_OLLAMA_3B_TAG)"
    fi
  fi
done

ollama list
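
# --- Pre-warm -------------------------------------------------------------------
# A one-token generate forces the 8b weights into VRAM; keep_alive=24h asks
# Ollama to keep the model resident instead of unloading it after the default
# idle timeout, so the first real request is not stuck behind a cold load.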
| echo "[entrypoint] pre-warming granite4.1:8b into VRAM (one-shot)..." |
| curl -s -X POST http://127.0.0.1:11434/api/generate \ |
| -d '{"model":"granite4.1:8b","prompt":"hi","stream":false,"keep_alive":"24h","options":{"num_predict":1}}' \ |
| -o /dev/null --max-time 120 \ |
| && echo "[entrypoint] granite4.1:8b warm" \ |
| || echo "[entrypoint] WARNING: 8b warmup failed (will load lazily)" |
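
# --- GPU diagnostics ------------------------------------------------------------
# Informational only: report whether a GPU is visible and where the Ollama
# runtime libraries live, so CPU-fallback situations are obvious in the logs.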
if command -v nvidia-smi > /dev/null 2>&1; then
  echo "[entrypoint] nvidia-smi present:"
  nvidia-smi -L || true
else
  echo "[entrypoint] nvidia-smi NOT present -> Ollama will run on CPU"
fi
echo "[entrypoint] ollama lib dirs:"
for d in /usr/lib/ollama /usr/local/lib/ollama; do
  if [ -d "$d" ]; then
    echo "  $d:"
    ls "$d" | head -20
  else
    echo "  $d missing"
  fi
done

# Hand control to the app server; exec replaces this shell so uvicorn
# receives signals (e.g. SIGTERM on container stop) directly.
exec uvicorn web.main:app --host 0.0.0.0 --port 7860 --log-level info