lightrag / start.sh
innofacisteven's picture
Update start.sh
f693c70 verified
raw
history blame contribute delete
738 Bytes
#!/usr/bin/env bash
# Start an Ollama server, wait until its HTTP API answers, pull the
# embedding model, then exec the LightRAG API server in the foreground.
#
# Env vars:
#   EMBEDDING_MODEL  model to pull via `ollama pull` (default: nomic-embed-text)
set -euo pipefail

echo "=== Starting Ollama ==="
ollama serve &
ollama_pid=$!

# Poll the tags endpoint until the API responds.
# 90 polls x 2 s sleep = up to 180 s, matching the messages below
# (the original looped 180 times with a 2 s sleep, i.e. ~360 s).
echo "Waiting for Ollama to start (up to 180 seconds)..."
ready=0
for _ in {1..90}; do
  # Fail fast if the server process already died instead of polling
  # for the whole timeout window.
  if ! kill -0 "$ollama_pid" 2>/dev/null; then
    echo "❌ ERROR: Ollama process exited unexpectedly." >&2
    exit 1
  fi
  if curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
    ready=1
    echo "✅ Ollama is ready."
    break
  fi
  sleep 2
done
if (( ready == 0 )); then
  echo "❌ ERROR: Ollama failed to start within 180 seconds." >&2
  exit 1
fi

# Pull the embedding model (a lightweight model is more reliable).
# A pull failure is non-fatal: the model may already be cached locally.
EMBEDDING_MODEL=${EMBEDDING_MODEL:-nomic-embed-text}
echo "Pulling embedding model: ${EMBEDDING_MODEL}"
ollama pull "${EMBEDDING_MODEL}" || echo "⚠️ Warning: Failed to pull ${EMBEDDING_MODEL}" >&2

echo "=== Starting LightRAG Server ==="
# exec replaces the shell so LightRAG becomes PID-visible to the
# container runtime and receives signals directly.
exec python -m lightrag.api.lightrag_server