innofacisteven committed on
Commit
f693c70
·
verified ·
1 Parent(s): fae2bba

Update start.sh

Browse files
Files changed (1) hide show
  1. start.sh +8 -10
start.sh CHANGED
@@ -1,28 +1,26 @@
#!/usr/bin/env bash
set -euo pipefail

echo "=== Starting Ollama in background ==="
ollama serve &

# Poll the local Ollama HTTP API once per second until it responds,
# giving up after 120 attempts (~120 seconds).
echo "Waiting for Ollama to start (up to 120 seconds)..."
attempt=1
while :; do
  if curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
    echo "✅ Ollama is ready."
    break
  fi
  if [ "$attempt" -eq 120 ]; then
    echo "❌ ERROR: Ollama failed to start within 120 seconds."
    exit 1
  fi
  sleep 1
  attempt=$((attempt + 1))
done

# Pull the embedding model (a lightweight model is recommended; HF Spaces
# resources are limited).
EMBEDDING_MODEL=${EMBEDDING_MODEL:-nomic-embed-text}
echo "Pulling embedding model: ${EMBEDDING_MODEL}"
ollama pull "${EMBEDDING_MODEL}"

echo "=== Starting LightRAG Server ==="

# Ensure .env is present in the current working directory.
exec python -m lightrag.api.lightrag_server
 
#!/usr/bin/env bash
set -euo pipefail

# Launch the Ollama server in the background, wait until its HTTP API is
# reachable, pull the configured embedding model (best-effort), then exec
# the LightRAG server as PID of this process.

echo "=== Starting Ollama ==="
ollama serve &

# Poll the Ollama HTTP API until it answers.
# BUGFIX: the original looped 180 times with `sleep 2`, i.e. up to ~360 s,
# while the message promised 180 s. 90 attempts x 2 s = 180 s matches it.
echo "Waiting for Ollama to start (up to 180 seconds)..."
ready=0
for _ in {1..90}; do
  if curl -fsS http://127.0.0.1:11434/api/tags >/dev/null 2>&1; then
    ready=1
    echo "✅ Ollama is ready."
    break
  fi
  sleep 2
done
if [ "$ready" -ne 1 ]; then
  # Diagnostics go to stderr so logs separate cleanly from normal output.
  echo "❌ ERROR: Ollama failed to start within 180 seconds." >&2
  exit 1
fi

# Pull the embedding model (a lightweight model is more reliable on
# resource-constrained hosts). Non-fatal: the model may already be cached
# locally, in which case the server can still start.
EMBEDDING_MODEL=${EMBEDDING_MODEL:-nomic-embed-text}
echo "Pulling embedding model: ${EMBEDDING_MODEL}"
ollama pull "${EMBEDDING_MODEL}" || echo "⚠️ Warning: Failed to pull ${EMBEDDING_MODEL}" >&2

echo "=== Starting LightRAG Server ==="
exec python -m lightrag.api.lightrag_server