Spaces:
Sleeping
Sleeping
fix
Browse files
- backend/env/sql_env.py +7 -5
- backend/gepa/optimizer.py +5 -3
- backend/main.py +1 -1
backend/env/sql_env.py
CHANGED
|
@@ -67,15 +67,17 @@ class RewardInfo(BaseModel):
|
|
| 67 |
|
| 68 |
# ─── LLM Client ──────────────────────────────────────────────────
|
| 69 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 70 |
def _make_client() -> AsyncOpenAI:
|
| 71 |
return AsyncOpenAI(
|
| 72 |
-
api_key=
|
| 73 |
-
base_url=
|
| 74 |
)
|
| 75 |
|
| 76 |
-
|
| 77 |
-
_MODEL = os.environ.get("MODEL_NAME", "Qwen/Qwen2.5-72B-Instruct")
|
| 78 |
-
|
| 79 |
BASE_SYSTEM_PROMPT = """You are a SQL expert. Given a natural language question and a SQLite database schema, write a correct SQL query.
|
| 80 |
|
| 81 |
Rules:
|
|
|
|
| 67 |
|
| 68 |
# ─── LLM Client ──────────────────────────────────────────────────
|
| 69 |
|
| 70 |
+
API_BASE_URL = os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1")
|
| 71 |
+
_MODEL = os.environ.get("MODEL_NAME", "Qwen/Qwen2.5-72B-Instruct")
|
| 72 |
+
HF_TOKEN = os.environ.get("HF_TOKEN") # no default — must be set explicitly
|
| 73 |
+
|
| 74 |
+
|
| 75 |
def _make_client() -> AsyncOpenAI:
|
| 76 |
return AsyncOpenAI(
|
| 77 |
+
api_key=HF_TOKEN,
|
| 78 |
+
base_url=API_BASE_URL,
|
| 79 |
)
|
| 80 |
|
|
|
|
|
|
|
|
|
|
| 81 |
BASE_SYSTEM_PROMPT = """You are a SQL expert. Given a natural language question and a SQLite database schema, write a correct SQL query.
|
| 82 |
|
| 83 |
Rules:
|
backend/gepa/optimizer.py
CHANGED
|
@@ -24,7 +24,9 @@ from pydantic import BaseModel
|
|
| 24 |
_DATA_DIR = Path(os.environ.get("DATA_DIR", Path(__file__).parent.parent / "data"))
|
| 25 |
GEPA_PATH = _DATA_DIR / "gepa_prompt.json"
|
| 26 |
|
| 27 |
-
|
|
|
|
|
|
|
| 28 |
|
| 29 |
# How many queries between each GEPA optimization cycle.
|
| 30 |
# Override with the GEPA_OPTIMIZE_EVERY environment variable.
|
|
@@ -62,8 +64,8 @@ class Candidate(BaseModel):
|
|
| 62 |
|
| 63 |
def _make_client() -> AsyncOpenAI:
|
| 64 |
return AsyncOpenAI(
|
| 65 |
-
api_key=
|
| 66 |
-
base_url=
|
| 67 |
)
|
| 68 |
|
| 69 |
|
|
|
|
| 24 |
_DATA_DIR = Path(os.environ.get("DATA_DIR", Path(__file__).parent.parent / "data"))
|
| 25 |
GEPA_PATH = _DATA_DIR / "gepa_prompt.json"
|
| 26 |
|
| 27 |
+
_API_BASE_URL = os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1")
|
| 28 |
+
_MODEL = os.environ.get("MODEL_NAME", "Qwen/Qwen2.5-72B-Instruct")
|
| 29 |
+
_HF_TOKEN = os.environ.get("HF_TOKEN") # no default — must be set explicitly
|
| 30 |
|
| 31 |
# How many queries between each GEPA optimization cycle.
|
| 32 |
# Override with the GEPA_OPTIMIZE_EVERY environment variable.
|
|
|
|
| 64 |
|
| 65 |
def _make_client() -> AsyncOpenAI:
|
| 66 |
return AsyncOpenAI(
|
| 67 |
+
api_key=_HF_TOKEN,
|
| 68 |
+
base_url=_API_BASE_URL,
|
| 69 |
)
|
| 70 |
|
| 71 |
|
backend/main.py
CHANGED
|
@@ -87,7 +87,7 @@ async def startup_event():
|
|
| 87 |
print(f"Warning: database seed failed: {e}")
|
| 88 |
|
| 89 |
# Log LLM config so it's visible in container logs
|
| 90 |
-
token = os.environ.get("HF_TOKEN"
|
| 91 |
api_base = os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1")
|
| 92 |
model = os.environ.get("MODEL_NAME", "Qwen/Qwen2.5-72B-Instruct")
|
| 93 |
token_status = f"set ({len(token)} chars)" if token else "NOT SET"
|
|
|
|
| 87 |
print(f"Warning: database seed failed: {e}")
|
| 88 |
|
| 89 |
# Log LLM config so it's visible in container logs
|
| 90 |
+
token = os.environ.get("HF_TOKEN") # no default
|
| 91 |
api_base = os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1")
|
| 92 |
model = os.environ.get("MODEL_NAME", "Qwen/Qwen2.5-72B-Instruct")
|
| 93 |
token_status = f"set ({len(token)} chars)" if token else "NOT SET"
|