Viani committed on
Commit
addf82a
·
verified ·
1 Parent(s): 170407b

Clean: remove all AMD Gateway references, use only OpenAI client with HF_TOKEN

Browse files
Files changed (3) hide show
  1. .env.example +1 -5
  2. .gitignore +1 -0
  3. inference.py +8 -20
.env.example CHANGED
@@ -1,11 +1,7 @@
1
- # LLM Configuration (required by hackathon evaluator)
2
  API_BASE_URL=https://router.huggingface.co/v1
3
  MODEL_NAME=gpt-4.1-mini
4
  HF_TOKEN=hf_your_token_here
5
 
6
  # Environment server
7
  ENV_URL=http://localhost:7860
8
-
9
- # AMD LLM Gateway (local development only — overrides API_BASE_URL when set)
10
- # AMD_LLM_API_KEY=your-ocp-apim-subscription-key-here
11
- # AMD_GATEWAY_BASE=https://llm-api.amd.com/openai
 
1
+ # LLM Configuration (required)
2
  API_BASE_URL=https://router.huggingface.co/v1
3
  MODEL_NAME=gpt-4.1-mini
4
  HF_TOKEN=hf_your_token_here
5
 
6
  # Environment server
7
  ENV_URL=http://localhost:7860
 
 
 
 
.gitignore CHANGED
@@ -10,3 +10,4 @@ venv/
10
  *.sqlite
11
  *.db
12
  .DS_Store
 
 
10
  *.sqlite
11
  *.db
12
  .DS_Store
13
+ .cursor/
inference.py CHANGED
@@ -5,14 +5,13 @@ Baseline inference script for DataDetective.
5
  Uses an LLM via the OpenAI-compatible API to investigate each task by
6
  running SQL queries and submitting a final analysis.
7
 
8
- Required environment variables (set by hackathon evaluator):
9
  API_BASE_URL — LLM endpoint (e.g. https://router.huggingface.co/v1)
10
  MODEL_NAME — model identifier (e.g. gpt-4.1-mini)
11
  HF_TOKEN — API key / Hugging Face token
12
 
13
  Optional:
14
  ENV_URL — DataDetective server URL (default http://localhost:7860)
15
- AMD_LLM_API_KEY — If set, uses AMD Gateway instead (local dev only)
16
  """
17
 
18
  import asyncio
@@ -22,7 +21,7 @@ import re
22
  import sys
23
  import time
24
 
25
- from openai import AzureOpenAI, OpenAI
26
 
27
  import websockets.asyncio.client as _wsc
28
  _orig_ws_connect = _wsc.connect
@@ -44,7 +43,6 @@ from openenv.core.generic_client import GenericEnvClient
44
  API_BASE_URL = os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1")
45
  MODEL_NAME = os.environ.get("MODEL_NAME", "gpt-4.1-mini")
46
  HF_TOKEN = os.environ.get("HF_TOKEN") or os.environ.get("API_KEY", "")
47
- AMD_LLM_API_KEY = os.environ.get("AMD_LLM_API_KEY", "")
48
  ENV_URL = os.environ.get("ENV_URL", "http://localhost:7860").rstrip("/")
49
 
50
  BENCHMARK = "data_detective"
@@ -64,23 +62,13 @@ TASK_IDS = [
64
 
65
 
66
  def _build_llm_client() -> OpenAI:
67
- if AMD_LLM_API_KEY:
68
- return AzureOpenAI(
69
- api_key="dummy",
70
- api_version="2024-02-01",
71
- base_url=os.environ.get("AMD_GATEWAY_BASE", "https://llm-api.amd.com/openai"),
72
- default_headers={"Ocp-Apim-Subscription-Key": AMD_LLM_API_KEY},
73
  )
74
-
75
- if HF_TOKEN:
76
- return OpenAI(base_url=API_BASE_URL, api_key=HF_TOKEN)
77
-
78
- print(
79
- "ERROR: Set HF_TOKEN (or API_KEY) for LLM access, "
80
- "or AMD_LLM_API_KEY for AMD Gateway. Exiting.",
81
- file=sys.stderr,
82
- )
83
- sys.exit(1)
84
 
85
 
86
  llm = _build_llm_client()
 
5
  Uses an LLM via the OpenAI-compatible API to investigate each task by
6
  running SQL queries and submitting a final analysis.
7
 
8
+ Required environment variables:
9
  API_BASE_URL — LLM endpoint (e.g. https://router.huggingface.co/v1)
10
  MODEL_NAME — model identifier (e.g. gpt-4.1-mini)
11
  HF_TOKEN — API key / Hugging Face token
12
 
13
  Optional:
14
  ENV_URL — DataDetective server URL (default http://localhost:7860)
 
15
  """
16
 
17
  import asyncio
 
21
  import sys
22
  import time
23
 
24
+ from openai import OpenAI
25
 
26
  import websockets.asyncio.client as _wsc
27
  _orig_ws_connect = _wsc.connect
 
43
  API_BASE_URL = os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1")
44
  MODEL_NAME = os.environ.get("MODEL_NAME", "gpt-4.1-mini")
45
  HF_TOKEN = os.environ.get("HF_TOKEN") or os.environ.get("API_KEY", "")
 
46
  ENV_URL = os.environ.get("ENV_URL", "http://localhost:7860").rstrip("/")
47
 
48
  BENCHMARK = "data_detective"
 
62
 
63
 
64
  def _build_llm_client() -> OpenAI:
65
+ if not HF_TOKEN:
66
+ print(
67
+ "ERROR: Set HF_TOKEN (or API_KEY) for LLM access. Exiting.",
68
+ file=sys.stderr,
 
 
69
  )
70
+ sys.exit(1)
71
+ return OpenAI(base_url=API_BASE_URL, api_key=HF_TOKEN)
 
 
 
 
 
 
 
 
72
 
73
 
74
  llm = _build_llm_client()