# .env.example — copy this file to .env and fill in real values. Never commit .env.

# AMD Developer Cloud — OpenAI-compatible endpoint (vLLM serving on MI300X)
AMD_DEV_CLOUD_BASE_URL=
AMD_DEV_CLOUD_API_KEY=

# Hugging Face — for HF Hub artifact pushes + Inference API fallback
HF_TOKEN=

# OpenAI — local-dev fallback only. NOT used in submission.
# Lets the Gradio UI work locally while AMD Dev Cloud is being provisioned.
OPENAI_API_KEY=

# Active provider for the inference clients. One of: amd | hf | openai
SIGNBRIDGE_PROVIDER=amd

# Model IDs (overridable for experimentation)
SIGNBRIDGE_COMPOSER_MODEL=meta-llama/Llama-3.1-8B-Instruct
SIGNBRIDGE_TTS_MODEL=tts_models/multilingual/multi-dataset/xtts_v2
SIGNBRIDGE_STT_MODEL=openai/whisper-large-v3

# Sign classifier — local artifact (after training) or HF Hub repo
SIGNBRIDGE_CLASSIFIER_PATH=models/classifier.pt
SIGNBRIDGE_CLASSIFIER_HF_REPO=lucas-loo/signbridge-classifier

# Webcam frame sampling rate (Hz)
SIGNBRIDGE_FRAME_RATE=5

# Backend URL when the Space talks to a separate FastAPI server (leave blank for in-process)
SIGNBRIDGE_BACKEND_URL=