import os
import tempfile

import gradio as gr
import requests
from gtts import gTTS

# 🔑 OpenRouter API key — read from the environment (set it as a secret on
# Hugging Face Spaces). The literal fallback keeps old behavior but should
# never hold a real key in source control.
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "YOUR_API_KEY")


# 🧠 Get AI response
def get_ai_response(user_input):
    """Send *user_input* to OpenRouter's chat-completions API and return the reply.

    Returns the model's message text on success, or the short error string
    "⚠️ OpenRouter error" on any HTTP/network failure — never raises, so the
    Gradio handler stays responsive.
    """
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
    }
    data = {
        "model": "mistralai/mistral-7b-instruct:free",
        "max_tokens": 100,
        "temperature": 0.7,
        "messages": [
            {"role": "system", "content": "You are a helpful and friendly AI assistant. Keep answers short."},
            {"role": "user", "content": user_input},
        ],
    }
    try:
        # timeout keeps a stalled API call from hanging the UI indefinitely
        response = requests.post(url, headers=headers, json=data, timeout=30)
    except requests.RequestException:
        return "⚠️ OpenRouter error"
    if response.ok:
        result = response.json()
        return result["choices"][0]["message"]["content"]
    return "⚠️ OpenRouter error"


# 🎙️ Speech-to-text → AI → Text-to-speech
def process_audio(audio):
    """Full pipeline: transcribe *audio*, query the AI, synthesize the reply.

    Parameters
    ----------
    audio : str | None
        Filepath of the recorded clip (Gradio ``type="filepath"``), or None
        when nothing was recorded.

    Returns
    -------
    tuple[str, str | None]
        (AI response text, path to an MP3 of the spoken response or None).
    """
    if audio is None:
        return "No audio provided", None

    import speech_recognition as sr  # deferred: heavy import, only needed here

    recognizer = sr.Recognizer()
    with sr.AudioFile(audio) as source:
        audio_data = recognizer.record(source)
    try:
        text = recognizer.recognize_google(audio_data)
    except (sr.UnknownValueError, sr.RequestError):
        # UnknownValueError: speech was unintelligible;
        # RequestError: the Google STT service could not be reached.
        return "Sorry, I couldn't understand you.", None

    ai_response = get_ai_response(text)

    # 🔊 Convert to speech. delete=False so the file survives the handle
    # closing; Gradio reads (and later cleans up) the output path itself.
    tts = gTTS(text=ai_response, lang="en")
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
        tts.save(f.name)
    return ai_response, f.name


# 🖼️ Gradio UI
# NOTE(review): Gradio 4.x renamed `source=` to `sources=["microphone"]` —
# confirm against the installed gradio version before upgrading.
demo = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(source="microphone", type="filepath", label="🎙️ Speak now"),
    outputs=[
        gr.Text(label="🧠 AI Response"),
        gr.Audio(label="🔊 AI Voice"),
    ],
    title="🎤 Voice Assistant with OpenRouter AI",
    description="Speak your question. The AI will respond by voice.",
)

if __name__ == "__main__":
    demo.launch()