eyad222 committed on
Commit
8b1e1fc
Β·
verified Β·
1 Parent(s): 1684ef7

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +69 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import requests
3
+ from gtts import gTTS
4
+ import tempfile
5
+ import os
6
+
7
+ # πŸ”‘ OpenRouter API key (Replace this securely in HF secrets later)
8
+ OPENROUTER_API_KEY = "YOUR_API_KEY"
9
+
10
# 🧠 Get AI response
def get_ai_response(user_input):
    """Send *user_input* to the OpenRouter chat-completions API and return the reply.

    Always returns a string (never raises), so the Gradio callback has
    something to display and speak even when the request fails.
    """
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
    }

    data = {
        "model": "mistralai/mistral-7b-instruct:free",
        "max_tokens": 100,
        "temperature": 0.7,
        "messages": [
            {"role": "system", "content": "You are a helpful and friendly AI assistant. Keep answers short."},
            {"role": "user", "content": user_input},
        ],
    }

    try:
        # timeout= prevents the Gradio worker from hanging forever on a
        # stalled connection; a network failure becomes the error string
        # instead of an unhandled exception in the UI callback.
        response = requests.post(url, headers=headers, json=data, timeout=30)
    except requests.RequestException:
        return "⚠️ OpenRouter error"

    if response.ok:
        result = response.json()
        return result["choices"][0]["message"]["content"]
    return "⚠️ OpenRouter error"
34
+
35
+ # πŸŽ™οΈ Speech-to-text β†’ AI β†’ Text-to-speech
36
+ def process_audio(audio):
37
+ if audio is None:
38
+ return "No audio provided", None
39
+
40
+ import speech_recognition as sr
41
+ recognizer = sr.Recognizer()
42
+ with sr.AudioFile(audio) as source:
43
+ audio_data = recognizer.record(source)
44
+ try:
45
+ text = recognizer.recognize_google(audio_data)
46
+ except:
47
+ return "Sorry, I couldn't understand you.", None
48
+
49
+ ai_response = get_ai_response(text)
50
+
51
+ # πŸ”Š Convert to speech
52
+ tts = gTTS(text=ai_response, lang="en")
53
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as f:
54
+ tts.save(f.name)
55
+ return ai_response, f.name
56
+
57
+ # πŸ–ΌοΈ Gradio UI
58
+ demo = gr.Interface(
59
+ fn=process_audio,
60
+ inputs=gr.Audio(source="microphone", type="filepath", label="πŸŽ™οΈ Speak now"),
61
+ outputs=[
62
+ gr.Text(label="🧠 AI Response"),
63
+ gr.Audio(label="πŸ”Š AI Voice")
64
+ ],
65
+ title="🎀 Voice Assistant with OpenRouter AI",
66
+ description="Speak your question. The AI will respond by voice."
67
+ )
68
+
69
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ gTTS
3
+ requests
4
+ speechrecognition