minzo456 committed on
Commit
3257319
·
verified ·
1 Parent(s): f5e6d8a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -54
app.py CHANGED
@@ -1,65 +1,63 @@
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
3
  import os
4
- import requests
 
5
  import base64
6
- import urllib.parse
7
- from duckduckgo_search import DDGS
8
 
9
- # 🔱 CONFIGURATION
10
- HF_TOKEN = os.getenv("HF_TOKEN")
11
- PRIMARY_MODEL = "deepseek-ai/DeepSeek-V3.2"
12
- client = InferenceClient(token=HF_TOKEN)
13
 
14
def mandatory_search(query):
    """Fetch up to three DuckDuckGo text results for *query*.

    Returns a newline-separated block of "Info: ... / Link: ..." entries,
    "No live data." when the search returns nothing, or "Search offline."
    when the search backend fails for any reason.
    """
    try:
        search_results = []
        with DDGS() as ddgs:
            # Iterate the generator directly; no need to materialize a list.
            for r in ddgs.text(query, max_results=3):
                search_results.append(f"Info: {r['body']}\nLink: {r['href']}")
        return "\n\n".join(search_results) if search_results else "No live data."
    except Exception:
        # Best-effort enrichment: catch Exception (not a bare except, which
        # would also trap KeyboardInterrupt/SystemExit) — the chat engine
        # still works without live data.
        return "Search offline."
23
 
24
def dual_engine(message, history):
    """Dispatch a chat message to the image, video, or text engine.

    Commands:
      /image <prompt> -> Pollinations image, returned as an "IMAGE_DATA:" data URI.
      /video <prompt> -> Pollinations video link in a formatted reply.
      anything else   -> DeepSeek chat completion, enriched with live search data.

    *history* is the Gradio chat history: a list of (user, assistant) pairs.
    """
    # 🔱 IMAGE GENERATION (/image [prompt])
    if message.lower().startswith("/image "):
        # Slice off the command prefix; str.replace would also delete
        # "/image " occurrences inside the prompt text itself.
        prompt = message[len("/image "):].strip()
        img_url = f"https://image.pollinations.ai/prompt/{urllib.parse.quote(prompt)}?width=1024&height=1024&nologo=true&model=flux"
        try:
            res = requests.get(img_url, timeout=20)
            res.raise_for_status()  # don't base64-encode an HTML error page
            return f"IMAGE_DATA:data:image/png;base64,{base64.b64encode(res.content).decode()}"
        except Exception:
            return "⚠️ Image Engine Busy."

    # 🔱 VIDEO GENERATION (/video [prompt])
    if message.lower().startswith("/video "):
        prompt = message[len("/video "):].strip()
        # Use the Pollinations video engine here (fastest for the UI).
        video_url = f"https://image.pollinations.ai/prompt/{urllib.parse.quote(prompt)}?width=720&height=480&model=video"
        return f"🔱 **COMMANDER, VIDEO FORGING STARTED...**\n\nමෙන්න ඔබේ වීඩියෝව: {video_url}\n\n*(සටහන: මෙම ලින්ක් එක ක්ලික් කිරීමෙන් ඔබේ වීඩියෝව නැරඹිය හැකියි)*"

    # 🔱 TEXT CHAT ENGINE — inject live search results into the system prompt.
    live_data = mandatory_search(message)
    system_instruction = f"IDENTIFICATION: MINZO AI (Created by MINZO-PRIME).\nLIVE DATA: {live_data}\nRespond strategically."

    messages = [{"role": "system", "content": system_instruction}]
    # Keep only the last 6 turns to bound prompt size.
    for user_msg, bot_msg in history[-6:]:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    try:
        response = client.chat_completion(messages=messages, model=PRIMARY_MODEL, max_tokens=1500)
        return response.choices[0].message.content
    except Exception:
        # Catch Exception, not a bare except, so Ctrl-C still works.
        return "⚠️ Neural Link Busy. Retry in 5s."
56
-
57
# 🔱 UI SETUP — a single chat surface routed through dual_engine.
demo = gr.ChatInterface(
    dual_engine,
    title="🔱 MINZO AI MULTIMODAL STUDIO",
    description="Commands: /image [text] | /video [text] | Normal Chat (DeepSeek-V3)",
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
 
64
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
 
1
  import gradio as gr
 
2
  import os
3
+ from huggingface_hub import InferenceClient
4
+ import io
5
  import base64
 
 
6
 
7
# 🛡️ CLIENT SETUP
# Token is read from the environment; if unset, getenv returns None and the
# clients fall back to anonymous (rate-limited) access.
HF_TOKEN = os.getenv("HF_TOKEN")

# DeepSeek-R1 — currently the strongest available "thinking" (reasoning) model.
text_client = InferenceClient("deepseek-ai/DeepSeek-R1", token=HF_TOKEN)
image_client = InferenceClient("black-forest-labs/FLUX.1-schnell", token=HF_TOKEN)
 
 
 
 
 
 
13
 
14
def forge_image(prompt):
    """Generate an image for *prompt* with FLUX.1-schnell.

    Returns "IMAGE_DATA:<base64 PNG>" on success, or a "🔱 Error in Image
    Forge: ..." string describing the failure.
    """
    try:
        picture = image_client.text_to_image(prompt)
        buffer = io.BytesIO()
        picture.save(buffer, format="PNG")
        encoded = base64.b64encode(buffer.getvalue()).decode()
        return f"IMAGE_DATA:{encoded}"
    except Exception as exc:
        return f"🔱 Error in Image Forge: {str(exc)}"
23
+
24
def dual_engine(message, history=None):
    """Route a command string to the image or the text engine.

    "/image <prompt>" is delegated to forge_image; anything else is sent to
    the DeepSeek-R1 chat endpoint. *history* is accepted for Gradio
    compatibility but unused. Returns the engine's response string, or a
    "⚠️ Neural Link Error: ..." string on failure.
    """
    if message.lower().startswith("/image "):
        # Slice off the command prefix; str.replace would also delete
        # "/image " occurrences inside the prompt text itself.
        image_prompt = message[len("/image "):].strip()
        return forge_image(image_prompt)

    try:
        # Reasoning models such as DeepSeek-R1 barely need a system prompt.
        messages = [{"role": "user", "content": message}]

        # R1 emits a long thinking trace, so streaming is effectively mandatory.
        # Collect chunks in a list and join once, instead of quadratic +=.
        parts = []
        for chunk in text_client.chat_completion(
            messages,
            max_tokens=4000,  # extra headroom for the reasoning trace
            stream=True,
        ):
            token = chunk.choices[0].delta.content
            if token:
                parts.append(token)

        return "".join(parts)

    except Exception as e:
        return f"⚠️ Neural Link Error: {str(e)}"
48
+
49
# 🔱 INTERFACE — plain text-in/text-out panel; dual_engine does the routing.
with gr.Blocks() as demo:
    command_box = gr.Textbox(label="Command Input")
    reply_box = gr.Textbox(label="Response")
    execute_button = gr.Button("Execute")

    # api_name keeps the endpoint stable for programmatic clients.
    execute_button.click(
        dual_engine,
        inputs=[command_box],
        outputs=[reply_box],
        api_name="dual_engine",
    )
61
 
62
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()