Patel Traders committed on
Commit
f4d3a43
·
verified ·
1 Parent(s): 8fde388

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -29
app.py CHANGED
@@ -1,6 +1,7 @@
1
- # --- 🔱 हनुमान AI: अजेय एवं आत्मनिर्भर थिंकिंग पोर्टल (Qwen 2.5) ---
2
- # मार्गदर्शक: दिव्य पटेल जी | भारत 🇮🇳
3
- # तकनीकी समाधान: Mismatched Sizes Fix + Gradio Version Compatibility
 
4
 
5
  import gradio as gr
6
  import torch
@@ -8,12 +9,13 @@ from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
8
  from qwen_vl_utils import process_vision_info
9
  import os
10
 
11
- # 🚀 उन्नत Qwen 2.5 मॉडल (3B Instruct)
12
  model_id = "Qwen/Qwen2.5-VL-3B-Instruct"
13
 
14
- print("🔱 हनुमान AI जागृत हो रहा है... सभी बाधाओं का नाश करते हुए।")
15
 
16
- # 🛠️ अजेय लोडिंग: ignore_mismatched_sizes=True एरर को जड़ से खत्म करेगा
 
17
  model = Qwen2VLForConditionalGeneration.from_pretrained(
18
  model_id,
19
  torch_dtype=torch.float32,
@@ -24,10 +26,15 @@ model = Qwen2VLForConditionalGeneration.from_pretrained(
24
  )
25
  processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
26
 
27
- def hanuman_logic(user_message, history, file_upload):
 
28
  try:
29
- # १. Gradio की साधारण लिस्ट हिस्ट्री को Qwen के डिक्शनरी फॉर्मेट में बदलना
30
- messages = []
 
 
 
 
31
  if history:
32
  for past_user, past_bot in history:
33
  if past_user:
@@ -35,27 +42,24 @@ def hanuman_logic(user_message, history, file_upload):
35
  if past_bot:
36
  messages.append({"role": "assistant", "content": [{"type": "text", "text": str(past_bot)}]})
37
 
38
- # २. वर्तमान संदेश और फाइल (चित्र/दस्तावेज) का विश्लेषण
39
  current_content = []
40
-
41
  if file_upload is not None:
42
  file_ext = file_upload.name.split('.')[-1].lower()
43
- # चित्र पहचान (Vision)
44
  if file_ext in ['png', 'jpg', 'jpeg', 'webp']:
45
  current_content.append({"type": "image", "image": file_upload.name})
46
- # फाइल पढ़ना (Document Reading)
47
  else:
48
  try:
49
  with open(file_upload.name, 'r', encoding='utf-8') as f:
50
  file_data = f.read()
51
- user_message = f"Document Context:\n{file_data}\n\nQuestion: {user_message}"
52
  except:
53
  pass
54
 
55
  current_content.append({"type": "text", "text": user_message})
56
  messages.append({"role": "user", "content": current_content})
57
 
58
- # ३. मॉडल के लिए इनपुट तैयार करना
59
  text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
60
  image_inputs, video_inputs = process_vision_info(messages)
61
 
@@ -67,8 +71,13 @@ def hanuman_logic(user_message, history, file_upload):
67
  return_tensors="pt",
68
  ).to("cpu")
69
 
70
- # ४. उत्तर का निर्माण (8192 टोकन क्षमता के साथ)
71
- generated_ids = model.generate(**inputs, max_new_tokens=1024)
 
 
 
 
 
72
  generated_ids_trimmed = [
73
  out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
74
  ]
@@ -76,35 +85,33 @@ def hanuman_logic(user_message, history, file_upload):
76
  generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
77
  )[0]
78
 
79
- # ५. पुराने Gradio फॉर्मेट (List of Lists) में हिस्ट्री वापस करना
80
  history.append([user_message, response])
81
  return "", history, None
82
 
83
  except Exception as e:
84
- error_msg = f"क्षमा करें दिव्य जी, एक तकनीकी बाधा आई है: {str(e)}"
85
  history.append([user_message, error_msg])
86
  return "", history, None
87
 
88
- # --- दिव्य पोर्टल इंटरफ़ेस (Gradio) ---
89
- # 🛡️ 'theme' को यहाँ से हटा दिया गया है ताकि कोई Warning न आए
90
  with gr.Blocks() as demo:
91
- gr.HTML("<div style='text-align: center; color: #ff5500;'><h1>🔱 हनुमान AI - अजेय थिंकिंग पोर्टल (v2.5)</h1><p><b>Pioneered by Divy Patel | 100% Free | भारत 🇮🇳</b></p></div>")
92
 
93
  with gr.Row():
94
  with gr.Column(scale=4):
95
- # 🛡️ पुराने Gradio के लिए सरल चैटबॉट (बिना 'type' के)
96
  chatbot = gr.Chatbot(height=550)
97
  with gr.Row():
98
- msg_input = gr.Textbox(placeholder="अपना प्रश्न यहाँ लिखें दिव्य जी...", scale=7)
99
- file_input = gr.File(label="फोटो/फाइल", scale=2)
100
- submit_btn = gr.Button("पूछें", variant="primary", scale=1)
101
 
102
- # इवेंट हैंडलिंग
103
  submit_btn.click(
104
- fn=hanuman_logic,
105
  inputs=[msg_input, chatbot, file_input],
106
  outputs=[msg_input, chatbot, file_input]
107
  )
108
 
109
  if __name__ == "__main__":
110
- demo.launch()
 
 
 
1
+ ```python
2
+ # --- 🔱 Hanuman AI: Supreme English Reasoning Portal (Qwen 2.5) ---
3
+ # Pioneered by Divy Patel | Bharat 🇮🇳
4
+ # Features: 100% English, Fast Inference, Free CPU Optimized, Vision Enabled
5
 
6
  import gradio as gr
7
  import torch
 
9
  from qwen_vl_utils import process_vision_info
10
  import os
11
 
12
+ # Model Upgrade: Qwen 2.5 VL 3B (Powerful & Reasoning-focused)
13
  model_id = "Qwen/Qwen2.5-VL-3B-Instruct"
14
 
15
+ print("🔱 Hanuman AI is awakening... Eliminating all errors for Divy Patel Ji.")
16
 
17
+ # 🛠️ ERROR FIX: ignore_mismatched_sizes=True added to prevent loading errors
18
+ # ⚡ SPEED FIX: Using low_cpu_mem_usage for faster loading on Free CPU
19
  model = Qwen2VLForConditionalGeneration.from_pretrained(
20
  model_id,
21
  torch_dtype=torch.float32,
 
26
  )
27
  processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
28
 
29
+ def hanuman_engine(user_message, history, file_upload):
30
+ """Main reasoning engine for Hanuman AI"""
31
  try:
32
+ # 🧠 SYSTEM PROMPT: Strictly English, High Reasoning
33
+ system_prompt = "You are 'Hanuman AI', a supreme reasoning engine created by Divy Patel. You must respond ONLY in English. Use logic, thinking, and precision. You can see images and read files. Be professional and respectful."
34
+
35
+ # 🛠️ FORMAT FIX: Converting Gradio list history to Qwen dict format
36
+ messages = [{"role": "system", "content": [{"type": "text", "text": system_prompt}]}]
37
+
38
  if history:
39
  for past_user, past_bot in history:
40
  if past_user:
 
42
  if past_bot:
43
  messages.append({"role": "assistant", "content": [{"type": "text", "text": str(past_bot)}]})
44
 
45
+ # Process Current Input
46
  current_content = []
 
47
  if file_upload is not None:
48
  file_ext = file_upload.name.split('.')[-1].lower()
 
49
  if file_ext in ['png', 'jpg', 'jpeg', 'webp']:
50
  current_content.append({"type": "image", "image": file_upload.name})
 
51
  else:
52
  try:
53
  with open(file_upload.name, 'r', encoding='utf-8') as f:
54
  file_data = f.read()
55
+ user_message = f"File Context:\n{file_data}\n\nQuestion: {user_message}"
56
  except:
57
  pass
58
 
59
  current_content.append({"type": "text", "text": user_message})
60
  messages.append({"role": "user", "content": current_content})
61
 
62
+ # Inference Setup
63
  text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
64
  image_inputs, video_inputs = process_vision_info(messages)
65
 
 
71
  return_tensors="pt",
72
  ).to("cpu")
73
 
74
+ # SPEED OPTIMIZATION: Reduced max_new_tokens for faster CPU response
75
+ generated_ids = model.generate(
76
+ **inputs,
77
+ max_new_tokens=512,
78
+ do_sample=False # Greedy search for faster and more logical output
79
+ )
80
+
81
  generated_ids_trimmed = [
82
  out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
83
  ]
 
85
  generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
86
  )[0]
87
 
 
88
  history.append([user_message, response])
89
  return "", history, None
90
 
91
  except Exception as e:
92
+ error_msg = f"System error: {str(e)}"
93
  history.append([user_message, error_msg])
94
  return "", history, None
95
 
96
+ # --- UI PORTAL ---
 
97
  with gr.Blocks() as demo:
98
+ gr.HTML("<div style='text-align: center;'><h1>🔱 Hanuman AI - English Reasoning Portal</h1><p>Pioneered by Divy Patel | 100% Free CPU | Bharat 🇮🇳</p></div>")
99
 
100
  with gr.Row():
101
  with gr.Column(scale=4):
 
102
  chatbot = gr.Chatbot(height=550)
103
  with gr.Row():
104
+ msg_input = gr.Textbox(placeholder="Ask your complex question here...", scale=7)
105
+ file_input = gr.File(label="Upload Image/File", scale=2)
106
+ submit_btn = gr.Button("ASK", variant="primary", scale=1)
107
 
 
108
  submit_btn.click(
109
+ fn=hanuman_engine,
110
  inputs=[msg_input, chatbot, file_input],
111
  outputs=[msg_input, chatbot, file_input]
112
  )
113
 
114
  if __name__ == "__main__":
115
+ demo.launch()
116
+
117
+ ```