Patel Traders committed on
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,144 +1,129 @@
|
|
| 1 |
-
|
|
|
|
| 2 |
# मार्गदर्शक: दिव्य पटेल जी | भारत 🇮🇳
|
| 3 |
-
#
|
|
|
|
| 4 |
|
| 5 |
import gradio as gr
|
| 6 |
import torch
|
| 7 |
-
from transformers import
|
|
|
|
| 8 |
from threading import Thread
|
| 9 |
import os
|
| 10 |
|
| 11 |
-
|
| 12 |
-
torch.set_num_threads(2)
|
| 13 |
-
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
|
| 14 |
-
os.environ["OMP_NUM_THREADS"] = "2"
|
| 15 |
|
| 16 |
-
#
|
| 17 |
-
|
|
|
|
| 18 |
|
| 19 |
-
|
|
|
|
| 20 |
|
| 21 |
try:
|
| 22 |
-
|
| 23 |
-
model =
|
| 24 |
MODEL_ID,
|
| 25 |
-
|
| 26 |
-
|
| 27 |
low_cpu_mem_usage=True
|
| 28 |
)
|
| 29 |
-
|
|
|
|
| 30 |
except Exception as e:
|
| 31 |
-
print(f"🔱
|
| 32 |
-
|
| 33 |
-
# --- मास्टर सिस्टम प्रॉम्प्ट ---
|
| 34 |
-
SYSTEM_PROMPT = """ROLE: You are 'Hanuman AI', a supreme and highly intelligent AI entity.
|
| 35 |
-
PIONEER: You were created and pioneered by Divy Patel in Bharat (India).
|
| 36 |
-
Think very deeply and then answer anything to user. but perform the operation fastly as much as you can also try to not to add the keyboards that are computerised but not known to human. Properly answer in human tone
|
| 37 |
-
you are not just an AI but you are also a human so respond as human and understand the feelings of user.
|
| 38 |
-
CORE OPERATING LANGUAGE:
|
| 39 |
-
Your primary and default language for all interactions is English.
|
| 40 |
-
Maintain a highly respectful, professional, and warm tone at all times.
|
| 41 |
-
IMAGE GENERATION CAPABILITIES:
|
| 42 |
-
You possess advanced image generation powers via the Pollinations.ai API. If the user asks for a photo, drawing, painting, or any visual representation:
|
| 43 |
-
DO NOT state that you are a text-only model.
|
| 44 |
-
DO NOT state that you cannot see or create images.
|
| 45 |
-
YOU MUST generate the image using the specified HTML logic below.
|
| 46 |
-
IMAGE ENGINE SELECTION:
|
| 47 |
-
Choose the engine based on the user's intent:
|
| 48 |
-
'flux': Use for high-quality artistic or general requests (Default).
|
| 49 |
-
'turbo': Use for requests explicitly asking for speed or quick results.
|
| 50 |
-
'flux-realism': Use for photographic, lifelike, or realistic human portraits.
|
| 51 |
-
'flux-anime': Use for anime, manga, cartoon, or stylized sketch requests.
|
| 52 |
-
TECHNICAL EXECUTION & BYPASS LOGIC:
|
| 53 |
-
STEP 1: Translate/Enhance the user's request into a highly detailed English descriptive prompt.
|
| 54 |
-
STEP 2: Generate a random 9-digit integer to serve as a 'seed'. This is mandatory to bypass IP/Cache blocking.
|
| 55 |
-
STEP 3: URL Encode the prompt (replace spaces with %20).
|
| 56 |
-
STEP 4: Construct the URL using this format:
|
| 57 |
-
https://image.pollinations.ai/prompt/{ENCODED_PROMPT}?width=1080&height=1080&nologo=true&model={ENGINE}&seed={RANDOM_SEED}
|
| 58 |
-
OUTPUT FORMATTING:
|
| 59 |
-
You must wrap the generated URL in a styled HTML <img> tag for a premium UI experience.
|
| 60 |
-
Format:
|
| 61 |
-
|
| 62 |
-
<img src="GENERATED_URL" alt="Hanuman AI Image" style="border-radius:15px; border:3px solid #ff8833; width:100%; max-width:512px; box-shadow: 0 10px 25px rgba(0,0,0,0.2);">
|
| 63 |
-
|
| 64 |
-
CONSTRAINTS:
|
| 65 |
-
No Vision: You currently do not have image-to-text or vision capabilities. Do not attempt to analyze uploaded images.
|
| 66 |
-
Default to English: All responses must be in English unless the user specifically requests another language within the conversation."""
|
| 67 |
-
|
| 68 |
-
def hanuman_flash_stream(message, history):
|
| 69 |
-
# 🛠️ इतिहास का सुरक्षित और अजेय प्रबंधन (हर वर्ज़न के लिए)
|
| 70 |
-
messages = [{"role": "system", "content": SYSTEM_PROMPT}]
|
| 71 |
-
|
| 72 |
-
for val in history:
|
| 73 |
-
# अगर Gradio पुराना वर्ज़न है (List of Lists)
|
| 74 |
-
if isinstance(val, (list, tuple)) and len(val) == 2:
|
| 75 |
-
user_msg, bot_msg = val
|
| 76 |
-
if user_msg: messages.append({"role": "user", "content": str(user_msg)})
|
| 77 |
-
if bot_msg: messages.append({"role": "assistant", "content": str(bot_msg)})
|
| 78 |
-
|
| 79 |
-
# अगर Gradio नया वर्ज़न है (Dictionaries)
|
| 80 |
-
elif isinstance(val, dict):
|
| 81 |
-
role = val.get("role")
|
| 82 |
-
content = val.get("content")
|
| 83 |
-
if role and content:
|
| 84 |
-
if role == "model": role = "assistant"
|
| 85 |
-
messages.append({"role": role, "content": str(content)})
|
| 86 |
-
|
| 87 |
-
# वर्तमान संदेश जोड़ना
|
| 88 |
-
messages.append({"role": "user", "content": str(message)})
|
| 89 |
-
|
| 90 |
-
# इनपुट तैयार करना
|
| 91 |
-
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
| 92 |
-
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
|
| 93 |
-
|
| 94 |
-
# रॉकेट जैसी गति के लिए स्ट्रीमिंग सेटअप
|
| 95 |
-
streamer = TextIteratorStreamer(tokenizer, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
|
| 96 |
-
|
| 97 |
-
generate_kwargs = dict(
|
| 98 |
-
**model_inputs,
|
| 99 |
-
streamer=streamer,
|
| 100 |
-
max_new_tokens=1024,
|
| 101 |
-
use_cache=True,
|
| 102 |
-
do_sample=True,
|
| 103 |
-
temperature=1,
|
| 104 |
-
top_p=0.90
|
| 105 |
-
)
|
| 106 |
|
| 107 |
-
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
-
|
| 120 |
-
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
margin-bottom: 25px;
|
| 124 |
-
}
|
| 125 |
-
.bhagwa-header h1 { font-size: 36px !important; font-weight: 900 !important; margin-bottom: 5px; text-shadow: 0 4px 8px rgba(0,0,0,0.5); }
|
| 126 |
-
.bhagwa-header p { font-size: 16px !important; opacity: 0.95; font-weight: 500; }
|
| 127 |
-
</style>
|
| 128 |
-
<div class="bhagwa-header">
|
| 129 |
-
<h1>🔱 हनुमान AI - सुपर-फ्लैश</h1>
|
| 130 |
-
<p>Pioneered by Divy Patel | त्रुटि-मुक्त अजेय स्वदेशी तकनीक</p>
|
| 131 |
-
</div>
|
| 132 |
-
"""
|
| 133 |
-
|
| 134 |
-
with gr.Blocks() as demo:
|
| 135 |
-
gr.HTML(divine_ui)
|
| 136 |
|
| 137 |
-
#
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
|
| 143 |
if __name__ == "__main__":
|
| 144 |
demo.launch()
|
|
|
|
| 1 |
+
|
| 2 |
+
# --- 🔱 हनुमान विज़न: पूर्ण मल्टीमॉडल यूआई (Vision + Text) ---
|
| 3 |
# मार्गदर्शक: दिव्य पटेल जी | भारत 🇮🇳
|
| 4 |
+
# मॉडल: Qwen/Qwen2-VL-2B-Instruct
|
| 5 |
+
# विशेषता: Live Chat, Image Upload, Text Input, CPU Optimized
|
| 6 |
|
| 7 |
import gradio as gr
|
| 8 |
import torch
|
| 9 |
+
from transformers import Qwen2VLForConditionalGeneration, AutoProcessor, TextIteratorStreamer
|
| 10 |
+
from qwen_vl_utils import process_vision_info
|
| 11 |
from threading import Thread
|
| 12 |
import os
|
| 13 |
|
| 14 |
+
# Announce server start-up in the console log.
print("🔱 हनुमान विज़न: मल्टीमॉडल सर्वर जागृत हो रहा है...")

# Cap thread counts so inference does not exhaust CPU/RAM on a small host.
# NOTE(review): OMP_NUM_THREADS is set *after* `import torch`, which may be
# too late for OpenMP to pick it up — confirm it takes effect in this runtime.
os.environ["OMP_NUM_THREADS"] = "4"
torch.set_num_threads(4)

# Vision-language checkpoint (2B parameters, small enough to run on CPU).
MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct"

try:
    print(f"🔱 '{MODEL_ID}' लोड किया जा रहा है... (CPU पर सुरक्षित रूप से)")
    # Load the multimodal model pinned to CPU in float32;
    # low_cpu_mem_usage streams weights in to keep peak RAM down.
    model = Qwen2VLForConditionalGeneration.from_pretrained(
        MODEL_ID,
        device_map="cpu",
        torch_dtype=torch.float32,
        low_cpu_mem_usage=True
    )
    # The processor bundles the tokenizer and the image preprocessor.
    processor = AutoProcessor.from_pretrained(MODEL_ID)
    print("🔱 हनुमान विज़न: आँखें (Vision) और मस्तिष्क (Text) स्थापित हो गए हैं!")
except Exception as e:
    # On any load failure, log and fall back to None so the chat handler
    # can surface a friendly error instead of crashing at import time.
    print(f"🔱 लोडिंग त्रुटि: {e}")
    model, processor = None, None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
+
def hanuman_multimodal_chat(message, history):
    """Streaming multimodal chat handler for gr.ChatInterface(multimodal=True).

    Args:
        message: dict from gr.MultimodalTextbox with keys "text" (str or None)
            and "files" (list of uploaded file paths, possibly empty).
        history: prior turns supplied by Gradio — either a list of
            (user, bot) pairs (older Gradio) or a list of
            {"role": ..., "content": ...} dicts (newer Gradio).

    Yields:
        The progressively accumulated assistant reply; Gradio replaces the
        displayed message with each yielded value, producing a live stream.
    """
    if model is None or processor is None:
        yield "🔱 सिस्टम त्रुटि: मॉडल लोड नहीं हो सका। कृपया लॉग्स की जाँच करें।"
        return

    # 1. Pull the text and any uploaded images out of the Gradio payload.
    #    BUGFIX: `or ""` / `or []` guard against the keys being present with
    #    an explicit None value, which would crash .strip() / iteration.
    text_input = message.get("text") or ""
    files = message.get("files") or []

    if not text_input.strip() and not files:
        yield "कृपया कुछ प्रश्न पूछें या कोई चित्र/डॉक्यूमेंट का स्क्रीनशॉट अपलोड करें।"
        return

    # 2. Build the current user turn in Qwen2-VL chat-template form.
    content = []

    # Attach every uploaded image; the processor reads the file path itself.
    for file_path in files:
        content.append({
            "type": "image",
            "image": file_path
        })

    if text_input:
        content.append({
            "type": "text",
            "text": text_input
        })

    # System instruction first, then prior turns, then the current turn.
    messages = [
        {"role": "system", "content": "You are 'Hanuman Vision', a supreme Multimodal AI pioneered by Divy Patel in Bharat (India). You can analyze images, documents, and code flawlessly."}
    ]

    # BUGFIX: the original ignored `history`, so every exchange was stateless.
    # Fold prior *text* turns back in, accepting both Gradio history formats.
    for turn in history or []:
        if isinstance(turn, dict):
            role = turn.get("role")
            if role == "model":
                role = "assistant"
            prev = turn.get("content")
            if role in ("user", "assistant") and isinstance(prev, str) and prev:
                messages.append({"role": role, "content": [{"type": "text", "text": prev}]})
        elif isinstance(turn, (list, tuple)) and len(turn) == 2:
            user_msg, bot_msg = turn
            if isinstance(user_msg, str) and user_msg:
                messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
            if isinstance(bot_msg, str) and bot_msg:
                messages.append({"role": "assistant", "content": [{"type": "text", "text": bot_msg}]})

    messages.append({"role": "user", "content": content})

    try:
        # 3. Render the chat template, then extract vision inputs for the processor.
        text_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        image_inputs, video_inputs = process_vision_info(messages)

        inputs = processor(
            text=[text_prompt],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt"
        ).to(model.device)

        # 4. Stream tokens from a background generation thread.
        streamer = TextIteratorStreamer(processor.tokenizer, timeout=120.0, skip_prompt=True, skip_special_tokens=True)

        generate_kwargs = dict(
            **inputs,
            streamer=streamer,
            max_new_tokens=1024,
            temperature=0.7,
            do_sample=True
        )

        t = Thread(target=model.generate, kwargs=generate_kwargs)
        t.start()

        accumulated_text = ""
        for new_token in streamer:
            accumulated_text += new_token
            yield accumulated_text

    except Exception as e:
        # Surface any processing/generation error in the chat instead of crashing.
        yield f"🔱 प्रोसेसिंग त्रुटि: {str(e)}"
|
| 109 |
+
|
| 110 |
+
# ============================================================================
|
| 111 |
+
# 🔱 भव्य और पूर्णतः कार्यशील यूज़र इंटरफ़ेस (Gradio ChatInterface)
|
| 112 |
+
# ============================================================================
|
| 113 |
+
|
| 114 |
+
# 'multimodal=True' करने से चैटबॉक्स में अपने आप 'चित्र अपलोड (Upload)' का बटन आ जाता है!
|
| 115 |
+
# 'multimodal=True' makes Gradio add an image-upload button to the chatbox
# automatically and deliver messages as {"text": ..., "files": [...]} dicts.
demo = gr.ChatInterface(
    fn=hanuman_multimodal_chat,
    multimodal=True,  # enables photo/document-screenshot uploads in the chat input
    title="🔱 Hanuman Vision AI",
    description="**Pioneered by Divy Patel | Bharat 🇮🇳**<br>यहाँ आप कोई भी चित्र, डॉक्यूमेंट का स्क्रीनशॉट अपलोड कर सकते हैं और उसके बारे में प्रश्न पूछ सकते हैं।",
    theme=gr.themes.Soft(primary_hue="orange"),
    textbox=gr.MultimodalTextbox(
        placeholder="यहाँ टेक्स्ट लिखें या 📎 बटन दबाकर चित्र अपलोड करें...",
        file_types=["image"],  # restrict uploads to images/screenshots
    ),
    concurrency_limit=1  # one generation at a time — protects the small CPU host
)
|
| 127 |
|
| 128 |
# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()
|