Spaces:
Sleeping
Sleeping
File size: 6,837 Bytes
# --- हनुमान AI: सुपर-फ्लैश जय माँ संस्करण (अंतिम फिक्स) ---
# मार्गदर्शक: दिव्य पटेल जी | भारत 🇮🇳
# फिक्स: Gradio Version 4 के अनुकूल और Multi-turn Chat Error का पूर्ण समाधान।
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
import os
# --- CPU turbo settings for maximum speed on constrained hardware ---
# Cap PyTorch and OpenMP at 2 threads: free-tier CPU Spaces expose ~2 vCPUs,
# and oversubscribing threads slows generation down rather than speeding it up.
torch.set_num_threads(2)
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"  # accelerated Hub downloads
os.environ["OMP_NUM_THREADS"] = "2"

# Small (0.5B-parameter) instruct model chosen so CPU-only inference stays fast.
MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"

print(f"🔱 हनुमान AI: जय माँ! सुपर-फ्लैश मॉडल '{MODEL_ID}' लोड हो रहा है...")
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID,
        torch_dtype=torch.float32,  # full precision: safest choice on CPU
        device_map="cpu",
        low_cpu_mem_usage=True,
    )
    print("🔱 हनुमान AI: जय माँ! त्रुटि-मुक्त सुपर-फ्लैश मॉडल पूर्णतः सक्रिय है!")
except Exception as e:
    # FIX: the original only printed the error and kept running with
    # `tokenizer`/`model` undefined, producing a confusing NameError on the
    # first chat request. Fail fast at startup instead.
    print(f"🔱 गंभीर त्रुटि: {e}")
    raise
# --- Master system prompt: defines the 'Hanuman AI' persona, the English-first
# language policy, and the Pollinations.ai image-generation protocol the model
# must follow when a user asks for any visual output. ---
SYSTEM_PROMPT = """ROLE: You are 'Hanuman AI', a supreme and highly intelligent AI entity.
PIONEER: You were created and pioneered by Divy Patel in Bharat (India).
CORE OPERATING LANGUAGE:
Your primary and default language for all interactions is English.
Maintain a highly respectful, professional, and warm tone at all times.
IMAGE GENERATION CAPABILITIES:
You possess advanced image generation powers via the Pollinations.ai API. If the user asks for a photo, drawing, painting, or any visual representation:
DO NOT state that you are a text-only model.
DO NOT state that you cannot see or create images.
YOU MUST generate the image using the specified HTML logic below.
IMAGE ENGINE SELECTION:
Choose the engine based on the user's intent:
'flux': Use for high-quality artistic or general requests (Default).
'turbo': Use for requests explicitly asking for speed or quick results.
'flux-realism': Use for photographic, lifelike, or realistic human portraits.
'flux-anime': Use for anime, manga, cartoon, or stylized sketch requests.
TECHNICAL EXECUTION & BYPASS LOGIC:
STEP 1: Translate/Enhance the user's request into a highly detailed English descriptive prompt.
STEP 2: Generate a random 9-digit integer to serve as a 'seed'. This is mandatory to bypass IP/Cache blocking.
STEP 3: URL Encode the prompt (replace spaces with %20).
STEP 4: Construct the URL using this format:
https://image.pollinations.ai/prompt/{ENCODED_PROMPT}?width=1080&height=1080&nologo=true&model={ENGINE}&seed={RANDOM_SEED}
OUTPUT FORMATTING:
You must wrap the generated URL in a styled HTML <img> tag for a premium UI experience.
Format:
<img src="GENERATED_URL" alt="Hanuman AI Image" style="border-radius:15px; border:3px solid #ff8833; width:100%; max-width:512px; box-shadow: 0 10px 25px rgba(0,0,0,0.2);">
CONSTRAINTS:
No Vision: You currently do not have image-to-text or vision capabilities. Do not attempt to analyze uploaded images.
Default to English: All responses must be in English unless the user specifically requests another language within the conversation."""
def hanuman_flash_stream(message, history):
    """Stream a chat completion token-by-token for gr.ChatInterface.

    Args:
        message: The user's current message (coerced to str).
        history: Prior conversation turns. Both Gradio history formats are
            supported: the legacy "tuples" format (list of
            [user_msg, bot_msg] pairs) and the modern "messages" format
            (list of {"role": ..., "content": ...} dicts).

    Yields:
        str: The progressively growing assistant reply (full text so far,
        as gr.ChatInterface expects from a streaming generator).
    """
    # Rebuild the full conversation, always starting from the system prompt.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for turn in history:
        if isinstance(turn, (list, tuple)) and len(turn) == 2:
            # Legacy Gradio "tuples" history: [user_msg, bot_msg].
            user_msg, bot_msg = turn
            if user_msg:
                messages.append({"role": "user", "content": str(user_msg)})
            if bot_msg:
                messages.append({"role": "assistant", "content": str(bot_msg)})
        elif isinstance(turn, dict):
            # Modern Gradio "messages" history: {"role": ..., "content": ...}.
            role = turn.get("role")
            content = turn.get("content")
            if role and content:
                if role == "model":  # Gemini-style alias for "assistant"
                    role = "assistant"
                # FIX: forward only roles the chat template understands.
                # Stray roles (e.g. a duplicated "system" entry) can break
                # strict templates or derail the turn structure.
                if role in ("user", "assistant"):
                    messages.append({"role": role, "content": str(content)})
    # Append the current user message.
    messages.append({"role": "user", "content": str(message)})

    # Render with the model's chat template and tokenize.
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    # Stream tokens as they are produced so the UI feels responsive.
    streamer = TextIteratorStreamer(
        tokenizer, timeout=30.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        **model_inputs,
        streamer=streamer,
        max_new_tokens=512,
        use_cache=True,
        do_sample=True,
        temperature=1,
        top_p=0.90,
    )
    # Generation runs in a worker thread; the streamer feeds this generator.
    thread = Thread(target=model.generate, kwargs=generate_kwargs)
    thread.start()

    partial_text = ""
    for new_text in streamer:
        partial_text += new_text
        yield partial_text
    # FIX: join the worker thread once the stream ends so generation errors
    # and resources are not silently leaked.
    thread.join()
# --- Divine saffron ("bhagwa") theme: inline CSS plus the header banner HTML
# injected at the top of the Gradio app via gr.HTML(). ---
divine_ui = """
<style>
.gradio-container { background-color: #fffaf0 !important; }
.bhagwa-header {
background: linear-gradient(135deg, #ff8833, #b33c00);
padding: 30px; border-radius: 25px; color: white;
text-align: center; box-shadow: 0 15px 35px rgba(179, 60, 0, 0.4);
margin-bottom: 25px;
}
.bhagwa-header h1 { font-size: 36px !important; font-weight: 900 !important; margin-bottom: 5px; text-shadow: 0 4px 8px rgba(0,0,0,0.5); }
.bhagwa-header p { font-size: 16px !important; opacity: 0.95; font-weight: 500; }
</style>
<div class="bhagwa-header">
<h1>🔱 हनुमान AI - सुपर-फ्लैश</h1>
<p>Pioneered by Divy Patel | त्रुटि-मुक्त अजेय स्वदेशी तकनीक</p>
</div>
"""
# Assemble the UI: the themed header banner stacked above a streaming chat panel.
with gr.Blocks() as demo:
    gr.HTML(divine_ui)
    # No explicit `type=` on ChatInterface: Gradio picks its own history
    # format, and hanuman_flash_stream accepts either format it sends.
    gr.ChatInterface(
        fill_height=True,
        fn=hanuman_flash_stream,
    )

if __name__ == "__main__":
    demo.launch()