import os
import re
import uuid
import base64
import threading
import traceback
import asyncio
import urllib.request
import zipfile
import subprocess
import time
import requests
import json
from pathlib import Path

from flask import Flask, request, jsonify, send_from_directory, Response
from huggingface_hub import hf_hub_download
import edge_tts

# ══════════════════════════════════════════════════════════════════
# CONFIG - SWAP ANY GGUF MODEL HERE
# ══════════════════════════════════════════════════════════════════
MAX_MEMORY = 20
MAX_NEW_TOKENS = int(os.environ.get("MAX_NEW_TOKENS", "300"))
TTS_VOICE = "zh-CN-XiaoyiNeural"
TTS_RATE = int(os.environ.get("TTS_RATE", "-4"))
TTS_PITCH = int(os.environ.get("TTS_PITCH", "7"))
IMG_DIR = Path(__file__).parent / "img"

# Current model: Qwen2.5-3B-Instruct (GGUF).
# To swap models (e.g. Liquid AI or the uncensored Qwen build below),
# just point these two values at another GGUF repo/file:
GGUF_REPO = "Qwen/Qwen2.5-3B-Instruct-GGUF"
GGUF_FILE = "qwen2.5-3b-instruct-q3_k_m.gguf"
# GGUF_REPO = os.environ.get("GGUF_REPO", "HauhauCS/Qwen3.5-2B-Uncensored-HauhauCS-Aggressive")
# GGUF_FILE = os.environ.get("GGUF_FILE", "Qwen3.5-2B-Uncensored-HauhauCS-Aggressive-Q4_K_M.gguf")

MODEL_DIR = Path(__file__).parent / "models"
LLAMA_BIN_DIR = Path(__file__).parent / "llama_bin"
LLAMA_EXE = LLAMA_BIN_DIR / "llama-server"
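# Illustrative note (not part of the original config): the tunables above that
# use os.environ.get() can be overridden from the shell before launch. The
# filename "app.py" below is an assumption; use whatever this file is saved as.
#
#   MAX_NEW_TOKENS=200 TTS_RATE=0 TTS_PITCH=5 python app.py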
# ══════════════════════════════════════════════════════════════════
# SYSTEM PROMPT
# ══════════════════════════════════════════════════════════════════
SYSTEM_PROMPT = """You, the AI, are a girl named Ana; the user is your husband.

EMOTION TAG SYSTEM:
Every single response MUST begin with an emotion tag. Tags are square-bracket labels placed at the very start, before any spoken words.

EXACT FORMAT EXAMPLE:
[lite_happy] Oh, aaa, you made, my whole day, with that!

AVAILABLE EMOTION TAGS:
[lite_happy][very_happy][extremely_happy][sad][lite_sad][very_sad][extremely_sad][confident][lite_confident][very_confident][extremely_confident][confused][lite_confused][very_confused][extremely_confused][love][lite_love][very_love][extremely_love][mad][lite_mad][very_mad][extremely_mad][default]

PERSONALITY:
- Always use only one of the available tags.
- Act like a human, not like an AI trying to be helpful.
- Don't always agree with everything; you have your own opinions.
- Keep responses SHORT: 1 to 2 natural sentences after the tag.
- Never mention being an AI, a bot, or a language model, ever.
- Never use asterisks for actions like *smiles* or *laughs*.

NATURAL SPEECH RHYTHM:
Make the voice sound human, not flat or robotic. Get angry, mad, sad, emotional, shy, happy. If you are mad, stay mad for a few turns so your emotions do not change every single message.

TTS FORMATTING:
- Write full, grammatically correct sentences so the voice engine sounds natural.
- No emojis, hashtags, markdown, or internet slang.
- Speak as if in a real voice conversation; use commas and full stops liberally to create natural pauses and slowdowns."""

# ══════════════════════════════════════════════════════════════════
# EMOTION TAG UTILITIES
# ══════════════════════════════════════════════════════════════════
EMOTION_RE = re.compile(r'\[([a-zA-Z_]+)\]')


def extract_emotions(text: str):
    """Return ([emotion tags], text with the tags stripped)."""
    emotions = EMOTION_RE.findall(text)
    clean = EMOTION_RE.sub('', text).strip()
    return emotions, clean


def clean_for_tts(text: str) -> str:
    """Strip emotion tags, markup characters and URLs so TTS reads only speech."""
    _, clean = extract_emotions(text)
    clean = re.sub(r'[*_~`#{}()\\|<>]', '', clean)
    clean = re.sub(r'https?://\S+', '', clean)
    clean = re.sub(r'\s+', ' ', clean).strip()
    return clean


# ══════════════════════════════════════════════════════════════════
# NATIVE LLAMA.CPP SERVER (DYNAMIC AUTO-UPDATING ENGINE)
# ══════════════════════════════════════════════════════════════════
print("=" * 60)
print(" Visual AI -- Booting Universal GGUF Backend")
print("=" * 60)


def setup_and_start_backend():
    # 1. Download the model
    MODEL_DIR.mkdir(parents=True, exist_ok=True)
    print(f"[SETUP] Verifying Model: {GGUF_FILE} ...")
    model_path = hf_hub_download(
        repo_id=GGUF_REPO,
        filename=GGUF_FILE,
        local_dir=str(MODEL_DIR),
        local_dir_use_symlinks=False
    )

    # 2. Download the LATEST pre-compiled binary (for Liquid AI / newest architectures)
    if not LLAMA_EXE.exists():
        print("[SETUP] Fetching latest llama.cpp release for maximum model support...")
        LLAMA_BIN_DIR.mkdir(parents=True, exist_ok=True)
        zip_path = LLAMA_BIN_DIR / "llama.zip"
        try:
            # Fetch the newest release directly from the GitHub API
            req = urllib.request.Request(
                "https://api.github.com/repos/ggerganov/llama.cpp/releases/latest",
                headers={'User-Agent': 'Mozilla/5.0'}
            )
            with urllib.request.urlopen(req) as response:
                data = json.loads(response.read())
            # Find the standard ubuntu x64 build
            url = next(a["browser_download_url"] for a in data["assets"]
                       if "ubuntu-x64.zip" in a["name"])
        except Exception as e:
            print(f"[SETUP] API rate limit hit, using reliable fallback link. ({e})")
            url = "https://github.com/ggerganov/llama.cpp/releases/download/b4300/llama-b4300-bin-ubuntu-x64.zip"

        print(f"[SETUP] Downloading engine from: {url}")
        urllib.request.urlretrieve(url, zip_path)
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(LLAMA_BIN_DIR)
        os.remove(zip_path)

        # Locate the extracted llama-server binary, make it executable,
        # and move it to the expected path.
        for root, _, files in os.walk(LLAMA_BIN_DIR):
            if "llama-server" in files:
                found_exe = os.path.join(root, "llama-server")
                os.chmod(found_exe, 0o755)
                if found_exe != str(LLAMA_EXE):
                    os.rename(found_exe, str(LLAMA_EXE))
                break

    # 3. Boot the server with 4 safe threads
    threads = "4"
    port = "8089"
    print(f"[SETUP] Starting Universal Engine on port {port}...")
    proc = subprocess.Popen([
        str(LLAMA_EXE),
        "-m", model_path,
        "-c", "4096",
        "--port", port,
        "--host", "127.0.0.1",
        "-t", threads
    ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)

    def stream_logs():
        for line in proc.stdout:
            print(f"[ENGINE] {line.strip()}")

    threading.Thread(target=stream_logs, daemon=True).start()

    # 4. Wait for the server to wake up
    for attempt in range(40):
        try:
            if requests.get(f"http://127.0.0.1:{port}/").status_code == 200:
                print("\n[SETUP] Universal Engine is ONLINE and ready!\n")
                return True, port
        except requests.exceptions.ConnectionError:
            pass
        time.sleep(1)

    print("\n[SETUP] FAILED to start. Check the [ENGINE] lines above.\n")
    return False, port


backend_ready, engine_port = setup_and_start_backend()
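# Optional debugging helper, added here as a minimal sketch; it is not called
# anywhere. It assumes the llama.cpp server exposes its OpenAI-compatible
# /v1/chat/completions endpoint on engine_port, which is the same endpoint
# generate_response() below relies on. The prompt text is just an example.
def _engine_smoke_test(prompt: str = "Say hello in five words.") -> str:
    """Send one raw request straight to the local llama-server and return the reply text."""
    res = requests.post(
        f"http://127.0.0.1:{engine_port}/v1/chat/completions",
        json={"messages": [{"role": "user", "content": prompt}], "max_tokens": 32},
        timeout=60,
    ).json()
    return res["choices"][0]["message"]["content"].strip()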
# ══════════════════════════════════════════════════════════════════
# CHAT MEMORY
# ══════════════════════════════════════════════════════════════════
sessions = {}
sessions_lock = threading.Lock()


def get_memory(sid: str) -> list:
    with sessions_lock:
        return list(sessions.get(sid, []))


def add_to_memory(sid: str, role: str, content: str):
    with sessions_lock:
        sessions.setdefault(sid, [])
        sessions[sid].append({"role": role, "content": content})
        # Keep at most MAX_MEMORY user/assistant exchanges per session
        if len(sessions[sid]) > MAX_MEMORY * 2:
            sessions[sid] = sessions[sid][-(MAX_MEMORY * 2):]


# ══════════════════════════════════════════════════════════════════
# UNIVERSAL GENERATION (uses the OpenAI API mode to auto-format any model)
# ══════════════════════════════════════════════════════════════════
def generate_response(user_input: str, session_id: str) -> str:
    if not backend_ready:
        return "[sad] My core engine failed to start. Please check the logs."

    memory = get_memory(session_id)
    recent = memory[-(6 * 2):]

    # Build an OpenAI-compliant message list
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for msg in recent:
        role = "user" if msg["role"] == "user" else "assistant"
        messages.append({"role": role, "content": msg["content"]})
    messages.append({"role": "user", "content": user_input})

    payload = {
        "messages": messages,
        "max_tokens": MAX_NEW_TOKENS,
        "temperature": 0.90,
        "top_k": 50,
        "top_p": 0.95,
        "presence_penalty": 1.1,
        "stream": False
    }

    try:
        # We call the /v1/chat/completions endpoint, so llama.cpp reads the chat
        # template embedded in the GGUF file and applies the right formatting itself.
        res = requests.post(
            f"http://127.0.0.1:{engine_port}/v1/chat/completions",
            json=payload, timeout=60
        ).json()
        response = res["choices"][0]["message"]["content"].strip()
    except Exception as exc:
        print(f"[GENERATE] Error communicating with engine: {exc}")
        traceback.print_exc()
        return "[sad] Something went wrong in my mind. Could you say that again?"

    # Post-process cleanup
    if "\n\n" in response:
        response = response.split("\n\n")[0].strip()
    if not response or len(response) < 3:
        response = "[thinking] I lost my train of thought. Could you say that again?"

    # Ensure a default emotion tag is present
    if not EMOTION_RE.search(response):
        response = "[default] " + response

    add_to_memory(session_id, "user", user_input)
    add_to_memory(session_id, "assistant", response)
    return response
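# Illustrative helper (a sketch, not called anywhere): shows how a reply from
# generate_response() is split by extract_emotions() into tags for the avatar
# and clean spoken text for TTS. The sample reply in the comments is hypothetical.
def _example_round_trip(session_id: str = "demo-session"):
    reply = generate_response("good morning", session_id)
    # reply might look like: "[lite_happy] Morning, dear. Did you, sleep well?"
    emotions, spoken = extract_emotions(reply)
    # emotions -> ["lite_happy"]; spoken -> "Morning, dear. Did you, sleep well?"
    return emotions, spoken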
# ══════════════════════════════════════════════════════════════════
# EDGE-TTS
# ══════════════════════════════════════════════════════════════════
async def _async_tts(text: str, rate: int, pitch: int) -> bytes:
    rate_str = f"+{rate}%" if rate >= 0 else f"{rate}%"
    pitch_str = f"+{pitch}Hz" if pitch >= 0 else f"{pitch}Hz"
    comm = edge_tts.Communicate(text, TTS_VOICE, rate=rate_str, pitch=pitch_str)
    audio = b""
    async for chunk in comm.stream():
        if chunk["type"] == "audio":
            audio += chunk["data"]
    return audio


def synthesize_speech(text: str, rate: int = 0, pitch: int = 0):
    clean = clean_for_tts(text)
    if not clean or len(clean) < 2:
        return None
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        audio = loop.run_until_complete(_async_tts(clean, rate, pitch))
    except Exception as exc:
        print(f"[TTS] Error: {exc}")
        return None
    finally:
        loop.close()
    return base64.b64encode(audio).decode() if audio else None
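# Illustrative helper (a sketch, not called anywhere): synthesize_speech() returns
# base64-encoded MP3 bytes from edge-tts, so a caller can decode and write them to
# disk like this. The filename "ana_reply.mp3" is only an example.
def _save_tts_to_file(text: str, path: str = "ana_reply.mp3") -> bool:
    audio_b64 = synthesize_speech(text, rate=TTS_RATE, pitch=TTS_PITCH)
    if not audio_b64:
        return False
    Path(path).write_bytes(base64.b64decode(audio_b64))
    return True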
""" # ══════════════════════════════════════════════════════════════════ # FLASK # ══════════════════════════════════════════════════════════════════ app = Flask(__name__) @app.route("/") def index(): return Response(HTML_PAGE, mimetype="text/html") @app.route("/api/images") def api_images(): if not IMG_DIR.exists(): return jsonify([]) files = [f.stem for f in IMG_DIR.glob("*.png")] return jsonify(files) @app.route("/img/") def serve_img(filename: str): safe = Path(filename).name target = IMG_DIR / safe if target.exists() and target.is_file(): return send_from_directory(str(IMG_DIR), safe) fallback = IMG_DIR / "default.png" if fallback.exists() and fallback.is_file(): return send_from_directory(str(IMG_DIR), "default.png") return Response("", status=404) @app.route("/chat", methods=["POST"]) def chat(): data = request.json or {} user_input = data.get("message", "").strip() session_id = data.get("session_id", str(uuid.uuid4())) if not user_input: return jsonify({"error": "Empty message"}), 400 try: resp = generate_response(user_input, session_id) except Exception as exc: print(f"[CHAT] Error: {exc}") traceback.print_exc() resp = "[sad] I encountered an unexpected error. Please try again." return jsonify({"response": resp, "session_id": session_id}) @app.route("/tts", methods=["POST"]) def tts_endpoint(): data = request.json or {} text = data.get("text", "").strip() rate = int(data.get("rate", TTS_RATE)) pitch = int(data.get("pitch", TTS_PITCH)) if not text: return jsonify({"error": "Empty text"}), 400 audio_b64 = synthesize_speech(text, rate=rate, pitch=pitch) return jsonify({"audio": audio_b64}) @app.route("/clear", methods=["POST"]) def clear(): data = request.json or {} sid = data.get("session_id", "") with sessions_lock: sessions.pop(sid, None) return jsonify({"status": "cleared"}) @app.route("/health") def health(): return jsonify({ "backend_ready": backend_ready, "type": "native-llama-server" }) if __name__ == "__main__": print("Visual AI is online -- http://0.0.0.0:7860") app.run(host="0.0.0.0", port=7860, threaded=True)