# %cd /content/omnivoice-colab
"""Gradio demo entry point for OmniVoice TTS (Voice Clone / Voice Design)."""
import os
import re
import sys
import uuid
import logging
import tempfile
from typing import Any, Dict

import gradio as gr
import numpy as np
import torch
import scipy.io.wavfile as wavfile

# Directory where generated .wav files are written (see tts_file_name()).
temp_audio_dir = "./Omni_Audio"
os.makedirs(temp_audio_dir, exist_ok=True)

# ---------------------------------------------------------------------------
# Setup path to import subtitle_maker from /content/omnivoice-colab/OmniVoice/
# ---------------------------------------------------------------------------
OmniVoice_path = os.path.join(os.getcwd(), "OmniVoice")
sys.path.append(OmniVoice_path)

from subtitle import subtitle_maker

# Attempt to import Whisper's supported language dict to filter unsupported
# languages; fall back to None, in which case every language is assumed supported.
try:
    from subtitle import LANGUAGE_CODE as WHISPER_LANGUAGE_CODE
except ImportError:
    WHISPER_LANGUAGE_CODE = None

from omnivoice import OmniVoice, OmniVoiceGenerationConfig
from omnivoice.utils.lang_map import LANG_NAMES, lang_display_name

# ---------------------------------------------------------------------------
# Logging Setup
# ---------------------------------------------------------------------------
logging.basicConfig(
    level=logging.WARNING,
    format="%(asctime)s %(name)s %(levelname)s: %(message)s",
)
logging.getLogger("omnivoice").setLevel(logging.DEBUG)

# ---------------------------------------------------------------------------
# Model Loading (Global Scope)
# ---------------------------------------------------------------------------
print("Loading model from k2-fsa/OmniVoice to cuda ...")
from hf_mirror import download_model

try:
    # Fast path: load straight from the Hugging Face hub / local cache.
    model = OmniVoice.from_pretrained(
        "k2-fsa/OmniVoice",
        device_map="cuda",
        dtype=torch.float16,
        load_asr=False,
    )
except Exception as e:
    # The hub may be unreachable (e.g. behind a firewall); log the reason and
    # retry via the mirror downloader instead of failing silently.
    logging.warning("Direct hub load failed (%s); downloading via mirror.", e)
    omnivoice_model_path = download_model(
        "k2-fsa/OmniVoice",
        download_folder="./OmniVoice_Model",
        redownload=False,
        workers=6,
        use_snapshot=False,
    )
    model = OmniVoice.from_pretrained(
        omnivoice_model_path,
        device_map="cuda",
        dtype=torch.float16,
        load_asr=False,
    )

sampling_rate = model.sampling_rate
print("Model loaded successfully!")
# ---------------------------------------------------------------------------
# Event Tags & JS Functions
# ---------------------------------------------------------------------------
# Paralinguistic event tags understood by the model; surfaced in the UI as
# clickable buttons that insert the tag into the prompt textbox.
EVENT_TAGS = [
    "[laughter]", "[sigh]", "[confirmation-en]", "[question-en]",
    "[question-ah]", "[question-oh]", "[question-ei]", "[question-yi]",
    "[surprise-ah]", "[surprise-oh]", "[surprise-wa]", "[surprise-yo]",
    "[dissatisfaction-hnn]"
]


def _make_insert_tag_js(elem_id):
    """Build the caret-aware tag-insertion JS for one textbox.

    The returned JS arrow function inserts ``tag_val`` at the cursor position
    of the textarea inside the Gradio component with DOM id ``elem_id``,
    adding surrounding spaces only where needed. The VC and VD snippets were
    previously duplicated verbatim except for this id.
    """
    return """
(tag_val, current_text) => {
    const textarea = document.querySelector('#%s textarea');
    if (!textarea) return current_text + " " + tag_val;
    const start = textarea.selectionStart;
    const end = textarea.selectionEnd;
    let prefix = " ";
    let suffix = " ";
    if (!current_text) return tag_val;
    if (start === 0) prefix = "";
    else if (current_text[start - 1] === ' ') prefix = "";
    if (end < current_text.length && current_text[end] === ' ') suffix = "";
    return current_text.slice(0, start) + prefix + tag_val + suffix + current_text.slice(end);
}
""" % elem_id


# JS for Voice Clone / Voice Design tab textboxes (identical except for the id).
INSERT_TAG_JS_VC = _make_insert_tag_js("vc_textbox")
INSERT_TAG_JS_VD = _make_insert_tag_js("vd_textbox")

# ---------------------------------------------------------------------------
# UI Configurations & Language Mappings
# ---------------------------------------------------------------------------
_ALL_LANGUAGES = ["Auto"] + sorted(lang_display_name(n) for n in LANG_NAMES)

# Voice-design attribute categories shown as grouped choices in the UI.
_CATEGORIES = {
    "Gender": ["Male", "Female"],
    "Age": ["Child", "Teenager", "Young Adult", "Middle-aged", "Elderly"],
    "Pitch": ["Very Low Pitch", "Low Pitch", "Moderate Pitch", "High Pitch", "Very High Pitch"],
    "Style": ["Whisper"],
    "English Accent": [
        "American Accent", "Australian Accent", "British Accent", "Chinese Accent",
        "Canadian Accent", "Indian Accent", "Korean Accent", "Portuguese Accent",
        "Russian Accent", "Japanese Accent"
    ],
    "Chinese Dialect": [
        "Henan Dialect", "Shaanxi Dialect", "Sichuan Dialect", "Guizhou Dialect",
        "Yunnan Dialect", "Guilin Dialect", "Jinan Dialect", "Shijiazhuang Dialect",
        "Gansu Dialect", "Ningxia Dialect", "Qingdao Dialect", "Northeast Dialect"
    ],
}

# Maps the English dialect labels shown in the UI to the Chinese names the
# model expects in its instruct prompt.
DIALECT_MAP = {
    "Henan Dialect": "河南话",
    "Shaanxi Dialect": "陕西话",
    "Sichuan Dialect": "四川话",
    "Guizhou Dialect": "贵州话",
    "Yunnan Dialect": "云南话",
    "Guilin Dialect": "桂林话",
    "Jinan Dialect": "济南话",
    "Shijiazhuang Dialect": "石家庄话",
    "Gansu Dialect": "甘肃话",
    "Ningxia Dialect": "宁夏话",
    "Qingdao Dialect": "青岛话",
    "Northeast Dialect": "东北话",
}

# Extra helper text shown next to the corresponding category dropdowns.
_ATTR_INFO = {
    "English Accent": "Only effective for English speech.",
    "Chinese Dialect": "Only effective for Chinese speech.",
}


# ---------------------------------------------------------------------------
# Core Logic & Helpers
# ---------------------------------------------------------------------------
def _is_whisper_supported(lang):
    """Check if the selected language is supported by Whisper to save processing time.

    Fails open: returns True for empty/"Auto" languages and when no Whisper
    language table could be imported. Otherwise does a loose bidirectional
    substring match against Whisper's language codes and names.
    """
    if not lang or lang == "Auto":
        return True
    if WHISPER_LANGUAGE_CODE is None:
        return True
    candidates = [str(k).lower() for k in WHISPER_LANGUAGE_CODE.keys()]
    candidates += [str(v).lower() for v in WHISPER_LANGUAGE_CODE.values()]
    lang_lower = lang.lower()
    return any(c in lang_lower or lang_lower in c for c in candidates)


def generate_subtitles_if_needed(wav_path, lang, want_subs):
    """Generate subtitles only if the user requested them and the language is supported.

    Returns three subtitle artifacts (indices 1-3 of the ``subtitle_maker``
    result — presumably subtitle file paths/text; confirm against subtitle.py),
    or ``(None, None, None)`` when skipped or on failure.
    """
    if not want_subs:
        return None, None, None
    if not _is_whisper_supported(lang):
        logging.warning(f"Language '{lang}' is likely unsupported by Whisper. Skipping subtitle generation.")
        return None, None, None
    try:
        whisper_lang = lang if (lang and lang != "Auto") else None
        whisper_results = subtitle_maker(wav_path, whisper_lang)
        if whisper_results and len(whisper_results) > 3:
            return whisper_results[1], whisper_results[2], whisper_results[3]
    except Exception as e:
        # Best-effort feature: log and fall through to the "no subtitles" result.
        logging.warning(f"Subtitle generation failed: {e}")
    return None, None, None


def tts_file_name(text, language="en"):
    """Build a unique output .wav path under ``temp_audio_dir``.

    The name is ``<first-20-letters-of-text>_<language>_<8-hex-suffix>.wav``;
    non-letter characters are stripped from the text part and whitespace in
    the language is collapsed to underscores.
    """
    # --- Clean text ---
    clean_text = re.sub(r'[^a-zA-Z\s]', '', text)  # keep only letters + spaces
    clean_text = clean_text.lower().strip().replace(" ", "_")
    if not clean_text:
        clean_text = "audio"
    # --- Truncate ---
    truncated = clean_text[:20]
    # --- Clean language ---
    lang = re.sub(r'\s+', '_', language.strip().lower()) if language else "unknown"
    # --- Random suffix (collision avoidance across runs) ---
    rand = uuid.uuid4().hex[:8].upper()
    return f"{temp_audio_dir}/{truncated}_{lang}_{rand}.wav"


def _gen_core(
    text,
    language,
    ref_audio,
    instruct,
    num_step,
    guidance_scale,
    denoise,
    speed,
    duration,
    preprocess_prompt,
    postprocess_output,
    mode,
    ref_text=None,
):
    """Core Text-to-Speech generation shared by the Clone and Design tabs.

    Parameters
    ----------
    mode : str
        "clone" (requires ``ref_audio``; ``ref_text`` auto-transcribed via
        Whisper when missing) or "design" (optional ``instruct`` prompt).

    Returns
    -------
    tuple
        ``((sampling_rate, int16 waveform), "Done.")`` on success, or
        ``(None, <error message>)`` on failure.
    """
    if not text or not text.strip():
        return None, "Please enter the text to synthesize."

    # Clone mode with no reference transcript: fall back to Whisper transcription.
    if mode == "clone" and ref_audio and not ref_text:
        try:
            whisper_lang = language if (language and language != "Auto") else None
            whisper_results = subtitle_maker(ref_audio, whisper_lang)
            if whisper_results and len(whisper_results) > 7:
                ref_text = whisper_results[7]
        except Exception as e:
            # Non-fatal: the model can clone without a transcript, just less accurately.
            logging.warning(f"Fallback transcription failed: {e}")

    gen_config = OmniVoiceGenerationConfig(
        num_step=int(num_step or 32),
        guidance_scale=float(guidance_scale) if guidance_scale is not None else 2.0,
        denoise=bool(denoise) if denoise is not None else True,
        preprocess_prompt=bool(preprocess_prompt),
        postprocess_output=bool(postprocess_output),
    )

    lang = language if (language and language != "Auto") else None
    kw: Dict[str, Any] = dict(text=text.strip(), language=lang, generation_config=gen_config)
    # Only forward non-default pacing options so model defaults stay in effect.
    if speed is not None and float(speed) != 1.0:
        kw["speed"] = float(speed)
    if duration is not None and float(duration) > 0:
        kw["duration"] = float(duration)

    if mode == "clone":
        if not ref_audio:
            return None, "Please upload a reference audio."
        kw["voice_clone_prompt"] = model.create_voice_clone_prompt(
            ref_audio=ref_audio, ref_text=ref_text
        )
    elif mode == "design":
        if instruct and instruct.strip():
            kw["instruct"] = instruct.strip()

    try:
        audio = model.generate(**kw)
    except Exception as e:
        return None, f"Error: {type(e).__name__}: {e}"

    # Clip to [-1, 1] before the int16 cast: samples slightly above full scale
    # would otherwise wrap around and produce loud clicks.
    waveform = (np.clip(audio[0], -1.0, 1.0) * 32767).astype(np.int16)
    return (sampling_rate, waveform), "Done."
# --------------------------------------------------------------------------- # Gradio UI Construction # --------------------------------------------------------------------------- theme = gr.themes.Soft(font=["Inter", "Arial", "sans-serif"]) css = """ .gradio-container {max-width: 100% !important; font-size: 16px !important;} .gradio-container h1 {font-size: 1.5em !important;} .gradio-container .prose {font-size: 1.1em !important;} .compact-audio audio {height: 60px !important;} .compact-audio .waveform {min-height: 80px !important;} /* CSS for Event Tags */ .tag-container { display: flex !important; flex-wrap: wrap !important; gap: 8px !important; margin-top: 5px !important; margin-bottom: 10px !important; border: none !important; background: transparent !important; } .tag-btn { min-width: fit-content !important; width: auto !important; height: 32px !important; font-size: 13px !important; background: #eef2ff !important; border: 1px solid #c7d2fe !important; color: #3730a3 !important; border-radius: 6px !important; padding: 0 10px !important; margin: 0 !important; box-shadow: none !important; } .tag-btn:hover { background: #c7d2fe !important; transform: translateY(-1px); } """ def _lang_dropdown(label="Language (optional)", value="Auto"): return gr.Dropdown( label=label, choices=_ALL_LANGUAGES, value=value, allow_custom_value=False, interactive=True, ) def _gen_settings(): with gr.Accordion("Generation Settings (optional)", open=False): sp = gr.Slider(0.5, 1.5, value=1.0, step=0.05, label="Speed", info="1.0 = normal. 
>1 faster, <1 slower.") du = gr.Number(value=None, label="Duration (seconds)", info="Set a fixed duration to override speed.") ns = gr.Slider(4, 64, value=32, step=1, label="Inference Steps", info="Lower = faster, higher = better quality.") dn = gr.Checkbox(label="Denoise", value=True) gs = gr.Slider(0.0, 4.0, value=2.0, step=0.1, label="Guidance Scale (CFG)") pp = gr.Checkbox(label="Preprocess Prompt", value=True, info="Applies silence removal and trims reference audio.") po = gr.Checkbox(label="Postprocess Output", value=True, info="Removes long silences from generated audio.") return ns, gs, dn, sp, du, pp, po with gr.Blocks(theme=theme, css=css, title="OmniVoice Demo") as demo: gr.HTML("""
State-of-the-art text-to-speech model for 600+ languages, supporting Voice Clone and Voice Design.