刘鑫 committed on
Commit
7d9f729
·
1 Parent(s): ced8c00

upgrade to VoxCPM2 + Gradio 6 with full i18n UI

Browse files

- Switch default model from VoxCPM1.5 to VoxCPM2
- Rewrite UI to match VoxCPM/app.py: i18n (en/zh-CN), three generation
modes (Voice Design, Controllable Cloning, Ultimate Cloning),
Control Instruction support, and rich voice examples
- Upgrade to Gradio 6 (theme/css/i18n in launch())
- Keep HF Spaces @spaces.GPU and model pre-download at startup
- Rename logo asset to voxcpm_logo.png

Made-with: Cursor

README.md CHANGED
@@ -1,14 +1,13 @@
1
  ---
2
  title: VoxCPM Demo
3
- emoji: 🌖
4
- colorFrom: yellow
5
- colorTo: pink
6
  sdk: gradio
7
- sdk_version: 5.45.0
8
  app_file: app.py
 
9
  pinned: true
10
  license: apache-2.0
11
- short_description: VoxCPM
12
  ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
  title: VoxCPM Demo
3
+ emoji: 🎙️
4
+ colorFrom: blue
5
+ colorTo: indigo
6
  sdk: gradio
7
+ sdk_version: 6.0.0
8
  app_file: app.py
9
+ python_version: "3.10"
10
  pinned: true
11
  license: apache-2.0
12
+ short_description: VoxCPM2 Speech Synthesis
13
  ---
 
 
app.py CHANGED
@@ -1,412 +1,515 @@
 
1
  import os
 
 
 
 
 
2
  import numpy as np
3
- import torch
4
- import gradio as gr
5
  import spaces
6
- from typing import Optional, Tuple
7
- from pathlib import Path
8
- import tempfile
9
- import soundfile as sf
10
- import time
11
- from datetime import datetime
12
-
13
-
14
- def log(msg: str):
15
- """打印带时间戳的日志"""
16
- timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
17
- print(f"[{timestamp}] {msg}")
18
-
19
-
20
- def setup_cache_env():
21
- """
22
- Setup cache environment variables.
23
- Must be called in GPU worker context as well.
24
- """
25
- _cache_home = os.path.join(os.path.expanduser("~"), ".cache")
26
-
27
- # HuggingFace cache
28
- os.environ["HF_HOME"] = os.path.join(_cache_home, "huggingface")
29
- os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.join(_cache_home, "huggingface", "hub")
30
-
31
- # ModelScope cache (for FunASR SenseVoice)
32
- os.environ["MODELSCOPE_CACHE"] = os.path.join(_cache_home, "modelscope")
33
-
34
- # Torch Hub cache (for some audio models like ZipEnhancer)
35
- os.environ["TORCH_HOME"] = os.path.join(_cache_home, "torch")
36
-
37
- # Create cache directories
38
- for d in [os.environ["HF_HOME"], os.environ["MODELSCOPE_CACHE"], os.environ["TORCH_HOME"]]:
39
- os.makedirs(d, exist_ok=True)
40
-
41
-
42
- # Setup cache in main process BEFORE any imports
43
- setup_cache_env()
44
-
45
- # Limit thread count to avoid OpenBLAS resource errors in ZeroGPU
46
  os.environ["OPENBLAS_NUM_THREADS"] = "4"
47
  os.environ["OMP_NUM_THREADS"] = "4"
48
  os.environ["MKL_NUM_THREADS"] = "4"
49
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
50
  if os.environ.get("HF_REPO_ID", "").strip() == "":
51
- os.environ["HF_REPO_ID"] = "openbmb/VoxCPM1.5"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
- # Global model cache for ZeroGPU
54
  _asr_model = None
55
  _voxcpm_model = None
56
 
57
- # Fixed local paths for models (to avoid repeated downloads in GPU workers)
58
- ASR_LOCAL_DIR = "./models/SenseVoiceSmall"
59
- VOXCPM_LOCAL_DIR = "./models/VoxCPM1.5"
60
-
61
 
62
  def predownload_models():
63
- """
64
- Pre-download models at startup (runs in main process, not GPU worker).
65
- Download to fixed local directories so GPU workers can reuse them.
66
- """
67
- print("=" * 50)
68
- print("Pre-downloading models to local directories...")
69
- print("=" * 50)
70
-
71
- # Pre-download ASR model (SenseVoice) to fixed local directory
72
- if not os.path.isdir(ASR_LOCAL_DIR) or not os.path.exists(os.path.join(ASR_LOCAL_DIR, "model.pt")):
73
  try:
74
- from huggingface_hub import snapshot_download
75
- asr_model_id = "FunAudioLLM/SenseVoiceSmall"
76
- print(f"Pre-downloading ASR model: {asr_model_id} -> {ASR_LOCAL_DIR}")
77
- os.makedirs(ASR_LOCAL_DIR, exist_ok=True)
78
  snapshot_download(
79
- repo_id=asr_model_id,
80
- local_dir=ASR_LOCAL_DIR,
81
  )
82
- print(f"ASR model downloaded to: {ASR_LOCAL_DIR}")
83
- except Exception as e:
84
- print(f"Warning: Failed to pre-download ASR model: {e}")
85
  else:
86
- print(f"ASR model already exists at: {ASR_LOCAL_DIR}")
87
-
88
- # Pre-download VoxCPM model to fixed local directory
89
- if not os.path.isdir(VOXCPM_LOCAL_DIR) or not os.path.exists(os.path.join(VOXCPM_LOCAL_DIR, "model.safetensors")):
 
 
 
 
 
 
90
  try:
91
- from huggingface_hub import snapshot_download
92
- voxcpm_model_id = os.environ.get("HF_REPO_ID", "openbmb/VoxCPM1.5")
93
- print(f"Pre-downloading VoxCPM model: {voxcpm_model_id} -> {VOXCPM_LOCAL_DIR}")
94
- os.makedirs(VOXCPM_LOCAL_DIR, exist_ok=True)
95
- snapshot_download(
96
- repo_id=voxcpm_model_id,
97
- local_dir=VOXCPM_LOCAL_DIR,
98
- )
99
- print(f"VoxCPM model downloaded to: {VOXCPM_LOCAL_DIR}")
100
- except Exception as e:
101
- print(f"Warning: Failed to pre-download VoxCPM model: {e}")
102
  else:
103
- print(f"VoxCPM model already exists at: {VOXCPM_LOCAL_DIR}")
104
-
105
- print("=" * 50)
106
- print("Model pre-download complete!")
107
- print("=" * 50)
108
 
109
 
110
- # Run pre-download at startup
111
  predownload_models()
112
 
113
 
114
  def get_asr_model():
115
- """Lazy load ASR model from local directory."""
116
  global _asr_model
117
  if _asr_model is None:
118
  from funasr import AutoModel
119
- log("=" * 50)
120
- log(f"Loading ASR model from: {ASR_LOCAL_DIR}")
121
- start_time = time.time()
122
  _asr_model = AutoModel(
123
- model=ASR_LOCAL_DIR, # Use local directory path
124
  disable_update=True,
125
- log_level='INFO',
126
- device="cuda:0",
127
  )
128
- load_time = time.time() - start_time
129
- log(f"ASR model loaded. (耗时: {load_time:.2f}s)")
130
- log("=" * 50)
131
  return _asr_model
132
 
133
 
134
  def get_voxcpm_model():
135
- """Lazy load VoxCPM model (without denoiser)."""
136
  global _voxcpm_model
137
  if _voxcpm_model is None:
138
  import voxcpm
139
- log("=" * 50)
140
- log(f"Loading VoxCPM model from: {VOXCPM_LOCAL_DIR}")
141
- start_time = time.time()
142
  _voxcpm_model = voxcpm.VoxCPM(
143
- voxcpm_model_path=VOXCPM_LOCAL_DIR,
144
- optimize=False,
145
- enable_denoiser=False, # Disable denoiser to avoid ZipEnhancer download
146
  )
147
- load_time = time.time() - start_time
148
- log(f"VoxCPM model loaded. (耗时: {load_time:.2f}s)")
149
- log("=" * 50)
150
  return _voxcpm_model
151
 
152
 
153
- @spaces.GPU(duration=120)
154
- def prompt_wav_recognition(prompt_wav: Optional[str]) -> str:
155
- """Use ASR to recognize prompt audio text."""
156
- if prompt_wav is None or not prompt_wav.strip():
 
 
157
  return ""
158
- log("=" * 50)
159
- log("[ASR] 开始语音识别...")
160
  asr_model = get_asr_model()
161
- start_time = time.time()
162
  res = asr_model.generate(input=prompt_wav, language="auto", use_itn=True)
163
- inference_time = time.time() - start_time
164
- text = res[0]["text"].split('|>')[-1]
165
- log(f"[ASR] 识别结果: {text}")
166
- log(f"[ASR] 推理耗时: {inference_time:.2f}s")
167
- log("=" * 50)
168
- return text
169
 
170
 
171
- @spaces.GPU(duration=120)
172
- def generate_tts_audio_gpu(
173
  text_input: str,
174
- prompt_wav_data: Optional[Tuple[np.ndarray, int]] = None,
175
- prompt_text_input: Optional[str] = None,
 
 
176
  cfg_value_input: float = 2.0,
177
- inference_timesteps_input: int = 10,
178
  do_normalize: bool = True,
 
 
179
  ) -> Tuple[int, np.ndarray]:
180
- """
181
- GPU function: Generate speech from text using VoxCPM.
182
- prompt_wav_data is (audio_array, sample_rate) tuple.
183
- """
184
  voxcpm_model = get_voxcpm_model()
185
 
186
  text = (text_input or "").strip()
187
  if len(text) == 0:
188
  raise ValueError("Please input text to synthesize.")
189
 
190
- prompt_text = prompt_text_input if prompt_text_input else None
191
- prompt_wav_path = None
192
-
193
- # If prompt audio data provided, write to temp file for voxcpm
194
- if prompt_wav_data is not None:
195
- audio_array, sr = prompt_wav_data
196
- with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
197
- sf.write(f.name, audio_array, sr)
198
- prompt_wav_path = f.name
199
-
200
- try:
201
- log("=" * 50)
202
- log("[TTS] 开始语音合成...")
203
- log(f"[TTS] 目标文本: {text}")
204
- start_time = time.time()
205
- wav = voxcpm_model.generate(
206
- text=text,
207
- prompt_text=prompt_text,
208
- prompt_wav_path=prompt_wav_path,
209
- cfg_value=float(cfg_value_input),
210
- inference_timesteps=int(inference_timesteps_input),
211
- normalize=do_normalize,
212
- denoise=False, # Denoiser disabled
213
- )
214
- inference_time = time.time() - start_time
215
- audio_duration = len(wav) / voxcpm_model.tts_model.sample_rate
216
- rtf = inference_time / audio_duration if audio_duration > 0 else 0
217
- log(f"[TTS] 推理耗时: {inference_time:.2f}s | 音频时长: {audio_duration:.2f}s | RTF: {rtf:.3f}")
218
- log("=" * 50)
219
- return (voxcpm_model.tts_model.sample_rate, wav)
220
- finally:
221
- # Cleanup temp file
222
- if prompt_wav_path and os.path.exists(prompt_wav_path):
223
- try:
224
- os.unlink(prompt_wav_path)
225
- except Exception:
226
- pass
227
 
 
 
 
 
228
 
229
- def generate_tts_audio(
230
- text_input: str,
231
- prompt_wav_path_input: Optional[str] = None,
232
- prompt_text_input: Optional[str] = None,
233
- cfg_value_input: float = 2.0,
234
- inference_timesteps_input: int = 10,
235
- do_normalize: bool = True,
236
- ) -> Tuple[int, np.ndarray]:
237
- """
238
- Wrapper: Read audio file in CPU, then call GPU function.
239
- """
240
- prompt_wav_data = None
241
-
242
- # Read audio file before entering GPU context
243
- if prompt_wav_path_input and os.path.exists(prompt_wav_path_input):
244
- try:
245
- audio_array, sr = sf.read(prompt_wav_path_input, dtype='float32')
246
- prompt_wav_data = (audio_array, sr)
247
- print(f"Loaded prompt audio: {audio_array.shape}, sr={sr}")
248
- except Exception as e:
249
- print(f"Warning: Failed to load prompt audio: {e}")
250
- prompt_wav_data = None
251
-
252
- return generate_tts_audio_gpu(
253
- text_input=text_input,
254
- prompt_wav_data=prompt_wav_data,
255
- prompt_text_input=prompt_text_input,
256
- cfg_value_input=cfg_value_input,
257
- inference_timesteps_input=inference_timesteps_input,
258
- do_normalize=do_normalize,
259
  )
 
 
 
 
 
 
 
260
 
261
 
262
- # ---------- UI Builders ----------
 
263
 
264
  def create_demo_interface():
265
- """Build the Gradio UI for VoxCPM demo."""
266
- # static assets (logo path)
267
- try:
268
- gr.set_static_paths(paths=[Path.cwd().absolute()/"assets"])
269
- except Exception:
270
- pass
271
-
272
- with gr.Blocks(
273
- theme=gr.themes.Soft(
274
- primary_hue="blue",
275
- secondary_hue="gray",
276
- neutral_hue="slate",
277
- font=[gr.themes.GoogleFont("Inter"), "Arial", "sans-serif"]
278
- ),
279
- css="""
280
- .logo-container {
281
- text-align: center;
282
- margin: 0.5rem 0 1rem 0;
283
- }
284
- .logo-container img {
285
- height: 80px;
286
- width: auto;
287
- max-width: 200px;
288
- display: inline-block;
289
- }
290
- /* Bold accordion labels */
291
- #acc_quick details > summary,
292
- #acc_tips details > summary {
293
- font-weight: 600 !important;
294
- font-size: 1.1em !important;
295
- }
296
- /* Bold labels for specific checkboxes */
297
- #chk_denoise label,
298
- #chk_denoise span,
299
- #chk_normalize label,
300
- #chk_normalize span {
301
- font-weight: 600;
302
- }
303
- """
304
- ) as interface:
305
- # Header logo
306
- gr.HTML('<div class="logo-container"><img src="/gradio_api/file=assets/voxcpm-logo.png" alt="VoxCPM Logo"></div>')
307
-
308
- # Quick Start
309
- with gr.Accordion("📋 Quick Start Guide |快速入门", open=False, elem_id="acc_quick"):
310
- gr.Markdown("""
311
- ### How to Use |使用说明
312
- 1. **(Optional) Provide a Voice Prompt** - Upload or record an audio clip to provide the desired voice characteristics for synthesis.
313
- **(可选)提供参考声音** - 上传或录制一段音频,为声音合成提供音色、语调和情感等个性化特征
314
- 2. **(Optional) Enter prompt text** - If you provided a voice prompt, enter the corresponding transcript here (auto-recognition available).
315
- **(可选项)输入参考文本** - 如果提供了参考语音,请输入其对应的文本内容(支持自动识别)。
316
- 3. **Enter target text** - Type the text you want the model to speak.
317
- **输入目标文本** - 输入您希望模型朗读的文字内容。
318
- 4. **Generate Speech** - Click the "Generate" button to create your audio.
319
- **生成语音** - 点击"生成"按钮,即可为您创造出音频。
320
- """)
321
-
322
- # Pro Tips
323
- with gr.Accordion("💡 Pro Tips |使用建议", open=False, elem_id="acc_tips"):
324
- gr.Markdown("""
325
- ### Text Normalization|文本正则化
326
- - **Enable** to process general text with an external WeTextProcessing component.
327
- **启用**:使用 WeTextProcessing 组件,可支持常见文本的正则化处理。
328
- - **Disable** to use VoxCPM's native text understanding ability. For example, it supports phonemes input (For Chinese, phonemes are converted using pinyin, {ni3}{hao3}; For English, phonemes are converted using CMUDict, {HH AH0 L OW1}), try it!
329
- **禁用**:将使用 VoxCPM 内置的文本理解能力。如,支持音素输入(如中文转拼音:{ni3}{hao3};英文转CMUDict:{HH AH0 L OW1})和公式符号合成,尝试一下!
330
-
331
- ### CFG Value|CFG 值
332
- - **Lower CFG** if the voice prompt sounds strained or expressive, or instability occurs with long text input.
333
- **调低**:如果提示语音听起来不自然或过于夸张,或者长文本输入出现稳定性问题。
334
- - **Higher CFG** for better adherence to the prompt speech style or input text, or instability occurs with too short text input.
335
- **调高**:为更好地贴合提示音频的风格或输入文本, 或者极短文本输入出现稳定性问题。
336
-
337
- ### Inference Timesteps|推理时间步
338
- - **Lower** for faster synthesis speed.
339
- **调低**:合成速度更快。
340
- - **Higher** for better synthesis quality.
341
- **调高**:合成质量更佳。
342
- """)
343
-
344
- # Main controls
345
  with gr.Row():
346
  with gr.Column():
347
- prompt_wav = gr.Audio(
348
- sources=["upload", 'microphone'],
349
  type="filepath",
350
- label="Prompt Speech (Optional, or let VoxCPM improvise)",
351
- value="./examples/example.wav",
352
  )
353
- with gr.Row():
354
- prompt_text = gr.Textbox(
355
- value="Just by listening a few minutes a day, you'll be able to eliminate negative thoughts by conditioning your mind to be more positive.",
356
- label="Prompt Text",
357
- placeholder="Please enter the prompt text. Automatic recognition is supported, and you can correct the results yourself..."
358
- )
359
- run_btn = gr.Button("Generate Speech", variant="primary")
360
-
361
- with gr.Column():
362
- cfg_value = gr.Slider(
363
- minimum=1.0,
364
- maximum=3.0,
365
- value=2.0,
366
- step=0.1,
367
- label="CFG Value (Guidance Scale)",
368
- info="Higher values increase adherence to prompt, lower values allow more creativity"
369
  )
370
- inference_timesteps = gr.Slider(
371
- minimum=4,
372
- maximum=30,
373
- value=10,
374
- step=1,
375
- label="Inference Timesteps",
376
- info="Number of inference timesteps for generation (higher values may improve quality but slower)"
377
  )
378
- with gr.Row():
379
- text = gr.Textbox(
380
- value="VoxCPM is an innovative end-to-end TTS model from ModelBest, designed to generate highly realistic speech.",
381
- label="Target Text",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
382
  )
383
- with gr.Row():
384
  DoNormalizeText = gr.Checkbox(
385
  value=False,
386
- label="Text Normalization",
387
- elem_id="chk_normalize",
388
- info="We use wetext library to normalize the input text."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
389
  )
390
- audio_output = gr.Audio(label="Output Audio")
391
 
392
- # Wiring
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
393
  run_btn.click(
394
  fn=generate_tts_audio,
395
- inputs=[text, prompt_wav, prompt_text, cfg_value, inference_timesteps, DoNormalizeText],
 
 
 
 
 
 
 
 
 
 
396
  outputs=[audio_output],
397
  show_progress=True,
398
  api_name="generate",
399
  )
400
- prompt_wav.change(fn=prompt_wav_recognition, inputs=[prompt_wav], outputs=[prompt_text])
401
 
402
  return interface
403
 
404
 
405
- def run_demo(server_name: str = "0.0.0.0", server_port: int = 7860, show_error: bool = True):
 
 
406
  interface = create_demo_interface()
407
- # Recommended to enable queue on Spaces for better throughput
408
- interface.queue(max_size=10).launch(server_name=server_name, server_port=server_port, show_error=show_error)
 
 
 
 
 
 
409
 
410
 
411
  if __name__ == "__main__":
412
- run_demo()
 
1
+ import logging
2
  import os
3
+ import sys
4
+ from pathlib import Path
5
+ from typing import Optional, Tuple
6
+
7
+ import gradio as gr
8
  import numpy as np
 
 
9
  import spaces
10
+ import torch
11
+
12
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  os.environ["OPENBLAS_NUM_THREADS"] = "4"
14
  os.environ["OMP_NUM_THREADS"] = "4"
15
  os.environ["MKL_NUM_THREADS"] = "4"
16
+
17
  if os.environ.get("HF_REPO_ID", "").strip() == "":
18
+ os.environ["HF_REPO_ID"] = "openbmb/VoxCPM2"
19
+
20
+ logging.basicConfig(
21
+ level=logging.INFO,
22
+ format="%(asctime)s - %(levelname)s - %(message)s",
23
+ handlers=[logging.StreamHandler(sys.stdout)],
24
+ )
25
+ logger = logging.getLogger(__name__)
26
+
27
+ # ---------- Inline i18n (en + zh-CN only) ----------
28
+
29
+ _USAGE_INSTRUCTIONS_EN = (
30
+ "**VoxCPM2 — Three Modes of Speech Generation:**\n\n"
31
+ "🎨 **Voice Design** — Create a brand-new voice \n"
32
+ "No reference audio required. Describe the desired voice characteristics "
33
+ "(gender, age, tone, emotion, pace …) in **Control Instruction**, and VoxCPM2 "
34
+ "will craft a unique voice from your description alone.\n\n"
35
+ "🎛️ **Controllable Cloning** — Clone a voice with optional style guidance \n"
36
+ "Upload a reference audio clip, then use **Control Instruction** to steer "
37
+ "emotion, speaking pace, and overall style while preserving the original timbre.\n\n"
38
+ "🎙️ **Ultimate Cloning** — Reproduce every vocal nuance through audio continuation \n"
39
+ "Turn on **Ultimate Cloning Mode** and provide (or auto-transcribe) the reference audio's transcript. "
40
+ "The model treats the reference clip as a spoken prefix and seamlessly **continues** from it, faithfully preserving every vocal detail."
41
+ "Note: This mode will disable Control Instruction."
42
+ )
43
+
44
+ _EXAMPLES_FOOTER_EN = (
45
+ "---\n"
46
+ "**💡 Voice Description Examples:** \n"
47
+ "Try the following Control Instructions to explore different voices: \n\n"
48
+ "**Example 1 — Gentle & Melancholic Girl** \n"
49
+ '`Control Instruction`: *"A young girl with a soft, sweet voice. '
50
+ 'Speaks slowly with a melancholic, slightly tsundere tone."* \n'
51
+ '`Target Text`: *"I never asked you to stay… It\'s not like I care or anything. '
52
+ 'But… why does it still hurt so much now that you\'re gone?"* \n\n'
53
+ "**Example 2 — Laid-Back Surfer Dude** \n"
54
+ '`Control Instruction`: *"Relaxed young male voice, slightly nasal, '
55
+ 'lazy drawl, very casual and chill."* \n'
56
+ '`Target Text`: *"Dude, did you see that set? The waves out there are totally gnarly today. '
57
+ "Just catching barrels all morning — it's like, totally righteous, you know what I mean?\"*"
58
+ )
59
+
60
+ _USAGE_INSTRUCTIONS_ZH = (
61
+ "**VoxCPM2 — 三种语音生成方式:**\n\n"
62
+ "🎨 **声音设计(Voice Design)** \n"
63
+ "无需参考音频。在 **Control Instruction** 中描述目标音色特征"
64
+ "(性别、年龄、语气、情绪、语速等),VoxCPM2 即可为你从零创造独一无二的声音。\n\n"
65
+ "🎛️ **可控克隆(Controllable Cloning)** \n"
66
+ "上传参考音频,同时可选地使用 **Control Instruction** 来指定情绪、语速、风格等表达方式,"
67
+ "在保留原始音色的基础上灵活控制说话风格。\n\n"
68
+ "🎙️ **极致克隆(Ultimate Cloning)** \n"
69
+ "开启 **极致克隆模式** 并提供参考音频的文字内容(可自动识别)。"
70
+ "模型会将参考音频视为已说出的前文,以**音频续写**的方式完整还原参考音频中的所有声音细节。"
71
+ "注意:该模式与可控克隆模式互斥,将禁用Control Instruction。\n\n"
72
+ )
73
+
74
+ _EXAMPLES_FOOTER_ZH = (
75
+ "---\n"
76
+ "**💡 声音描述示例(中英文均可):** \n\n"
77
+ "**示例 1 — 深宫太后** \n"
78
+ '`Control Instruction`: *"中老年女性,声音低沉阴冷,语速缓慢而有力,'
79
+ '字字深思熟虑,带有深不可测的城府与威慑感。"* \n'
80
+ '`Target Text`: *"哀家在这深宫待了四十年,什么风浪没见过?你以为瞒得过哀家?"* \n\n'
81
+ "**示例 2 — 暴躁驾校教练** \n"
82
+ '`Control Instruction`: *"暴躁的中年男声,语速快,充满无奈和愤怒"* \n'
83
+ '`Target Text`: *"踩离合!踩刹车啊!你往哪儿开呢?前面是树你看不见吗?'
84
+ '我教了你八百遍了,打死方向盘!你是不是想把车给我开到沟里去?"* \n\n'
85
+ "---\n"
86
+ "**🗣️ 方言生成指南:** \n"
87
+ "要生成地道的方言语音,请在 **Target Text** 中直接使用方言词汇和句式,"
88
+ "并在 **Control Instruction** 中描述方言特征。 \n\n"
89
+ "**示例 — 广东话** \n"
90
+ '`Control Instruction`: *"粤语,中年男性,语气平淡"* \n'
91
+ '✅ 正确(粤语表达):*"伙計,唔該一個A餐,凍奶茶少甜!"* \n'
92
+ '❌ 错误(普通话原文):*"伙计,麻烦来一个A餐,冻奶茶少甜!"* \n\n'
93
+ "**示例 — 河南话** \n"
94
+ '`Control Instruction`: *"河南话,接地气的大叔"* \n'
95
+ '✅ 正确(河南话表达):*"恁这是弄啥嘞?晌午吃啥饭?"* \n'
96
+ '❌ 错误(普通话原文):*"你这是在干什么呢?中午吃什么饭?"* \n\n'
97
+ "🤖 **小技巧:** 不知道方言怎么写?可以用豆包、DeepSeek、Kimi 等 AI 助手"
98
+ "将普通话翻译为方言文本,再粘贴到 Target Text 中即可。 \n\n"
99
+ )
100
+
101
+ _I18N_TRANSLATIONS = {
102
+ "en": {
103
+ "reference_audio_label": "🎤 Reference Audio (optional — upload for cloning)",
104
+ "show_prompt_text_label": "🎙️ Ultimate Cloning Mode (transcript-guided cloning)",
105
+ "show_prompt_text_info": "Auto-transcribes reference audio for every vocal nuance reproduced. Control Instruction will be disabled when active.",
106
+ "prompt_text_label": "Transcript of Reference Audio (auto-filled via ASR, editable)",
107
+ "prompt_text_placeholder": "The transcript of your reference audio will appear here …",
108
+ "control_label": "🎛️ Control Instruction (optional — supports Chinese & English)",
109
+ "control_placeholder": "e.g. A warm young woman / 年轻女性,温柔甜美 / Excited and fast-paced",
110
+ "target_text_label": "✍️ Target Text — the content to speak",
111
+ "generate_btn": "🔊 Generate Speech",
112
+ "generated_audio_label": "Generated Audio",
113
+ "advanced_settings_title": "⚙️ Advanced Settings",
114
+ "ref_denoise_label": "Reference audio enhancement",
115
+ "ref_denoise_info": "Apply ZipEnhancer denoising to the reference audio before cloning",
116
+ "normalize_label": "Text normalization",
117
+ "normalize_info": "Normalize numbers, dates, and abbreviations via wetext",
118
+ "cfg_label": "CFG (guidance scale)",
119
+ "cfg_info": "Higher → closer to the prompt / reference; lower → more creative variation",
120
+ "dit_steps_label": "LocDiT flow-matching steps",
121
+ "dit_steps_info": "LocDiT flow-matching steps — more steps → maybe better audio quality, but slower",
122
+ "usage_instructions": _USAGE_INSTRUCTIONS_EN,
123
+ "examples_footer": _EXAMPLES_FOOTER_EN,
124
+ },
125
+ "zh-CN": {
126
+ "reference_audio_label": "🎤 参考音频(可选 — 上传后用于克隆)",
127
+ "show_prompt_text_label": "🎙️ 极致克隆模式(基于文本引导的极致克隆)",
128
+ "show_prompt_text_info": "自动识别参考音频文本,完整还原音色、节奏、情感等全部声音细节。开启后 Control Instruction 将暂时禁用",
129
+ "prompt_text_label": "参考音频内容文本(ASR 自动填充,可手动编辑)",
130
+ "prompt_text_placeholder": "参考音频的文字内容将自动识别并显示在此处 …",
131
+ "control_label": "🎛️ Control Instruction(可选 — 支持中英文描述)",
132
+ "control_placeholder": "如:年轻女性,温柔甜美 / A warm young woman / 暴躁老哥,语速飞快",
133
+ "target_text_label": "✍️ Target Text — 要合成的目标文本",
134
+ "generate_btn": "🔊 开始生成",
135
+ "generated_audio_label": "生成结果",
136
+ "advanced_settings_title": "⚙️ 高级设置",
137
+ "ref_denoise_label": "参考音频降噪增强",
138
+ "ref_denoise_info": "克隆前使用 ZipEnhancer 对参考音频进行降噪处理",
139
+ "normalize_label": "文本规范化",
140
+ "normalize_info": "自动规范化数字、日期及缩写(基于 wetext)",
141
+ "cfg_label": "CFG(引导强度)",
142
+ "cfg_info": "数值越高 → 越贴合提示/参考音色;数值越低 → 生成风格更自由",
143
+ "dit_steps_label": "LocDiT 流匹配迭代步数",
144
+ "dit_steps_info": "LocDiT 流匹配生成迭代步数 — 步数越多 → 可能生成更好的音频质量,但速度变慢",
145
+ "usage_instructions": _USAGE_INSTRUCTIONS_ZH,
146
+ "examples_footer": _EXAMPLES_FOOTER_ZH,
147
+ },
148
+ "zh-Hans": None,
149
+ "zh": None,
150
+ }
151
+ _I18N_TRANSLATIONS["zh-Hans"] = _I18N_TRANSLATIONS["zh-CN"]
152
+ _I18N_TRANSLATIONS["zh"] = _I18N_TRANSLATIONS["zh-CN"]
153
+
154
+ for _d in _I18N_TRANSLATIONS.values():
155
+ if _d is not None:
156
+ for _k, _v in _I18N_TRANSLATIONS["en"].items():
157
+ _d.setdefault(_k, _v)
158
+
159
+ I18N = gr.I18n(**_I18N_TRANSLATIONS)
160
+
161
+ DEFAULT_TARGET_TEXT = (
162
+ "VoxCPM2 is a creative multilingual TTS model from ModelBest, "
163
+ "designed to generate highly realistic speech."
164
+ )
165
+
166
+ _CUSTOM_CSS = """
167
+ .logo-container {
168
+ text-align: center;
169
+ margin: 0.5rem 0 1rem 0;
170
+ }
171
+ .logo-container img {
172
+ height: 80px;
173
+ width: auto;
174
+ max-width: 200px;
175
+ display: inline-block;
176
+ }
177
+
178
+ /* Toggle switch style */
179
+ .switch-toggle {
180
+ padding: 8px 12px;
181
+ border-radius: 8px;
182
+ background: var(--block-background-fill);
183
+ }
184
+ .switch-toggle input[type="checkbox"] {
185
+ appearance: none;
186
+ -webkit-appearance: none;
187
+ width: 44px;
188
+ height: 24px;
189
+ background: #ccc;
190
+ border-radius: 12px;
191
+ position: relative;
192
+ cursor: pointer;
193
+ transition: background 0.3s ease;
194
+ flex-shrink: 0;
195
+ }
196
+ .switch-toggle input[type="checkbox"]::after {
197
+ content: "";
198
+ position: absolute;
199
+ top: 2px;
200
+ left: 2px;
201
+ width: 20px;
202
+ height: 20px;
203
+ background: white;
204
+ border-radius: 50%;
205
+ transition: transform 0.3s ease;
206
+ box-shadow: 0 1px 3px rgba(0,0,0,0.2);
207
+ }
208
+ .switch-toggle input[type="checkbox"]:checked {
209
+ background: var(--color-accent);
210
+ }
211
+ .switch-toggle input[type="checkbox"]:checked::after {
212
+ transform: translateX(20px);
213
+ }
214
+ """
215
+
216
+ _APP_THEME = gr.themes.Soft(
217
+ primary_hue="blue",
218
+ secondary_hue="gray",
219
+ neutral_hue="slate",
220
+ font=[gr.themes.GoogleFont("Inter"), "Arial", "sans-serif"],
221
+ )
222
+
223
+ # ---------- Model Pre-download & Loading ----------
224
+
225
+ ASR_LOCAL_DIR = "./models/SenseVoiceSmall"
226
+ VOXCPM_LOCAL_DIR = "./models/VoxCPM2"
227
 
 
228
  _asr_model = None
229
  _voxcpm_model = None
230
 
 
 
 
 
231
 
232
  def predownload_models():
233
+ from huggingface_hub import snapshot_download
234
+
235
+ if not os.path.isdir(ASR_LOCAL_DIR) or not os.path.exists(
236
+ os.path.join(ASR_LOCAL_DIR, "model.pt")
237
+ ):
238
+ logger.info(f"Pre-downloading ASR model to {ASR_LOCAL_DIR} ...")
239
+ os.makedirs(ASR_LOCAL_DIR, exist_ok=True)
 
 
 
240
  try:
 
 
 
 
241
  snapshot_download(
242
+ repo_id="FunAudioLLM/SenseVoiceSmall", local_dir=ASR_LOCAL_DIR
 
243
  )
244
+ logger.info("ASR model downloaded.")
245
+ except Exception as exc:
246
+ logger.warning(f"Failed to pre-download ASR model: {exc}")
247
  else:
248
+ logger.info(f"ASR model already at {ASR_LOCAL_DIR}")
249
+
250
+ voxcpm_repo_id = os.environ.get("HF_REPO_ID", "openbmb/VoxCPM2")
251
+ if not os.path.isdir(VOXCPM_LOCAL_DIR) or not os.path.exists(
252
+ os.path.join(VOXCPM_LOCAL_DIR, "config.json")
253
+ ):
254
+ logger.info(
255
+ f"Pre-downloading VoxCPM model {voxcpm_repo_id} to {VOXCPM_LOCAL_DIR} ..."
256
+ )
257
+ os.makedirs(VOXCPM_LOCAL_DIR, exist_ok=True)
258
  try:
259
+ snapshot_download(repo_id=voxcpm_repo_id, local_dir=VOXCPM_LOCAL_DIR)
260
+ logger.info("VoxCPM model downloaded.")
261
+ except Exception as exc:
262
+ logger.warning(f"Failed to pre-download VoxCPM model: {exc}")
 
 
 
 
 
 
 
263
  else:
264
+ logger.info(f"VoxCPM model already at {VOXCPM_LOCAL_DIR}")
 
 
 
 
265
 
266
 
 
267
  predownload_models()
268
 
269
 
270
  def get_asr_model():
 
271
  global _asr_model
272
  if _asr_model is None:
273
  from funasr import AutoModel
274
+
275
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
276
+ logger.info(f"Loading ASR model on {device} ...")
277
  _asr_model = AutoModel(
278
+ model=ASR_LOCAL_DIR,
279
  disable_update=True,
280
+ log_level="INFO",
281
+ device=device,
282
  )
283
+ logger.info("ASR model loaded.")
 
 
284
  return _asr_model
285
 
286
 
287
  def get_voxcpm_model():
 
288
  global _voxcpm_model
289
  if _voxcpm_model is None:
290
  import voxcpm
291
+
292
+ logger.info(f"Loading VoxCPM model from {VOXCPM_LOCAL_DIR} ...")
 
293
  _voxcpm_model = voxcpm.VoxCPM(
294
+ voxcpm_model_path=VOXCPM_LOCAL_DIR, optimize=True
 
 
295
  )
296
+ logger.info("VoxCPM model loaded.")
 
 
297
  return _voxcpm_model
298
 
299
 
300
+ # ---------- GPU-accelerated inference ----------
301
+
302
+
303
+ @spaces.GPU
304
+ def prompt_wav_recognition(use_prompt_text: bool, prompt_wav: Optional[str]) -> str:
305
+ if not use_prompt_text or prompt_wav is None or not prompt_wav.strip():
306
  return ""
307
+
 
308
  asr_model = get_asr_model()
 
309
  res = asr_model.generate(input=prompt_wav, language="auto", use_itn=True)
310
+ return res[0]["text"].split("|>")[-1]
 
 
 
 
 
311
 
312
 
313
+ @spaces.GPU(duration=600)
314
+ def generate_tts_audio(
315
  text_input: str,
316
+ control_instruction: str = "",
317
+ reference_wav_path_input: Optional[str] = None,
318
+ use_prompt_text: bool = False,
319
+ prompt_text_input: str = "",
320
  cfg_value_input: float = 2.0,
 
321
  do_normalize: bool = True,
322
+ denoise: bool = True,
323
+ inference_timesteps: int = 10,
324
  ) -> Tuple[int, np.ndarray]:
 
 
 
 
325
  voxcpm_model = get_voxcpm_model()
326
 
327
  text = (text_input or "").strip()
328
  if len(text) == 0:
329
  raise ValueError("Please input text to synthesize.")
330
 
331
+ control = (control_instruction or "").strip()
332
+ final_text = f"({control}){text}" if control and not use_prompt_text else text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
333
 
334
+ audio_path = reference_wav_path_input if reference_wav_path_input else None
335
+ prompt_text_clean = (prompt_text_input or "").strip() or None
336
+ if not use_prompt_text:
337
+ prompt_text_clean = None
338
 
339
+ if audio_path and prompt_text_clean:
340
+ logger.info("[Ultimate Cloning] reference audio + transcript")
341
+ elif audio_path:
342
+ logger.info("[Controllable Cloning] reference audio only")
343
+ else:
344
+ logger.info(f"[Voice Design] control: {control[:50] if control else 'None'}")
345
+
346
+ generate_kwargs = dict(
347
+ text=final_text,
348
+ reference_wav_path=audio_path,
349
+ cfg_value=float(cfg_value_input),
350
+ inference_timesteps=int(inference_timesteps),
351
+ normalize=do_normalize,
352
+ denoise=denoise,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
353
  )
354
+ if prompt_text_clean and audio_path:
355
+ generate_kwargs["prompt_wav_path"] = audio_path
356
+ generate_kwargs["prompt_text"] = prompt_text_clean
357
+
358
+ logger.info(f"Generating: '{final_text[:80]}...'")
359
+ wav = voxcpm_model.generate(**generate_kwargs)
360
+ return (voxcpm_model.tts_model.sample_rate, wav)
361
 
362
 
363
+ # ---------- UI ----------
364
+
365
 
366
  def create_demo_interface():
367
+ gr.set_static_paths(paths=[Path.cwd().absolute() / "assets"])
368
+
369
+ def _on_toggle_instant(checked):
370
+ if checked:
371
+ return (
372
+ gr.update(visible=True, value="", placeholder="Recognizing reference audio..."),
373
+ gr.update(visible=False),
374
+ )
375
+ return (
376
+ gr.update(visible=False),
377
+ gr.update(visible=True, interactive=True),
378
+ )
379
+
380
+ def _run_asr_if_needed(checked, audio_path):
381
+ if not checked or not audio_path:
382
+ return gr.update()
383
+ try:
384
+ logger.info("Running ASR on reference audio...")
385
+ asr_text = prompt_wav_recognition(True, audio_path)
386
+ logger.info(f"ASR result: {asr_text[:60]}...")
387
+ return gr.update(value=asr_text)
388
+ except Exception as e:
389
+ logger.warning(f"ASR recognition failed: {e}")
390
+ return gr.update(value="")
391
+
392
+ with gr.Blocks() as interface:
393
+ gr.HTML(
394
+ '<div class="logo-container">'
395
+ '<img src="/gradio_api/file=assets/voxcpm_logo.png" alt="VoxCPM Logo">'
396
+ "</div>"
397
+ )
398
+
399
+ gr.Markdown(I18N("usage_instructions"))
400
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
401
  with gr.Row():
402
  with gr.Column():
403
+ reference_wav = gr.Audio(
404
+ sources=["upload", "microphone"],
405
  type="filepath",
406
+ label=I18N("reference_audio_label"),
 
407
  )
408
+ show_prompt_text = gr.Checkbox(
409
+ value=False,
410
+ label=I18N("show_prompt_text_label"),
411
+ info=I18N("show_prompt_text_info"),
412
+ elem_classes=["switch-toggle"],
 
 
 
 
 
 
 
 
 
 
 
413
  )
414
+ prompt_text = gr.Textbox(
415
+ value="",
416
+ label=I18N("prompt_text_label"),
417
+ placeholder=I18N("prompt_text_placeholder"),
418
+ lines=2,
419
+ visible=False,
 
420
  )
421
+ control_instruction = gr.Textbox(
422
+ value="",
423
+ label=I18N("control_label"),
424
+ placeholder=I18N("control_placeholder"),
425
+ lines=2,
426
+ )
427
+ text = gr.Textbox(
428
+ value=DEFAULT_TARGET_TEXT,
429
+ label=I18N("target_text_label"),
430
+ lines=3,
431
+ )
432
+
433
+ with gr.Accordion(I18N("advanced_settings_title"), open=False):
434
+ DoDenoisePromptAudio = gr.Checkbox(
435
+ value=False,
436
+ label=I18N("ref_denoise_label"),
437
+ elem_classes=["switch-toggle"],
438
+ info=I18N("ref_denoise_info"),
439
  )
 
440
  DoNormalizeText = gr.Checkbox(
441
  value=False,
442
+ label=I18N("normalize_label"),
443
+ elem_classes=["switch-toggle"],
444
+ info=I18N("normalize_info"),
445
+ )
446
+ cfg_value = gr.Slider(
447
+ minimum=1.0,
448
+ maximum=3.0,
449
+ value=2.0,
450
+ step=0.1,
451
+ label=I18N("cfg_label"),
452
+ info=I18N("cfg_info"),
453
+ )
454
+ dit_steps = gr.Slider(
455
+ minimum=1,
456
+ maximum=50,
457
+ value=10,
458
+ step=1,
459
+ label=I18N("dit_steps_label"),
460
+ info=I18N("dit_steps_info"),
461
  )
 
462
 
463
+ run_btn = gr.Button(I18N("generate_btn"), variant="primary", size="lg")
464
+
465
+ with gr.Column():
466
+ audio_output = gr.Audio(label=I18N("generated_audio_label"))
467
+ gr.Markdown(I18N("examples_footer"))
468
+
469
+ show_prompt_text.change(
470
+ fn=_on_toggle_instant,
471
+ inputs=[show_prompt_text],
472
+ outputs=[prompt_text, control_instruction],
473
+ ).then(
474
+ fn=_run_asr_if_needed,
475
+ inputs=[show_prompt_text, reference_wav],
476
+ outputs=[prompt_text],
477
+ )
478
+
479
  run_btn.click(
480
  fn=generate_tts_audio,
481
+ inputs=[
482
+ text,
483
+ control_instruction,
484
+ reference_wav,
485
+ show_prompt_text,
486
+ prompt_text,
487
+ cfg_value,
488
+ DoNormalizeText,
489
+ DoDenoisePromptAudio,
490
+ dit_steps,
491
+ ],
492
  outputs=[audio_output],
493
  show_progress=True,
494
  api_name="generate",
495
  )
 
496
 
497
  return interface
498
 
499
 
500
+ def run_demo(
501
+ server_name: str = "0.0.0.0", server_port: int = 7860, show_error: bool = True
502
+ ):
503
  interface = create_demo_interface()
504
+ interface.queue(max_size=10, default_concurrency_limit=1).launch(
505
+ server_name=server_name,
506
+ server_port=int(os.environ.get("PORT", server_port)),
507
+ show_error=show_error,
508
+ i18n=I18N,
509
+ theme=_APP_THEME,
510
+ css=_CUSTOM_CSS,
511
+ )
512
 
513
 
514
  if __name__ == "__main__":
515
+ run_demo()
assets/{voxcpm-logo.png → voxcpm_logo.png} RENAMED
File without changes
requirements.txt CHANGED
@@ -1,7 +1,5 @@
1
- # Core dependencies
2
- gradio>=4.0.0,<6.0.0
3
- requests>=2.25.0
4
  numpy>=1.21.0
5
- soundfile>=0.12.1
6
- voxcpm>=1.5.0
7
- torchcodec
 
1
+ huggingface-hub
2
+ funasr
 
3
  numpy>=1.21.0
4
+ spaces
5
+ voxcpm @ git+https://github.com/OpenBMB/VoxCPM.git@dev_2.0